]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
* regcache.c (realloc_register_cache): Invalidate inferior's
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
957f3f49
DE
43#ifndef ELFMAG0
44/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48#include <elf.h>
49#endif
efcbbd14
UW
50
51#ifndef SPUFS_MAGIC
52#define SPUFS_MAGIC 0x23c9b64e
53#endif
da6d8c04 54
32ca6d61
DJ
55#ifndef PTRACE_GETSIGINFO
56# define PTRACE_GETSIGINFO 0x4202
57# define PTRACE_SETSIGINFO 0x4203
58#endif
59
fd462a61
DJ
60#ifndef O_LARGEFILE
61#define O_LARGEFILE 0
62#endif
63
24a09b5f
DJ
64/* If the system headers did not provide the constants, hard-code the normal
65 values. */
66#ifndef PTRACE_EVENT_FORK
67
68#define PTRACE_SETOPTIONS 0x4200
69#define PTRACE_GETEVENTMSG 0x4201
70
71/* options set using PTRACE_SETOPTIONS */
72#define PTRACE_O_TRACESYSGOOD 0x00000001
73#define PTRACE_O_TRACEFORK 0x00000002
74#define PTRACE_O_TRACEVFORK 0x00000004
75#define PTRACE_O_TRACECLONE 0x00000008
76#define PTRACE_O_TRACEEXEC 0x00000010
77#define PTRACE_O_TRACEVFORKDONE 0x00000020
78#define PTRACE_O_TRACEEXIT 0x00000040
79
80/* Wait extended result codes for the above trace options. */
81#define PTRACE_EVENT_FORK 1
82#define PTRACE_EVENT_VFORK 2
83#define PTRACE_EVENT_CLONE 3
84#define PTRACE_EVENT_EXEC 4
85#define PTRACE_EVENT_VFORK_DONE 5
86#define PTRACE_EVENT_EXIT 6
87
88#endif /* PTRACE_EVENT_FORK */
89
90/* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93#ifndef __WALL
94#define __WALL 0x40000000 /* Wait for any child. */
95#endif
96
ec8ebe72
DE
97#ifndef W_STOPCODE
98#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99#endif
100
42c81e2a
DJ
101#ifdef __UCLIBC__
102#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103#define HAS_NOMMU
104#endif
105#endif
106
24a09b5f
DJ
107/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
611cb4a5 109
54a0b537 110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 115
54a0b537 116struct inferior_list all_lwps;
0d62e5e8 117
24a09b5f
DJ
118/* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122struct inferior_list stopped_pids;
123
0d62e5e8
DJ
124/* FIXME this is a bit of a hack, and could be removed. */
125int stopping_threads;
126
127/* FIXME make into a target method? */
24a09b5f 128int using_threads = 1;
24a09b5f 129
95954743
PA
130/* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
 133 the first inferior avoids reinitializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
d61ddec4
UW
137static int new_inferior;
138
2acc282a 139static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 140 int step, int signal, siginfo_t *info);
2bd7c093 141static void linux_resume (struct thread_resume *resume_info, size_t n);
54a0b537 142static void stop_all_lwps (void);
95954743 143static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 144static void *add_lwp (ptid_t ptid);
c35fafde 145static int linux_stopped_by_watchpoint (void);
95954743 146static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 147static int linux_core_of_thread (ptid_t ptid);
d50171e4
PA
148static void proceed_all_lwps (void);
149static void unstop_all_lwps (struct lwp_info *except);
d50171e4
PA
150static int finish_step_over (struct lwp_info *lwp);
151static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152static int kill_lwp (unsigned long lwpid, int signo);
153
154/* True if the low target can hardware single-step. Such targets
155 don't need a BREAKPOINT_REINSERT_ADDR callback. */
156
157static int
158can_hardware_single_step (void)
159{
160 return (the_low_target.breakpoint_reinsert_addr == NULL);
161}
162
163/* True if the low target supports memory breakpoints. If so, we'll
164 have a GET_PC implementation. */
165
166static int
167supports_breakpoints (void)
168{
169 return (the_low_target.get_pc != NULL);
170}
0d62e5e8
DJ
171
172struct pending_signals
173{
174 int signal;
32ca6d61 175 siginfo_t info;
0d62e5e8
DJ
176 struct pending_signals *prev;
177};
611cb4a5 178
14ce3065
DE
179#define PTRACE_ARG3_TYPE void *
180#define PTRACE_ARG4_TYPE void *
c6ecbae5 181#define PTRACE_XFER_TYPE long
da6d8c04 182
58caa3dc 183#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
184static char *disabled_regsets;
185static int num_regsets;
58caa3dc
DJ
186#endif
187
bd99dc85
PA
188/* The read/write ends of the pipe registered as waitable file in the
189 event loop. */
190static int linux_event_pipe[2] = { -1, -1 };
191
192/* True if we're currently in async mode. */
193#define target_is_async_p() (linux_event_pipe[0] != -1)
194
195static void send_sigstop (struct inferior_list_entry *entry);
196static void wait_for_sigstop (struct inferior_list_entry *entry);
197
d0722149
DE
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its result; pass MAXPATHLEN - 1
     so that a maximally long link can never fill the buffer and leave
     NAME2 unterminated (the memset above only helps if at least one
     byte is left untouched).  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      /* Resolved the link; hand back the executable's real path.  */
      free (name1);
      return name2;
    }
  else
    {
      /* Could not resolve (e.g. process already gone, or permission
	 denied); fall back to returning the /proc path itself.  */
      free (name2);
      return name1;
    }
}
223
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* A 64-bit ELF object carries the standard four magic bytes
     followed by ELFCLASS64 in the class slot of e_ident.  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    return 0;

  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
235
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int result;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* A file too short to contain a complete ELF header cannot be a
     64-bit ELF file.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
259
bd99dc85
PA
260static void
261delete_lwp (struct lwp_info *lwp)
262{
263 remove_thread (get_lwp_thread (lwp));
264 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 265 free (lwp->arch_private);
bd99dc85
PA
266 free (lwp);
267}
268
95954743
PA
269/* Add a process to the common process list, and set its private
270 data. */
271
272static struct process_info *
273linux_add_process (int pid, int attached)
274{
275 struct process_info *proc;
276
277 /* Is this the first process? If so, then set the arch. */
278 if (all_processes.head == NULL)
279 new_inferior = 1;
280
281 proc = add_process (pid, attached);
282 proc->private = xcalloc (1, sizeof (*proc->private));
283
aa5ca48f
DE
284 if (the_low_target.new_process != NULL)
285 proc->private->arch_private = the_low_target.new_process ();
286
95954743
PA
287 return proc;
288}
289
5091eb23
DE
290/* Remove a process from the common process list,
291 also freeing all private data. */
292
293static void
ca5c370d 294linux_remove_process (struct process_info *process)
5091eb23 295{
cdbfd419
PP
296 struct process_info_private *priv = process->private;
297
cdbfd419
PP
298 free (priv->arch_private);
299 free (priv);
5091eb23
DE
300 remove_process (process);
301}
302
07d4f67e
DE
303/* Wrapper function for waitpid which handles EINTR, and emulates
304 __WALL for systems where that is not available. */
305
306static int
307my_waitpid (int pid, int *status, int flags)
308{
309 int ret, out_errno;
310
311 if (debug_threads)
312 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
313
314 if (flags & __WALL)
315 {
316 sigset_t block_mask, org_mask, wake_mask;
317 int wnohang;
318
319 wnohang = (flags & WNOHANG) != 0;
320 flags &= ~(__WALL | __WCLONE);
321 flags |= WNOHANG;
322
323 /* Block all signals while here. This avoids knowing about
324 LinuxThread's signals. */
325 sigfillset (&block_mask);
326 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
327
328 /* ... except during the sigsuspend below. */
329 sigemptyset (&wake_mask);
330
331 while (1)
332 {
333 /* Since all signals are blocked, there's no need to check
334 for EINTR here. */
335 ret = waitpid (pid, status, flags);
336 out_errno = errno;
337
338 if (ret == -1 && out_errno != ECHILD)
339 break;
340 else if (ret > 0)
341 break;
342
343 if (flags & __WCLONE)
344 {
345 /* We've tried both flavors now. If WNOHANG is set,
346 there's nothing else to do, just bail out. */
347 if (wnohang)
348 break;
349
350 if (debug_threads)
351 fprintf (stderr, "blocking\n");
352
353 /* Block waiting for signals. */
354 sigsuspend (&wake_mask);
355 }
356
357 flags ^= __WCLONE;
358 }
359
360 sigprocmask (SIG_SETMASK, &org_mask, NULL);
361 }
362 else
363 {
364 do
365 ret = waitpid (pid, status, flags);
366 while (ret == -1 && errno == EINTR);
367 out_errno = errno;
368 }
369
370 if (debug_threads)
371 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
372 pid, flags, status ? *status : -1, ret);
373
374 errno = out_errno;
375 return ret;
376}
377
bd99dc85
PA
378/* Handle a GNU/Linux extended wait response. If we see a clone
379 event, we need to add the new LWP to our list (and not report the
380 trap to higher layers). */
0d62e5e8 381
24a09b5f 382static void
54a0b537 383handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
384{
385 int event = wstat >> 16;
54a0b537 386 struct lwp_info *new_lwp;
24a09b5f
DJ
387
388 if (event == PTRACE_EVENT_CLONE)
389 {
95954743 390 ptid_t ptid;
24a09b5f 391 unsigned long new_pid;
836acd6d 392 int ret, status = W_STOPCODE (SIGSTOP);
24a09b5f 393
bd99dc85 394 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
24a09b5f
DJ
395
396 /* If we haven't already seen the new PID stop, wait for it now. */
397 if (! pull_pid_from_list (&stopped_pids, new_pid))
398 {
399 /* The new child has a pending SIGSTOP. We can't affect it until it
400 hits the SIGSTOP, but we're already attached. */
401
97438e3f 402 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
403
404 if (ret == -1)
405 perror_with_name ("waiting for new child");
406 else if (ret != new_pid)
407 warning ("wait returned unexpected PID %d", ret);
da5898ce 408 else if (!WIFSTOPPED (status))
24a09b5f
DJ
409 warning ("wait returned unexpected status 0x%x", status);
410 }
411
14ce3065 412 ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
24a09b5f 413
95954743
PA
414 ptid = ptid_build (pid_of (event_child), new_pid, 0);
415 new_lwp = (struct lwp_info *) add_lwp (ptid);
416 add_thread (ptid, new_lwp);
24a09b5f 417
e27d73f6
DE
418 /* Either we're going to immediately resume the new thread
419 or leave it stopped. linux_resume_one_lwp is a nop if it
420 thinks the thread is currently running, so set this first
421 before calling linux_resume_one_lwp. */
422 new_lwp->stopped = 1;
423
da5898ce
DJ
424 /* Normally we will get the pending SIGSTOP. But in some cases
425 we might get another signal delivered to the group first.
f21cc1a2 426 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
427 if (WSTOPSIG (status) == SIGSTOP)
428 {
d50171e4
PA
429 if (stopping_threads)
430 new_lwp->stop_pc = get_stop_pc (new_lwp);
431 else
e27d73f6 432 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 433 }
24a09b5f 434 else
da5898ce 435 {
54a0b537 436 new_lwp->stop_expected = 1;
d50171e4 437
da5898ce
DJ
438 if (stopping_threads)
439 {
d50171e4 440 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
441 new_lwp->status_pending_p = 1;
442 new_lwp->status_pending = status;
da5898ce
DJ
443 }
444 else
445 /* Pass the signal on. This is what GDB does - except
446 shouldn't we really report it instead? */
e27d73f6 447 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 448 }
24a09b5f
DJ
449
450 /* Always resume the current thread. If we are stopping
451 threads, it will have a pending SIGSTOP; we may as well
452 collect it now. */
2acc282a 453 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
454 }
455}
456
d50171e4
PA
457/* Return the PC as read from the regcache of LWP, without any
458 adjustment. */
459
460static CORE_ADDR
461get_pc (struct lwp_info *lwp)
462{
463 struct thread_info *saved_inferior;
464 struct regcache *regcache;
465 CORE_ADDR pc;
466
467 if (the_low_target.get_pc == NULL)
468 return 0;
469
470 saved_inferior = current_inferior;
471 current_inferior = get_lwp_thread (lwp);
472
473 regcache = get_thread_regcache (current_inferior, 1);
474 pc = (*the_low_target.get_pc) (regcache);
475
476 if (debug_threads)
477 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
478
479 current_inferior = saved_inferior;
480 return pc;
481}
482
483/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
484 The SIGTRAP could mean several things.
485
486 On i386, where decr_pc_after_break is non-zero:
487 If we were single-stepping this process using PTRACE_SINGLESTEP,
488 we will get only the one SIGTRAP (even if the instruction we
489 stepped over was a breakpoint). The value of $eip will be the
490 next instruction.
491 If we continue the process using PTRACE_CONT, we will get a
492 SIGTRAP when we hit a breakpoint. The value of $eip will be
493 the instruction after the breakpoint (i.e. needs to be
494 decremented). If we report the SIGTRAP to GDB, we must also
495 report the undecremented PC. If we cancel the SIGTRAP, we
496 must resume at the decremented PC.
497
498 (Presumably, not yet tested) On a non-decr_pc_after_break machine
499 with hardware or kernel single-step:
500 If we single-step over a breakpoint instruction, our PC will
501 point at the following instruction. If we continue and hit a
502 breakpoint instruction, our PC will point at the breakpoint
503 instruction. */
504
505static CORE_ADDR
d50171e4 506get_stop_pc (struct lwp_info *lwp)
0d62e5e8 507{
d50171e4
PA
508 CORE_ADDR stop_pc;
509
510 if (the_low_target.get_pc == NULL)
511 return 0;
0d62e5e8 512
d50171e4
PA
513 stop_pc = get_pc (lwp);
514
bdabb078
PA
515 if (WSTOPSIG (lwp->last_status) == SIGTRAP
516 && !lwp->stepping
517 && !lwp->stopped_by_watchpoint
518 && lwp->last_status >> 16 == 0)
47c0c975
DE
519 stop_pc -= the_low_target.decr_pc_after_break;
520
521 if (debug_threads)
522 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
523
524 return stop_pc;
0d62e5e8 525}
ce3a066d 526
0d62e5e8 527static void *
95954743 528add_lwp (ptid_t ptid)
611cb4a5 529{
54a0b537 530 struct lwp_info *lwp;
0d62e5e8 531
54a0b537
PA
532 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
533 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 534
95954743 535 lwp->head.id = ptid;
0d62e5e8 536
d50171e4
PA
537 lwp->last_resume_kind = resume_continue;
538
aa5ca48f
DE
539 if (the_low_target.new_thread != NULL)
540 lwp->arch_private = the_low_target.new_thread ();
541
54a0b537 542 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 543
54a0b537 544 return lwp;
0d62e5e8 545}
611cb4a5 546
da6d8c04
DJ
547/* Start an inferior process and returns its pid.
548 ALLARGS is a vector of program-name and args. */
549
ce3a066d
DJ
550static int
551linux_create_inferior (char *program, char **allargs)
da6d8c04 552{
a6dbe5df 553 struct lwp_info *new_lwp;
da6d8c04 554 int pid;
95954743 555 ptid_t ptid;
da6d8c04 556
42c81e2a 557#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
558 pid = vfork ();
559#else
da6d8c04 560 pid = fork ();
52fb6437 561#endif
da6d8c04
DJ
562 if (pid < 0)
563 perror_with_name ("fork");
564
565 if (pid == 0)
566 {
567 ptrace (PTRACE_TRACEME, 0, 0, 0);
568
60c3d7b0 569#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 570 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 571#endif
0d62e5e8 572
a9fa9f7d
DJ
573 setpgid (0, 0);
574
2b876972
DJ
575 execv (program, allargs);
576 if (errno == ENOENT)
577 execvp (program, allargs);
da6d8c04
DJ
578
579 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 580 strerror (errno));
da6d8c04
DJ
581 fflush (stderr);
582 _exit (0177);
583 }
584
95954743
PA
585 linux_add_process (pid, 0);
586
587 ptid = ptid_build (pid, pid, 0);
588 new_lwp = add_lwp (ptid);
589 add_thread (ptid, new_lwp);
a6dbe5df 590 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 591
a9fa9f7d 592 return pid;
da6d8c04
DJ
593}
594
595/* Attach to an inferior process. */
596
95954743
PA
597static void
598linux_attach_lwp_1 (unsigned long lwpid, int initial)
da6d8c04 599{
95954743 600 ptid_t ptid;
54a0b537 601 struct lwp_info *new_lwp;
611cb4a5 602
95954743 603 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
da6d8c04 604 {
95954743 605 if (!initial)
2d717e4f
DJ
606 {
607 /* If we fail to attach to an LWP, just warn. */
95954743 608 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
2d717e4f
DJ
609 strerror (errno), errno);
610 fflush (stderr);
611 return;
612 }
613 else
614 /* If we fail to attach to a process, report an error. */
95954743 615 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
43d5792c 616 strerror (errno), errno);
da6d8c04
DJ
617 }
618
95954743
PA
619 if (initial)
620 /* NOTE/FIXME: This lwp might have not been the tgid. */
621 ptid = ptid_build (lwpid, lwpid, 0);
622 else
623 {
624 /* Note that extracting the pid from the current inferior is
625 safe, since we're always called in the context of the same
626 process as this new thread. */
627 int pid = pid_of (get_thread_lwp (current_inferior));
628 ptid = ptid_build (pid, lwpid, 0);
629 }
24a09b5f 630
95954743
PA
631 new_lwp = (struct lwp_info *) add_lwp (ptid);
632 add_thread (ptid, new_lwp);
0d62e5e8 633
a6dbe5df
PA
634 /* We need to wait for SIGSTOP before being able to make the next
635 ptrace call on this LWP. */
636 new_lwp->must_set_ptrace_flags = 1;
637
0d62e5e8 638 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
639 brings it to a halt.
640
641 There are several cases to consider here:
642
643 1) gdbserver has already attached to the process and is being notified
1b3f6016 644 of a new thread that is being created.
d50171e4
PA
645 In this case we should ignore that SIGSTOP and resume the
646 process. This is handled below by setting stop_expected = 1,
647 and the fact that add_lwp sets last_resume_kind ==
648 resume_continue.
0e21c1ec
DE
649
650 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
651 to it via attach_inferior.
652 In this case we want the process thread to stop.
d50171e4
PA
653 This is handled by having linux_attach set last_resume_kind ==
654 resume_stop after we return.
1b3f6016
PA
655 ??? If the process already has several threads we leave the other
656 threads running.
0e21c1ec
DE
657
658 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
659 existing threads.
660 In this case we want the thread to stop.
661 FIXME: This case is currently not properly handled.
662 We should wait for the SIGSTOP but don't. Things work apparently
663 because enough time passes between when we ptrace (ATTACH) and when
664 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
665
666 On the other hand, if we are currently trying to stop all threads, we
667 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 668 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
669 end of the list, and so the new thread has not yet reached
670 wait_for_sigstop (but will). */
d50171e4 671 new_lwp->stop_expected = 1;
0d62e5e8
DJ
672}
673
95954743
PA
/* Public entry point: attach to the existing LWP with id LWPID.  A
   failure only warns (this is the non-initial attach path).  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
679
0d62e5e8 680int
a1928bad 681linux_attach (unsigned long pid)
0d62e5e8 682{
54a0b537 683 struct lwp_info *lwp;
0d62e5e8 684
95954743
PA
685 linux_attach_lwp_1 (pid, 1);
686
687 linux_add_process (pid, 1);
0d62e5e8 688
bd99dc85
PA
689 if (!non_stop)
690 {
691 /* Don't ignore the initial SIGSTOP if we just attached to this
692 process. It will be collected by wait shortly. */
95954743
PA
693 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
694 ptid_build (pid, pid, 0));
d50171e4 695 lwp->last_resume_kind = resume_stop;
bd99dc85 696 }
0d62e5e8 697
95954743
PA
698 return 0;
699}
700
701struct counter
702{
703 int pid;
704 int count;
705};
706
707static int
708second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
709{
710 struct counter *counter = args;
711
712 if (ptid_get_pid (entry->id) == counter->pid)
713 {
714 if (++counter->count > 1)
715 return 1;
716 }
d61ddec4 717
da6d8c04
DJ
718 return 0;
719}
720
95954743
PA
721static int
722last_thread_of_process_p (struct thread_info *thread)
723{
724 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
725 int pid = ptid_get_pid (ptid);
726 struct counter counter = { pid , 0 };
da6d8c04 727
95954743
PA
728 return (find_inferior (&all_threads,
729 second_thread_of_pid_p, &counter) == NULL);
730}
731
732/* Kill the inferior lwp. */
733
734static int
735linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
da6d8c04 736{
0d62e5e8 737 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 738 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 739 int wstat;
95954743
PA
740 int pid = * (int *) args;
741
742 if (ptid_get_pid (entry->id) != pid)
743 return 0;
0d62e5e8 744
fd500816
DJ
745 /* We avoid killing the first thread here, because of a Linux kernel (at
746 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
747 the children get a chance to be reaped, it will remain a zombie
748 forever. */
95954743 749
12b42a12 750 if (lwpid_of (lwp) == pid)
95954743
PA
751 {
752 if (debug_threads)
753 fprintf (stderr, "lkop: is last of process %s\n",
754 target_pid_to_str (entry->id));
755 return 0;
756 }
fd500816 757
bd99dc85
PA
758 /* If we're killing a running inferior, make sure it is stopped
759 first, as PTRACE_KILL will not work otherwise. */
760 if (!lwp->stopped)
761 send_sigstop (&lwp->head);
762
0d62e5e8
DJ
763 do
764 {
bd99dc85 765 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
0d62e5e8
DJ
766
767 /* Make sure it died. The loop is most likely unnecessary. */
95954743 768 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
bd99dc85 769 } while (pid > 0 && WIFSTOPPED (wstat));
95954743
PA
770
771 return 0;
da6d8c04
DJ
772}
773
95954743
PA
774static int
775linux_kill (int pid)
0d62e5e8 776{
95954743 777 struct process_info *process;
54a0b537 778 struct lwp_info *lwp;
95954743 779 struct thread_info *thread;
fd500816 780 int wstat;
95954743 781 int lwpid;
fd500816 782
95954743
PA
783 process = find_process_pid (pid);
784 if (process == NULL)
785 return -1;
9d606399 786
95954743 787 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
fd500816 788
54a0b537 789 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 790 thread in the list, so do so now. */
95954743
PA
791 lwp = find_lwp_pid (pid_to_ptid (pid));
792 thread = get_lwp_thread (lwp);
bd99dc85
PA
793
794 if (debug_threads)
95954743
PA
795 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
796 lwpid_of (lwp), pid);
bd99dc85
PA
797
798 /* If we're killing a running inferior, make sure it is stopped
799 first, as PTRACE_KILL will not work otherwise. */
800 if (!lwp->stopped)
801 send_sigstop (&lwp->head);
802
fd500816
DJ
803 do
804 {
bd99dc85 805 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
fd500816
DJ
806
807 /* Make sure it died. The loop is most likely unnecessary. */
95954743
PA
808 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
809 } while (lwpid > 0 && WIFSTOPPED (wstat));
2d717e4f 810
ca5c370d
PA
811#ifdef USE_THREAD_DB
812 thread_db_free (process, 0);
813#endif
bd99dc85 814 delete_lwp (lwp);
ca5c370d 815 linux_remove_process (process);
95954743 816 return 0;
0d62e5e8
DJ
817}
818
95954743
PA
819static int
820linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
821{
822 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 823 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
824 int pid = * (int *) args;
825
826 if (ptid_get_pid (entry->id) != pid)
827 return 0;
6ad8ae5c 828
bd99dc85
PA
829 /* If we're detaching from a running inferior, make sure it is
830 stopped first, as PTRACE_DETACH will not work otherwise. */
831 if (!lwp->stopped)
832 {
95954743 833 int lwpid = lwpid_of (lwp);
bd99dc85
PA
834
835 stopping_threads = 1;
836 send_sigstop (&lwp->head);
837
838 /* If this detects a new thread through a clone event, the new
839 thread is appended to the end of the lwp list, so we'll
840 eventually detach from it. */
841 wait_for_sigstop (&lwp->head);
842 stopping_threads = 0;
843
844 /* If LWP exits while we're trying to stop it, there's nothing
845 left to do. */
95954743 846 lwp = find_lwp_pid (pid_to_ptid (lwpid));
bd99dc85 847 if (lwp == NULL)
95954743 848 return 0;
bd99dc85
PA
849 }
850
ae13219e
DJ
851 /* If this process is stopped but is expecting a SIGSTOP, then make
852 sure we take care of that now. This isn't absolutely guaranteed
853 to collect the SIGSTOP, but is fairly likely to. */
54a0b537 854 if (lwp->stop_expected)
ae13219e 855 {
bd99dc85 856 int wstat;
ae13219e 857 /* Clear stop_expected, so that the SIGSTOP will be reported. */
54a0b537
PA
858 lwp->stop_expected = 0;
859 if (lwp->stopped)
2acc282a 860 linux_resume_one_lwp (lwp, 0, 0, NULL);
95954743 861 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
ae13219e
DJ
862 }
863
864 /* Flush any pending changes to the process's registers. */
865 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 866 get_lwp_thread (lwp));
ae13219e
DJ
867
868 /* Finally, let it resume. */
bd99dc85
PA
869 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
870
871 delete_lwp (lwp);
95954743 872 return 0;
6ad8ae5c
DJ
873}
874
dd6953e1 875static int
95954743 876any_thread_of (struct inferior_list_entry *entry, void *args)
6ad8ae5c 877{
95954743
PA
878 int *pid_p = args;
879
880 if (ptid_get_pid (entry->id) == *pid_p)
881 return 1;
882
883 return 0;
884}
885
886static int
887linux_detach (int pid)
888{
889 struct process_info *process;
890
891 process = find_process_pid (pid);
892 if (process == NULL)
893 return -1;
894
ca5c370d
PA
895#ifdef USE_THREAD_DB
896 thread_db_free (process, 1);
897#endif
898
95954743
PA
899 current_inferior =
900 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
901
ae13219e 902 delete_all_breakpoints ();
95954743 903 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
ca5c370d 904 linux_remove_process (process);
dd6953e1 905 return 0;
6ad8ae5c
DJ
906}
907
444d6139 908static void
95954743 909linux_join (int pid)
444d6139 910{
444d6139 911 int status, ret;
95954743 912 struct process_info *process;
bd99dc85 913
95954743
PA
914 process = find_process_pid (pid);
915 if (process == NULL)
916 return;
444d6139
PA
917
918 do {
95954743 919 ret = my_waitpid (pid, &status, 0);
444d6139
PA
920 if (WIFEXITED (status) || WIFSIGNALED (status))
921 break;
922 } while (ret != -1 || errno != ECHILD);
923}
924
6ad8ae5c 925/* Return nonzero if the given thread is still alive. */
0d62e5e8 926static int
95954743 927linux_thread_alive (ptid_t ptid)
0d62e5e8 928{
95954743
PA
929 struct lwp_info *lwp = find_lwp_pid (ptid);
930
931 /* We assume we always know if a thread exits. If a whole process
932 exited but we still haven't been able to report it to GDB, we'll
933 hold on to the last lwp of the dead process. */
934 if (lwp != NULL)
935 return !lwp->dead;
0d62e5e8
DJ
936 else
937 return 0;
938}
939
6bf5e0ba 940/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 941static int
d50171e4 942status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 943{
54a0b537 944 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 945 ptid_t ptid = * (ptid_t *) arg;
d50171e4 946 struct thread_info *thread = get_lwp_thread (lwp);
95954743
PA
947
948 /* Check if we're only interested in events from a specific process
949 or its lwps. */
950 if (!ptid_equal (minus_one_ptid, ptid)
951 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
952 return 0;
0d62e5e8 953
d50171e4
PA
954 thread = get_lwp_thread (lwp);
955
956 /* If we got a `vCont;t', but we haven't reported a stop yet, do
957 report any status pending the LWP may have. */
958 if (lwp->last_resume_kind == resume_stop
959 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
960 return 0;
0d62e5e8 961
d50171e4 962 return lwp->status_pending_p;
0d62e5e8
DJ
963}
964
95954743
PA
965static int
966same_lwp (struct inferior_list_entry *entry, void *data)
967{
968 ptid_t ptid = *(ptid_t *) data;
969 int lwp;
970
971 if (ptid_get_lwp (ptid) != 0)
972 lwp = ptid_get_lwp (ptid);
973 else
974 lwp = ptid_get_pid (ptid);
975
976 if (ptid_get_lwp (entry->id) == lwp)
977 return 1;
978
979 return 0;
980}
981
982struct lwp_info *
983find_lwp_pid (ptid_t ptid)
984{
985 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
986}
987
/* Wait (via waitpid) for a stop/exit event from the LWP identified by
   PTID (minus_one_ptid means "any child").  The raw wait status is
   stored through WSTATP; OPTIONS is passed on to waitpid (with __WALL
   forced on).  Returns the lwp_info of the child that reported the
   event, or NULL when OPTIONS contains WNOHANG and nothing was
   pending.  On return the child is marked stopped and its
   watchpoint/stop-pc bookkeeping is refreshed.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  /* __WALL: wait for both clone and non-clone children.  */
  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Signals 32/33 are the LinuxThreads/NPTL real-time signals; they
     are too noisy to log.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* stopped_by_watchpoint reads from the current inferior, so
	     temporarily switch to the child's thread.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1119
219f2f23
PA
1120/* This function should only be called if the LWP got a SIGTRAP.
1121
1122 Handle any tracepoint steps or hits. Return true if a tracepoint
1123 event was handled, 0 otherwise. */
1124
1125static int
1126handle_tracepoints (struct lwp_info *lwp)
1127{
1128 struct thread_info *tinfo = get_lwp_thread (lwp);
1129 int tpoint_related_event = 0;
1130
1131 /* And we need to be sure that any all-threads-stopping doesn't try
1132 to move threads out of the jump pads, as it could deadlock the
1133 inferior (LWP could be in the jump pad, maybe even holding the
1134 lock.) */
1135
1136 /* Do any necessary step collect actions. */
1137 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1138
1139 /* See if we just hit a tracepoint and do its main collect
1140 actions. */
1141 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1142
1143 if (tpoint_related_event)
1144 {
1145 if (debug_threads)
1146 fprintf (stderr, "got a tracepoint event\n");
1147 return 1;
1148 }
1149
1150 return 0;
1151}
1152
d50171e4
PA
1153/* Arrange for a breakpoint to be hit again later. We don't keep the
1154 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1155 will handle the current event, eventually we will resume this LWP,
1156 and this breakpoint will trap again. */
1157
1158static int
1159cancel_breakpoint (struct lwp_info *lwp)
1160{
1161 struct thread_info *saved_inferior;
d50171e4
PA
1162
1163 /* There's nothing to do if we don't support breakpoints. */
1164 if (!supports_breakpoints ())
1165 return 0;
1166
d50171e4
PA
1167 /* breakpoint_at reads from current inferior. */
1168 saved_inferior = current_inferior;
1169 current_inferior = get_lwp_thread (lwp);
1170
1171 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1172 {
1173 if (debug_threads)
1174 fprintf (stderr,
1175 "CB: Push back breakpoint for %s\n",
fc7238bb 1176 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1177
1178 /* Back up the PC if necessary. */
1179 if (the_low_target.decr_pc_after_break)
1180 {
1181 struct regcache *regcache
fc7238bb 1182 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1183 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1184 }
1185
1186 current_inferior = saved_inferior;
1187 return 1;
1188 }
1189 else
1190 {
1191 if (debug_threads)
1192 fprintf (stderr,
1193 "CB: No breakpoint found at %s for [%s]\n",
1194 paddress (lwp->stop_pc),
fc7238bb 1195 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1196 }
1197
1198 current_inferior = saved_inferior;
1199 return 0;
1200}
1201
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  It is null_ptid when no step-over is in progress
   (linux_wait_1 asserts this before reporting a stop).  */
ptid_t step_over_bkpt;
1205
bd99dc85
PA
/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      /* Any LWP (or any LWP of the given process): search the whole
	 list for a pending status.  */
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      /* A specific LWP was requested.  */
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      /* Consume and report the previously stashed status.  */
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  /* The exiting thread may have been current_inferior; repoint
	     it at something still valid (all-stop) or nothing
	     (non-stop).  */
	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* Deferred PTRACE_SETOPTIONS: enable clone-tracing now that the
	 LWP is stopped.  */
      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      /* Bits above 16 in the wait status flag a ptrace extended event
	 (e.g. a clone).  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && !(WSTOPSIG (*wstat) == SIGSTOP
		    && event_child->stop_expected))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  /* Forward the original siginfo along with the signal, when
	     we can read it.  */
	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child, event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      /* A SIGSTOP we ourselves queued: swallow it unless GDB asked
	 for a stop or we're stopping all threads anyway.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (event_child->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1397
95954743
PA
1398static int
1399linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1400{
1401 ptid_t wait_ptid;
1402
1403 if (ptid_is_pid (ptid))
1404 {
1405 /* A request to wait for a specific tgid. This is not possible
1406 with waitpid, so instead, we wait for any child, and leave
1407 children we're not interested in right now with a pending
1408 status to report later. */
1409 wait_ptid = minus_one_ptid;
1410 }
1411 else
1412 wait_ptid = ptid;
1413
1414 while (1)
1415 {
1416 int event_pid;
1417
1418 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1419
1420 if (event_pid > 0
1421 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1422 {
1423 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1424
1425 if (! WIFSTOPPED (*wstat))
1426 mark_lwp_dead (event_child, *wstat);
1427 else
1428 {
1429 event_child->status_pending_p = 1;
1430 event_child->status_pending = *wstat;
1431 }
1432 }
1433 else
1434 return event_pid;
1435 }
1436}
1437
6bf5e0ba
PA
1438
1439/* Count the LWP's that have had events. */
1440
1441static int
1442count_events_callback (struct inferior_list_entry *entry, void *data)
1443{
1444 struct lwp_info *lp = (struct lwp_info *) entry;
1445 int *count = data;
1446
1447 gdb_assert (count != NULL);
1448
1449 /* Count only resumed LWPs that have a SIGTRAP event pending that
1450 should be reported to GDB. */
1451 if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1452 && lp->last_resume_kind != resume_stop
1453 && lp->status_pending_p
1454 && WIFSTOPPED (lp->status_pending)
1455 && WSTOPSIG (lp->status_pending) == SIGTRAP
1456 && !breakpoint_inserted_here (lp->stop_pc))
1457 (*count)++;
1458
1459 return 0;
1460}
1461
1462/* Select the LWP (if any) that is currently being single-stepped. */
1463
1464static int
1465select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1466{
1467 struct lwp_info *lp = (struct lwp_info *) entry;
1468
1469 if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1470 && lp->last_resume_kind == resume_step
1471 && lp->status_pending_p)
1472 return 1;
1473 else
1474 return 0;
1475}
1476
1477/* Select the Nth LWP that has had a SIGTRAP event that should be
1478 reported to GDB. */
1479
1480static int
1481select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1482{
1483 struct lwp_info *lp = (struct lwp_info *) entry;
1484 int *selector = data;
1485
1486 gdb_assert (selector != NULL);
1487
1488 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1489 if (lp->last_resume_kind != resume_stop
1490 && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1491 && lp->status_pending_p
1492 && WIFSTOPPED (lp->status_pending)
1493 && WSTOPSIG (lp->status_pending) == SIGTRAP
1494 && !breakpoint_inserted_here (lp->stop_pc))
1495 if ((*selector)-- == 0)
1496 return 1;
1497
1498 return 0;
1499}
1500
1501static int
1502cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1503{
1504 struct lwp_info *lp = (struct lwp_info *) entry;
1505 struct lwp_info *event_lp = data;
1506
1507 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1508 if (lp == event_lp)
1509 return 0;
1510
1511 /* If a LWP other than the LWP that we're reporting an event for has
1512 hit a GDB breakpoint (as opposed to some random trap signal),
1513 then just arrange for it to hit it again later. We don't keep
1514 the SIGTRAP status and don't forward the SIGTRAP signal to the
1515 LWP. We will handle the current event, eventually we will resume
1516 all LWPs, and this one will get its breakpoint trap again.
1517
1518 If we do not do this, then we run the risk that the user will
1519 delete or disable the breakpoint, but the LWP will have already
1520 tripped on it. */
1521
1522 if (lp->last_resume_kind != resume_stop
1523 && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
1524 && lp->status_pending_p
1525 && WIFSTOPPED (lp->status_pending)
1526 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1527 && !lp->stepping
1528 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1529 && cancel_breakpoint (lp))
1530 /* Throw away the SIGTRAP. */
1531 lp->status_pending_p = 0;
1532
1533 return 0;
1534}
1535
1536/* Select one LWP out of those that have events pending. */
1537
1538static void
1539select_event_lwp (struct lwp_info **orig_lp)
1540{
1541 int num_events = 0;
1542 int random_selector;
1543 struct lwp_info *event_lp;
1544
1545 /* Give preference to any LWP that is being single-stepped. */
1546 event_lp
1547 = (struct lwp_info *) find_inferior (&all_lwps,
1548 select_singlestep_lwp_callback, NULL);
1549 if (event_lp != NULL)
1550 {
1551 if (debug_threads)
1552 fprintf (stderr,
1553 "SEL: Select single-step %s\n",
1554 target_pid_to_str (ptid_of (event_lp)));
1555 }
1556 else
1557 {
1558 /* No single-stepping LWP. Select one at random, out of those
1559 which have had SIGTRAP events. */
1560
1561 /* First see how many SIGTRAP events we have. */
1562 find_inferior (&all_lwps, count_events_callback, &num_events);
1563
1564 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1565 random_selector = (int)
1566 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1567
1568 if (debug_threads && num_events > 1)
1569 fprintf (stderr,
1570 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1571 num_events, random_selector);
1572
1573 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1574 select_event_lwp_callback,
1575 &random_selector);
1576 }
1577
1578 if (event_lp != NULL)
1579 {
1580 /* Switch the event LWP. */
1581 *orig_lp = event_lp;
1582 }
1583}
1584
d50171e4
PA
/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  lwp->last_resume_kind = resume_stop;
}
1602
/* Set all LWP's states as "want-stopped".  Applies
   gdb_wants_lwp_stopped to every LWP in the global list.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1610
/* Wait for process, returns status.

   Core of the target's wait method: waits for an event, decides
   whether it should be handled internally (gdbserver breakpoints,
   step-overs, tracepoints) or reported to GDB, and fills OURSTATUS
   accordingly.  Returns the ptid of the thread whose stop is being
   reported, or null_ptid when TARGET_WNOHANG was given and nothing
   happened.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* During a step-over, wait (blocking) only for the stepping thread.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  /* Whole-process exit: tear down our per-process state and
	     report the exit/termination to GDB.  */
	  int pid = pid_of (event_child);
	  struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
	  thread_db_free (process, 0);
#endif
	  delete_lwp (event_child);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* A non-last thread exited; wait for something else.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check If GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or not support Z0
     breakpoints.  If we do, we're be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || event_child->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      /* NOTE(review): this inner debug_threads check is redundant —
	 we are already inside an `if (debug_threads)` block.  */
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      /* NOTE(review): this branch is identical to the one above; the
	 split exists only to keep the commentary distinct.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update it's
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  get_lwp_thread (event_child)->last_status = *ourstatus;
  return ptid_of (event_child);
}
1941
1942/* Get rid of any pending event in the pipe. */
1943static void
1944async_file_flush (void)
1945{
1946 int ret;
1947 char buf;
1948
1949 do
1950 ret = read (linux_event_pipe[0], &buf, 1);
1951 while (ret >= 0 || (ret == -1 && errno == EINTR));
1952}
1953
1954/* Put something in the pipe, so the event loop wakes up. */
1955static void
1956async_file_mark (void)
1957{
1958 int ret;
1959
1960 async_file_flush ();
1961
1962 do
1963 ret = write (linux_event_pipe[1], "+", 1);
1964 while (ret == 0 || (ret == -1 && errno == EINTR));
1965
1966 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1967 be awakened anyway. */
1968}
1969
95954743
PA
1970static ptid_t
1971linux_wait (ptid_t ptid,
1972 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 1973{
95954743 1974 ptid_t event_ptid;
bd99dc85
PA
1975
1976 if (debug_threads)
95954743 1977 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
1978
1979 /* Flush the async file first. */
1980 if (target_is_async_p ())
1981 async_file_flush ();
1982
95954743 1983 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
1984
1985 /* If at least one stop was reported, there may be more. A single
1986 SIGCHLD can signal more than one child stop. */
1987 if (target_is_async_p ()
1988 && (target_options & TARGET_WNOHANG) != 0
95954743 1989 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
1990 async_file_mark ();
1991
1992 return event_ptid;
da6d8c04
DJ
1993}
1994
/* Send signal SIGNO to the LWP with id LWPID.  Prefers the tkill
   syscall (which targets a single thread of an nptl process) and
   falls back to plain kill where tkill is unavailable.  */
static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    /* Latched once the kernel reports tkill as unimplemented, so we
       only probe a single time.  */
    static int tkill_unsupported;

    if (!tkill_unsupported)
      {
	int rc;

	errno = 0;
	rc = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return rc;
	tkill_unsupported = 1;
      }
  }
#endif

  /* Not using nptl threads (or no tkill): plain kill is fine.  */
  return kill (lwpid, signo);
}
2022
0d62e5e8
DJ
2023static void
2024send_sigstop (struct inferior_list_entry *entry)
2025{
54a0b537 2026 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2027 int pid;
0d62e5e8 2028
54a0b537 2029 if (lwp->stopped)
0d62e5e8
DJ
2030 return;
2031
bd99dc85
PA
2032 pid = lwpid_of (lwp);
2033
0d62e5e8
DJ
2034 /* If we already have a pending stop signal for this process, don't
2035 send another. */
54a0b537 2036 if (lwp->stop_expected)
0d62e5e8 2037 {
ae13219e 2038 if (debug_threads)
bd99dc85 2039 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2040
0d62e5e8
DJ
2041 return;
2042 }
2043
2044 if (debug_threads)
bd99dc85 2045 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2046
d50171e4 2047 lwp->stop_expected = 1;
bd99dc85 2048 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2049}
2050
95954743
PA
2051static void
2052mark_lwp_dead (struct lwp_info *lwp, int wstat)
2053{
2054 /* It's dead, really. */
2055 lwp->dead = 1;
2056
2057 /* Store the exit status for later. */
2058 lwp->status_pending_p = 1;
2059 lwp->status_pending = wstat;
2060
95954743
PA
2061 /* Prevent trying to stop it. */
2062 lwp->stopped = 1;
2063
2064 /* No further stops are expected from a dead lwp. */
2065 lwp->stop_expected = 0;
2066}
2067
0d62e5e8
DJ
2068static void
2069wait_for_sigstop (struct inferior_list_entry *entry)
2070{
54a0b537 2071 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2072 struct thread_info *saved_inferior;
a1928bad 2073 int wstat;
95954743
PA
2074 ptid_t saved_tid;
2075 ptid_t ptid;
d50171e4 2076 int pid;
0d62e5e8 2077
54a0b537 2078 if (lwp->stopped)
d50171e4
PA
2079 {
2080 if (debug_threads)
2081 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2082 lwpid_of (lwp));
2083 return;
2084 }
0d62e5e8
DJ
2085
2086 saved_inferior = current_inferior;
bd99dc85
PA
2087 if (saved_inferior != NULL)
2088 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2089 else
95954743 2090 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2091
95954743 2092 ptid = lwp->head.id;
bd99dc85 2093
d50171e4
PA
2094 if (debug_threads)
2095 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2096
2097 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2098
2099 /* If we stopped with a non-SIGSTOP signal, save it for later
2100 and record the pending SIGSTOP. If the process exited, just
2101 return. */
d50171e4 2102 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2103 {
2104 if (debug_threads)
d50171e4
PA
2105 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2106 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2107
d50171e4 2108 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2109 {
2110 if (debug_threads)
d50171e4
PA
2111 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2112 lwpid_of (lwp), wstat);
2113
c35fafde
PA
2114 lwp->status_pending_p = 1;
2115 lwp->status_pending = wstat;
2116 }
0d62e5e8 2117 }
d50171e4 2118 else
95954743
PA
2119 {
2120 if (debug_threads)
d50171e4 2121 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2122
d50171e4
PA
2123 lwp = find_lwp_pid (pid_to_ptid (pid));
2124 if (lwp)
2125 {
2126 /* Leave this status pending for the next time we're able to
2127 report it. In the mean time, we'll report this lwp as
2128 dead to GDB, so GDB doesn't try to read registers and
2129 memory from it. This can only happen if this was the
2130 last thread of the process; otherwise, PID is removed
2131 from the thread tables before linux_wait_for_event
2132 returns. */
2133 mark_lwp_dead (lwp, wstat);
2134 }
95954743 2135 }
0d62e5e8 2136
bd99dc85 2137 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2138 current_inferior = saved_inferior;
2139 else
2140 {
2141 if (debug_threads)
2142 fprintf (stderr, "Previously current thread died.\n");
2143
bd99dc85
PA
2144 if (non_stop)
2145 {
2146 /* We can't change the current inferior behind GDB's back,
2147 otherwise, a subsequent command may apply to the wrong
2148 process. */
2149 current_inferior = NULL;
2150 }
2151 else
2152 {
2153 /* Set a valid thread as current. */
2154 set_desired_inferior (0);
2155 }
0d62e5e8
DJ
2156 }
2157}
2158
2159static void
54a0b537 2160stop_all_lwps (void)
0d62e5e8
DJ
2161{
2162 stopping_threads = 1;
54a0b537
PA
2163 for_each_inferior (&all_lwps, send_sigstop);
2164 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2165 stopping_threads = 0;
2166}
2167
da6d8c04
DJ
2168/* Resume execution of the inferior process.
2169 If STEP is nonzero, single-step it.
2170 If SIGNAL is nonzero, give it that signal. */
2171
ce3a066d 2172static void
2acc282a 2173linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2174 int step, int signal, siginfo_t *info)
da6d8c04 2175{
0d62e5e8
DJ
2176 struct thread_info *saved_inferior;
2177
54a0b537 2178 if (lwp->stopped == 0)
0d62e5e8
DJ
2179 return;
2180
219f2f23
PA
2181 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2182 user used the "jump" command, or "set $pc = foo"). */
2183 if (lwp->stop_pc != get_pc (lwp))
2184 {
2185 /* Collecting 'while-stepping' actions doesn't make sense
2186 anymore. */
2187 release_while_stepping_state_list (get_lwp_thread (lwp));
2188 }
2189
0d62e5e8
DJ
2190 /* If we have pending signals or status, and a new signal, enqueue the
2191 signal. Also enqueue the signal if we are waiting to reinsert a
2192 breakpoint; it will be picked up again below. */
2193 if (signal != 0
54a0b537
PA
2194 && (lwp->status_pending_p || lwp->pending_signals != NULL
2195 || lwp->bp_reinsert != 0))
0d62e5e8
DJ
2196 {
2197 struct pending_signals *p_sig;
bca929d3 2198 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2199 p_sig->prev = lwp->pending_signals;
0d62e5e8 2200 p_sig->signal = signal;
32ca6d61
DJ
2201 if (info == NULL)
2202 memset (&p_sig->info, 0, sizeof (siginfo_t));
2203 else
2204 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2205 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2206 }
2207
d50171e4
PA
2208 if (lwp->status_pending_p)
2209 {
2210 if (debug_threads)
2211 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2212 " has pending status\n",
2213 lwpid_of (lwp), step ? "step" : "continue", signal,
2214 lwp->stop_expected ? "expected" : "not expected");
2215 return;
2216 }
0d62e5e8
DJ
2217
2218 saved_inferior = current_inferior;
54a0b537 2219 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2220
2221 if (debug_threads)
1b3f6016 2222 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2223 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2224 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2225
2226 /* This bit needs some thinking about. If we get a signal that
2227 we must report while a single-step reinsert is still pending,
2228 we often end up resuming the thread. It might be better to
2229 (ew) allow a stack of pending events; then we could be sure that
2230 the reinsert happened right away and not lose any signals.
2231
2232 Making this stack would also shrink the window in which breakpoints are
54a0b537 2233 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2234 complete correctness, so it won't solve that problem. It may be
2235 worthwhile just to solve this one, however. */
54a0b537 2236 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2237 {
2238 if (debug_threads)
d50171e4
PA
2239 fprintf (stderr, " pending reinsert at 0x%s\n",
2240 paddress (lwp->bp_reinsert));
2241
2242 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2243 {
2244 if (step == 0)
2245 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2246
2247 step = 1;
2248 }
0d62e5e8
DJ
2249
2250 /* Postpone any pending signal. It was enqueued above. */
2251 signal = 0;
2252 }
2253
219f2f23
PA
2254 /* If we have while-stepping actions in this thread set it stepping.
2255 If we have a signal to deliver, it may or may not be set to
2256 SIG_IGN, we don't know. Assume so, and allow collecting
2257 while-stepping into a signal handler. A possible smart thing to
2258 do would be to set an internal breakpoint at the signal return
2259 address, continue, and carry on catching this while-stepping
2260 action only when that breakpoint is hit. A future
2261 enhancement. */
2262 if (get_lwp_thread (lwp)->while_stepping != NULL
2263 && can_hardware_single_step ())
2264 {
2265 if (debug_threads)
2266 fprintf (stderr,
2267 "lwp %ld has a while-stepping action -> forcing step.\n",
2268 lwpid_of (lwp));
2269 step = 1;
2270 }
2271
aa691b87 2272 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2273 {
442ea881
PA
2274 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2275 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 2276 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
2277 }
2278
2279 /* If we have pending signals, consume one unless we are trying to reinsert
2280 a breakpoint. */
54a0b537 2281 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
0d62e5e8
DJ
2282 {
2283 struct pending_signals **p_sig;
2284
54a0b537 2285 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
2286 while ((*p_sig)->prev != NULL)
2287 p_sig = &(*p_sig)->prev;
2288
2289 signal = (*p_sig)->signal;
32ca6d61 2290 if ((*p_sig)->info.si_signo != 0)
bd99dc85 2291 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 2292
0d62e5e8
DJ
2293 free (*p_sig);
2294 *p_sig = NULL;
2295 }
2296
aa5ca48f
DE
2297 if (the_low_target.prepare_to_resume != NULL)
2298 the_low_target.prepare_to_resume (lwp);
2299
0d62e5e8 2300 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 2301 get_lwp_thread (lwp));
da6d8c04 2302 errno = 0;
54a0b537 2303 lwp->stopped = 0;
c3adc08c 2304 lwp->stopped_by_watchpoint = 0;
54a0b537 2305 lwp->stepping = step;
14ce3065
DE
2306 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2307 /* Coerce to a uintptr_t first to avoid potential gcc warning
2308 of coercing an 8 byte integer to a 4 byte pointer. */
2309 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
2310
2311 current_inferior = saved_inferior;
da6d8c04 2312 if (errno)
3221518c
UW
2313 {
2314 /* ESRCH from ptrace either means that the thread was already
2315 running (an error) or that it is gone (a race condition). If
2316 it's gone, we will get a notification the next time we wait,
2317 so we can ignore the error. We could differentiate these
2318 two, but it's tricky without waiting; the thread still exists
2319 as a zombie, so sending it signal 0 would succeed. So just
2320 ignore ESRCH. */
2321 if (errno == ESRCH)
2322 return;
2323
2324 perror_with_name ("ptrace");
2325 }
da6d8c04
DJ
2326}
2327
/* Argument block threaded through find_inferior to
   linux_set_resume_request: the array of N resume requests received
   from the client.  */
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
64386c31
DJ
2333
2334/* This function is called once per thread. We look up the thread
5544ad89
DJ
2335 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2336 resume request.
2337
2338 This algorithm is O(threads * resume elements), but resume elements
2339 is small (and will remain small at least until GDB supports thread
2340 suspension). */
2bd7c093
PA
2341static int
2342linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2343{
54a0b537 2344 struct lwp_info *lwp;
64386c31 2345 struct thread_info *thread;
5544ad89 2346 int ndx;
2bd7c093 2347 struct thread_resume_array *r;
64386c31
DJ
2348
2349 thread = (struct thread_info *) entry;
54a0b537 2350 lwp = get_thread_lwp (thread);
2bd7c093 2351 r = arg;
64386c31 2352
2bd7c093 2353 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
2354 {
2355 ptid_t ptid = r->resume[ndx].thread;
2356 if (ptid_equal (ptid, minus_one_ptid)
2357 || ptid_equal (ptid, entry->id)
2358 || (ptid_is_pid (ptid)
2359 && (ptid_get_pid (ptid) == pid_of (lwp)))
2360 || (ptid_get_lwp (ptid) == -1
2361 && (ptid_get_pid (ptid) == pid_of (lwp))))
2362 {
d50171e4
PA
2363 if (r->resume[ndx].kind == resume_stop
2364 && lwp->last_resume_kind == resume_stop)
2365 {
2366 if (debug_threads)
2367 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2368 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2369 ? "stopped"
2370 : "stopping",
2371 lwpid_of (lwp));
2372
2373 continue;
2374 }
2375
95954743 2376 lwp->resume = &r->resume[ndx];
d50171e4 2377 lwp->last_resume_kind = lwp->resume->kind;
95954743
PA
2378 return 0;
2379 }
2380 }
2bd7c093
PA
2381
2382 /* No resume action for this thread. */
2383 lwp->resume = NULL;
64386c31 2384
2bd7c093 2385 return 0;
5544ad89
DJ
2386}
2387
5544ad89 2388
bd99dc85
PA
2389/* Set *FLAG_P if this lwp has an interesting status pending. */
2390static int
2391resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 2392{
bd99dc85 2393 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 2394
bd99dc85
PA
2395 /* LWPs which will not be resumed are not interesting, because
2396 we might not wait for them next time through linux_wait. */
2bd7c093 2397 if (lwp->resume == NULL)
bd99dc85 2398 return 0;
64386c31 2399
bd99dc85 2400 if (lwp->status_pending_p)
d50171e4
PA
2401 * (int *) flag_p = 1;
2402
2403 return 0;
2404}
2405
2406/* Return 1 if this lwp that GDB wants running is stopped at an
2407 internal breakpoint that we need to step over. It assumes that any
2408 required STOP_PC adjustment has already been propagated to the
2409 inferior's regcache. */
2410
2411static int
2412need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2413{
2414 struct lwp_info *lwp = (struct lwp_info *) entry;
2415 struct thread_info *saved_inferior;
2416 CORE_ADDR pc;
2417
2418 /* LWPs which will not be resumed are not interesting, because we
2419 might not wait for them next time through linux_wait. */
2420
2421 if (!lwp->stopped)
2422 {
2423 if (debug_threads)
2424 fprintf (stderr,
2425 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2426 lwpid_of (lwp));
2427 return 0;
2428 }
2429
2430 if (lwp->last_resume_kind == resume_stop)
2431 {
2432 if (debug_threads)
2433 fprintf (stderr,
2434 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2435 lwpid_of (lwp));
2436 return 0;
2437 }
2438
2439 if (!lwp->need_step_over)
2440 {
2441 if (debug_threads)
2442 fprintf (stderr,
2443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2444 }
5544ad89 2445
bd99dc85 2446 if (lwp->status_pending_p)
d50171e4
PA
2447 {
2448 if (debug_threads)
2449 fprintf (stderr,
2450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2451 lwpid_of (lwp));
2452 return 0;
2453 }
2454
2455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2456 or we have. */
2457 pc = get_pc (lwp);
2458
2459 /* If the PC has changed since we stopped, then don't do anything,
2460 and let the breakpoint/tracepoint be hit. This happens if, for
2461 instance, GDB handled the decr_pc_after_break subtraction itself,
2462 GDB is OOL stepping this thread, or the user has issued a "jump"
2463 command, or poked thread's registers herself. */
2464 if (pc != lwp->stop_pc)
2465 {
2466 if (debug_threads)
2467 fprintf (stderr,
2468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2470 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2471
2472 lwp->need_step_over = 0;
2473 return 0;
2474 }
2475
2476 saved_inferior = current_inferior;
2477 current_inferior = get_lwp_thread (lwp);
2478
8b07ae33 2479 /* We can only step over breakpoints we know about. */
d50171e4
PA
2480 if (breakpoint_here (pc))
2481 {
8b07ae33
PA
2482 /* Don't step over a breakpoint that GDB expects to hit
2483 though. */
2484 if (gdb_breakpoint_here (pc))
2485 {
2486 if (debug_threads)
2487 fprintf (stderr,
2488 "Need step over [LWP %ld]? yes, but found"
2489 " GDB breakpoint at 0x%s; skipping step over\n",
2490 lwpid_of (lwp), paddress (pc));
d50171e4 2491
8b07ae33
PA
2492 current_inferior = saved_inferior;
2493 return 0;
2494 }
2495 else
2496 {
2497 if (debug_threads)
2498 fprintf (stderr,
2499 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2500 lwpid_of (lwp), paddress (pc));
d50171e4 2501
8b07ae33
PA
2502 /* We've found an lwp that needs stepping over --- return 1 so
2503 that find_inferior stops looking. */
2504 current_inferior = saved_inferior;
2505
2506 /* If the step over is cancelled, this is set again. */
2507 lwp->need_step_over = 0;
2508 return 1;
2509 }
d50171e4
PA
2510 }
2511
2512 current_inferior = saved_inferior;
2513
2514 if (debug_threads)
2515 fprintf (stderr,
2516 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2517 lwpid_of (lwp), paddress (pc));
c6ecbae5 2518
bd99dc85 2519 return 0;
5544ad89
DJ
2520}
2521
d50171e4
PA
2522/* Start a step-over operation on LWP. When LWP stopped at a
2523 breakpoint, to make progress, we need to remove the breakpoint out
2524 of the way. If we let other threads run while we do that, they may
2525 pass by the breakpoint location and miss hitting it. To avoid
2526 that, a step-over momentarily stops all threads while LWP is
2527 single-stepped while the breakpoint is temporarily uninserted from
2528 the inferior. When the single-step finishes, we reinsert the
2529 breakpoint, and let all threads that are supposed to be running,
2530 run again.
2531
2532 On targets that don't support hardware single-step, we don't
2533 currently support full software single-stepping. Instead, we only
2534 support stepping over the thread event breakpoint, by asking the
2535 low target where to place a reinsert breakpoint. Since this
2536 routine assumes the breakpoint being stepped over is a thread event
2537 breakpoint, it usually assumes the return address of the current
2538 function is a good enough place to set the reinsert breakpoint. */
2539
2540static int
2541start_step_over (struct lwp_info *lwp)
2542{
2543 struct thread_info *saved_inferior;
2544 CORE_ADDR pc;
2545 int step;
2546
2547 if (debug_threads)
2548 fprintf (stderr,
2549 "Starting step-over on LWP %ld. Stopping all threads\n",
2550 lwpid_of (lwp));
2551
2552 stop_all_lwps ();
2553
2554 if (debug_threads)
2555 fprintf (stderr, "Done stopping all threads for step-over.\n");
2556
2557 /* Note, we should always reach here with an already adjusted PC,
2558 either by GDB (if we're resuming due to GDB's request), or by our
2559 caller, if we just finished handling an internal breakpoint GDB
2560 shouldn't care about. */
2561 pc = get_pc (lwp);
2562
2563 saved_inferior = current_inferior;
2564 current_inferior = get_lwp_thread (lwp);
2565
2566 lwp->bp_reinsert = pc;
2567 uninsert_breakpoints_at (pc);
2568
2569 if (can_hardware_single_step ())
2570 {
2571 step = 1;
2572 }
2573 else
2574 {
2575 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2576 set_reinsert_breakpoint (raddr);
2577 step = 0;
2578 }
2579
2580 current_inferior = saved_inferior;
2581
2582 linux_resume_one_lwp (lwp, step, 0, NULL);
2583
2584 /* Require next event from this LWP. */
2585 step_over_bkpt = lwp->head.id;
2586 return 1;
2587}
2588
2589/* Finish a step-over. Reinsert the breakpoint we had uninserted in
2590 start_step_over, if still there, and delete any reinsert
2591 breakpoints we've set, on non hardware single-step targets. */
2592
2593static int
2594finish_step_over (struct lwp_info *lwp)
2595{
2596 if (lwp->bp_reinsert != 0)
2597 {
2598 if (debug_threads)
2599 fprintf (stderr, "Finished step over.\n");
2600
2601 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2602 may be no breakpoint to reinsert there by now. */
2603 reinsert_breakpoints_at (lwp->bp_reinsert);
2604
2605 lwp->bp_reinsert = 0;
2606
2607 /* Delete any software-single-step reinsert breakpoints. No
2608 longer needed. We don't have to worry about other threads
2609 hitting this trap, and later not being able to explain it,
2610 because we were stepping over a breakpoint, and we hold all
2611 threads but LWP stopped while doing that. */
2612 if (!can_hardware_single_step ())
2613 delete_reinsert_breakpoints ();
2614
2615 step_over_bkpt = null_ptid;
2616 return 1;
2617 }
2618 else
2619 return 0;
2620}
2621
5544ad89
DJ
2622/* This function is called once per thread. We check the thread's resume
2623 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 2624 stopped; and what signal, if any, it should be sent.
5544ad89 2625
bd99dc85
PA
2626 For threads which we aren't explicitly told otherwise, we preserve
2627 the stepping flag; this is used for stepping over gdbserver-placed
2628 breakpoints.
2629
2630 If pending_flags was set in any thread, we queue any needed
2631 signals, since we won't actually resume. We already have a pending
2632 event to report, so we don't need to preserve any step requests;
2633 they should be re-issued if necessary. */
2634
2635static int
2636linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 2637{
54a0b537 2638 struct lwp_info *lwp;
5544ad89 2639 struct thread_info *thread;
bd99dc85 2640 int step;
d50171e4
PA
2641 int leave_all_stopped = * (int *) arg;
2642 int leave_pending;
5544ad89
DJ
2643
2644 thread = (struct thread_info *) entry;
54a0b537 2645 lwp = get_thread_lwp (thread);
5544ad89 2646
2bd7c093 2647 if (lwp->resume == NULL)
bd99dc85 2648 return 0;
5544ad89 2649
bd99dc85 2650 if (lwp->resume->kind == resume_stop)
5544ad89 2651 {
bd99dc85 2652 if (debug_threads)
d50171e4 2653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
2654
2655 if (!lwp->stopped)
2656 {
2657 if (debug_threads)
d50171e4 2658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 2659
d50171e4
PA
2660 /* Stop the thread, and wait for the event asynchronously,
2661 through the event loop. */
bd99dc85
PA
2662 send_sigstop (&lwp->head);
2663 }
2664 else
2665 {
2666 if (debug_threads)
d50171e4
PA
2667 fprintf (stderr, "already stopped LWP %ld\n",
2668 lwpid_of (lwp));
2669
2670 /* The LWP may have been stopped in an internal event that
2671 was not meant to be notified back to GDB (e.g., gdbserver
2672 breakpoint), so we should be reporting a stop event in
2673 this case too. */
2674
2675 /* If the thread already has a pending SIGSTOP, this is a
2676 no-op. Otherwise, something later will presumably resume
2677 the thread and this will cause it to cancel any pending
2678 operation, due to last_resume_kind == resume_stop. If
2679 the thread already has a pending status to report, we
2680 will still report it the next time we wait - see
2681 status_pending_p_callback. */
2682 send_sigstop (&lwp->head);
bd99dc85 2683 }
32ca6d61 2684
bd99dc85
PA
2685 /* For stop requests, we're done. */
2686 lwp->resume = NULL;
fc7238bb 2687 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2688 return 0;
5544ad89
DJ
2689 }
2690
bd99dc85
PA
2691 /* If this thread which is about to be resumed has a pending status,
2692 then don't resume any threads - we can just report the pending
2693 status. Make sure to queue any signals that would otherwise be
2694 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
2695 thread has a pending status. If there's a thread that needs the
2696 step-over-breakpoint dance, then don't resume any other thread
2697 but that particular one. */
2698 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 2699
d50171e4 2700 if (!leave_pending)
bd99dc85
PA
2701 {
2702 if (debug_threads)
2703 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 2704
d50171e4 2705 step = (lwp->resume->kind == resume_step);
2acc282a 2706 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
2707 }
2708 else
2709 {
2710 if (debug_threads)
2711 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 2712
bd99dc85
PA
2713 /* If we have a new signal, enqueue the signal. */
2714 if (lwp->resume->sig != 0)
2715 {
2716 struct pending_signals *p_sig;
2717 p_sig = xmalloc (sizeof (*p_sig));
2718 p_sig->prev = lwp->pending_signals;
2719 p_sig->signal = lwp->resume->sig;
2720 memset (&p_sig->info, 0, sizeof (siginfo_t));
2721
2722 /* If this is the same signal we were previously stopped by,
2723 make sure to queue its siginfo. We can ignore the return
2724 value of ptrace; if it fails, we'll skip
2725 PTRACE_SETSIGINFO. */
2726 if (WIFSTOPPED (lwp->last_status)
2727 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2728 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2729
2730 lwp->pending_signals = p_sig;
2731 }
2732 }
5544ad89 2733
fc7238bb 2734 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 2735 lwp->resume = NULL;
5544ad89 2736 return 0;
0d62e5e8
DJ
2737}
2738
2739static void
2bd7c093 2740linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 2741{
2bd7c093 2742 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
2743 struct lwp_info *need_step_over = NULL;
2744 int any_pending;
2745 int leave_all_stopped;
c6ecbae5 2746
2bd7c093 2747 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 2748
d50171e4
PA
2749 /* If there is a thread which would otherwise be resumed, which has
2750 a pending status, then don't resume any threads - we can just
2751 report the pending status. Make sure to queue any signals that
2752 would otherwise be sent. In non-stop mode, we'll apply this
2753 logic to each thread individually. We consume all pending events
2754 before considering to start a step-over (in all-stop). */
2755 any_pending = 0;
bd99dc85 2756 if (!non_stop)
d50171e4
PA
2757 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2758
2759 /* If there is a thread which would otherwise be resumed, which is
2760 stopped at a breakpoint that needs stepping over, then don't
2761 resume any threads - have it step over the breakpoint with all
2762 other threads stopped, then resume all threads again. Make sure
2763 to queue any signals that would otherwise be delivered or
2764 queued. */
2765 if (!any_pending && supports_breakpoints ())
2766 need_step_over
2767 = (struct lwp_info *) find_inferior (&all_lwps,
2768 need_step_over_p, NULL);
2769
2770 leave_all_stopped = (need_step_over != NULL || any_pending);
2771
2772 if (debug_threads)
2773 {
2774 if (need_step_over != NULL)
2775 fprintf (stderr, "Not resuming all, need step over\n");
2776 else if (any_pending)
2777 fprintf (stderr,
2778 "Not resuming, all-stop and found "
2779 "an LWP with pending status\n");
2780 else
2781 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2782 }
2783
2784 /* Even if we're leaving threads stopped, queue all signals we'd
2785 otherwise deliver. */
2786 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2787
2788 if (need_step_over)
2789 start_step_over (need_step_over);
2790}
2791
2792/* This function is called once per thread. We check the thread's
2793 last resume request, which will tell us whether to resume, step, or
2794 leave the thread stopped. Any signal the client requested to be
2795 delivered has already been enqueued at this point.
2796
2797 If any thread that GDB wants running is stopped at an internal
2798 breakpoint that needs stepping over, we start a step-over operation
2799 on that particular thread, and leave all others stopped. */
2800
2801static void
2802proceed_one_lwp (struct inferior_list_entry *entry)
2803{
2804 struct lwp_info *lwp;
2805 int step;
2806
2807 lwp = (struct lwp_info *) entry;
2808
2809 if (debug_threads)
2810 fprintf (stderr,
2811 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2812
2813 if (!lwp->stopped)
2814 {
2815 if (debug_threads)
2816 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2817 return;
2818 }
2819
2820 if (lwp->last_resume_kind == resume_stop)
2821 {
2822 if (debug_threads)
2823 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2824 return;
2825 }
2826
2827 if (lwp->status_pending_p)
2828 {
2829 if (debug_threads)
2830 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2831 lwpid_of (lwp));
2832 return;
2833 }
2834
2835 if (lwp->suspended)
2836 {
2837 if (debug_threads)
2838 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2839 return;
2840 }
2841
2842 step = lwp->last_resume_kind == resume_step;
2843 linux_resume_one_lwp (lwp, step, 0, NULL);
2844}
2845
2846/* When we finish a step-over, set threads running again. If there's
2847 another thread that may need a step-over, now's the time to start
2848 it. Eventually, we'll move all threads past their breakpoints. */
2849
2850static void
2851proceed_all_lwps (void)
2852{
2853 struct lwp_info *need_step_over;
2854
2855 /* If there is a thread which would otherwise be resumed, which is
2856 stopped at a breakpoint that needs stepping over, then don't
2857 resume any threads - have it step over the breakpoint with all
2858 other threads stopped, then resume all threads again. */
2859
2860 if (supports_breakpoints ())
2861 {
2862 need_step_over
2863 = (struct lwp_info *) find_inferior (&all_lwps,
2864 need_step_over_p, NULL);
2865
2866 if (need_step_over != NULL)
2867 {
2868 if (debug_threads)
2869 fprintf (stderr, "proceed_all_lwps: found "
2870 "thread %ld needing a step-over\n",
2871 lwpid_of (need_step_over));
2872
2873 start_step_over (need_step_over);
2874 return;
2875 }
2876 }
5544ad89 2877
d50171e4
PA
2878 if (debug_threads)
2879 fprintf (stderr, "Proceeding, no step-over needed\n");
2880
2881 for_each_inferior (&all_lwps, proceed_one_lwp);
2882}
2883
2884/* Stopped LWPs that the client wanted to be running, that don't have
2885 pending statuses, are set to run again, except for EXCEPT, if not
2886 NULL. This undoes a stop_all_lwps call. */
2887
2888static void
2889unstop_all_lwps (struct lwp_info *except)
2890{
5544ad89
DJ
2891 if (debug_threads)
2892 {
d50171e4
PA
2893 if (except)
2894 fprintf (stderr,
2895 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 2896 else
d50171e4
PA
2897 fprintf (stderr,
2898 "unstopping all lwps\n");
5544ad89
DJ
2899 }
2900
d50171e4
PA
2901 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2902 if (except != NULL)
2903 ++except->suspended;
2904
2905 for_each_inferior (&all_lwps, proceed_one_lwp);
2906
2907 if (except != NULL)
2908 --except->suspended;
0d62e5e8
DJ
2909}
2910
2911#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
2912
2913int
0a30fbc4 2914register_addr (int regnum)
da6d8c04
DJ
2915{
2916 int addr;
2917
2ec06d2e 2918 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
2919 error ("Invalid register number %d.", regnum);
2920
2ec06d2e 2921 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
2922
2923 return addr;
2924}
2925
/* Fetch one register, REGNO, from the inferior with PTRACE_PEEKUSER
   and supply it to REGCACHE.  Silently skips registers that are out
   of range, that the target declares unfetchable, or that have no
   ptrace offset (register_addr of -1).  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PEEKUSER's return value is the data, so errors can only be
	 detected through errno; clear it before each call.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the target massage the raw bytes if needed (e.g. different
     layout between ptrace and the regcache); otherwise supply them
     directly.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
2966
2967/* Fetch all registers, or just one, from the child process. */
58caa3dc 2968static void
442ea881 2969usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 2970{
4463ce24 2971 if (regno == -1)
2ec06d2e 2972 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 2973 fetch_register (regcache, regno);
da6d8c04 2974 else
442ea881 2975 fetch_register (regcache, regno);
da6d8c04
DJ
2976}
2977
2978/* Store our register values back into the inferior.
2979 If REGNO is -1, do this for all registers.
2980 Otherwise, REGNO specifies which register (so we can save time). */
/* Store our register values back into the inferior with
   PTRACE_POKEUSER.  If REGNO is -1, do this for all registers;
   otherwise only REGNO.  Registers the target declares unstorable,
   or without a ptrace offset, are skipped silently.  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round the register size up to a whole number of ptrace
	 words, and zero the buffer so any padding bytes are
	 well-defined.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	     & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      /* Let the target convert regcache layout to ptrace layout if
	 it needs to; otherwise copy the bytes straight out.  */
      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      /* Only complain for registers the target did not flag
		 as possibly-unstorable (cannot_store_register of 2
		 means "may legitimately fail").  */
	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    /* REGNO of -1: recurse once per register.  */
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
58caa3dc
DJ
3039#endif /* HAVE_LINUX_USRREGS */
3040
3041
3042
3043#ifdef HAVE_LINUX_REGSETS
3044
/* Fetch registers via the PTRACE_GETREGSET-style regset requests and
   store them into REGCACHE.  Returns 0 if the general registers were
   among the regsets fetched, 1 otherwise (meaning the caller should
   fall back to another method).  Regsets that the kernel rejects
   with EIO are disabled for the rest of this process's lifetime.  */
static int
regsets_fetch_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* target_regsets is terminated by an entry with size < 0.  */
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* Regsets with an NT type use the iovec-based PTRACE_GETREGSET
	 interface; the others pass the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      /* SPARC kernels take the data pointer as the third argument.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	      /* NOTE(review): on this path BUF was not filled by the
		 kernel, yet it is still passed to store_function
		 below — looks intentional as best-effort, but worth
		 confirming.  */
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
3113
3114static int
442ea881 3115regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3116{
3117 struct regset_info *regset;
e9d25b98 3118 int saw_general_regs = 0;
95954743 3119 int pid;
1570b33e 3120 struct iovec iov;
58caa3dc
DJ
3121
3122 regset = target_regsets;
3123
95954743 3124 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3125 while (regset->size >= 0)
3126 {
1570b33e
L
3127 void *buf, *data;
3128 int nt_type, res;
58caa3dc 3129
52fa2412 3130 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3131 {
3132 regset ++;
3133 continue;
3134 }
3135
bca929d3 3136 buf = xmalloc (regset->size);
545587ee
DJ
3137
3138 /* First fill the buffer with the current register set contents,
3139 in case there are any items in the kernel's regset that are
3140 not in gdbserver's regcache. */
1570b33e
L
3141
3142 nt_type = regset->nt_type;
3143 if (nt_type)
3144 {
3145 iov.iov_base = buf;
3146 iov.iov_len = regset->size;
3147 data = (void *) &iov;
3148 }
3149 else
3150 data = buf;
3151
dfb64f85 3152#ifndef __sparc__
1570b33e 3153 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3154#else
1570b33e 3155 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3156#endif
545587ee
DJ
3157
3158 if (res == 0)
3159 {
3160 /* Then overlay our cached registers on that. */
442ea881 3161 regset->fill_function (regcache, buf);
545587ee
DJ
3162
3163 /* Only now do we write the register set. */
dfb64f85 3164#ifndef __sparc__
1570b33e 3165 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3166#else
1570b33e 3167 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3168#endif
545587ee
DJ
3169 }
3170
58caa3dc
DJ
3171 if (res < 0)
3172 {
3173 if (errno == EIO)
3174 {
52fa2412
UW
3175 /* If we get EIO on a regset, do not try it again for
3176 this process. */
3177 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3178 free (buf);
52fa2412 3179 continue;
58caa3dc 3180 }
3221518c
UW
3181 else if (errno == ESRCH)
3182 {
1b3f6016
PA
3183 /* At this point, ESRCH should mean the process is
3184 already gone, in which case we simply ignore attempts
3185 to change its registers. See also the related
3186 comment in linux_resume_one_lwp. */
fdeb2a12 3187 free (buf);
3221518c
UW
3188 return 0;
3189 }
58caa3dc
DJ
3190 else
3191 {
ce3a066d 3192 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3193 }
3194 }
e9d25b98
DJ
3195 else if (regset->type == GENERAL_REGS)
3196 saw_general_regs = 1;
58caa3dc 3197 regset ++;
09ec9b38 3198 free (buf);
58caa3dc 3199 }
e9d25b98
DJ
3200 if (saw_general_regs)
3201 return 0;
3202 else
3203 return 1;
ce3a066d 3204 return 0;
58caa3dc
DJ
3205}
3206
3207#endif /* HAVE_LINUX_REGSETS */
3208
3209
/* Fetch registers (all of them if REGNO is -1) into REGCACHE.  Try
   the regset method first; fall back to one-at-a-time PEEKUSER when
   the regsets did not cover the general registers.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Zero return means the general registers were handled.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3221
/* Write registers (all of them if REGNO is -1) from REGCACHE to the
   inferior.  Mirror of linux_fetch_registers: regsets first, then
   POKEUSER as fallback.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Zero return means the general registers were handled.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3233
da6d8c04 3234
da6d8c04
DJ
3235/* Copy LEN bytes from inferior's memory starting at MEMADDR
3236 to debugger memory starting at MYADDR. */
3237
/* Copy LEN bytes from the inferior's memory starting at MEMADDR to
   debugger memory starting at MYADDR.  Returns 0 on success, or an
   errno value on failure.  Prefers /proc/PID/mem for larger reads,
   falling back to word-at-a-time PTRACE_PEEKTEXT.  */
static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  The offset skips the
     low bytes of the first word that precede MEMADDR.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
3303
93ae6fdc
PA
3304/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3305 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
3306 returns the value of errno. */
3307
ce3a066d 3308static int
f450004a 3309linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
3310{
3311 register int i;
3312 /* Round starting address down to longword boundary. */
3313 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3314 /* Round ending address up; get number of longwords that makes. */
3315 register int count
3316 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3317 /* Allocate buffer of that many longwords. */
3318 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 3319 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 3320
0d62e5e8
DJ
3321 if (debug_threads)
3322 {
58d6951d
DJ
3323 /* Dump up to four bytes. */
3324 unsigned int val = * (unsigned int *) myaddr;
3325 if (len == 1)
3326 val = val & 0xff;
3327 else if (len == 2)
3328 val = val & 0xffff;
3329 else if (len == 3)
3330 val = val & 0xffffff;
3331 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3332 val, (long)memaddr);
0d62e5e8
DJ
3333 }
3334
da6d8c04
DJ
3335 /* Fill start and end extra bytes of buffer with existing memory data. */
3336
93ae6fdc 3337 errno = 0;
14ce3065
DE
3338 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3339 about coercing an 8 byte integer to a 4 byte pointer. */
3340 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3341 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
3342 if (errno)
3343 return errno;
da6d8c04
DJ
3344
3345 if (count > 1)
3346 {
93ae6fdc 3347 errno = 0;
da6d8c04 3348 buffer[count - 1]
95954743 3349 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
3350 /* Coerce to a uintptr_t first to avoid potential gcc warning
3351 about coercing an 8 byte integer to a 4 byte pointer. */
3352 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3353 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 3354 0);
93ae6fdc
PA
3355 if (errno)
3356 return errno;
da6d8c04
DJ
3357 }
3358
93ae6fdc 3359 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
3360
3361 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3362
3363 /* Write the entire buffer. */
3364
3365 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3366 {
3367 errno = 0;
14ce3065
DE
3368 ptrace (PTRACE_POKETEXT, pid,
3369 /* Coerce to a uintptr_t first to avoid potential gcc warning
3370 about coercing an 8 byte integer to a 4 byte pointer. */
3371 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3372 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
3373 if (errno)
3374 return errno;
3375 }
3376
3377 return 0;
3378}
2f2893d9 3379
6076632b 3380/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
3381static int linux_supports_tracefork_flag;
3382
51c2684e 3383/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 3384
51c2684e
DJ
static int
linux_tracefork_grandchild (void *arg)
{
  /* Exit immediately; the tracer only cares about observing the fork
     event itself, not about this process doing anything.  */
  _exit (0);
}
3390
7407e2de
AS
3391#define STACK_SIZE 4096
3392
51c2684e
DJ
static int
linux_tracefork_child (void *arg)
{
  /* Let the parent trace us, then stop so it can set
     PTRACE_O_TRACEFORK before we fork below.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* No-MMU targets cannot fork; clone with CLONE_VM instead.  ARG
     points at the stack region handed down by the caller.  */
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3418
24a09b5f
DJ
3419/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3420 sure that we can enable the option, and that it had the desired
3421 effect. */
3422
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag accordingly.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* Stack for the clone'd child (and, within it, the grandchild).  */
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child stops itself with SIGSTOP once it has called
     PTRACE_TRACEME; wait for that stop.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  /* Try to enable fork tracing.  Failure means an old kernel; kill
     the child and report no support.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Resume the child; if the option works, its fork will report a
     PTRACE_EVENT_FORK stop in the high bits of the wait status.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* The grandchild is auto-traced too; reap and kill it.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the first child; it may stop several more times before
     dying, so loop until the wait status is no longer a stop.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
3525
3526
2f2893d9
DJ
/* Hook up libthread_db-based thread discovery for the current
   process, once its symbols are available.  No-op when gdbserver was
   built without thread_db support, or if already initialized.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3542
e5379b03 3543static void
ef57601b 3544linux_request_interrupt (void)
e5379b03 3545{
a1928bad 3546 extern unsigned long signal_pid;
e5379b03 3547
95954743
PA
3548 if (!ptid_equal (cont_thread, null_ptid)
3549 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 3550 {
54a0b537 3551 struct lwp_info *lwp;
bd99dc85 3552 int lwpid;
e5379b03 3553
54a0b537 3554 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
3555 lwpid = lwpid_of (lwp);
3556 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
3557 }
3558 else
ef57601b 3559 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
3560}
3561
aa691b87
RM
3562/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3563 to debugger memory starting at MYADDR. */
3564
3565static int
f450004a 3566linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
3567{
3568 char filename[PATH_MAX];
3569 int fd, n;
95954743 3570 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 3571
95954743 3572 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
3573
3574 fd = open (filename, O_RDONLY);
3575 if (fd < 0)
3576 return -1;
3577
3578 if (offset != (CORE_ADDR) 0
3579 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3580 n = -1;
3581 else
3582 n = read (fd, myaddr, len);
3583
3584 close (fd);
3585
3586 return n;
3587}
3588
d993e290
PA
3589/* These breakpoint and watchpoint related wrapper functions simply
3590 pass on the function call if the target has registered a
3591 corresponding function. */
e013ee27
OF
3592
3593static int
d993e290 3594linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 3595{
d993e290
PA
3596 if (the_low_target.insert_point != NULL)
3597 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
3598 else
3599 /* Unsupported (see target.h). */
3600 return 1;
3601}
3602
3603static int
d993e290 3604linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 3605{
d993e290
PA
3606 if (the_low_target.remove_point != NULL)
3607 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
3608 else
3609 /* Unsupported (see target.h). */
3610 return 1;
3611}
3612
3613static int
3614linux_stopped_by_watchpoint (void)
3615{
c3adc08c
PA
3616 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3617
3618 return lwp->stopped_by_watchpoint;
e013ee27
OF
3619}
3620
3621static CORE_ADDR
3622linux_stopped_data_address (void)
3623{
c3adc08c
PA
3624 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3625
3626 return lwp->stopped_data_address;
e013ee27
OF
3627}
3628
42c81e2a 3629#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
3630#if defined(__mcoldfire__)
3631/* These should really be defined in the kernel's ptrace.h header. */
3632#define PT_TEXT_ADDR 49*4
3633#define PT_DATA_ADDR 50*4
3634#define PT_TEXT_END_ADDR 51*4
3635#endif
3636
3637/* Under uClinux, programs are loaded at non-zero offsets, which we need
3638 to tell gdb about. */
3639
static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PEEKUSER returns the value in-band; detect failure of any of the
     three reads through errno.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
  return 0;
}
3672#endif
3673
dc146f7c
VP
/* qsort comparison function for ints.  Uses explicit comparisons
   rather than "a - b", whose result can overflow (undefined
   behavior) when the operands have opposite signs of large
   magnitude.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return (a > b) - (a < b);
}
3682
/* Compact a sorted, non-empty range [BEGIN, END) in place, dropping
   adjacent duplicates.  Returns one past the last element kept
   (like C++ std::unique).  */
static int *
unique (int *begin, int *end)
{
  int *last = begin;
  int *scan;

  for (scan = begin + 1; scan != end; scan++)
    if (*last != *scan)
      *++last = *scan;

  return last + 1;
}
3692
3693/* Given PID, iterates over all threads in that process.
3694
3695 Information about each thread, in a format suitable for qXfer:osdata:thread
3696 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3697 initialized, and the caller is responsible for finishing and appending '\0'
3698 to it.
3699
3700 The list of cores that threads are running on is assigned to *CORES, if it
3701 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3702 should free *CORES. */
3703
3704static void
3705list_threads (int pid, struct buffer *buffer, char **cores)
3706{
3707 int count = 0;
3708 int allocated = 10;
3709 int *core_numbers = xmalloc (sizeof (int) * allocated);
3710 char pathname[128];
3711 DIR *dir;
3712 struct dirent *dp;
3713 struct stat statbuf;
3714
3715 sprintf (pathname, "/proc/%d/task", pid);
3716 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3717 {
3718 dir = opendir (pathname);
3719 if (!dir)
3720 {
3721 free (core_numbers);
3722 return;
3723 }
3724
3725 while ((dp = readdir (dir)) != NULL)
3726 {
3727 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3728
3729 if (lwp != 0)
3730 {
3731 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3732
3733 if (core != -1)
3734 {
3735 char s[sizeof ("4294967295")];
3736 sprintf (s, "%u", core);
3737
3738 if (count == allocated)
3739 {
3740 allocated *= 2;
3741 core_numbers = realloc (core_numbers,
3742 sizeof (int) * allocated);
3743 }
3744 core_numbers[count++] = core;
3745 if (buffer)
3746 buffer_xml_printf (buffer,
3747 "<item>"
3748 "<column name=\"pid\">%d</column>"
3749 "<column name=\"tid\">%s</column>"
3750 "<column name=\"core\">%s</column>"
3751 "</item>", pid, dp->d_name, s);
3752 }
3753 else
3754 {
3755 if (buffer)
3756 buffer_xml_printf (buffer,
3757 "<item>"
3758 "<column name=\"pid\">%d</column>"
3759 "<column name=\"tid\">%s</column>"
3760 "</item>", pid, dp->d_name);
3761 }
3762 }
3763 }
3764 }
3765
3766 if (cores)
3767 {
3768 *cores = NULL;
3769 if (count > 0)
3770 {
3771 struct buffer buffer2;
3772 int *b;
3773 int *e;
3774 qsort (core_numbers, count, sizeof (int), compare_ints);
3775
3776 /* Remove duplicates. */
3777 b = core_numbers;
3778 e = unique (b, core_numbers + count);
3779
3780 buffer_init (&buffer2);
3781
3782 for (b = core_numbers; b != e; ++b)
3783 {
3784 char number[sizeof ("4294967295")];
3785 sprintf (number, "%u", *b);
3786 buffer_xml_printf (&buffer2, "%s%s",
3787 (b == core_numbers) ? "" : ",", number);
3788 }
3789 buffer_grow_str0 (&buffer2, "");
3790
3791 *cores = buffer_finish (&buffer2);
3792 }
3793 }
3794 free (core_numbers);
3795}
3796
/* Append one qXfer:osdata:processes <item> for process PID to
   BUFFER: pid, owning USERNAME, the command line read from
   /proc/PID/cmdline, and the set of cores its threads run on.
   Silently does nothing if the cmdline file cannot be read.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  char cmd[MAXPATHLEN + 1];
  FILE *f;
  size_t len;

  sprintf (pathname, "/proc/%d/cmdline", pid);

  f = fopen (pathname, "r");
  if (f == NULL)
    return;

  len = fread (cmd, 1, sizeof (cmd) - 1, f);
  if (len > 0)
    {
      char *cores = NULL;
      size_t i;

      /* The cmdline file separates arguments with NUL bytes; replace
	 them with spaces so the whole command line prints as one
	 string.  */
      for (i = 0; i < len; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[len] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
  fclose (f);
}
3842
07e059b5
VP
/* Handle qXfer:osdata reads for ANNEX "processes" or "threads".
   Returns the number of bytes copied into READBUF, 0 at end of data
   (or for unsupported annexes / write attempts).  A snapshot of
   /proc is built on the first read (OFFSET == 0) and served from
   static state across subsequent chunked reads; the snapshot is
   discarded once the client reads past the end.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  /* This object is read-only.  */
  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* Start of a new request: discard any stale snapshot and build
	 a fresh one by scanning /proc for numeric directories.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-digit names of plausible pid length.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  /* Serve the next chunk of the snapshot.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3933
d0722149
DE
3934/* Convert a native/host siginfo object, into/from the siginfo in the
3935 layout of the inferiors' architecture. */
3936
3937static void
3938siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3939{
3940 int done = 0;
3941
3942 if (the_low_target.siginfo_fixup != NULL)
3943 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3944
3945 /* If there was no callback, or the callback didn't do anything,
3946 then just do a straight memcpy. */
3947 if (!done)
3948 {
3949 if (direction == 1)
3950 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3951 else
3952 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3953 }
3954}
3955
/* Handle qXfer:siginfo reads and writes for the current inferior's
   LWP, using PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Exactly one of
   READBUF/WRITEBUF is non-NULL; OFFSET/LEN select a byte window of
   the siginfo object.  Returns the number of bytes transferred, or
   -1 on error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* Reject accesses that start past the end of the structure.  */
  if (offset > sizeof (siginfo))
    return -1;

  /* Always fetch the current siginfo first: a partial write must
     preserve the bytes outside [OFFSET, OFFSET+LEN).  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp LEN so the window stays inside the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4004
bd99dc85
PA
4005/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4006 so we notice when children change state; as the handler for the
4007 sigsuspend in my_waitpid. */
4008
4009static void
4010sigchld_handler (int signo)
4011{
4012 int old_errno = errno;
4013
4014 if (debug_threads)
4015 /* fprintf is not async-signal-safe, so call write directly. */
4016 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4017
4018 if (target_is_async_p ())
4019 async_file_mark (); /* trigger a linux_wait */
4020
4021 errno = old_errno;
4022}
4023
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4029
/* Enable (ENABLE != 0) or disable async mode by creating or tearing
   down the event pipe that sigchld_handler uses to wake the event
   loop.  Returns the previous enablement state.  */
static int
linux_async (int enable)
{
  /* The pipe doubles as the "async is on" flag: fd[0] == -1 means
     async mode is off.  */
  int previous = (linux_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes, so the handler cannot
	 observe a half-created or half-destroyed pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Non-blocking, so marking an already-full pipe from the
	     signal handler cannot deadlock.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4073
/* Switch non-stop mode on or off.  Entering non-stop means going
   async (registering with the event loop); leaving it drops back to
   synchronous operation.  Always succeeds.  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  return 0;
}
4081
/* The Linux target always supports debugging multiple processes.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4087
efcbbd14
UW
4088
4089/* Enumerate spufs IDs for process PID. */
4090static int
4091spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4092{
4093 int pos = 0;
4094 int written = 0;
4095 char path[128];
4096 DIR *dir;
4097 struct dirent *entry;
4098
4099 sprintf (path, "/proc/%ld/fd", pid);
4100 dir = opendir (path);
4101 if (!dir)
4102 return -1;
4103
4104 rewinddir (dir);
4105 while ((entry = readdir (dir)) != NULL)
4106 {
4107 struct stat st;
4108 struct statfs stfs;
4109 int fd;
4110
4111 fd = atoi (entry->d_name);
4112 if (!fd)
4113 continue;
4114
4115 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4116 if (stat (path, &st) != 0)
4117 continue;
4118 if (!S_ISDIR (st.st_mode))
4119 continue;
4120
4121 if (statfs (path, &stfs) != 0)
4122 continue;
4123 if (stfs.f_type != SPUFS_MAGIC)
4124 continue;
4125
4126 if (pos >= offset && pos + 4 <= offset + len)
4127 {
4128 *(unsigned int *)(buf + pos - offset) = fd;
4129 written += 4;
4130 }
4131 pos += 4;
4132 }
4133
4134 closedir (dir);
4135 return written;
4136}
4137
4138/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4139 object type, using the /proc file system. */
4140static int
4141linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4142 unsigned const char *writebuf,
4143 CORE_ADDR offset, int len)
4144{
4145 long pid = lwpid_of (get_thread_lwp (current_inferior));
4146 char buf[128];
4147 int fd = 0;
4148 int ret = 0;
4149
4150 if (!writebuf && !readbuf)
4151 return -1;
4152
4153 if (!*annex)
4154 {
4155 if (!readbuf)
4156 return -1;
4157 else
4158 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4159 }
4160
4161 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4162 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4163 if (fd <= 0)
4164 return -1;
4165
4166 if (offset != 0
4167 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4168 {
4169 close (fd);
4170 return 0;
4171 }
4172
4173 if (writebuf)
4174 ret = write (fd, writebuf, (size_t) len);
4175 else
4176 ret = read (fd, readbuf, (size_t) len);
4177
4178 close (fd);
4179 return ret;
4180}
4181
dc146f7c
VP
4182static int
4183linux_core_of_thread (ptid_t ptid)
4184{
4185 char filename[sizeof ("/proc//task//stat")
4186 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4187 + 1];
4188 FILE *f;
4189 char *content = NULL;
4190 char *p;
4191 char *ts = 0;
4192 int content_read = 0;
4193 int i;
4194 int core;
4195
4196 sprintf (filename, "/proc/%d/task/%ld/stat",
4197 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4198 f = fopen (filename, "r");
4199 if (!f)
4200 return -1;
4201
4202 for (;;)
4203 {
4204 int n;
4205 content = realloc (content, content_read + 1024);
4206 n = fread (content + content_read, 1, 1024, f);
4207 content_read += n;
4208 if (n < 1024)
4209 {
4210 content[content_read] = '\0';
4211 break;
4212 }
4213 }
4214
4215 p = strchr (content, '(');
4216 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
4217
4218 p = strtok_r (p, " ", &ts);
4219 for (i = 0; i != 36; ++i)
4220 p = strtok_r (NULL, " ", &ts);
4221
4222 if (sscanf (p, "%d", &core) == 0)
4223 core = -1;
4224
4225 free (content);
4226 fclose (f);
4227
4228 return core;
4229}
4230
1570b33e
L
4231static void
4232linux_process_qsupported (const char *query)
4233{
4234 if (the_low_target.process_qsupported != NULL)
4235 the_low_target.process_qsupported (query);
4236}
4237
219f2f23
PA
4238static int
4239linux_supports_tracepoints (void)
4240{
4241 if (*the_low_target.supports_tracepoints == NULL)
4242 return 0;
4243
4244 return (*the_low_target.supports_tracepoints) ();
4245}
4246
4247static CORE_ADDR
4248linux_read_pc (struct regcache *regcache)
4249{
4250 if (the_low_target.get_pc == NULL)
4251 return 0;
4252
4253 return (*the_low_target.get_pc) (regcache);
4254}
4255
4256static void
4257linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4258{
4259 gdb_assert (the_low_target.set_pc != NULL);
4260
4261 (*the_low_target.set_pc) (regcache, pc);
4262}
4263
/* The Linux implementation of gdbserver's target vector.  This is a
   positional initializer: each entry must line up with the
   corresponding member of struct target_ops (see target.h), so do
   not reorder entries here without updating that structure.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* The read_offsets slot is only implemented for uClibc no-MMU
     configurations.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup is only available when built against
     libthread_db.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc
};
4313
/* Set up signal dispositions needed by the Linux backend.  Use a
   proper (void) prototype: an empty parameter list in C declares a
   function with unspecified parameters.  */
static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4323
/* One-time initialization of the Linux low target: install the
   target vector, breakpoint data, signal dispositions, and the
   SIGCHLD handler used for async mode.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Detect whether the kernel supports PTRACE_O_TRACEFORK.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the target's regsets (the table is terminated by a
     negative size) and allocate the per-regset disabled flags.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install the SIGCHLD handler last, once everything it touches is
     set up.  SA_RESTART keeps interrupted syscalls transparent.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}