/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

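/* As an illustration of the encoding above: W_STOPCODE (SIGTRAP) is
   (5 << 8) | 0x7f == 0x057f.  The 0x7f low byte is what makes
   WIFSTOPPED() report a stop, and WSTOPSIG() recovers the signal from
   bits 8-15.  dequeue_one_deferred_signal below uses this macro to
   synthesize a wait status for a deferred signal.  */
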
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

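/* For illustration, entries in these lists are keyed by ptids of the
   form ptid_build (tgid, lwpid, 0); e.g. a second thread of process
   1234 whose kernel task ID is 1235 (numbers are hypothetical) gets
   ptid_build (1234, 1235, 0), so ptid_get_pid() returns the tgid and
   ptid_get_lwp() the LWP ID.  */
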
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

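/* How the two functions above cooperate, in sketch form:
   linux_wait_for_lwp stashes a stop it cannot yet attribute with
   add_to_pid_list (&stopped_pids, ret, *wstatp), and once
   handle_extended_wait learns the new LWP's ID from
   PTRACE_GETEVENTMSG, it claims that stop with
   pull_pid_from_list (&stopped_pids, new_pid, &status) instead of
   blocking in waitpid again.  */
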
/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
	  && header->e_ident[EI_MAG1] == ELFMAG1
	  && header->e_ident[EI_MAG2] == ELFMAG2
	  && header->e_ident[EI_MAG3] == ELFMAG3
	  && header->e_ident[EI_CLASS] == ELFCLASS64);
}

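/* Put differently, the first bytes of e_ident in a matching file are
   0x7f, 'E', 'L', 'F', with ELFCLASS64 (2) in the class byte; a
   32-bit binary carries ELFCLASS32 (1) there and is rejected.  */
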
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

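/* The __WALL emulation above works by alternation: each WNOHANG pass
   polls one flavor of child (regular children vs. __WCLONE children),
   and once both flavors have been polled without a result, sigsuspend
   parks the caller until a signal (normally SIGCHLD) arrives, so the
   loop never busy-spins.  */
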
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

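/* For reference, the caller recognizes this case because, with clone
   event reporting enabled, the kernel reports the clone as a SIGTRAP
   stop whose extended event code sits in bits 16 and up of the wait
   status; linux_wait_for_event tests exactly that
   ((*wstat >> 16) != 0) before calling handle_extended_wait.  */
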
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

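/* A worked example of the adjustment above (addresses hypothetical):
   on i386, decr_pc_after_break is 1.  If a one-byte int3 breakpoint
   sits at 0x8048000 and the inferior runs into it under PTRACE_CONT,
   the kernel reports $eip == 0x8048001; subtracting
   decr_pc_after_break rewinds stop_pc to 0x8048000, the breakpoint's
   actual address.  */
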
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    /* Errors ignored.  */;
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

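/* The two-iteration /proc/PID/task scan above is a race-avoidance
   idiom: a thread observed mid-scan may itself spawn further threads,
   so the directory is re-read until two consecutive passes find
   nothing new, by which point every thread that existed when the
   scan settled has been attached.  */
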
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL:  kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
	fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
		 pid);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
		 lwpid_of (lwp), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

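/* A note on the WSTOPSIG != 32 / != 33 filter in the debug output
   above: those are __SIGRTMIN and __SIGRTMIN + 1, which LinuxThreads
   uses internally for thread control, so they are muted to keep the
   log readable.  */
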
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

1506 jump pad" list. */
1507
1508static void
1509enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1510{
1511 struct pending_signals *p_sig;
1512
1513 if (debug_threads)
1514 fprintf (stderr, "\
1515Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1516
1517 if (debug_threads)
1518 {
1519 struct pending_signals *sig;
1520
1521 for (sig = lwp->pending_signals_to_report;
1522 sig != NULL;
1523 sig = sig->prev)
1524 fprintf (stderr,
1525 " Already queued %d\n",
1526 sig->signal);
1527
1528 fprintf (stderr, " (no more currently queued signals)\n");
1529 }
1530
1a981360
PA
1531 /* Don't enqueue non-RT signals if they are already in the deferred
1532 queue. (SIGSTOP being the easiest signal to see ending up here
1533 twice) */
1534 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1535 {
1536 struct pending_signals *sig;
1537
1538 for (sig = lwp->pending_signals_to_report;
1539 sig != NULL;
1540 sig = sig->prev)
1541 {
1542 if (sig->signal == WSTOPSIG (*wstat))
1543 {
1544 if (debug_threads)
1545 fprintf (stderr,
1546 "Not requeuing already queued non-RT signal %d"
1547 " for LWP %ld\n",
1548 sig->signal,
1549 lwpid_of (lwp));
1550 return;
1551 }
1552 }
1553 }
1554
fa593d66
PA
1555 p_sig = xmalloc (sizeof (*p_sig));
1556 p_sig->prev = lwp->pending_signals_to_report;
1557 p_sig->signal = WSTOPSIG (*wstat);
1558 memset (&p_sig->info, 0, sizeof (siginfo_t));
1559 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1560
1561 lwp->pending_signals_to_report = p_sig;
1562}
1563
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

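/* Because enqueue_one_deferred_signal pushes new entries at the head
   (via the prev pointer), the walk to the end of the chain above pops
   the oldest entry first: deferred signals are re-reported in the
   order they originally arrived.  */
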
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

bd99dc85
PA
1661/* Wait for an event from child PID. If PID is -1, wait for any
1662 child. Store the stop status through the status pointer WSTAT.
1663 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1664 event was found and OPTIONS contains WNOHANG. Return the PID of
1665 the stopped child otherwise. */
1666
0d62e5e8 1667static int
d8301ad1 1668linux_wait_for_event (ptid_t ptid, int *wstat, int options)
0d62e5e8 1669{
d50171e4 1670 struct lwp_info *event_child, *requested_child;
d8301ad1 1671 ptid_t wait_ptid;
d50171e4 1672
d50171e4
PA
1673 event_child = NULL;
1674 requested_child = NULL;
0d62e5e8 1675
95954743 1676 /* Check for a lwp with a pending status. */
bd99dc85 1677
e825046f 1678 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
0d62e5e8 1679 {
54a0b537 1680 event_child = (struct lwp_info *)
d50171e4 1681 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1682 if (debug_threads && event_child)
bd99dc85 1683 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1684 }
1685 else
1686 {
95954743 1687 requested_child = find_lwp_pid (ptid);
d50171e4 1688
fa593d66
PA
1689 if (!stopping_threads
1690 && requested_child->status_pending_p
1691 && requested_child->collecting_fast_tracepoint)
1692 {
1693 enqueue_one_deferred_signal (requested_child,
1694 &requested_child->status_pending);
1695 requested_child->status_pending_p = 0;
1696 requested_child->status_pending = 0;
1697 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1698 }
1699
1700 if (requested_child->suspended
1701 && requested_child->status_pending_p)
1702 fatal ("requesting an event out of a suspended child?");
1703
d50171e4 1704 if (requested_child->status_pending_p)
bd99dc85 1705 event_child = requested_child;
0d62e5e8 1706 }
611cb4a5 1707
0d62e5e8
DJ
1708 if (event_child != NULL)
1709 {
bd99dc85
PA
1710 if (debug_threads)
1711 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1712 lwpid_of (event_child), event_child->status_pending);
1713 *wstat = event_child->status_pending;
1714 event_child->status_pending_p = 0;
1715 event_child->status_pending = 0;
1716 current_inferior = get_lwp_thread (event_child);
1717 return lwpid_of (event_child);
0d62e5e8
DJ
1718 }
1719
d8301ad1
JK
1720 if (ptid_is_pid (ptid))
1721 {
1722 /* A request to wait for a specific tgid. This is not possible
1723 with waitpid, so instead, we wait for any child, and leave
1724 children we're not interested in right now with a pending
1725 status to report later. */
1726 wait_ptid = minus_one_ptid;
1727 }
1728 else
1729 wait_ptid = ptid;
1730
0d62e5e8
DJ
1731 /* We only enter this loop if no process has a pending wait status. Thus
1732 any action taken in response to a wait status inside this loop is
1733 responding as soon as we detect the status, not after any pending
1734 events. */
1735 while (1)
1736 {
d8301ad1 1737 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
0d62e5e8 1738
bd99dc85 1739 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1740 {
1741 if (debug_threads)
1742 fprintf (stderr, "WNOHANG set, no event found\n");
1743 return 0;
1744 }
0d62e5e8
DJ
1745
1746 if (event_child == NULL)
1747 error ("event from unknown child");
611cb4a5 1748
d8301ad1
JK
1749 if (ptid_is_pid (ptid)
1750 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1751 {
1752 if (! WIFSTOPPED (*wstat))
1753 mark_lwp_dead (event_child, *wstat);
1754 else
1755 {
1756 event_child->status_pending_p = 1;
1757 event_child->status_pending = *wstat;
1758 }
1759 continue;
1760 }
1761
bd99dc85 1762 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1763
89be2091 1764 /* Check for thread exit. */
bd99dc85 1765 if (! WIFSTOPPED (*wstat))
0d62e5e8 1766 {
89be2091 1767 if (debug_threads)
95954743 1768 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1769
1770 /* If the last thread is exiting, just return. */
95954743 1771 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1772 {
1773 if (debug_threads)
95954743
PA
1774 fprintf (stderr, "LWP %ld is last lwp of process\n",
1775 lwpid_of (event_child));
bd99dc85
PA
1776 return lwpid_of (event_child);
1777 }
89be2091 1778
bd99dc85
PA
1779 if (!non_stop)
1780 {
1781 current_inferior = (struct thread_info *) all_threads.head;
1782 if (debug_threads)
1783 fprintf (stderr, "Current inferior is now %ld\n",
1784 lwpid_of (get_thread_lwp (current_inferior)));
1785 }
1786 else
1787 {
1788 current_inferior = NULL;
1789 if (debug_threads)
1790 fprintf (stderr, "Current inferior is now <NULL>\n");
1791 }
89be2091
DJ
1792
1793 /* If we were waiting for this particular child to do something...
1794 well, it did something. */
bd99dc85 1795 if (requested_child != NULL)
d50171e4
PA
1796 {
1797 int lwpid = lwpid_of (event_child);
1798
1799 /* Cancel the step-over operation --- the thread that
1800 started it is gone. */
1801 if (finish_step_over (event_child))
7984d532 1802 unstop_all_lwps (1, event_child);
d50171e4
PA
1803 delete_lwp (event_child);
1804 return lwpid;
1805 }
1806
1807 delete_lwp (event_child);
89be2091
DJ
1808
1809 /* Wait for a more interesting event. */
1810 continue;
1811 }
1812
a6dbe5df
PA
1813 if (event_child->must_set_ptrace_flags)
1814 {
1e7fc18c 1815 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1816 event_child->must_set_ptrace_flags = 0;
1817 }
1818
bd99dc85
PA
1819 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1820 && *wstat >> 16 != 0)
24a09b5f 1821 {
bd99dc85 1822 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1823 continue;
1824 }
1825
d50171e4
PA
1826 if (WIFSTOPPED (*wstat)
1827 && WSTOPSIG (*wstat) == SIGSTOP
1828 && event_child->stop_expected)
1829 {
1830 int should_stop;
1831
1832 if (debug_threads)
1833 fprintf (stderr, "Expected stop.\n");
1834 event_child->stop_expected = 0;
1835
8336d594 1836 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1837 || stopping_threads);
1838
1839 if (!should_stop)
1840 {
1841 linux_resume_one_lwp (event_child,
1842 event_child->stepping, 0, NULL);
1843 continue;
1844 }
1845 }
1846
bd99dc85 1847 return lwpid_of (event_child);
611cb4a5 1848 }
0d62e5e8 1849
611cb4a5
DJ
1850 /* NOTREACHED */
1851 return 0;
1852}
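
/* Illustrative sketch (not part of gdbserver): how the raw wait
   statuses handled above decompose.  The WIF* macros come from
   <sys/wait.h>; ptrace extended events additionally appear in bits
   16 and up of a SIGTRAP stop status, which is what the
   "*wstat >> 16 != 0" test further below checks for.  */

static void
example_describe_wstat (int wstat)
{
  if (WIFSTOPPED (wstat))
    fprintf (stderr, "stopped by signal %d (extended event bits %x)\n",
	     WSTOPSIG (wstat), wstat >> 16);
  else if (WIFEXITED (wstat))
    fprintf (stderr, "exited normally with code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    fprintf (stderr, "killed by signal %d\n", WTERMSIG (wstat));
}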
1853
6bf5e0ba
PA
1854/* Count the LWPs that have had events. */
1855
1856static int
1857count_events_callback (struct inferior_list_entry *entry, void *data)
1858{
1859 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1860 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1861 int *count = data;
1862
1863 gdb_assert (count != NULL);
1864
1865 /* Count only resumed LWPs that have a SIGTRAP event pending that
1866 should be reported to GDB. */
8336d594
PA
1867 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1868 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1869 && lp->status_pending_p
1870 && WIFSTOPPED (lp->status_pending)
1871 && WSTOPSIG (lp->status_pending) == SIGTRAP
1872 && !breakpoint_inserted_here (lp->stop_pc))
1873 (*count)++;
1874
1875 return 0;
1876}
1877
1878/* Select the LWP (if any) that is currently being single-stepped. */
1879
1880static int
1881select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1882{
1883 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1884 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1885
8336d594
PA
1886 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1887 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1888 && lp->status_pending_p)
1889 return 1;
1890 else
1891 return 0;
1892}
1893
1894/* Select the Nth LWP that has had a SIGTRAP event that should be
1895 reported to GDB. */
1896
1897static int
1898select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1899{
1900 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1901 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1902 int *selector = data;
1903
1904 gdb_assert (selector != NULL);
1905
1906 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1907 if (thread->last_resume_kind != resume_stop
1908 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1909 && lp->status_pending_p
1910 && WIFSTOPPED (lp->status_pending)
1911 && WSTOPSIG (lp->status_pending) == SIGTRAP
1912 && !breakpoint_inserted_here (lp->stop_pc))
1913 if ((*selector)-- == 0)
1914 return 1;
1915
1916 return 0;
1917}
1918
1919static int
1920cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1921{
1922 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1923 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1924 struct lwp_info *event_lp = data;
1925
1926 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1927 if (lp == event_lp)
1928 return 0;
1929
1930 /* If a LWP other than the LWP that we're reporting an event for has
1931 hit a GDB breakpoint (as opposed to some random trap signal),
1932 then just arrange for it to hit it again later. We don't keep
1933 the SIGTRAP status and don't forward the SIGTRAP signal to the
1934 LWP. We will handle the current event, eventually we will resume
1935 all LWPs, and this one will get its breakpoint trap again.
1936
1937 If we do not do this, then we run the risk that the user will
1938 delete or disable the breakpoint, but the LWP will have already
1939 tripped on it. */
1940
8336d594
PA
1941 if (thread->last_resume_kind != resume_stop
1942 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1943 && lp->status_pending_p
1944 && WIFSTOPPED (lp->status_pending)
1945 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1946 && !lp->stepping
1947 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1948 && cancel_breakpoint (lp))
1949 /* Throw away the SIGTRAP. */
1950 lp->status_pending_p = 0;
1951
1952 return 0;
1953}
1954
7984d532
PA
1955static void
1956linux_cancel_breakpoints (void)
1957{
1958 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1959}
1960
6bf5e0ba
PA
1961/* Select one LWP out of those that have events pending. */
1962
1963static void
1964select_event_lwp (struct lwp_info **orig_lp)
1965{
1966 int num_events = 0;
1967 int random_selector;
1968 struct lwp_info *event_lp;
1969
1970 /* Give preference to any LWP that is being single-stepped. */
1971 event_lp
1972 = (struct lwp_info *) find_inferior (&all_lwps,
1973 select_singlestep_lwp_callback, NULL);
1974 if (event_lp != NULL)
1975 {
1976 if (debug_threads)
1977 fprintf (stderr,
1978 "SEL: Select single-step %s\n",
1979 target_pid_to_str (ptid_of (event_lp)));
1980 }
1981 else
1982 {
1983 /* No single-stepping LWP. Select one at random, out of those
1984 which have had SIGTRAP events. */
1985
1986 /* First see how many SIGTRAP events we have. */
1987 find_inferior (&all_lwps, count_events_callback, &num_events);
1988
1989 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1990 random_selector = (int)
1991 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1992
1993 if (debug_threads && num_events > 1)
1994 fprintf (stderr,
1995 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1996 num_events, random_selector);
1997
1998 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1999 select_event_lwp_callback,
2000 &random_selector);
2001 }
2002
2003 if (event_lp != NULL)
2004 {
2005 /* Switch the event LWP. */
2006 *orig_lp = event_lp;
2007 }
2008}
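
/* The random selection used by select_event_lwp above, isolated as a
   sketch (illustrative only): scaling rand () by
   NUM_EVENTS / (RAND_MAX + 1.0) yields an index uniformly
   distributed over [0, NUM_EVENTS), which select_event_lwp_callback
   then counts down to zero.  */

static int
example_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}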
2009
7984d532
PA
2010/* Decrement the suspend count of an LWP. */
2011
2012static int
2013unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2014{
2015 struct lwp_info *lwp = (struct lwp_info *) entry;
2016
2017 /* Ignore EXCEPT. */
2018 if (lwp == except)
2019 return 0;
2020
2021 lwp->suspended--;
2022
2023 gdb_assert (lwp->suspended >= 0);
2024 return 0;
2025}
2026
2027/* Decrement the suspend count of all LWPs, except EXCEPT, if
2028 non-NULL. */
2029
2030static void
2031unsuspend_all_lwps (struct lwp_info *except)
2032{
2033 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2034}
2035
fa593d66
PA
2036static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2037static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2038 void *data);
2039static int lwp_running (struct inferior_list_entry *entry, void *data);
2040static ptid_t linux_wait_1 (ptid_t ptid,
2041 struct target_waitstatus *ourstatus,
2042 int target_options);
2043
2044/* Stabilize threads (move out of jump pads).
2045
2046 If a thread is midway collecting a fast tracepoint, we need to
2047 finish the collection and move it out of the jump pad before
2048 reporting the signal.
2049
2050 This avoids recursion while collecting (when a signal arrives
2051 midway, and the signal handler itself collects), which would trash
2052 the trace buffer. In case the user set a breakpoint in a signal
2053 handler, this avoids the backtrace showing the jump pad, etc..
2054 Most importantly, there are certain things we can't do safely if
2055 threads are stopped in a jump pad (or in one of its callees). For
2056 example:
2057
2058 - starting a new trace run. A thread still collecting the
2059 previous run could trash the trace buffer when resumed. The trace
2060 buffer control structures would have been reset but the thread had
2061 no way to tell. The thread could even be midway through memcpy'ing to the
2062 buffer, which would mean that when resumed, it would clobber the
2063 trace buffer that had been set for a new run.
2064
2065 - we can't rewrite/reuse the jump pads for new tracepoints
2066 safely. Say you do tstart while a thread is stopped midway through
2067 collecting. When the thread is later resumed, it finishes the
2068 collection, and returns to the jump pad, to execute the original
2069 instruction that was under the tracepoint jump at the time the
2070 older run had been started. If the jump pad had been rewritten
2071 since for something else in the new run, the thread would now
2072 execute the wrong / random instructions. */
2073
2074static void
2075linux_stabilize_threads (void)
2076{
2077 struct thread_info *save_inferior;
2078 struct lwp_info *lwp_stuck;
2079
2080 lwp_stuck
2081 = (struct lwp_info *) find_inferior (&all_lwps,
2082 stuck_in_jump_pad_callback, NULL);
2083 if (lwp_stuck != NULL)
2084 {
b4d51a55
PA
2085 if (debug_threads)
2086 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2087 lwpid_of (lwp_stuck));
fa593d66
PA
2088 return;
2089 }
2090
2091 save_inferior = current_inferior;
2092
2093 stabilizing_threads = 1;
2094
2095 /* Kick 'em all. */
2096 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2097
2098 /* Loop until all are stopped out of the jump pads. */
2099 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2100 {
2101 struct target_waitstatus ourstatus;
2102 struct lwp_info *lwp;
fa593d66
PA
2103 int wstat;
2104
2105 /* Note that we go through the full wait event loop. While
2106 moving threads out of jump pad, we need to be able to step
2107 over internal breakpoints and such. */
32fcada3 2108 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2109
2110 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2111 {
2112 lwp = get_thread_lwp (current_inferior);
2113
2114 /* Lock it. */
2115 lwp->suspended++;
2116
2117 if (ourstatus.value.sig != TARGET_SIGNAL_0
2118 || current_inferior->last_resume_kind == resume_stop)
2119 {
2120 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2121 enqueue_one_deferred_signal (lwp, &wstat);
2122 }
2123 }
2124 }
2125
2126 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2127
2128 stabilizing_threads = 0;
2129
2130 current_inferior = save_inferior;
2131
b4d51a55 2132 if (debug_threads)
fa593d66 2133 {
b4d51a55
PA
2134 lwp_stuck
2135 = (struct lwp_info *) find_inferior (&all_lwps,
2136 stuck_in_jump_pad_callback, NULL);
2137 if (lwp_stuck != NULL)
fa593d66
PA
2138 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2139 lwpid_of (lwp_stuck));
2140 }
2141}
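
/* Sketch (illustrative only): W_STOPCODE, as used above to re-encode
   a deferred signal into a raw wait status, composes exactly what
   WIFSTOPPED and WSTOPSIG take apart.  */

static void
example_w_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGUSR1)
    fprintf (stderr, "W_STOPCODE roundtrip failed\n");
}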
2142
0d62e5e8 2143/* Wait for process, returns status. */
da6d8c04 2144
95954743
PA
2145static ptid_t
2146linux_wait_1 (ptid_t ptid,
2147 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2148{
e5f1222d 2149 int w;
fc7238bb 2150 struct lwp_info *event_child;
bd99dc85 2151 int options;
bd99dc85 2152 int pid;
6bf5e0ba
PA
2153 int step_over_finished;
2154 int bp_explains_trap;
2155 int maybe_internal_trap;
2156 int report_to_gdb;
219f2f23 2157 int trace_event;
bd99dc85
PA
2158
2159 /* Translate generic target options into linux options. */
2160 options = __WALL;
2161 if (target_options & TARGET_WNOHANG)
2162 options |= WNOHANG;
0d62e5e8
DJ
2163
2164retry:
fa593d66
PA
2165 bp_explains_trap = 0;
2166 trace_event = 0;
bd99dc85
PA
2167 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2168
0d62e5e8
DJ
2169 /* If we were only supposed to resume one thread, only wait for
2170 that thread - if it's still alive. If it died, however - which
2171 can happen if we're coming from the thread death case below -
2172 then we need to make sure we restart the other threads. We could
2173 pick a thread at random or restart all; restarting all is less
2174 arbitrary. */
95954743
PA
2175 if (!non_stop
2176 && !ptid_equal (cont_thread, null_ptid)
2177 && !ptid_equal (cont_thread, minus_one_ptid))
0d62e5e8 2178 {
fc7238bb
PA
2179 struct thread_info *thread;
2180
bd99dc85
PA
2181 thread = (struct thread_info *) find_inferior_id (&all_threads,
2182 cont_thread);
0d62e5e8
DJ
2183
2184 /* No stepping, no signal - unless one is pending already, of course. */
bd99dc85 2185 if (thread == NULL)
64386c31
DJ
2186 {
2187 struct thread_resume resume_info;
95954743 2188 resume_info.thread = minus_one_ptid;
bd99dc85
PA
2189 resume_info.kind = resume_continue;
2190 resume_info.sig = 0;
2bd7c093 2191 linux_resume (&resume_info, 1);
64386c31 2192 }
bd99dc85 2193 else
95954743 2194 ptid = cont_thread;
0d62e5e8 2195 }
da6d8c04 2196
6bf5e0ba
PA
2197 if (ptid_equal (step_over_bkpt, null_ptid))
2198 pid = linux_wait_for_event (ptid, &w, options);
2199 else
2200 {
2201 if (debug_threads)
2202 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2203 target_pid_to_str (step_over_bkpt));
2204 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2205 }
2206
bd99dc85 2207 if (pid == 0) /* only if TARGET_WNOHANG */
95954743 2208 return null_ptid;
bd99dc85 2209
6bf5e0ba 2210 event_child = get_thread_lwp (current_inferior);
da6d8c04 2211
0d62e5e8
DJ
2212 /* If we are waiting for a particular child, and it exited,
2213 linux_wait_for_event will return its exit status. Similarly if
2214 the last child exited. If this is not the last child, however,
2215 do not report it as exited until there is a 'thread exited' response
2216 available in the remote protocol. Instead, just wait for another event.
2217 This should be safe, because if the thread crashed we will already
2218 have reported the termination signal to GDB; that should stop any
2219 in-progress stepping operations, etc.
2220
2221 Report the exit status of the last thread to exit. This matches
2222 LinuxThreads' behavior. */
2223
95954743 2224 if (last_thread_of_process_p (current_inferior))
da6d8c04 2225 {
bd99dc85 2226 if (WIFEXITED (w) || WIFSIGNALED (w))
0d62e5e8 2227 {
bd99dc85
PA
2228 if (WIFEXITED (w))
2229 {
2230 ourstatus->kind = TARGET_WAITKIND_EXITED;
2231 ourstatus->value.integer = WEXITSTATUS (w);
2232
2233 if (debug_threads)
493e2a69
MS
2234 fprintf (stderr,
2235 "\nChild exited with retcode = %x \n",
2236 WEXITSTATUS (w));
bd99dc85
PA
2237 }
2238 else
2239 {
2240 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2241 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2242
2243 if (debug_threads)
493e2a69
MS
2244 fprintf (stderr,
2245 "\nChild terminated with signal = %x \n",
2246 WTERMSIG (w));
bd99dc85
PA
2247
2248 }
5b1c542e 2249
3e4c1235 2250 return ptid_of (event_child);
0d62e5e8 2251 }
da6d8c04 2252 }
0d62e5e8 2253 else
da6d8c04 2254 {
0d62e5e8
DJ
2255 if (!WIFSTOPPED (w))
2256 goto retry;
da6d8c04
DJ
2257 }
2258
6bf5e0ba
PA
2259 /* If this event was not handled before, and is not a SIGTRAP, we
2260 report it. SIGILL and SIGSEGV are also treated as traps in case
2261 a breakpoint is inserted at the current PC. If this target does
2262 not support internal breakpoints at all, we also report the
2263 SIGTRAP without further processing; it's of no concern to us. */
2264 maybe_internal_trap
2265 = (supports_breakpoints ()
2266 && (WSTOPSIG (w) == SIGTRAP
2267 || ((WSTOPSIG (w) == SIGILL
2268 || WSTOPSIG (w) == SIGSEGV)
2269 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2270
2271 if (maybe_internal_trap)
2272 {
2273 /* Handle anything that requires bookkeeping before deciding to
2274 report the event or continue waiting. */
2275
2276 /* First check if we can explain the SIGTRAP with an internal
2277 breakpoint, or if we should possibly report the event to GDB.
2278 Do this before anything that may remove or insert a
2279 breakpoint. */
2280 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2281
2282 /* We have a SIGTRAP, possibly a step-over dance has just
2283 finished. If so, tweak the state machine accordingly,
2284 reinsert breakpoints and delete any reinsert (software
2285 single-step) breakpoints. */
2286 step_over_finished = finish_step_over (event_child);
2287
2288 /* Now invoke the callbacks of any internal breakpoints there. */
2289 check_breakpoints (event_child->stop_pc);
2290
219f2f23
PA
2291 /* Handle tracepoint data collecting. This may overflow the
2292 trace buffer, and cause a tracing stop, removing
2293 breakpoints. */
2294 trace_event = handle_tracepoints (event_child);
2295
6bf5e0ba
PA
2296 if (bp_explains_trap)
2297 {
2298 /* If we stepped or ran into an internal breakpoint, we've
2299 already handled it. So next time we resume (from this
2300 PC), we should step over it. */
2301 if (debug_threads)
2302 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2303
8b07ae33
PA
2304 if (breakpoint_here (event_child->stop_pc))
2305 event_child->need_step_over = 1;
6bf5e0ba
PA
2306 }
2307 }
2308 else
2309 {
2310 /* We have some other signal, possibly a step-over dance was in
2311 progress, and it should be cancelled too. */
2312 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2313 }
2314
2315 /* We have all the data we need. Either report the event to GDB, or
2316 resume threads and keep waiting for more. */
2317
2318 /* If we're collecting a fast tracepoint, finish the collection and
2319 move out of the jump pad before delivering a signal. See
2320 linux_stabilize_threads. */
2321
2322 if (WIFSTOPPED (w)
2323 && WSTOPSIG (w) != SIGTRAP
2324 && supports_fast_tracepoints ()
58b4daa5 2325 && agent_loaded_p ())
fa593d66
PA
2326 {
2327 if (debug_threads)
2328 fprintf (stderr,
2329 "Got signal %d for LWP %ld. Check if we need "
2330 "to defer or adjust it.\n",
2331 WSTOPSIG (w), lwpid_of (event_child));
2332
2333 /* Allow debugging the jump pad itself. */
2334 if (current_inferior->last_resume_kind != resume_step
2335 && maybe_move_out_of_jump_pad (event_child, &w))
2336 {
2337 enqueue_one_deferred_signal (event_child, &w);
2338
2339 if (debug_threads)
2340 fprintf (stderr,
2341 "Signal %d for LWP %ld deferred (in jump pad)\n",
2342 WSTOPSIG (w), lwpid_of (event_child));
2343
2344 linux_resume_one_lwp (event_child, 0, 0, NULL);
2345 goto retry;
2346 }
2347 }
219f2f23 2348
fa593d66
PA
2349 if (event_child->collecting_fast_tracepoint)
2350 {
2351 if (debug_threads)
2352 fprintf (stderr, "\
2353LWP %ld was trying to move out of the jump pad (%d). \
2354Check if we're already there.\n",
2355 lwpid_of (event_child),
2356 event_child->collecting_fast_tracepoint);
2357
2358 trace_event = 1;
2359
2360 event_child->collecting_fast_tracepoint
2361 = linux_fast_tracepoint_collecting (event_child, NULL);
2362
2363 if (event_child->collecting_fast_tracepoint != 1)
2364 {
2365 /* No longer need this breakpoint. */
2366 if (event_child->exit_jump_pad_bkpt != NULL)
2367 {
2368 if (debug_threads)
2369 fprintf (stderr,
2370 "No longer need exit-jump-pad bkpt; removing it."
2371 "stopping all threads momentarily.\n");
2372
2373 /* Other running threads could hit this breakpoint.
2374 We don't handle moribund locations like GDB does;
2375 instead we always pause all threads when removing
2376 breakpoints, so that any step-over or
2377 decr_pc_after_break adjustment is always taken
2378 care of while the breakpoint is still
2379 inserted. */
2380 stop_all_lwps (1, event_child);
2381 cancel_breakpoints ();
2382
2383 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2384 event_child->exit_jump_pad_bkpt = NULL;
2385
2386 unstop_all_lwps (1, event_child);
2387
2388 gdb_assert (event_child->suspended >= 0);
2389 }
2390 }
2391
2392 if (event_child->collecting_fast_tracepoint == 0)
2393 {
2394 if (debug_threads)
2395 fprintf (stderr,
2396 "fast tracepoint finished "
2397 "collecting successfully.\n");
2398
2399 /* We may have a deferred signal to report. */
2400 if (dequeue_one_deferred_signal (event_child, &w))
2401 {
2402 if (debug_threads)
2403 fprintf (stderr, "dequeued one signal.\n");
2404 }
3c11dd79 2405 else
fa593d66 2406 {
3c11dd79
PA
2407 if (debug_threads)
2408 fprintf (stderr, "no deferred signals.\n");
fa593d66
PA
2409
2410 if (stabilizing_threads)
2411 {
2412 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2413 ourstatus->value.sig = TARGET_SIGNAL_0;
2414 return ptid_of (event_child);
2415 }
2416 }
2417 }
6bf5e0ba
PA
2418 }
2419
e471f25b
PA
2420 /* Check whether GDB would be interested in this event. */
2421
2422 /* If GDB is not interested in this signal, don't stop other
2423 threads, and don't report it to GDB. Just resume the inferior
2424 right away. We do this for threading-related signals as well as
2425 any that GDB specifically requested we ignore. But never ignore
2426 SIGSTOP if we sent it ourselves, and do not ignore signals when
2427 stepping - they may require special handling to skip the signal
2428 handler. */
2429 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2430 thread library? */
2431 if (WIFSTOPPED (w)
2432 && current_inferior->last_resume_kind != resume_step
2433 && (
1a981360 2434#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
e471f25b
PA
2435 (current_process ()->private->thread_db != NULL
2436 && (WSTOPSIG (w) == __SIGRTMIN
2437 || WSTOPSIG (w) == __SIGRTMIN + 1))
2438 ||
2439#endif
2440 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2441 && !(WSTOPSIG (w) == SIGSTOP
2442 && current_inferior->last_resume_kind == resume_stop))))
2443 {
2444 siginfo_t info, *info_p;
2445
2446 if (debug_threads)
2447 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2448 WSTOPSIG (w), lwpid_of (event_child));
2449
2450 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2451 info_p = &info;
2452 else
2453 info_p = NULL;
2454 linux_resume_one_lwp (event_child, event_child->stepping,
2455 WSTOPSIG (w), info_p);
2456 goto retry;
2457 }
2458
2459 /* If GDB wanted this thread to single step, we always want to
2460 report the SIGTRAP, and let GDB handle it. Watchpoints should
2461 always be reported. So should signals we can't explain. A
2463 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2464 may not support Z0 breakpoints. If we do, we'll be able to handle
2464 GDB breakpoints on top of internal breakpoints, by handling the
2465 internal breakpoint and still reporting the event to GDB. If we
2466 don't, we're out of luck, GDB won't see the breakpoint hit. */
6bf5e0ba 2467 report_to_gdb = (!maybe_internal_trap
8336d594 2468 || current_inferior->last_resume_kind == resume_step
6bf5e0ba 2469 || event_child->stopped_by_watchpoint
493e2a69
MS
2470 || (!step_over_finished
2471 && !bp_explains_trap && !trace_event)
9f3a5c85
LM
2472 || (gdb_breakpoint_here (event_child->stop_pc)
2473 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
6bf5e0ba
PA
2474
2475 /* We found no reason GDB would want us to stop. We either hit one
2476 of our own breakpoints, or finished an internal step GDB
2477 shouldn't know about. */
2478 if (!report_to_gdb)
2479 {
2480 if (debug_threads)
2481 {
2482 if (bp_explains_trap)
2483 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2484 if (step_over_finished)
2485 fprintf (stderr, "Step-over finished.\n");
219f2f23
PA
2486 if (trace_event)
2487 fprintf (stderr, "Tracepoint event.\n");
6bf5e0ba
PA
2488 }
2489
2490 /* We're not reporting this breakpoint to GDB, so apply the
2491 decr_pc_after_break adjustment to the inferior's regcache
2492 ourselves. */
2493
2494 if (the_low_target.set_pc != NULL)
2495 {
2496 struct regcache *regcache
2497 = get_thread_regcache (get_lwp_thread (event_child), 1);
2498 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2499 }
2500
7984d532
PA
2501 /* We may have finished stepping over a breakpoint. If so,
2502 we've stopped and suspended all LWPs momentarily except the
2503 stepping one. This is where we resume them all again. We're
2504 going to keep waiting, so use proceed, which handles stepping
2505 over the next breakpoint. */
6bf5e0ba
PA
2506 if (debug_threads)
2507 fprintf (stderr, "proceeding all threads.\n");
7984d532
PA
2508
2509 if (step_over_finished)
2510 unsuspend_all_lwps (event_child);
2511
6bf5e0ba
PA
2512 proceed_all_lwps ();
2513 goto retry;
2514 }
2515
2516 if (debug_threads)
2517 {
8336d594 2518 if (current_inferior->last_resume_kind == resume_step)
6bf5e0ba
PA
2519 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2520 if (event_child->stopped_by_watchpoint)
2521 fprintf (stderr, "Stopped by watchpoint.\n");
8b07ae33
PA
2522 if (gdb_breakpoint_here (event_child->stop_pc))
2523 fprintf (stderr, "Stopped by GDB breakpoint.\n");
6bf5e0ba
PA
2524 if (debug_threads)
2525 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2526 }
2527
2528 /* Alright, we're going to report a stop. */
2529
fa593d66 2530 if (!non_stop && !stabilizing_threads)
6bf5e0ba
PA
2531 {
2532 /* In all-stop, stop all threads. */
7984d532 2533 stop_all_lwps (0, NULL);
6bf5e0ba
PA
2534
2535 /* If we're not waiting for a specific LWP, choose an event LWP
2536 from among those that have had events. Giving equal priority
2537 to all LWPs that have had events helps prevent
2538 starvation. */
2539 if (ptid_equal (ptid, minus_one_ptid))
2540 {
2541 event_child->status_pending_p = 1;
2542 event_child->status_pending = w;
2543
2544 select_event_lwp (&event_child);
2545
2546 event_child->status_pending_p = 0;
2547 w = event_child->status_pending;
2548 }
2549
2550 /* Now that we've selected our final event LWP, cancel any
2551 breakpoints in other LWPs that have hit a GDB breakpoint.
2552 See the comment in cancel_breakpoints_callback to find out
2553 why. */
2554 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
fa593d66 2555
c03e6ccc
YQ
2556 /* If we were doing a step-over, all other threads but the stepping one
2557 had been paused in start_step_over, with their suspend counts
2558 incremented. We don't want to do a full unstop/unpause, because we're
2559 in all-stop mode (so we want threads stopped), but we still need to
2560 unsuspend the other threads, to decrement their `suspended' count
2561 back. */
2562 if (step_over_finished)
2563 unsuspend_all_lwps (event_child);
2564
fa593d66
PA
2565 /* Stabilize threads (move out of jump pads). */
2566 stabilize_threads ();
6bf5e0ba
PA
2567 }
2568 else
2569 {
2570 /* If we just finished a step-over, then all threads had been
2571 momentarily paused. In all-stop, that's fine, we want
2572 threads stopped by now anyway. In non-stop, we need to
2573 re-resume threads that GDB wanted to be running. */
2574 if (step_over_finished)
7984d532 2575 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
2576 }
2577
5b1c542e 2578 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2579
8336d594
PA
2580 if (current_inferior->last_resume_kind == resume_stop
2581 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
2582 {
2583 /* A thread that has been requested to stop by GDB with vCont;t,
2584 and it stopped cleanly, so report as SIG0. The use of
2585 SIGSTOP is an implementation detail. */
2586 ourstatus->value.sig = TARGET_SIGNAL_0;
2587 }
8336d594
PA
2588 else if (current_inferior->last_resume_kind == resume_stop
2589 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
2590 {
2591 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2592 but it stopped for other reasons. */
bd99dc85
PA
2593 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2594 }
2595 else
2596 {
2597 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2598 }
2599
d50171e4
PA
2600 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2601
bd99dc85 2602 if (debug_threads)
95954743 2603 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
6bf5e0ba 2604 target_pid_to_str (ptid_of (event_child)),
bd99dc85
PA
2605 ourstatus->kind,
2606 ourstatus->value.sig);
2607
6bf5e0ba 2608 return ptid_of (event_child);
bd99dc85
PA
2609}
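
/* The option translation at the top of linux_wait_1, as a standalone
   sketch (illustrative only): __WALL waits for both clone and
   non-clone children, and TARGET_WNOHANG maps onto waitpid's WNOHANG
   polling mode.  */

static int
example_translate_wait_options (int target_options)
{
  int options = __WALL;

  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  return options;
}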
2610
2611/* Get rid of any pending event in the pipe. */
2612static void
2613async_file_flush (void)
2614{
2615 int ret;
2616 char buf;
2617
2618 do
2619 ret = read (linux_event_pipe[0], &buf, 1);
2620 while (ret >= 0 || (ret == -1 && errno == EINTR));
2621}
2622
2623/* Put something in the pipe, so the event loop wakes up. */
2624static void
2625async_file_mark (void)
2626{
2627 int ret;
2628
2629 async_file_flush ();
2630
2631 do
2632 ret = write (linux_event_pipe[1], "+", 1);
2633 while (ret == 0 || (ret == -1 && errno == EINTR));
2634
2635 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2636 be awakened anyway. */
2637}
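
/* Hypothetical sketch (not gdbserver's own setup code): the event
   pipe drained and marked above is the classic self-pipe trick, and
   the loops above only terminate promptly if both pipe ends are
   non-blocking, so that read and write fail with EAGAIN instead of
   hanging.  */

static int
example_make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;

  return 0;
}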
2638
95954743
PA
2639static ptid_t
2640linux_wait (ptid_t ptid,
2641 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 2642{
95954743 2643 ptid_t event_ptid;
bd99dc85
PA
2644
2645 if (debug_threads)
95954743 2646 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
2647
2648 /* Flush the async file first. */
2649 if (target_is_async_p ())
2650 async_file_flush ();
2651
95954743 2652 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
2653
2654 /* If at least one stop was reported, there may be more. A single
2655 SIGCHLD can signal more than one child stop. */
2656 if (target_is_async_p ()
2657 && (target_options & TARGET_WNOHANG) != 0
95954743 2658 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
2659 async_file_mark ();
2660
2661 return event_ptid;
da6d8c04
DJ
2662}
2663
c5f62d5f 2664/* Send a signal to an LWP. */
fd500816
DJ
2665
2666static int
a1928bad 2667kill_lwp (unsigned long lwpid, int signo)
fd500816 2668{
c5f62d5f
DE
2669 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2670 fails, then we are not using nptl threads and we should be using kill. */
fd500816 2671
c5f62d5f
DE
2672#ifdef __NR_tkill
2673 {
2674 static int tkill_failed;
fd500816 2675
c5f62d5f
DE
2676 if (!tkill_failed)
2677 {
2678 int ret;
2679
2680 errno = 0;
2681 ret = syscall (__NR_tkill, lwpid, signo);
2682 if (errno != ENOSYS)
2683 return ret;
2684 tkill_failed = 1;
2685 }
2686 }
fd500816
DJ
2687#endif
2688
2689 return kill (lwpid, signo);
2690}
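
/* Sketch (an assumption, not used by this file): Linux also provides
   tgkill, which additionally checks the thread-group ID, and so
   cannot deliver a signal to an LWP ID that was recycled by another
   process.  */

#ifdef __NR_tgkill
static int
example_tgkill (int tgid, int lwpid, int signo)
{
  return syscall (__NR_tgkill, tgid, lwpid, signo);
}
#endif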
2691
964e4306
PA
2692void
2693linux_stop_lwp (struct lwp_info *lwp)
2694{
2695 send_sigstop (lwp);
2696}
2697
0d62e5e8 2698static void
02fc4de7 2699send_sigstop (struct lwp_info *lwp)
0d62e5e8 2700{
bd99dc85 2701 int pid;
0d62e5e8 2702
bd99dc85
PA
2703 pid = lwpid_of (lwp);
2704
0d62e5e8
DJ
2705 /* If we already have a pending stop signal for this process, don't
2706 send another. */
54a0b537 2707 if (lwp->stop_expected)
0d62e5e8 2708 {
ae13219e 2709 if (debug_threads)
bd99dc85 2710 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2711
0d62e5e8
DJ
2712 return;
2713 }
2714
2715 if (debug_threads)
bd99dc85 2716 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2717
d50171e4 2718 lwp->stop_expected = 1;
bd99dc85 2719 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2720}
2721
7984d532
PA
2722static int
2723send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2724{
2725 struct lwp_info *lwp = (struct lwp_info *) entry;
2726
7984d532
PA
2727 /* Ignore EXCEPT. */
2728 if (lwp == except)
2729 return 0;
2730
02fc4de7 2731 if (lwp->stopped)
7984d532 2732 return 0;
02fc4de7
PA
2733
2734 send_sigstop (lwp);
7984d532
PA
2735 return 0;
2736}
2737
2738/* Increment the suspend count of an LWP, and stop it, if not stopped
2739 yet. */
2740static int
2741suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2742 void *except)
2743{
2744 struct lwp_info *lwp = (struct lwp_info *) entry;
2745
2746 /* Ignore EXCEPT. */
2747 if (lwp == except)
2748 return 0;
2749
2750 lwp->suspended++;
2751
2752 return send_sigstop_callback (entry, except);
02fc4de7
PA
2753}
2754
95954743
PA
2755static void
2756mark_lwp_dead (struct lwp_info *lwp, int wstat)
2757{
2758 /* It's dead, really. */
2759 lwp->dead = 1;
2760
2761 /* Store the exit status for later. */
2762 lwp->status_pending_p = 1;
2763 lwp->status_pending = wstat;
2764
95954743
PA
2765 /* Prevent trying to stop it. */
2766 lwp->stopped = 1;
2767
2768 /* No further stops are expected from a dead lwp. */
2769 lwp->stop_expected = 0;
2770}
2771
0d62e5e8
DJ
2772static void
2773wait_for_sigstop (struct inferior_list_entry *entry)
2774{
54a0b537 2775 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2776 struct thread_info *saved_inferior;
a1928bad 2777 int wstat;
95954743
PA
2778 ptid_t saved_tid;
2779 ptid_t ptid;
d50171e4 2780 int pid;
0d62e5e8 2781
54a0b537 2782 if (lwp->stopped)
d50171e4
PA
2783 {
2784 if (debug_threads)
2785 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2786 lwpid_of (lwp));
2787 return;
2788 }
0d62e5e8
DJ
2789
2790 saved_inferior = current_inferior;
bd99dc85
PA
2791 if (saved_inferior != NULL)
2792 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2793 else
95954743 2794 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2795
95954743 2796 ptid = lwp->head.id;
bd99dc85 2797
d50171e4
PA
2798 if (debug_threads)
2799 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2800
2801 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2802
2803 /* If we stopped with a non-SIGSTOP signal, save it for later
2804 and record the pending SIGSTOP. If the process exited, just
2805 return. */
d50171e4 2806 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2807 {
2808 if (debug_threads)
d50171e4
PA
2809 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2810 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2811
d50171e4 2812 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2813 {
2814 if (debug_threads)
d50171e4
PA
2815 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2816 lwpid_of (lwp), wstat);
2817
c35fafde
PA
2818 lwp->status_pending_p = 1;
2819 lwp->status_pending = wstat;
2820 }
0d62e5e8 2821 }
d50171e4 2822 else
95954743
PA
2823 {
2824 if (debug_threads)
d50171e4 2825 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2826
d50171e4
PA
2827 lwp = find_lwp_pid (pid_to_ptid (pid));
2828 if (lwp)
2829 {
2830 /* Leave this status pending for the next time we're able to
2831 report it. In the mean time, we'll report this lwp as
2832 dead to GDB, so GDB doesn't try to read registers and
2833 memory from it. This can only happen if this was the
2834 last thread of the process; otherwise, PID is removed
2835 from the thread tables before linux_wait_for_event
2836 returns. */
2837 mark_lwp_dead (lwp, wstat);
2838 }
95954743 2839 }
0d62e5e8 2840
bd99dc85 2841 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2842 current_inferior = saved_inferior;
2843 else
2844 {
2845 if (debug_threads)
2846 fprintf (stderr, "Previously current thread died.\n");
2847
bd99dc85
PA
2848 if (non_stop)
2849 {
2850 /* We can't change the current inferior behind GDB's back,
2851 otherwise, a subsequent command may apply to the wrong
2852 process. */
2853 current_inferior = NULL;
2854 }
2855 else
2856 {
2857 /* Set a valid thread as current. */
2858 set_desired_inferior (0);
2859 }
0d62e5e8
DJ
2860 }
2861}
2862
fa593d66
PA
2863/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2864 move it out, because we need to report the stop event to GDB. For
2865 example, if the user puts a breakpoint in the jump pad, it's
2866 because she wants to debug it. */
2867
2868static int
2869stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2870{
2871 struct lwp_info *lwp = (struct lwp_info *) entry;
2872 struct thread_info *thread = get_lwp_thread (lwp);
2873
2874 gdb_assert (lwp->suspended == 0);
2875 gdb_assert (lwp->stopped);
2876
2877 /* Allow debugging the jump pad, gdb_collect, etc.. */
2878 return (supports_fast_tracepoints ()
58b4daa5 2879 && agent_loaded_p ()
fa593d66
PA
2880 && (gdb_breakpoint_here (lwp->stop_pc)
2881 || lwp->stopped_by_watchpoint
2882 || thread->last_resume_kind == resume_step)
2883 && linux_fast_tracepoint_collecting (lwp, NULL));
2884}
2885
2886static void
2887move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2888{
2889 struct lwp_info *lwp = (struct lwp_info *) entry;
2890 struct thread_info *thread = get_lwp_thread (lwp);
2891 int *wstat;
2892
2893 gdb_assert (lwp->suspended == 0);
2894 gdb_assert (lwp->stopped);
2895
2896 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2897
2898 /* Allow debugging the jump pad, gdb_collect, etc. */
2899 if (!gdb_breakpoint_here (lwp->stop_pc)
2900 && !lwp->stopped_by_watchpoint
2901 && thread->last_resume_kind != resume_step
2902 && maybe_move_out_of_jump_pad (lwp, wstat))
2903 {
2904 if (debug_threads)
2905 fprintf (stderr,
2906 "LWP %ld needs stabilizing (in jump pad)\n",
2907 lwpid_of (lwp));
2908
2909 if (wstat)
2910 {
2911 lwp->status_pending_p = 0;
2912 enqueue_one_deferred_signal (lwp, wstat);
2913
2914 if (debug_threads)
2915 fprintf (stderr,
2916 "Signal %d for LWP %ld deferred "
2917 "(in jump pad)\n",
2918 WSTOPSIG (*wstat), lwpid_of (lwp));
2919 }
2920
2921 linux_resume_one_lwp (lwp, 0, 0, NULL);
2922 }
2923 else
2924 lwp->suspended++;
2925}
2926
2927static int
2928lwp_running (struct inferior_list_entry *entry, void *data)
2929{
2930 struct lwp_info *lwp = (struct lwp_info *) entry;
2931
2932 if (lwp->dead)
2933 return 0;
2934 if (lwp->stopped)
2935 return 0;
2936 return 1;
2937}
2938
7984d532
PA
2939/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2940 If SUSPEND, then also increase the suspend count of every LWP,
2941 except EXCEPT. */
2942
0d62e5e8 2943static void
7984d532 2944stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
2945{
2946 stopping_threads = 1;
7984d532
PA
2947
2948 if (suspend)
2949 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2950 else
2951 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 2952 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2953 stopping_threads = 0;
2954}
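
/* Usage sketch (illustrative): the two stop flavors above pair with
   different undo operations.  A plain stop is undone by simply
   resuming; a suspending stop must also decrement the suspend counts
   again, which unstop_all_lwps's UNSUSPEND argument does.  */

static void
example_stop_work_resume (struct lwp_info *except)
{
  stop_all_lwps (1, except);	/* Stop and suspend all other LWPs.  */
  /* ... do work that needs the other LWPs parked, e.g. removing a
     breakpoint ... */
  unstop_all_lwps (1, except);	/* Unsuspend and resume them.  */
}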
2955
da6d8c04
DJ
2956/* Resume execution of the inferior process.
2957 If STEP is nonzero, single-step it.
2958 If SIGNAL is nonzero, give it that signal. */
2959
ce3a066d 2960static void
2acc282a 2961linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2962 int step, int signal, siginfo_t *info)
da6d8c04 2963{
0d62e5e8 2964 struct thread_info *saved_inferior;
fa593d66 2965 int fast_tp_collecting;
0d62e5e8 2966
54a0b537 2967 if (lwp->stopped == 0)
0d62e5e8
DJ
2968 return;
2969
fa593d66
PA
2970 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2971
2972 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2973
219f2f23
PA
2974 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2975 user used the "jump" command, or "set $pc = foo"). */
2976 if (lwp->stop_pc != get_pc (lwp))
2977 {
2978 /* Collecting 'while-stepping' actions doesn't make sense
2979 anymore. */
2980 release_while_stepping_state_list (get_lwp_thread (lwp));
2981 }
2982
0d62e5e8
DJ
2983 /* If we have pending signals or status, and a new signal, enqueue the
2984 signal. Also enqueue the signal if we are waiting to reinsert a
2985 breakpoint; it will be picked up again below. */
2986 if (signal != 0
fa593d66
PA
2987 && (lwp->status_pending_p
2988 || lwp->pending_signals != NULL
2989 || lwp->bp_reinsert != 0
2990 || fast_tp_collecting))
0d62e5e8
DJ
2991 {
2992 struct pending_signals *p_sig;
bca929d3 2993 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2994 p_sig->prev = lwp->pending_signals;
0d62e5e8 2995 p_sig->signal = signal;
32ca6d61
DJ
2996 if (info == NULL)
2997 memset (&p_sig->info, 0, sizeof (siginfo_t));
2998 else
2999 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3000 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3001 }
3002
d50171e4
PA
3003 if (lwp->status_pending_p)
3004 {
3005 if (debug_threads)
3006 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3007 " has pending status\n",
3008 lwpid_of (lwp), step ? "step" : "continue", signal,
3009 lwp->stop_expected ? "expected" : "not expected");
3010 return;
3011 }
0d62e5e8
DJ
3012
3013 saved_inferior = current_inferior;
54a0b537 3014 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
3015
3016 if (debug_threads)
1b3f6016 3017 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 3018 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 3019 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3020
3021 /* This bit needs some thinking about. If we get a signal that
3022 we must report while a single-step reinsert is still pending,
3023 we often end up resuming the thread. It might be better to
3024 (ew) allow a stack of pending events; then we could be sure that
3025 the reinsert happened right away and not lose any signals.
3026
3027 Making this stack would also shrink the window in which breakpoints are
54a0b537 3028 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3029 complete correctness, so it won't solve that problem. It may be
3030 worthwhile just to solve this one, however. */
54a0b537 3031 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3032 {
3033 if (debug_threads)
d50171e4
PA
3034 fprintf (stderr, " pending reinsert at 0x%s\n",
3035 paddress (lwp->bp_reinsert));
3036
3037 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3038 {
fa593d66
PA
3039 if (fast_tp_collecting == 0)
3040 {
3041 if (step == 0)
3042 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3043 if (lwp->suspended)
3044 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3045 lwp->suspended);
3046 }
d50171e4
PA
3047
3048 step = 1;
3049 }
0d62e5e8
DJ
3050
3051 /* Postpone any pending signal. It was enqueued above. */
3052 signal = 0;
3053 }
3054
fa593d66
PA
3055 if (fast_tp_collecting == 1)
3056 {
3057 if (debug_threads)
3058 fprintf (stderr, "\
3059lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3060 lwpid_of (lwp));
3061
3062 /* Postpone any pending signal. It was enqueued above. */
3063 signal = 0;
3064 }
3065 else if (fast_tp_collecting == 2)
3066 {
3067 if (debug_threads)
3068 fprintf (stderr, "\
3069lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3070 lwpid_of (lwp));
3071
3072 if (can_hardware_single_step ())
3073 step = 1;
3074 else
3075 fatal ("moving out of jump pad single-stepping"
3076 " not implemented on this target");
3077
3078 /* Postpone any pending signal. It was enqueued above. */
3079 signal = 0;
3080 }
3081
219f2f23
PA
3082 /* If we have while-stepping actions in this thread, set it stepping.
3083 If we have a signal to deliver, it may or may not be set to
3084 SIG_IGN; we don't know. Assume so, and allow collecting
3085 while-stepping into a signal handler. A possible smart thing to
3086 do would be to set an internal breakpoint at the signal return
3087 address, continue, and carry on catching this while-stepping
3088 action only when that breakpoint is hit. A future
3089 enhancement. */
3090 if (get_lwp_thread (lwp)->while_stepping != NULL
3091 && can_hardware_single_step ())
3092 {
3093 if (debug_threads)
3094 fprintf (stderr,
3095 "lwp %ld has a while-stepping action -> forcing step.\n",
3096 lwpid_of (lwp));
3097 step = 1;
3098 }
3099
aa691b87 3100 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 3101 {
442ea881
PA
3102 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3103 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 3104 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
3105 }
3106
fa593d66
PA
3107 /* If we have pending signals, consume one unless we are trying to
3108 reinsert a breakpoint or we're trying to finish a fast tracepoint
3109 collect. */
3110 if (lwp->pending_signals != NULL
3111 && lwp->bp_reinsert == 0
3112 && fast_tp_collecting == 0)
0d62e5e8
DJ
3113 {
3114 struct pending_signals **p_sig;
3115
54a0b537 3116 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3117 while ((*p_sig)->prev != NULL)
3118 p_sig = &(*p_sig)->prev;
3119
3120 signal = (*p_sig)->signal;
32ca6d61 3121 if ((*p_sig)->info.si_signo != 0)
bd99dc85 3122 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 3123
0d62e5e8
DJ
3124 free (*p_sig);
3125 *p_sig = NULL;
3126 }
3127
aa5ca48f
DE
3128 if (the_low_target.prepare_to_resume != NULL)
3129 the_low_target.prepare_to_resume (lwp);
3130
0d62e5e8 3131 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 3132 get_lwp_thread (lwp));
da6d8c04 3133 errno = 0;
54a0b537 3134 lwp->stopped = 0;
c3adc08c 3135 lwp->stopped_by_watchpoint = 0;
54a0b537 3136 lwp->stepping = step;
14ce3065
DE
3137 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3138 /* Coerce to a uintptr_t first to avoid potential gcc warning
3139 of coercing an 8 byte integer to a 4 byte pointer. */
3140 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
3141
3142 current_inferior = saved_inferior;
da6d8c04 3143 if (errno)
3221518c
UW
3144 {
3145 /* ESRCH from ptrace either means that the thread was already
3146 running (an error) or that it is gone (a race condition). If
3147 it's gone, we will get a notification the next time we wait,
3148 so we can ignore the error. We could differentiate these
3149 two, but it's tricky without waiting; the thread still exists
3150 as a zombie, so sending it signal 0 would succeed. So just
3151 ignore ESRCH. */
3152 if (errno == ESRCH)
3153 return;
3154
3155 perror_with_name ("ptrace");
3156 }
da6d8c04
DJ
3157}
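
/* Sketch (hypothetical helper, unused): lwp->pending_signals is
   pushed LIFO through the 'prev' links, but linux_resume_one_lwp
   above consumes it FIFO by walking to the oldest entry first.  This
   traversal makes the list shape explicit.  */

static int
example_count_pending_signals (struct lwp_info *lwp)
{
  struct pending_signals *p;
  int count = 0;

  for (p = lwp->pending_signals; p != NULL; p = p->prev)
    count++;

  return count;
}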
3158
2bd7c093
PA
3159struct thread_resume_array
3160{
3161 struct thread_resume *resume;
3162 size_t n;
3163};
64386c31
DJ
3164
3165/* This function is called once per thread. We look up the thread
5544ad89
DJ
3166 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3167 resume request.
3168
3169 This algorithm is O(threads * resume elements), but resume elements
3170 is small (and will remain small at least until GDB supports thread
3171 suspension). */
2bd7c093
PA
3172static int
3173linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3174{
54a0b537 3175 struct lwp_info *lwp;
64386c31 3176 struct thread_info *thread;
5544ad89 3177 int ndx;
2bd7c093 3178 struct thread_resume_array *r;
64386c31
DJ
3179
3180 thread = (struct thread_info *) entry;
54a0b537 3181 lwp = get_thread_lwp (thread);
2bd7c093 3182 r = arg;
64386c31 3183
2bd7c093 3184 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3185 {
3186 ptid_t ptid = r->resume[ndx].thread;
3187 if (ptid_equal (ptid, minus_one_ptid)
3188 || ptid_equal (ptid, entry->id)
3189 || (ptid_is_pid (ptid)
3190 && (ptid_get_pid (ptid) == pid_of (lwp)))
3191 || (ptid_get_lwp (ptid) == -1
3192 && (ptid_get_pid (ptid) == pid_of (lwp))))
3193 {
d50171e4 3194 if (r->resume[ndx].kind == resume_stop
8336d594 3195 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3196 {
3197 if (debug_threads)
3198 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3199 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3200 ? "stopped"
3201 : "stopping",
3202 lwpid_of (lwp));
3203
3204 continue;
3205 }
3206
95954743 3207 lwp->resume = &r->resume[ndx];
8336d594 3208 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3209
3210 /* If we had a deferred signal to report, dequeue one now.
3211 This can happen if LWP gets more than one signal while
3212 trying to get out of a jump pad. */
3213 if (lwp->stopped
3214 && !lwp->status_pending_p
3215 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3216 {
3217 lwp->status_pending_p = 1;
3218
3219 if (debug_threads)
3220 fprintf (stderr,
3221 "Dequeueing deferred signal %d for LWP %ld, "
3222 "leaving status pending.\n",
3223 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3224 }
3225
95954743
PA
3226 return 0;
3227 }
3228 }
2bd7c093
PA
3229
3230 /* No resume action for this thread. */
3231 lwp->resume = NULL;
64386c31 3232
2bd7c093 3233 return 0;
5544ad89
DJ
3234}
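
/* The ptid matching rule from linux_set_resume_request above,
   isolated as a sketch (hypothetical helper): a resume request
   applies to a thread if it names all threads, that exact thread, or
   the thread's whole process (either as a bare pid, or as a pid with
   an LWP ID of -1).  */

static int
example_resume_request_matches (ptid_t request, ptid_t thread_id, int pid)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread_id)
	  || ((ptid_is_pid (request) || ptid_get_lwp (request) == -1)
	      && ptid_get_pid (request) == pid));
}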
3235
5544ad89 3236
bd99dc85
PA
3237/* Set *FLAG_P if this lwp has an interesting status pending. */
3238static int
3239resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3240{
bd99dc85 3241 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3242
bd99dc85
PA
3243 /* LWPs which will not be resumed are not interesting, because
3244 we might not wait for them next time through linux_wait. */
2bd7c093 3245 if (lwp->resume == NULL)
bd99dc85 3246 return 0;
64386c31 3247
bd99dc85 3248 if (lwp->status_pending_p)
d50171e4
PA
3249 * (int *) flag_p = 1;
3250
3251 return 0;
3252}
3253
3254/* Return 1 if this lwp that GDB wants running is stopped at an
3255 internal breakpoint that we need to step over. It assumes that any
3256 required STOP_PC adjustment has already been propagated to the
3257 inferior's regcache. */
3258
3259static int
3260need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3261{
3262 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3263 struct thread_info *thread;
d50171e4
PA
3264 struct thread_info *saved_inferior;
3265 CORE_ADDR pc;
3266
3267 /* LWPs which will not be resumed are not interesting, because we
3268 might not wait for them next time through linux_wait. */
3269
3270 if (!lwp->stopped)
3271 {
3272 if (debug_threads)
3273 fprintf (stderr,
3274 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3275 lwpid_of (lwp));
3276 return 0;
3277 }
3278
8336d594
PA
3279 thread = get_lwp_thread (lwp);
3280
3281 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3282 {
3283 if (debug_threads)
3284 fprintf (stderr,
3285 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3286 lwpid_of (lwp));
3287 return 0;
3288 }
3289
7984d532
PA
3290 gdb_assert (lwp->suspended >= 0);
3291
3292 if (lwp->suspended)
3293 {
3294 if (debug_threads)
3295 fprintf (stderr,
3296 "Need step over [LWP %ld]? Ignoring, suspended\n",
3297 lwpid_of (lwp));
3298 return 0;
3299 }
3300
d50171e4
PA
3301 if (!lwp->need_step_over)
3302 {
3303 if (debug_threads)
3304 fprintf (stderr,
3305 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3306 }
5544ad89 3307
bd99dc85 3308 if (lwp->status_pending_p)
d50171e4
PA
3309 {
3310 if (debug_threads)
3311 fprintf (stderr,
3312 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3313 lwpid_of (lwp));
3314 return 0;
3315 }
3316
3317 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3318 or we have. */
3319 pc = get_pc (lwp);
3320
3321 /* If the PC has changed since we stopped, then don't do anything,
3322 and let the breakpoint/tracepoint be hit. This happens if, for
3323 instance, GDB handled the decr_pc_after_break subtraction itself,
3324 GDB is OOL stepping this thread, or the user has issued a "jump"
3325 command, or poked the thread's registers herself. */
3326 if (pc != lwp->stop_pc)
3327 {
3328 if (debug_threads)
3329 fprintf (stderr,
3330 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3331 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3332 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3333
3334 lwp->need_step_over = 0;
3335 return 0;
3336 }
3337
3338 saved_inferior = current_inferior;
8336d594 3339 current_inferior = thread;
d50171e4 3340
8b07ae33 3341 /* We can only step over breakpoints we know about. */
fa593d66 3342 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3343 {
8b07ae33 3344 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
3345 though. If the condition is being evaluated on the target's side
3346 and it evaluates to false, step over this breakpoint as well. */
3347 if (gdb_breakpoint_here (pc)
3348 && gdb_condition_true_at_breakpoint (pc))
8b07ae33
PA
3349 {
3350 if (debug_threads)
3351 fprintf (stderr,
3352 "Need step over [LWP %ld]? yes, but found"
3353 " GDB breakpoint at 0x%s; skipping step over\n",
3354 lwpid_of (lwp), paddress (pc));
d50171e4 3355
8b07ae33
PA
3356 current_inferior = saved_inferior;
3357 return 0;
3358 }
3359 else
3360 {
3361 if (debug_threads)
3362 fprintf (stderr,
493e2a69
MS
3363 "Need step over [LWP %ld]? yes, "
3364 "found breakpoint at 0x%s\n",
8b07ae33 3365 lwpid_of (lwp), paddress (pc));
d50171e4 3366
8b07ae33
PA
3367 /* We've found an lwp that needs stepping over --- return 1 so
3368 that find_inferior stops looking. */
3369 current_inferior = saved_inferior;
3370
3371 /* If the step over is cancelled, this is set again. */
3372 lwp->need_step_over = 0;
3373 return 1;
3374 }
d50171e4
PA
3375 }
3376
3377 current_inferior = saved_inferior;
3378
3379 if (debug_threads)
3380 fprintf (stderr,
3381 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3382 lwpid_of (lwp), paddress (pc));
c6ecbae5 3383
bd99dc85 3384 return 0;
5544ad89
DJ
3385}
3386
d50171e4
PA
3387/* Start a step-over operation on LWP. When LWP is stopped at a
3388 breakpoint, to make progress, we need to move the breakpoint out
3389 of the way. If we let other threads run while we do that, they may
3390 pass by the breakpoint location and miss hitting it. To avoid
3391 that, a step-over momentarily stops all threads while LWP is
3392 single-stepped while the breakpoint is temporarily uninserted from
3393 the inferior. When the single-step finishes, we reinsert the
3394 breakpoint, and let all threads that are supposed to be running,
3395 run again.
3396
3397 On targets that don't support hardware single-step, we don't
3398 currently support full software single-stepping. Instead, we only
3399 support stepping over the thread event breakpoint, by asking the
3400 low target where to place a reinsert breakpoint. Since this
3401 routine assumes the breakpoint being stepped over is a thread event
3402 breakpoint, it usually assumes the return address of the current
3403 function is a good enough place to set the reinsert breakpoint. */
3404
3405static int
3406start_step_over (struct lwp_info *lwp)
3407{
3408 struct thread_info *saved_inferior;
3409 CORE_ADDR pc;
3410 int step;
3411
3412 if (debug_threads)
3413 fprintf (stderr,
3414 "Starting step-over on LWP %ld. Stopping all threads\n",
3415 lwpid_of (lwp));
3416
7984d532
PA
3417 stop_all_lwps (1, lwp);
3418 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3419
3420 if (debug_threads)
3421 fprintf (stderr, "Done stopping all threads for step-over.\n");
3422
3423 /* Note, we should always reach here with an already adjusted PC,
3424 either by GDB (if we're resuming due to GDB's request), or by our
3425 caller, if we just finished handling an internal breakpoint GDB
3426 shouldn't care about. */
3427 pc = get_pc (lwp);
3428
3429 saved_inferior = current_inferior;
3430 current_inferior = get_lwp_thread (lwp);
3431
3432 lwp->bp_reinsert = pc;
3433 uninsert_breakpoints_at (pc);
fa593d66 3434 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3435
3436 if (can_hardware_single_step ())
3437 {
3438 step = 1;
3439 }
3440 else
3441 {
3442 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3443 set_reinsert_breakpoint (raddr);
3444 step = 0;
3445 }
3446
3447 current_inferior = saved_inferior;
3448
3449 linux_resume_one_lwp (lwp, step, 0, NULL);
3450
3451 /* Require next event from this LWP. */
3452 step_over_bkpt = lwp->head.id;
3453 return 1;
3454}
3455
3456/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3457 start_step_over, if still there, and delete any reinsert
3458 breakpoints we've set, on non-hardware single-step targets. */
3459
3460static int
3461finish_step_over (struct lwp_info *lwp)
3462{
3463 if (lwp->bp_reinsert != 0)
3464 {
3465 if (debug_threads)
3466 fprintf (stderr, "Finished step over.\n");
3467
3468 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3469 may be no breakpoint to reinsert there by now. */
3470 reinsert_breakpoints_at (lwp->bp_reinsert);
3471 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3472
3473 lwp->bp_reinsert = 0;
3474
3475 /* Delete any software-single-step reinsert breakpoints. No
3476 longer needed. We don't have to worry about other threads
3477 hitting this trap, and later not being able to explain it,
3478 because we were stepping over a breakpoint, and we hold all
3479 threads but LWP stopped while doing that. */
3480 if (!can_hardware_single_step ())
3481 delete_reinsert_breakpoints ();
3482
3483 step_over_bkpt = null_ptid;
3484 return 1;
3485 }
3486 else
3487 return 0;
3488}
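Taken together, start_step_over and finish_step_over bracket one single-stepped instruction. A minimal sketch of the whole sequence under the simplest assumptions (hardware single-step available; the asynchronous wait that really separates the two halves, the software single-step fallback, and all error handling are elided -- in the real code the middle of this sketch is split across start_step_over, the wait machinery, and finish_step_over):

    /* Sketch only, not the actual control flow.  */
    static void
    step_over_sketch (struct lwp_info *lwp)
    {
      CORE_ADDR pc = get_pc (lwp);

      stop_all_lwps (1, lwp);              /* Freeze every other thread.  */
      uninsert_breakpoints_at (pc);        /* Take the trap out of the way.  */
      linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Single-step LWP.  */
      /* ... wait until LWP reports its step back ...  */
      reinsert_breakpoints_at (pc);        /* Put the trap back.  */
      unstop_all_lwps (0, lwp);            /* Let the world run again.  */
    }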
3489
3490/* This function is called once per thread. We check the thread's resume
3491 request, which will tell us whether to resume, step, or leave the thread
3492 stopped; and what signal, if any, it should be sent.
3493
3494 For threads which we aren't explicitly told otherwise, we preserve
3495 the stepping flag; this is used for stepping over gdbserver-placed
3496 breakpoints.
3497
3498 If pending_flags was set in any thread, we queue any needed
3499 signals, since we won't actually resume. We already have a pending
3500 event to report, so we don't need to preserve any step requests;
3501 they should be re-issued if necessary. */
3502
3503static int
3504linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3505{
3506 struct lwp_info *lwp;
3507 struct thread_info *thread;
3508 int step;
3509 int leave_all_stopped = * (int *) arg;
3510 int leave_pending;
3511
3512 thread = (struct thread_info *) entry;
3513 lwp = get_thread_lwp (thread);
3514
3515 if (lwp->resume == NULL)
3516 return 0;
3517
3518 if (lwp->resume->kind == resume_stop)
3519 {
3520 if (debug_threads)
3521 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3522
3523 if (!lwp->stopped)
3524 {
3525 if (debug_threads)
3526 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3527
3528 /* Stop the thread, and wait for the event asynchronously,
3529 through the event loop. */
3530 send_sigstop (lwp);
3531 }
3532 else
3533 {
3534 if (debug_threads)
3535 fprintf (stderr, "already stopped LWP %ld\n",
3536 lwpid_of (lwp));
3537
3538 /* The LWP may have been stopped in an internal event that
3539 was not meant to be notified back to GDB (e.g., gdbserver
3540 breakpoint), so we should be reporting a stop event in
3541 this case too. */
3542
3543 /* If the thread already has a pending SIGSTOP, this is a
3544 no-op. Otherwise, something later will presumably resume
3545 the thread and this will cause it to cancel any pending
3546 operation, due to last_resume_kind == resume_stop. If
3547 the thread already has a pending status to report, we
3548 will still report it the next time we wait - see
3549 status_pending_p_callback. */
3550
3551 /* If we already have a pending signal to report, then
3552 there's no need to queue a SIGSTOP, as this means we're
3553 midway through moving the LWP out of the jumppad, and we
3554 will report the pending signal as soon as that is
3555 finished. */
3556 if (lwp->pending_signals_to_report == NULL)
3557 send_sigstop (lwp);
3558 }
3559
3560 /* For stop requests, we're done. */
3561 lwp->resume = NULL;
3562 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3563 return 0;
3564 }
3565
3566 /* If this thread which is about to be resumed has a pending status,
3567 then don't resume any threads - we can just report the pending
3568 status. Make sure to queue any signals that would otherwise be
3569 sent. In all-stop mode, we base this decision on whether *any*
3570 thread has a pending status. If there's a thread that needs the
3571 step-over-breakpoint dance, then don't resume any other thread
3572 but that particular one. */
3573 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3574
3575 if (!leave_pending)
3576 {
3577 if (debug_threads)
3578 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3579
3580 step = (lwp->resume->kind == resume_step);
3581 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3582 }
3583 else
3584 {
3585 if (debug_threads)
3586 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3587
3588 /* If we have a new signal, enqueue the signal. */
3589 if (lwp->resume->sig != 0)
3590 {
3591 struct pending_signals *p_sig;
3592 p_sig = xmalloc (sizeof (*p_sig));
3593 p_sig->prev = lwp->pending_signals;
3594 p_sig->signal = lwp->resume->sig;
3595 memset (&p_sig->info, 0, sizeof (siginfo_t));
3596
3597 /* If this is the same signal we were previously stopped by,
3598 make sure to queue its siginfo. We can ignore the return
3599 value of ptrace; if it fails, we'll skip
3600 PTRACE_SETSIGINFO. */
3601 if (WIFSTOPPED (lwp->last_status)
3602 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3603 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3604
3605 lwp->pending_signals = p_sig;
3606 }
3607 }
3608
3609 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3610 lwp->resume = NULL;
3611 return 0;
3612}
3613
3614static void
3615linux_resume (struct thread_resume *resume_info, size_t n)
3616{
3617 struct thread_resume_array array = { resume_info, n };
3618 struct lwp_info *need_step_over = NULL;
3619 int any_pending;
3620 int leave_all_stopped;
3621
3622 find_inferior (&all_threads, linux_set_resume_request, &array);
3623
3624 /* If there is a thread which would otherwise be resumed, which has
3625 a pending status, then don't resume any threads - we can just
3626 report the pending status. Make sure to queue any signals that
3627 would otherwise be sent. In non-stop mode, we'll apply this
3628 logic to each thread individually. We consume all pending events
3629 before considering to start a step-over (in all-stop). */
3630 any_pending = 0;
3631 if (!non_stop)
3632 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3633
3634 /* If there is a thread which would otherwise be resumed, which is
3635 stopped at a breakpoint that needs stepping over, then don't
3636 resume any threads - have it step over the breakpoint with all
3637 other threads stopped, then resume all threads again. Make sure
3638 to queue any signals that would otherwise be delivered or
3639 queued. */
3640 if (!any_pending && supports_breakpoints ())
3641 need_step_over
3642 = (struct lwp_info *) find_inferior (&all_lwps,
3643 need_step_over_p, NULL);
3644
3645 leave_all_stopped = (need_step_over != NULL || any_pending);
3646
3647 if (debug_threads)
3648 {
3649 if (need_step_over != NULL)
3650 fprintf (stderr, "Not resuming all, need step over\n");
3651 else if (any_pending)
3652 fprintf (stderr,
3653 "Not resuming, all-stop and found "
3654 "an LWP with pending status\n");
3655 else
3656 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3657 }
3658
3659 /* Even if we're leaving threads stopped, queue all signals we'd
3660 otherwise deliver. */
3661 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3662
3663 if (need_step_over)
3664 start_step_over (need_step_over);
3665}
3666
3667/* This function is called once per thread. We check the thread's
3668 last resume request, which will tell us whether to resume, step, or
3669 leave the thread stopped. Any signal the client requested to be
3670 delivered has already been enqueued at this point.
3671
3672 If any thread that GDB wants running is stopped at an internal
3673 breakpoint that needs stepping over, we start a step-over operation
3674 on that particular thread, and leave all others stopped. */
3675
3676static int
3677proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3678{
3679 struct lwp_info *lwp = (struct lwp_info *) entry;
3680 struct thread_info *thread;
3681 int step;
3682
3683 if (lwp == except)
3684 return 0;
3685
3686 if (debug_threads)
3687 fprintf (stderr,
3688 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3689
3690 if (!lwp->stopped)
3691 {
3692 if (debug_threads)
3693 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3694 return 0;
3695 }
3696
3697 thread = get_lwp_thread (lwp);
3698
3699 if (thread->last_resume_kind == resume_stop
3700 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3701 {
3702 if (debug_threads)
3703 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3704 lwpid_of (lwp));
3705 return 0;
3706 }
3707
3708 if (lwp->status_pending_p)
3709 {
3710 if (debug_threads)
3711 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3712 lwpid_of (lwp));
3713 return 0;
3714 }
3715
3716 gdb_assert (lwp->suspended >= 0);
3717
3718 if (lwp->suspended)
3719 {
3720 if (debug_threads)
3721 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3722 return 0;
3723 }
3724
3725 if (thread->last_resume_kind == resume_stop
3726 && lwp->pending_signals_to_report == NULL
3727 && lwp->collecting_fast_tracepoint == 0)
3728 {
3729 /* We haven't reported this LWP as stopped yet (otherwise, the
3730 last_status.kind check above would catch it, and we wouldn't
3731 reach here). This LWP may have been momentarily paused by a
3732 stop_all_lwps call while handling, for example, another LWP's
3733 step-over. In that case, the pending expected SIGSTOP signal
3734 that was queued at vCont;t handling time will have already
3735 been consumed by wait_for_sigstop, and so we need to requeue
3736 another one here. Note that if the LWP already has a SIGSTOP
3737 pending, this is a no-op. */
3738
3739 if (debug_threads)
3740 fprintf (stderr,
3741 "Client wants LWP %ld to stop. "
3742 "Making sure it has a SIGSTOP pending\n",
3743 lwpid_of (lwp));
3744
3745 send_sigstop (lwp);
3746 }
3747
3748 step = thread->last_resume_kind == resume_step;
3749 linux_resume_one_lwp (lwp, step, 0, NULL);
3750 return 0;
3751}
3752
3753static int
3754unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3755{
3756 struct lwp_info *lwp = (struct lwp_info *) entry;
3757
3758 if (lwp == except)
3759 return 0;
3760
3761 lwp->suspended--;
3762 gdb_assert (lwp->suspended >= 0);
3763
3764 return proceed_one_lwp (entry, except);
3765}
3766
3767/* When we finish a step-over, set threads running again. If there's
3768 another thread that may need a step-over, now's the time to start
3769 it. Eventually, we'll move all threads past their breakpoints. */
3770
3771static void
3772proceed_all_lwps (void)
3773{
3774 struct lwp_info *need_step_over;
3775
3776 /* If there is a thread which would otherwise be resumed, which is
3777 stopped at a breakpoint that needs stepping over, then don't
3778 resume any threads - have it step over the breakpoint with all
3779 other threads stopped, then resume all threads again. */
3780
3781 if (supports_breakpoints ())
3782 {
3783 need_step_over
3784 = (struct lwp_info *) find_inferior (&all_lwps,
3785 need_step_over_p, NULL);
3786
3787 if (need_step_over != NULL)
3788 {
3789 if (debug_threads)
3790 fprintf (stderr, "proceed_all_lwps: found "
3791 "thread %ld needing a step-over\n",
3792 lwpid_of (need_step_over));
3793
3794 start_step_over (need_step_over);
3795 return;
3796 }
3797 }
3798
3799 if (debug_threads)
3800 fprintf (stderr, "Proceeding, no step-over needed\n");
3801
3802 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3803}
3804
3805/* Stopped LWPs that the client wanted to be running, that don't have
3806 pending statuses, are set to run again, except for EXCEPT, if not
3807 NULL. This undoes a stop_all_lwps call. */
3808
3809static void
3810unstop_all_lwps (int unsuspend, struct lwp_info *except)
3811{
3812 if (debug_threads)
3813 {
3814 if (except)
3815 fprintf (stderr,
3816 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3817
3818 fprintf (stderr,
3819 "unstopping all lwps\n");
3820 }
3821
3822 if (unsuspend)
3823 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3824 else
3825 find_inferior (&all_lwps, proceed_one_lwp, except);
3826}
3827
3828
3829#ifdef HAVE_LINUX_REGSETS
3830
3831#define use_linux_regsets 1
3832
3833static int
3834regsets_fetch_inferior_registers (struct regcache *regcache)
3835{
3836 struct regset_info *regset;
3837 int saw_general_regs = 0;
3838 int pid;
3839 struct iovec iov;
3840
3841 regset = target_regsets;
3842
3843 pid = lwpid_of (get_thread_lwp (current_inferior));
3844 while (regset->size >= 0)
3845 {
3846 void *buf, *data;
3847 int nt_type, res;
3848
3849 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3850 {
3851 regset ++;
3852 continue;
3853 }
3854
3855 buf = xmalloc (regset->size);
3856
3857 nt_type = regset->nt_type;
3858 if (nt_type)
3859 {
3860 iov.iov_base = buf;
3861 iov.iov_len = regset->size;
3862 data = (void *) &iov;
3863 }
3864 else
3865 data = buf;
3866
3867#ifndef __sparc__
3868 res = ptrace (regset->get_request, pid, nt_type, data);
3869#else
3870 res = ptrace (regset->get_request, pid, data, nt_type);
3871#endif
3872 if (res < 0)
3873 {
3874 if (errno == EIO)
3875 {
3876 /* If we get EIO on a regset, do not try it again for
3877 this process. */
3878 disabled_regsets[regset - target_regsets] = 1;
3879 free (buf);
3880 continue;
3881 }
3882 else
3883 {
3884 char s[256];
3885 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3886 pid);
3887 perror (s);
3888 }
3889 }
3890 else if (regset->type == GENERAL_REGS)
3891 saw_general_regs = 1;
3892 regset->store_function (regcache, buf);
3893 regset ++;
3894 free (buf);
3895 }
3896 if (saw_general_regs)
3897 return 0;
3898 else
3899 return 1;
3900}
3901
3902static int
3903regsets_store_inferior_registers (struct regcache *regcache)
3904{
3905 struct regset_info *regset;
3906 int saw_general_regs = 0;
3907 int pid;
3908 struct iovec iov;
3909
3910 regset = target_regsets;
3911
3912 pid = lwpid_of (get_thread_lwp (current_inferior));
3913 while (regset->size >= 0)
3914 {
3915 void *buf, *data;
3916 int nt_type, res;
3917
3918 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3919 {
3920 regset ++;
3921 continue;
3922 }
3923
3924 buf = xmalloc (regset->size);
3925
3926 /* First fill the buffer with the current register set contents,
3927 in case there are any items in the kernel's regset that are
3928 not in gdbserver's regcache. */
3929
3930 nt_type = regset->nt_type;
3931 if (nt_type)
3932 {
3933 iov.iov_base = buf;
3934 iov.iov_len = regset->size;
3935 data = (void *) &iov;
3936 }
3937 else
3938 data = buf;
3939
3940#ifndef __sparc__
3941 res = ptrace (regset->get_request, pid, nt_type, data);
3942#else
3943 res = ptrace (regset->get_request, pid, &iov, data);
3944#endif
3945
3946 if (res == 0)
3947 {
3948 /* Then overlay our cached registers on that. */
3949 regset->fill_function (regcache, buf);
3950
3951 /* Only now do we write the register set. */
3952#ifndef __sparc__
3953 res = ptrace (regset->set_request, pid, nt_type, data);
3954#else
3955 res = ptrace (regset->set_request, pid, data, nt_type);
3956#endif
3957 }
3958
3959 if (res < 0)
3960 {
3961 if (errno == EIO)
3962 {
3963 /* If we get EIO on a regset, do not try it again for
3964 this process. */
3965 disabled_regsets[regset - target_regsets] = 1;
3966 free (buf);
3967 continue;
3968 }
3969 else if (errno == ESRCH)
3970 {
3971 /* At this point, ESRCH should mean the process is
3972 already gone, in which case we simply ignore attempts
3973 to change its registers. See also the related
3974 comment in linux_resume_one_lwp. */
3975 free (buf);
3976 return 0;
3977 }
3978 else
3979 {
3980 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3981 }
3982 }
3983 else if (regset->type == GENERAL_REGS)
3984 saw_general_regs = 1;
3985 regset ++;
3986 free (buf);
3987 }
3988 if (saw_general_regs)
3989 return 0;
3990 else
3991 return 1;
3992}
3993
3994#else /* !HAVE_LINUX_REGSETS */
3995
3996#define use_linux_regsets 0
3997#define regsets_fetch_inferior_registers(regcache) 1
3998#define regsets_store_inferior_registers(regcache) 1
3999
4000#endif
4001
4002/* Return 1 if register REGNO is supported by one of the regset ptrace
4003 calls or 0 if it has to be transferred individually. */
4004
4005static int
4006linux_register_in_regsets (int regno)
4007{
4008 unsigned char mask = 1 << (regno % 8);
4009 size_t index = regno / 8;
4010
4011 return (use_linux_regsets
4012 && (the_low_target.regset_bitmap == NULL
4013 || (the_low_target.regset_bitmap[index] & mask) != 0));
4014}
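A worked example of the bitmap arithmetic above:

    /* For regno == 13: index == 13 / 8 == 1 and
       mask == 1 << (13 % 8) == 0x20, so the function tests bit 5 of
       regset_bitmap[1].  A NULL regset_bitmap means every register is
       covered by some regset.  */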
4015
4016#ifdef HAVE_LINUX_USRREGS
4017
4018int
4019register_addr (int regnum)
4020{
4021 int addr;
4022
4023 if (regnum < 0 || regnum >= the_low_target.num_regs)
4024 error ("Invalid register number %d.", regnum);
4025
4026 addr = the_low_target.regmap[regnum];
4027
4028 return addr;
4029}
4030
4031/* Fetch one register. */
4032static void
4033fetch_register (struct regcache *regcache, int regno)
4034{
4035 CORE_ADDR regaddr;
4036 int i, size;
4037 char *buf;
4038 int pid;
4039
4040 if (regno >= the_low_target.num_regs)
4041 return;
4042 if ((*the_low_target.cannot_fetch_register) (regno))
4043 return;
4044
4045 regaddr = register_addr (regno);
4046 if (regaddr == -1)
4047 return;
4048
4049 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4050 & -sizeof (PTRACE_XFER_TYPE));
4051 buf = alloca (size);
4052
4053 pid = lwpid_of (get_thread_lwp (current_inferior));
4054 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4055 {
4056 errno = 0;
4057 *(PTRACE_XFER_TYPE *) (buf + i) =
4058 ptrace (PTRACE_PEEKUSER, pid,
4059 /* Coerce to a uintptr_t first to avoid potential gcc warning
4060 of coercing an 8 byte integer to a 4 byte pointer. */
4061 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4062 regaddr += sizeof (PTRACE_XFER_TYPE);
4063 if (errno != 0)
4064 error ("reading register %d: %s", regno, strerror (errno));
4065 }
4066
4067 if (the_low_target.supply_ptrace_register)
4068 the_low_target.supply_ptrace_register (regcache, regno, buf);
4069 else
4070 supply_register (regcache, regno, buf);
4071}
4072
4073/* Store one register. */
4074static void
4075store_register (struct regcache *regcache, int regno)
4076{
4077 CORE_ADDR regaddr;
4078 int i, size;
4079 char *buf;
4080 int pid;
4081
4082 if (regno >= the_low_target.num_regs)
4083 return;
4084 if ((*the_low_target.cannot_store_register) (regno))
4085 return;
4086
4087 regaddr = register_addr (regno);
4088 if (regaddr == -1)
4089 return;
4090
4091 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4092 & -sizeof (PTRACE_XFER_TYPE));
4093 buf = alloca (size);
4094 memset (buf, 0, size);
4095
4096 if (the_low_target.collect_ptrace_register)
4097 the_low_target.collect_ptrace_register (regcache, regno, buf);
4098 else
4099 collect_register (regcache, regno, buf);
4100
4101 pid = lwpid_of (get_thread_lwp (current_inferior));
4102 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4103 {
4104 errno = 0;
4105 ptrace (PTRACE_POKEUSER, pid,
4106 /* Coerce to a uintptr_t first to avoid potential gcc warning
4107 about coercing an 8 byte integer to a 4 byte pointer. */
4108 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4109 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4110 if (errno != 0)
4111 {
4112 /* At this point, ESRCH should mean the process is
4113 already gone, in which case we simply ignore attempts
4114 to change its registers. See also the related
4115 comment in linux_resume_one_lwp. */
4116 if (errno == ESRCH)
4117 return;
4118
4119 if ((*the_low_target.cannot_store_register) (regno) == 0)
4120 error ("writing register %d: %s", regno, strerror (errno));
4121 }
4122 regaddr += sizeof (PTRACE_XFER_TYPE);
4123 }
4124}
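Both fetch_register and store_register round the transfer size up to a whole number of ptrace words before looping; a worked example:

    /* With a 10-byte register and sizeof (PTRACE_XFER_TYPE) == 4:
       size == (10 + 3) & -4 == 12, so the loop makes three
       PTRACE_PEEKUSER/PTRACE_POKEUSER transfers (i == 0, 4, 8).  */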
4125
4126/* Fetch all registers, or just one, from the child process.
4127 If REGNO is -1, do this for all registers, skipping any that are
4128 assumed to have been retrieved by regsets_fetch_inferior_registers,
4129 unless ALL is non-zero.
4130 Otherwise, REGNO specifies which register (so we can save time). */
4131static void
4132usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4133{
4134 if (regno == -1)
4135 {
4136 for (regno = 0; regno < the_low_target.num_regs; regno++)
4137 if (all || !linux_register_in_regsets (regno))
4138 fetch_register (regcache, regno);
4139 }
4140 else
4141 fetch_register (regcache, regno);
4142}
4143
4144/* Store our register values back into the inferior.
4145 If REGNO is -1, do this for all registers, skipping any that are
4146 assumed to have been saved by regsets_store_inferior_registers,
4147 unless ALL is non-zero.
4148 Otherwise, REGNO specifies which register (so we can save time). */
4149static void
4150usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4151{
4152 if (regno == -1)
4153 {
4154 for (regno = 0; regno < the_low_target.num_regs; regno++)
4155 if (all || !linux_register_in_regsets (regno))
4156 store_register (regcache, regno);
4157 }
4158 else
4159 store_register (regcache, regno);
4160}
4161
4162#else /* !HAVE_LINUX_USRREGS */
4163
4164#define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4165#define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4166
4167#endif
4168
4169
4170void
4171linux_fetch_registers (struct regcache *regcache, int regno)
4172{
4173 int use_regsets;
4174 int all = 0;
4175
4176 if (regno == -1)
4177 {
4178 all = regsets_fetch_inferior_registers (regcache);
4179 usr_fetch_inferior_registers (regcache, regno, all);
4180 }
4181 else
4182 {
4183 use_regsets = linux_register_in_regsets (regno);
4184 if (use_regsets)
4185 all = regsets_fetch_inferior_registers (regcache);
4186 if (!use_regsets || all)
4187 usr_fetch_inferior_registers (regcache, regno, 1);
4188 }
4189}
4190
4191void
4192linux_store_registers (struct regcache *regcache, int regno)
4193{
4194 int use_regsets;
4195 int all = 0;
4196
4197 if (regno == -1)
4198 {
4199 all = regsets_store_inferior_registers (regcache);
4200 usr_store_inferior_registers (regcache, regno, all);
4201 }
4202 else
4203 {
4204 use_regsets = linux_register_in_regsets (regno);
4205 if (use_regsets)
4206 all = regsets_store_inferior_registers (regcache);
4207 if (!use_regsets || all)
4208 usr_store_inferior_registers (regcache, regno, 1);
4209 }
4210}
4211
4212
4213/* Copy LEN bytes from inferior's memory starting at MEMADDR
4214 to debugger memory starting at MYADDR. */
4215
4216static int
4217linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4218{
4219 register int i;
4220 /* Round starting address down to longword boundary. */
4221 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4222 /* Round ending address up; get number of longwords that makes. */
4223 register int count
4224 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4225 / sizeof (PTRACE_XFER_TYPE);
4226 /* Allocate buffer of that many longwords. */
4227 register PTRACE_XFER_TYPE *buffer
4228 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4229 int fd;
4230 char filename[64];
4231 int pid = lwpid_of (get_thread_lwp (current_inferior));
4232
4233 /* Try using /proc. Don't bother for one word. */
4234 if (len >= 3 * sizeof (long))
4235 {
4236 /* We could keep this file open and cache it - possibly one per
4237 thread. That requires some juggling, but is even faster. */
4238 sprintf (filename, "/proc/%d/mem", pid);
4239 fd = open (filename, O_RDONLY | O_LARGEFILE);
4240 if (fd == -1)
4241 goto no_proc;
4242
4243 /* If pread64 is available, use it. It's faster if the kernel
4244 supports it (only one syscall), and it's 64-bit safe even on
4245 32-bit platforms (for instance, SPARC debugging a SPARC64
4246 application). */
4247#ifdef HAVE_PREAD64
4248 if (pread64 (fd, myaddr, len, memaddr) != len)
4249#else
4250 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4251#endif
4252 {
4253 close (fd);
4254 goto no_proc;
4255 }
4256
4257 close (fd);
4258 return 0;
4259 }
4260
4261 no_proc:
4262 /* Read all the longwords */
4263 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4264 {
4265 errno = 0;
4266 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4267 about coercing an 8 byte integer to a 4 byte pointer. */
4268 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4269 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4270 if (errno)
4271 return errno;
4272 }
4273
4274 /* Copy appropriate bytes out of the buffer. */
4275 memcpy (myaddr,
4276 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4277 len);
4278
4279 return 0;
4280}
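The word-alignment arithmetic at the top of linux_read_memory deserves a worked example (assuming an 8-byte PTRACE_XFER_TYPE):

    /* memaddr == 0x1003, len == 6:
       addr  == 0x1003 & ~7 == 0x1000
       count == ((0x1009 - 0x1000) + 7) / 8 == 2
       so two longwords covering 0x1000..0x100f are read, and the six
       requested bytes are copied out at offset 0x1003 & 7 == 3.  */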
4281
4282/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4283 memory at MEMADDR. On failure (cannot write to the inferior)
4284 returns the value of errno. */
4285
4286static int
4287linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4288{
4289 register int i;
4290 /* Round starting address down to longword boundary. */
4291 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4292 /* Round ending address up; get number of longwords that makes. */
4293 register int count
4294 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4295 / sizeof (PTRACE_XFER_TYPE);
4296
4297 /* Allocate buffer of that many longwords. */
4298 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4299 alloca (count * sizeof (PTRACE_XFER_TYPE));
4300
4301 int pid = lwpid_of (get_thread_lwp (current_inferior));
4302
4303 if (debug_threads)
4304 {
4305 /* Dump up to four bytes. */
4306 unsigned int val = * (unsigned int *) myaddr;
4307 if (len == 1)
4308 val = val & 0xff;
4309 else if (len == 2)
4310 val = val & 0xffff;
4311 else if (len == 3)
4312 val = val & 0xffffff;
4313 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4314 val, (long)memaddr);
4315 }
4316
4317 /* Fill start and end extra bytes of buffer with existing memory data. */
4318
4319 errno = 0;
4320 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4321 about coercing an 8 byte integer to a 4 byte pointer. */
4322 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4323 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4324 if (errno)
4325 return errno;
4326
4327 if (count > 1)
4328 {
4329 errno = 0;
4330 buffer[count - 1]
4331 = ptrace (PTRACE_PEEKTEXT, pid,
4332 /* Coerce to a uintptr_t first to avoid potential gcc warning
4333 about coercing an 8 byte integer to a 4 byte pointer. */
4334 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4335 * sizeof (PTRACE_XFER_TYPE)),
4336 0);
4337 if (errno)
4338 return errno;
4339 }
4340
4341 /* Copy data to be written over corresponding part of buffer. */
4342
4343 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4344 myaddr, len);
4345
4346 /* Write the entire buffer. */
4347
4348 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4349 {
4350 errno = 0;
4351 ptrace (PTRACE_POKETEXT, pid,
4352 /* Coerce to a uintptr_t first to avoid potential gcc warning
4353 about coercing an 8 byte integer to a 4 byte pointer. */
4354 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4355 (PTRACE_ARG4_TYPE) buffer[i]);
4356 if (errno)
4357 return errno;
4358 }
4359
4360 return 0;
4361}
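linux_write_memory must preserve the bytes it does not own in the first and last words, which is why it PEEKs before it POKEs; a worked example:

    /* With 8-byte words: writing 5 bytes at 0x2006 spans words
       0x2000 and 0x2008 (count == 2).  Both words are read first, so
       the six bytes below 0x2006 and the five bytes from 0x200b up
       are written back unchanged.  */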
4362
4363/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4364static int linux_supports_tracefork_flag;
4365
4366static void
4367linux_enable_event_reporting (int pid)
4368{
4369 if (!linux_supports_tracefork_flag)
4370 return;
4371
4372 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4373}
4374
4375/* Helper functions for linux_test_for_tracefork, called via clone (). */
4376
4377static int
4378linux_tracefork_grandchild (void *arg)
4379{
4380 _exit (0);
4381}
4382
4383#define STACK_SIZE 4096
4384
4385static int
4386linux_tracefork_child (void *arg)
4387{
4388 ptrace (PTRACE_TRACEME, 0, 0, 0);
4389 kill (getpid (), SIGSTOP);
4390
4391#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4392
4393 if (fork () == 0)
4394 linux_tracefork_grandchild (NULL);
4395
4396#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4397
4398#ifdef __ia64__
4399 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4400 CLONE_VM | SIGCHLD, NULL);
4401#else
4402 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4403 CLONE_VM | SIGCHLD, NULL);
4404#endif
4405
4406#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4407
4408 _exit (0);
4409}
4410
4411/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4412 sure that we can enable the option, and that it had the desired
4413 effect. */
4414
4415static void
4416linux_test_for_tracefork (void)
4417{
4418 int child_pid, ret, status;
4419 long second_pid;
4420#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4421 char *stack = xmalloc (STACK_SIZE * 4);
4422#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4423
4424 linux_supports_tracefork_flag = 0;
4425
4426#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4427
4428 child_pid = fork ();
4429 if (child_pid == 0)
4430 linux_tracefork_child (NULL);
4431
4432#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4433
4434 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4435#ifdef __ia64__
4436 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4437 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4438#else /* !__ia64__ */
4439 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4440 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4441#endif /* !__ia64__ */
4442
4443#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4444
4445 if (child_pid == -1)
4446 perror_with_name ("clone");
4447
4448 ret = my_waitpid (child_pid, &status, 0);
4449 if (ret == -1)
4450 perror_with_name ("waitpid");
4451 else if (ret != child_pid)
4452 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4453 if (! WIFSTOPPED (status))
4454 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4455
4456 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4457 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4458 if (ret != 0)
4459 {
4460 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4461 if (ret != 0)
4462 {
4463 warning ("linux_test_for_tracefork: failed to kill child");
4464 return;
4465 }
4466
4467 ret = my_waitpid (child_pid, &status, 0);
4468 if (ret != child_pid)
4469 warning ("linux_test_for_tracefork: failed to wait for killed child");
4470 else if (!WIFSIGNALED (status))
4471 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4472 "killed child", status);
4473
4474 return;
4475 }
4476
4477 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4478 if (ret != 0)
4479 warning ("linux_test_for_tracefork: failed to resume child");
4480
4481 ret = my_waitpid (child_pid, &status, 0);
4482
4483 if (ret == child_pid && WIFSTOPPED (status)
4484 && status >> 16 == PTRACE_EVENT_FORK)
4485 {
4486 second_pid = 0;
4487 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4488 if (ret == 0 && second_pid != 0)
4489 {
4490 int second_status;
4491
4492 linux_supports_tracefork_flag = 1;
4493 my_waitpid (second_pid, &second_status, 0);
4494 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4495 if (ret != 0)
4496 warning ("linux_test_for_tracefork: failed to kill second child");
4497 my_waitpid (second_pid, &status, 0);
4498 }
4499 }
4500 else
4501 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4502 "(%d, status 0x%x)", ret, status);
4503
4504 do
4505 {
4506 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4507 if (ret != 0)
4508 warning ("linux_test_for_tracefork: failed to kill child");
4509 my_waitpid (child_pid, &status, 0);
4510 }
4511 while (WIFSTOPPED (status));
4512
4513#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4514 free (stack);
4515#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4516}
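The (status >> 16) test above relies on how the kernel packs ptrace events into the waitpid status once PTRACE_O_TRACEFORK is enabled:

    /* status == (PTRACE_EVENT_FORK << 16) | (SIGTRAP << 8) | 0x7f,
       so WIFSTOPPED (status) holds, WSTOPSIG (status) == SIGTRAP,
       and status >> 16 recovers the event code.  */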
4517
4518
4519static void
4520linux_look_up_symbols (void)
4521{
4522#ifdef USE_THREAD_DB
4523 struct process_info *proc = current_process ();
4524
4525 if (proc->private->thread_db != NULL)
4526 return;
4527
4528 /* If the kernel supports tracing forks then it also supports tracing
4529 clones, and then we don't need to use the magic thread event breakpoint
4530 to learn about threads. */
4531 thread_db_init (!linux_supports_tracefork_flag);
4532#endif
4533}
4534
4535static void
4536linux_request_interrupt (void)
4537{
4538 extern unsigned long signal_pid;
4539
4540 if (!ptid_equal (cont_thread, null_ptid)
4541 && !ptid_equal (cont_thread, minus_one_ptid))
4542 {
4543 struct lwp_info *lwp;
4544 int lwpid;
4545
4546 lwp = get_thread_lwp (current_inferior);
4547 lwpid = lwpid_of (lwp);
4548 kill_lwp (lwpid, SIGINT);
4549 }
4550 else
4551 kill_lwp (signal_pid, SIGINT);
4552}
4553
4554/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4555 to debugger memory starting at MYADDR. */
4556
4557static int
4558linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4559{
4560 char filename[PATH_MAX];
4561 int fd, n;
4562 int pid = lwpid_of (get_thread_lwp (current_inferior));
4563
4564 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4565
4566 fd = open (filename, O_RDONLY);
4567 if (fd < 0)
4568 return -1;
4569
4570 if (offset != (CORE_ADDR) 0
4571 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4572 n = -1;
4573 else
4574 n = read (fd, myaddr, len);
4575
4576 close (fd);
4577
4578 return n;
4579}
4580
4581/* These breakpoint and watchpoint related wrapper functions simply
4582 pass on the function call if the target has registered a
4583 corresponding function. */
4584
4585static int
4586linux_insert_point (char type, CORE_ADDR addr, int len)
4587{
4588 if (the_low_target.insert_point != NULL)
4589 return the_low_target.insert_point (type, addr, len);
4590 else
4591 /* Unsupported (see target.h). */
4592 return 1;
4593}
4594
4595static int
4596linux_remove_point (char type, CORE_ADDR addr, int len)
4597{
4598 if (the_low_target.remove_point != NULL)
4599 return the_low_target.remove_point (type, addr, len);
4600 else
4601 /* Unsupported (see target.h). */
4602 return 1;
4603}
4604
4605static int
4606linux_stopped_by_watchpoint (void)
4607{
4608 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4609
4610 return lwp->stopped_by_watchpoint;
4611}
4612
4613static CORE_ADDR
4614linux_stopped_data_address (void)
4615{
4616 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4617
4618 return lwp->stopped_data_address;
4619}
4620
4621#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4622#if defined(__mcoldfire__)
4623/* These should really be defined in the kernel's ptrace.h header. */
4624#define PT_TEXT_ADDR 49*4
4625#define PT_DATA_ADDR 50*4
4626#define PT_TEXT_END_ADDR 51*4
4627#elif defined(BFIN)
4628#define PT_TEXT_ADDR 220
4629#define PT_TEXT_END_ADDR 224
4630#define PT_DATA_ADDR 228
4631#elif defined(__TMS320C6X__)
4632#define PT_TEXT_ADDR (0x10000*4)
4633#define PT_DATA_ADDR (0x10004*4)
4634#define PT_TEXT_END_ADDR (0x10008*4)
4635#endif
4636
4637/* Under uClinux, programs are loaded at non-zero offsets, which we need
4638 to tell gdb about. */
4639
4640static int
4641linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4642{
4643#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4644 unsigned long text, text_end, data;
4645 int pid = lwpid_of (get_thread_lwp (current_inferior));
4646
4647 errno = 0;
4648
4649 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4650 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4651 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4652
4653 if (errno == 0)
4654 {
4655 /* Both text and data offsets produced at compile-time (and so
4656 used by gdb) are relative to the beginning of the program,
4657 with the data segment immediately following the text segment.
4658 However, the actual runtime layout in memory may put the data
4659 somewhere else, so when we send gdb a data base-address, we
4660 use the real data base address and subtract the compile-time
4661 data base-address from it (which is just the length of the
4662 text segment). BSS immediately follows data in both
4663 cases. */
4664 *text_p = text;
4665 *data_p = data - (text_end - text);
4666
4667 return 1;
4668 }
4669#endif
4670 return 0;
4671}
4672#endif
4673
4674static int
4675linux_qxfer_osdata (const char *annex,
4676 unsigned char *readbuf, unsigned const char *writebuf,
4677 CORE_ADDR offset, int len)
4678{
4679 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4680}
4681
4682/* Convert a native/host siginfo object, into/from the siginfo in the
4683 layout of the inferiors' architecture. */
4684
4685static void
4686siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4687{
4688 int done = 0;
4689
4690 if (the_low_target.siginfo_fixup != NULL)
4691 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4692
4693 /* If there was no callback, or the callback didn't do anything,
4694 then just do a straight memcpy. */
4695 if (!done)
4696 {
4697 if (direction == 1)
4698 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4699 else
4700 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4701 }
4702}
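A note on the DIRECTION argument, mirroring the two call sites in linux_xfer_siginfo below:

    /* direction == 0: native (ptrace) siginfo -> inferior layout,
       used after PTRACE_GETSIGINFO; direction == 1: inferior layout
       -> native, used before PTRACE_SETSIGINFO.  */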
4703
4704static int
4705linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4706 unsigned const char *writebuf, CORE_ADDR offset, int len)
4707{
4708 int pid;
4709 struct siginfo siginfo;
4710 char inf_siginfo[sizeof (struct siginfo)];
4711
4712 if (current_inferior == NULL)
4713 return -1;
4714
4715 pid = lwpid_of (get_thread_lwp (current_inferior));
4716
4717 if (debug_threads)
4718 fprintf (stderr, "%s siginfo for lwp %d.\n",
4719 readbuf != NULL ? "Reading" : "Writing",
4720 pid);
4721
4722 if (offset >= sizeof (siginfo))
4723 return -1;
4724
4725 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4726 return -1;
4727
4728 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4729 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4730 inferior with a 64-bit GDBSERVER should look the same as debugging it
4731 with a 32-bit GDBSERVER, we need to convert it. */
4732 siginfo_fixup (&siginfo, inf_siginfo, 0);
4733
4734 if (offset + len > sizeof (siginfo))
4735 len = sizeof (siginfo) - offset;
4736
4737 if (readbuf != NULL)
4738 memcpy (readbuf, inf_siginfo + offset, len);
4739 else
4740 {
4741 memcpy (inf_siginfo + offset, writebuf, len);
4742
4743 /* Convert back to ptrace layout before flushing it out. */
4744 siginfo_fixup (&siginfo, inf_siginfo, 1);
4745
4746 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4747 return -1;
4748 }
4749
4750 return len;
4751}
4752
4753/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4754 it lets us notice when children change state; and it serves as the
4755 handler for the sigsuspend in my_waitpid. */
4756
4757static void
4758sigchld_handler (int signo)
4759{
4760 int old_errno = errno;
4761
4762 if (debug_threads)
4763 {
4764 do
4765 {
4766 /* fprintf is not async-signal-safe, so call write
4767 directly. */
4768 if (write (2, "sigchld_handler\n",
4769 sizeof ("sigchld_handler\n") - 1) < 0)
4770 break; /* just ignore */
4771 } while (0);
4772 }
4773
4774 if (target_is_async_p ())
4775 async_file_mark (); /* trigger a linux_wait */
4776
4777 errno = old_errno;
4778}
4779
4780static int
4781linux_supports_non_stop (void)
4782{
4783 return 1;
4784}
4785
4786static int
4787linux_async (int enable)
4788{
4789 int previous = (linux_event_pipe[0] != -1);
4790
4791 if (debug_threads)
4792 fprintf (stderr, "linux_async (%d), previous=%d\n",
4793 enable, previous);
4794
4795 if (previous != enable)
4796 {
4797 sigset_t mask;
4798 sigemptyset (&mask);
4799 sigaddset (&mask, SIGCHLD);
4800
4801 sigprocmask (SIG_BLOCK, &mask, NULL);
4802
4803 if (enable)
4804 {
4805 if (pipe (linux_event_pipe) == -1)
4806 fatal ("creating event pipe failed.");
4807
4808 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4809 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4810
4811 /* Register the event loop handler. */
4812 add_file_handler (linux_event_pipe[0],
4813 handle_target_event, NULL);
4814
4815 /* Always trigger a linux_wait. */
4816 async_file_mark ();
4817 }
4818 else
4819 {
4820 delete_file_handler (linux_event_pipe[0]);
4821
4822 close (linux_event_pipe[0]);
4823 close (linux_event_pipe[1]);
4824 linux_event_pipe[0] = -1;
4825 linux_event_pipe[1] = -1;
4826 }
4827
4828 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4829 }
4830
4831 return previous;
4832}
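linux_async wires SIGCHLD into the event loop with the classic self-pipe pattern; a minimal standalone sketch of the idea (hypothetical names, not the gdbserver API):

    static int event_pipe[2];   /* Both ends set O_NONBLOCK, as above.  */

    static void
    mark_event (void)           /* Async-signal-safe.  */
    {
      /* If the pipe is already full, an event is already pending and
         the write may fail; that is fine.  */
      if (write (event_pipe[1], "+", 1) < 0)
        ;
    }

    /* The event loop polls event_pipe[0] and drains it before calling
       waitpid to collect the actual child events.  */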
4833
4834static int
4835linux_start_non_stop (int nonstop)
4836{
4837 /* Register or unregister from event-loop accordingly. */
4838 linux_async (nonstop);
4839 return 0;
4840}
4841
4842static int
4843linux_supports_multi_process (void)
4844{
4845 return 1;
4846}
4847
4848static int
4849linux_supports_disable_randomization (void)
4850{
4851#ifdef HAVE_PERSONALITY
4852 return 1;
4853#else
4854 return 0;
4855#endif
4856}
4857
4858static int
4859linux_supports_agent (void)
4860{
4861 return 1;
4862}
4863
4864/* Enumerate spufs IDs for process PID. */
4865static int
4866spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4867{
4868 int pos = 0;
4869 int written = 0;
4870 char path[128];
4871 DIR *dir;
4872 struct dirent *entry;
4873
4874 sprintf (path, "/proc/%ld/fd", pid);
4875 dir = opendir (path);
4876 if (!dir)
4877 return -1;
4878
4879 rewinddir (dir);
4880 while ((entry = readdir (dir)) != NULL)
4881 {
4882 struct stat st;
4883 struct statfs stfs;
4884 int fd;
4885
4886 fd = atoi (entry->d_name);
4887 if (!fd)
4888 continue;
4889
4890 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4891 if (stat (path, &st) != 0)
4892 continue;
4893 if (!S_ISDIR (st.st_mode))
4894 continue;
4895
4896 if (statfs (path, &stfs) != 0)
4897 continue;
4898 if (stfs.f_type != SPUFS_MAGIC)
4899 continue;
4900
4901 if (pos >= offset && pos + 4 <= offset + len)
4902 {
4903 *(unsigned int *)(buf + pos - offset) = fd;
4904 written += 4;
4905 }
4906 pos += 4;
4907 }
4908
4909 closedir (dir);
4910 return written;
4911}
4912
4913/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4914 object type, using the /proc file system. */
4915static int
4916linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4917 unsigned const char *writebuf,
4918 CORE_ADDR offset, int len)
4919{
4920 long pid = lwpid_of (get_thread_lwp (current_inferior));
4921 char buf[128];
4922 int fd = 0;
4923 int ret = 0;
4924
4925 if (!writebuf && !readbuf)
4926 return -1;
4927
4928 if (!*annex)
4929 {
4930 if (!readbuf)
4931 return -1;
4932 else
4933 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4934 }
4935
4936 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4937 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4938 if (fd <= 0)
4939 return -1;
4940
4941 if (offset != 0
4942 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4943 {
4944 close (fd);
4945 return 0;
4946 }
4947
4948 if (writebuf)
4949 ret = write (fd, writebuf, (size_t) len);
4950 else
4951 ret = read (fd, readbuf, (size_t) len);
4952
4953 close (fd);
4954 return ret;
4955}
4956
4957#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4958struct target_loadseg
4959{
4960 /* Core address to which the segment is mapped. */
4961 Elf32_Addr addr;
4962 /* VMA recorded in the program header. */
4963 Elf32_Addr p_vaddr;
4964 /* Size of this segment in memory. */
4965 Elf32_Word p_memsz;
4966};
4967
4968# if defined PT_GETDSBT
4969struct target_loadmap
4970{
4971 /* Protocol version number, must be zero. */
4972 Elf32_Word version;
4973 /* Pointer to the DSBT table, its size, and the DSBT index. */
4974 unsigned *dsbt_table;
4975 unsigned dsbt_size, dsbt_index;
4976 /* Number of segments in this map. */
4977 Elf32_Word nsegs;
4978 /* The actual memory map. */
4979 struct target_loadseg segs[/*nsegs*/];
4980};
4981# define LINUX_LOADMAP PT_GETDSBT
4982# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4983# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4984# else
4985struct target_loadmap
4986{
4987 /* Protocol version number, must be zero. */
4988 Elf32_Half version;
4989 /* Number of segments in this map. */
4990 Elf32_Half nsegs;
4991 /* The actual memory map. */
4992 struct target_loadseg segs[/*nsegs*/];
4993};
4994# define LINUX_LOADMAP PTRACE_GETFDPIC
4995# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4996# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4997# endif
4998
4999static int
5000linux_read_loadmap (const char *annex, CORE_ADDR offset,
5001 unsigned char *myaddr, unsigned int len)
5002{
5003 int pid = lwpid_of (get_thread_lwp (current_inferior));
5004 int addr = -1;
5005 struct target_loadmap *data = NULL;
5006 unsigned int actual_length, copy_length;
5007
5008 if (strcmp (annex, "exec") == 0)
5009 addr = (int) LINUX_LOADMAP_EXEC;
5010 else if (strcmp (annex, "interp") == 0)
5011 addr = (int) LINUX_LOADMAP_INTERP;
5012 else
5013 return -1;
5014
5015 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5016 return -1;
5017
5018 if (data == NULL)
5019 return -1;
5020
5021 actual_length = sizeof (struct target_loadmap)
5022 + sizeof (struct target_loadseg) * data->nsegs;
5023
5024 if (offset < 0 || offset > actual_length)
5025 return -1;
5026
5027 copy_length = actual_length - offset < len ? actual_length - offset : len;
5028 memcpy (myaddr, (char *) data + offset, copy_length);
5029 return copy_length;
5030}
5031#else
5032# define linux_read_loadmap NULL
5033#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5034
5035static void
5036linux_process_qsupported (const char *query)
5037{
5038 if (the_low_target.process_qsupported != NULL)
5039 the_low_target.process_qsupported (query);
5040}
5041
5042static int
5043linux_supports_tracepoints (void)
5044{
5045 if (*the_low_target.supports_tracepoints == NULL)
5046 return 0;
5047
5048 return (*the_low_target.supports_tracepoints) ();
5049}
5050
5051static CORE_ADDR
5052linux_read_pc (struct regcache *regcache)
5053{
5054 if (the_low_target.get_pc == NULL)
5055 return 0;
5056
5057 return (*the_low_target.get_pc) (regcache);
5058}
5059
5060static void
5061linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5062{
5063 gdb_assert (the_low_target.set_pc != NULL);
5064
5065 (*the_low_target.set_pc) (regcache, pc);
5066}
5067
5068static int
5069linux_thread_stopped (struct thread_info *thread)
5070{
5071 return get_thread_lwp (thread)->stopped;
5072}
5073
5074/* This exposes stop-all-threads functionality to other modules. */
5075
5076static void
5077linux_pause_all (int freeze)
5078{
5079 stop_all_lwps (freeze, NULL);
5080}
5081
5082/* This exposes unstop-all-threads functionality to other gdbserver
5083 modules. */
5084
5085static void
5086linux_unpause_all (int unfreeze)
5087{
5088 unstop_all_lwps (unfreeze, NULL);
5089}
5090
5091static int
5092linux_prepare_to_access_memory (void)
5093{
5094 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5095 running LWP. */
5096 if (non_stop)
5097 linux_pause_all (1);
5098 return 0;
5099}
5100
5101static void
5102linux_done_accessing_memory (void)
5103{
5104 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5105 running LWP. */
5106 if (non_stop)
5107 linux_unpause_all (1);
5108}
5109
5110static int
5111linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5112 CORE_ADDR collector,
5113 CORE_ADDR lockaddr,
5114 ULONGEST orig_size,
5115 CORE_ADDR *jump_entry,
5116 CORE_ADDR *trampoline,
5117 ULONGEST *trampoline_size,
5118 unsigned char *jjump_pad_insn,
5119 ULONGEST *jjump_pad_insn_size,
5120 CORE_ADDR *adjusted_insn_addr,
5121 CORE_ADDR *adjusted_insn_addr_end,
5122 char *err)
5123{
5124 return (*the_low_target.install_fast_tracepoint_jump_pad)
5125 (tpoint, tpaddr, collector, lockaddr, orig_size,
5126 jump_entry, trampoline, trampoline_size,
5127 jjump_pad_insn, jjump_pad_insn_size,
5128 adjusted_insn_addr, adjusted_insn_addr_end,
5129 err);
5130}
5131
5132static struct emit_ops *
5133linux_emit_ops (void)
5134{
5135 if (the_low_target.emit_ops != NULL)
5136 return (*the_low_target.emit_ops) ();
5137 else
5138 return NULL;
5139}
5140
5141static int
5142linux_get_min_fast_tracepoint_insn_len (void)
5143{
5144 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5145}
5146
5147/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5148
5149static int
5150get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5151 CORE_ADDR *phdr_memaddr, int *num_phdr)
5152{
5153 char filename[PATH_MAX];
5154 int fd;
5155 const int auxv_size = is_elf64
5156 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5157 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5158
5159 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5160
5161 fd = open (filename, O_RDONLY);
5162 if (fd < 0)
5163 return 1;
5164
5165 *phdr_memaddr = 0;
5166 *num_phdr = 0;
5167 while (read (fd, buf, auxv_size) == auxv_size
5168 && (*phdr_memaddr == 0 || *num_phdr == 0))
5169 {
5170 if (is_elf64)
5171 {
5172 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5173
5174 switch (aux->a_type)
5175 {
5176 case AT_PHDR:
5177 *phdr_memaddr = aux->a_un.a_val;
5178 break;
5179 case AT_PHNUM:
5180 *num_phdr = aux->a_un.a_val;
5181 break;
5182 }
5183 }
5184 else
5185 {
5186 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5187
5188 switch (aux->a_type)
5189 {
5190 case AT_PHDR:
5191 *phdr_memaddr = aux->a_un.a_val;
5192 break;
5193 case AT_PHNUM:
5194 *num_phdr = aux->a_un.a_val;
5195 break;
5196 }
5197 }
5198 }
5199
5200 close (fd);
5201
5202 if (*phdr_memaddr == 0 || *num_phdr == 0)
5203 {
5204 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5205 "phdr_memaddr = %ld, phdr_num = %d",
5206 (long) *phdr_memaddr, *num_phdr);
5207 return 2;
5208 }
5209
5210 return 0;
5211}
5212
5213/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5214
5215static CORE_ADDR
5216get_dynamic (const int pid, const int is_elf64)
5217{
5218 CORE_ADDR phdr_memaddr, relocation;
5219 int num_phdr, i;
5220 unsigned char *phdr_buf;
5221 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5222
5223 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5224 return 0;
5225
5226 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5227 phdr_buf = alloca (num_phdr * phdr_size);
5228
5229 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5230 return 0;
5231
5232 /* Compute relocation: it is expected to be 0 for "regular" executables,
5233 non-zero for PIE ones. */
5234 relocation = -1;
5235 for (i = 0; relocation == -1 && i < num_phdr; i++)
5236 if (is_elf64)
5237 {
5238 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5239
5240 if (p->p_type == PT_PHDR)
5241 relocation = phdr_memaddr - p->p_vaddr;
5242 }
5243 else
5244 {
5245 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5246
5247 if (p->p_type == PT_PHDR)
5248 relocation = phdr_memaddr - p->p_vaddr;
5249 }
5250
5251 if (relocation == -1)
5252 {
5253 warning ("Unexpected missing PT_PHDR");
5254 return 0;
5255 }
5256
5257 for (i = 0; i < num_phdr; i++)
5258 {
5259 if (is_elf64)
5260 {
5261 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5262
5263 if (p->p_type == PT_DYNAMIC)
5264 return p->p_vaddr + relocation;
5265 }
5266 else
5267 {
5268 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5269
5270 if (p->p_type == PT_DYNAMIC)
5271 return p->p_vaddr + relocation;
5272 }
5273 }
5274
5275 return 0;
5276}
5277
5278/* Return &_r_debug in the inferior, or -1 if not present. Return value
5279 can be 0 if the inferior does not yet have the library list initialized. */
5280
5281static CORE_ADDR
5282get_r_debug (const int pid, const int is_elf64)
5283{
5284 CORE_ADDR dynamic_memaddr;
5285 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5286 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5287
5288 dynamic_memaddr = get_dynamic (pid, is_elf64);
5289 if (dynamic_memaddr == 0)
5290 return (CORE_ADDR) -1;
5291
5292 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5293 {
5294 if (is_elf64)
5295 {
5296 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5297
5298 if (dyn->d_tag == DT_DEBUG)
5299 return dyn->d_un.d_val;
5300
5301 if (dyn->d_tag == DT_NULL)
5302 break;
5303 }
5304 else
5305 {
5306 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5307
5308 if (dyn->d_tag == DT_DEBUG)
5309 return dyn->d_un.d_val;
5310
5311 if (dyn->d_tag == DT_NULL)
5312 break;
5313 }
5314
5315 dynamic_memaddr += dyn_size;
5316 }
5317
5318 return (CORE_ADDR) -1;
5319}
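For orientation, the rendezvous chain that get_dynamic and get_r_debug walk:

    /* auxv (AT_PHDR, AT_PHNUM)
         -> program headers -> PT_DYNAMIC -> _DYNAMIC[] in the inferior
         -> DT_DEBUG entry -> struct r_debug -> r_map
         -> doubly-linked struct link_map list (l_next/l_prev), one
            entry per loaded object, which linux_qxfer_libraries_svr4
            below renders as XML.  */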
5320
5321/* Read one pointer from MEMADDR in the inferior. */
5322
5323static int
5324read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5325{
5326 *ptr = 0;
5327 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5328}

struct link_map_offsets
  {
    /* Offset of r_debug.r_version.  */
    int r_version_offset;

    /* Offset of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset of l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset of l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset of l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset of l_prev field in struct link_map.  */
    int l_prev_offset;
  };
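
/* For orientation, the hard-coded offsets used below correspond to the
   SVR4/glibc layouts declared in <link.h>; a sketch of the relevant
   field subset (not the authoritative definitions):

     struct r_debug
     {
       int r_version;             -> r_version_offset
       struct link_map *r_map;    -> r_map_offset
       ...
     };

     struct link_map
     {
       ElfW(Addr) l_addr;         -> l_addr_offset
       char *l_name;              -> l_name_offset
       ElfW(Dyn) *l_ld;           -> l_ld_offset
       struct link_map *l_next;   -> l_next_offset
       struct link_map *l_prev;   -> l_prev_offset
     };

   With natural alignment these give the 4-byte strides of the 32-bit
   table and the 8-byte strides of the 64-bit table defined below.  */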

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;

  if (priv->r_debug == 0)
    priv->r_debug = get_r_debug (pid, is_elf64);

  if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
    {
      document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
    }
  else
    {
      int allocated = 1024;
      char *p;
      const int ptr_size = is_elf64 ? 8 : 4;
      CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
      int r_version, header_done = 0;

      document = xmalloc (allocated);
      strcpy (document, "<library-list-svr4 version=\"1.0\"");
      p = document + strlen (document);

      r_version = 0;
      if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                             (unsigned char *) &r_version,
                             sizeof (r_version)) != 0
          || r_version != 1)
        {
          warning ("Unexpected r_debug version %d", r_version);
          goto done;
        }

      if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                        &lm_addr, ptr_size) != 0)
        {
          warning ("Unable to read r_map from 0x%lx",
                   (long) (priv->r_debug + lmo->r_map_offset));
          goto done;
        }

      lm_prev = 0;
      while (read_one_ptr (lm_addr + lmo->l_name_offset,
                           &l_name, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_addr_offset,
                              &l_addr, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_ld_offset,
                              &l_ld, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_prev_offset,
                              &l_prev, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_next_offset,
                              &l_next, ptr_size) == 0)
        {
          unsigned char libname[PATH_MAX];

          if (lm_prev != l_prev)
            {
              warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                       (long) lm_prev, (long) l_prev);
              break;
            }

          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
          else if (lm_prev == 0)
            {
              sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
              p = p + strlen (p);
            }

          if (l_next == 0)
            break;

          lm_prev = lm_addr;
          lm_addr = l_next;
        }
    done:
      /* If no <library> element was emitted, the opening tag is still
         unterminated; close it as an empty element.  Otherwise emit
         the matching end tag.  */
      if (!header_done)
        strcpy (p, "/>");
      else
        strcpy (p, "</library-list-svr4>");
    }

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
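
/* Example of a complete reply produced above (whitespace added for
   readability; all addresses and names hypothetical):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7bda000"
                l_addr="0x7ffff7812000" l_ld="0x7ffff7bb8b40"/>
     </library-list-svr4>

   GDB fetches this document in chunks via qXfer:libraries-svr4:read,
   using OFFSET/LEN exactly as handled at the end of the function.  */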

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,  /* get_tib_address: Windows-specific hook, unsupported here.  */
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
};
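
/* Note: the initializer above is positional, so entries must stay in
   the exact order of the hook declarations in gdbserver's target.h;
   hooks this target does not implement are stubbed out with NULL.  */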

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
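
/* The SIGCHLD handler registered above lets gdbserver react promptly
   when a child stops or exits (in async mode it is used to wake the
   event loop); SA_RESTART keeps unrelated blocking system calls from
   failing with EINTR whenever it fires.  */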