/* Low level interface to ptrace, for the remote server for GDB.
- Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
+ Copyright (C) 1995-2014 Free Software Foundation, Inc.
This file is part of GDB.
#include "server.h"
#include "linux-low.h"
-#include "linux-osdata.h"
+#include "nat/linux-osdata.h"
#include "agent.h"
-#include <sys/wait.h>
+#include "nat/linux-nat.h"
+#include "nat/linux-waitpid.h"
+#include "gdb_wait.h"
#include <stdio.h>
-#include <sys/param.h>
#include <sys/ptrace.h>
-#include "linux-ptrace.h"
-#include "linux-procfs.h"
+#include "nat/linux-ptrace.h"
+#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
+#include "filestuff.h"
+#include "tracepoint.h"
+#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
then ELFMAG0 will have been defined. If it didn't get included by
#define __SIGRTMIN 32
#endif
-#ifdef __UCLIBC__
-#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
-#define HAS_NOMMU
+/* Some targets did not define these ptrace constants from the start,
+ so gdbserver defines them locally here. In the future, these may
+ be removed after they are added to asm/ptrace.h. */
+#if !(defined(PT_TEXT_ADDR) \
+ || defined(PT_DATA_ADDR) \
+ || defined(PT_TEXT_END_ADDR))
+#if defined(__mcoldfire__)
+/* These are still undefined in 3.10 kernels. */
+#define PT_TEXT_ADDR 49*4
+#define PT_DATA_ADDR 50*4
+#define PT_TEXT_END_ADDR 51*4
+/* BFIN already defines these since at least 2.6.32 kernels. */
+#elif defined(BFIN)
+#define PT_TEXT_ADDR 220
+#define PT_TEXT_END_ADDR 224
+#define PT_DATA_ADDR 228
+/* These are still undefined in 3.10 kernels. */
+#elif defined(__TMS320C6X__)
+#define PT_TEXT_ADDR (0x10000*4)
+#define PT_DATA_ADDR (0x10004*4)
+#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
-/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
- representation of the thread ID.
-
- ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
- the same as the LWP ID.
+#ifdef HAVE_LINUX_BTRACE
+# include "nat/linux-btrace.h"
+#endif
- ``all_processes'' is keyed by the "overall process ID", which
- GNU/Linux calls tgid, "thread group ID". */
+#ifndef HAVE_ELF32_AUXV_T
+/* Copied from glibc's elf.h.  */
+typedef struct
+{
+  uint32_t a_type;		/* Entry type */
+  union
+    {
+      uint32_t a_val;		/* Integer value */
+      /* We used to have pointer elements added here.  We cannot do that,
+	 though, since it does not work when using 32-bit definitions
+	 on 64-bit platforms and vice versa.  */
+    } a_un;
+} Elf32_auxv_t;
+#endif
-struct inferior_list all_lwps;
+#ifndef HAVE_ELF64_AUXV_T
+/* Copied from glibc's elf.h.  */
+typedef struct
+{
+  uint64_t a_type;		/* Entry type */
+  union
+    {
+      uint64_t a_val;		/* Integer value */
+      /* We used to have pointer elements added here.  We cannot do that,
+	 though, since it does not work when using 32-bit definitions
+	 on 64-bit platforms and vice versa.  */
+    } a_un;
+} Elf64_auxv_t;
+#endif
/* A list of all unknown processes which receive stop signals. Some
other process will presumably claim each of these as forked
return 0;
}
-/* FIXME this is a bit of a hack, and could be removed. */
-int stopping_threads;
+enum stopping_threads_kind
+ {
+ /* Not stopping threads presently. */
+ NOT_STOPPING_THREADS,
+
+ /* Stopping threads. */
+ STOPPING_THREADS,
+
+ /* Stopping and suspending threads. */
+ STOPPING_AND_SUSPENDING_THREADS
+ };
+
+/* This is set while stop_all_lwps is in effect. */
+enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
/* FIXME make into a target method? */
int using_threads = 1;
jump pads). */
static int stabilizing_threads;
-/* This flag is true iff we've just created or attached to our first
- inferior but it has not stopped yet. As soon as it does, we need
- to call the low target's arch_setup callback. Doing this only on
- the first inferior avoids reinializing the architecture on every
- inferior, and avoids messing with the register caches of the
- already running inferiors. NOTE: this assumes all inferiors under
- control of gdbserver have the same architecture. */
-static int new_inferior;
-
static void linux_resume_one_lwp (struct lwp_info *lwp,
int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
+static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
+ int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
-static void *add_lwp (ptid_t ptid);
+static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
-static void linux_enable_event_reporting (int pid);
/* True if the low target can hardware single-step. Such targets
don't need a BREAKPOINT_REINSERT_ADDR callback. */
return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
+/* True if LWP is stopped in its stepping range.  */
+
+static int
+lwp_in_step_range (struct lwp_info *lwp)
+{
+  CORE_ADDR pc = lwp->stop_pc;
+
+  /* The stepping range is half-open: [step_range_start, step_range_end).  */
+  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
+}
+
struct pending_signals
{
int signal;
struct pending_signals *prev;
};
-#define PTRACE_ARG3_TYPE void *
-#define PTRACE_ARG4_TYPE void *
-#define PTRACE_XFER_TYPE long
-
-#ifdef HAVE_LINUX_REGSETS
-static char *disabled_regsets;
-static int num_regsets;
-#endif
-
/* The read/write ends of the pipe registered as waitable file in the
event loop. */
static int linux_event_pipe[2] = { -1, -1 };
#define target_is_async_p() (linux_event_pipe[0] != -1)
static void send_sigstop (struct lwp_info *lwp);
-static void wait_for_sigstop (struct inferior_list_entry *entry);
+static void wait_for_sigstop (void);
/* Return non-zero if HEADER is a 64-bit ELF file.  */
static int
-elf_64_header_p (const Elf64_Ehdr *header)
+elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
-  return (header->e_ident[EI_MAG0] == ELFMAG0
-	  && header->e_ident[EI_MAG1] == ELFMAG1
-	  && header->e_ident[EI_MAG2] == ELFMAG2
-	  && header->e_ident[EI_MAG3] == ELFMAG3
-	  && header->e_ident[EI_CLASS] == ELFCLASS64);
+  if (header->e_ident[EI_MAG0] == ELFMAG0
+      && header->e_ident[EI_MAG1] == ELFMAG1
+      && header->e_ident[EI_MAG2] == ELFMAG2
+      && header->e_ident[EI_MAG3] == ELFMAG3)
+    {
+      *machine = header->e_machine;
+      return header->e_ident[EI_CLASS] == ELFCLASS64;
+
+    }
+  /* NOTE(review): -1 is itself non-zero, so a caller that merely tests
+     this result for truth would treat a non-ELF file as 64-bit, which
+     conflicts with the "Return non-zero if ... 64-bit" comment above.
+     Callers must compare against 0/1 explicitly -- verify call sites.  */
+  *machine = EM_NONE;
+  return -1;
}
/* Return non-zero if FILE is a 64-bit ELF file,
and -1 if the file is not accessible or doesn't exist. */
static int
-elf_64_file_p (const char *file)
+elf_64_file_p (const char *file, unsigned int *machine)
{
Elf64_Ehdr header;
int fd;
}
close (fd);
- return elf_64_header_p (&header);
+ return elf_64_header_p (&header, machine);
}
/* Accepts an integer PID; Returns true if the executable PID is
running is a 64-bit ELF file.. */
int
-linux_pid_exe_is_elf_64_file (int pid)
+linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
- char file[MAXPATHLEN];
+ char file[PATH_MAX];
sprintf (file, "/proc/%d/exe", pid);
- return elf_64_file_p (file);
+ return elf_64_file_p (file, machine);
}
static void
delete_lwp (struct lwp_info *lwp)
{
- remove_thread (get_lwp_thread (lwp));
- remove_inferior (&all_lwps, &lwp->head);
+ struct thread_info *thr = get_lwp_thread (lwp);
+
+ if (debug_threads)
+ debug_printf ("deleting %ld\n", lwpid_of (thr));
+
+ remove_thread (thr);
free (lwp->arch_private);
free (lwp);
}
{
struct process_info *proc;
- /* Is this the first process? If so, then set the arch. */
- if (all_processes.head == NULL)
- new_inferior = 1;
-
proc = add_process (pid, attached);
proc->private = xcalloc (1, sizeof (*proc->private));
+ /* Set the arch when the first LWP stops. */
+ proc->private->new_inferior = 1;
+
if (the_low_target.new_process != NULL)
proc->private->arch_private = the_low_target.new_process ();
return proc;
}
-/* Wrapper function for waitpid which handles EINTR, and emulates
- __WALL for systems where that is not available. */
-
-static int
-my_waitpid (int pid, int *status, int flags)
-{
- int ret, out_errno;
-
- if (debug_threads)
- fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
-
- if (flags & __WALL)
- {
- sigset_t block_mask, org_mask, wake_mask;
- int wnohang;
-
- wnohang = (flags & WNOHANG) != 0;
- flags &= ~(__WALL | __WCLONE);
- flags |= WNOHANG;
-
- /* Block all signals while here. This avoids knowing about
- LinuxThread's signals. */
- sigfillset (&block_mask);
- sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
-
- /* ... except during the sigsuspend below. */
- sigemptyset (&wake_mask);
-
- while (1)
- {
- /* Since all signals are blocked, there's no need to check
- for EINTR here. */
- ret = waitpid (pid, status, flags);
- out_errno = errno;
-
- if (ret == -1 && out_errno != ECHILD)
- break;
- else if (ret > 0)
- break;
-
- if (flags & __WCLONE)
- {
- /* We've tried both flavors now. If WNOHANG is set,
- there's nothing else to do, just bail out. */
- if (wnohang)
- break;
-
- if (debug_threads)
- fprintf (stderr, "blocking\n");
-
- /* Block waiting for signals. */
- sigsuspend (&wake_mask);
- }
-
- flags ^= __WCLONE;
- }
-
- sigprocmask (SIG_SETMASK, &org_mask, NULL);
- }
- else
- {
- do
- ret = waitpid (pid, status, flags);
- while (ret == -1 && errno == EINTR);
- out_errno = errno;
- }
-
- if (debug_threads)
- fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
- pid, flags, status ? *status : -1, ret);
-
- errno = out_errno;
- return ret;
-}
-
/* Handle a GNU/Linux extended wait response. If we see a clone
event, we need to add the new LWP to our list (and not report the
trap to higher layers). */
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
int event = wstat >> 16;
+ struct thread_info *event_thr = get_lwp_thread (event_child);
struct lwp_info *new_lwp;
if (event == PTRACE_EVENT_CLONE)
unsigned long new_pid;
int ret, status;
- ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
+ ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
+ &new_pid);
/* If we haven't already seen the new PID stop, wait for it now. */
if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
warning ("wait returned unexpected status 0x%x", status);
}
- linux_enable_event_reporting (new_pid);
+ if (debug_threads)
+ debug_printf ("HEW: Got clone event "
+ "from LWP %ld, new child is LWP %ld\n",
+ lwpid_of (event_thr), new_pid);
- ptid = ptid_build (pid_of (event_child), new_pid, 0);
- new_lwp = (struct lwp_info *) add_lwp (ptid);
- add_thread (ptid, new_lwp);
+ ptid = ptid_build (pid_of (event_thr), new_pid, 0);
+ new_lwp = add_lwp (ptid);
/* Either we're going to immediately resume the new thread
or leave it stopped. linux_resume_one_lwp is a nop if it
before calling linux_resume_one_lwp. */
new_lwp->stopped = 1;
+ /* If we're suspending all threads, leave this one suspended
+ too. */
+ if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
+ new_lwp->suspended = 1;
+
/* Normally we will get the pending SIGSTOP. But in some cases
we might get another signal delivered to the group first.
If we do get another signal, be sure not to lose it. */
if (WSTOPSIG (status) == SIGSTOP)
{
- if (stopping_threads)
+ if (stopping_threads != NOT_STOPPING_THREADS)
new_lwp->stop_pc = get_stop_pc (new_lwp);
else
linux_resume_one_lwp (new_lwp, 0, 0, NULL);
{
new_lwp->stop_expected = 1;
- if (stopping_threads)
+ if (stopping_threads != NOT_STOPPING_THREADS)
{
new_lwp->stop_pc = get_stop_pc (new_lwp);
new_lwp->status_pending_p = 1;
pc = (*the_low_target.get_pc) (regcache);
if (debug_threads)
- fprintf (stderr, "pc is 0x%lx\n", (long) pc);
+ debug_printf ("pc is 0x%lx\n", (long) pc);
current_inferior = saved_inferior;
return pc;
stop_pc -= the_low_target.decr_pc_after_break;
if (debug_threads)
- fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
+ debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);
return stop_pc;
}
-static void *
+static struct lwp_info *
add_lwp (ptid_t ptid)
{
struct lwp_info *lwp;
lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
memset (lwp, 0, sizeof (*lwp));
- lwp->head.id = ptid;
-
if (the_low_target.new_thread != NULL)
lwp->arch_private = the_low_target.new_thread ();
- add_inferior_to_list (&all_lwps, &lwp->head);
+ lwp->thread = add_thread (ptid, lwp);
return lwp;
}
if (pid == 0)
{
- ptrace (PTRACE_TRACEME, 0, 0, 0);
+ close_most_fds ();
+ ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
signal (__SIGRTMIN + 1, SIG_DFL);
dup2 (2, 1);
if (write (2, "stdin/stdout redirected\n",
sizeof ("stdin/stdout redirected\n") - 1) < 0)
- /* Errors ignored. */;
+ {
+ /* Errors ignored. */;
+ }
}
execv (program, allargs);
ptid = ptid_build (pid, pid, 0);
new_lwp = add_lwp (ptid);
- add_thread (ptid, new_lwp);
new_lwp->must_set_ptrace_flags = 1;
return pid;
}
-/* Attach to an inferior process. */
-
-static void
-linux_attach_lwp_1 (unsigned long lwpid, int initial)
+/* Return a human-readable string describing why attaching to the LWP
+   in PTID failed with errno value ERR.  The result points to a static
+   buffer that is freed and reallocated on the next call, so callers
+   must not free it and the function is not reentrant.  */
+char *
+linux_attach_fail_reason_string (ptid_t ptid, int err)
{
-  ptid_t ptid;
-  struct lwp_info *new_lwp;
+  static char *reason_string;
+  struct buffer buffer;
+  char *warnings;
+  long lwpid = ptid_get_lwp (ptid);
-  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
-    {
-      struct buffer buffer;
+  /* Release the string handed out by the previous call.  */
+  xfree (reason_string);
-  if (!initial)
-    {
-      /* If we fail to attach to an LWP, just warn.  */
-      fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
-	       strerror (errno), errno);
-      fflush (stderr);
-      return;
-    }
+  buffer_init (&buffer);
+  linux_ptrace_attach_fail_reason (lwpid, &buffer);
+  buffer_grow_str0 (&buffer, "");
+  warnings = buffer_finish (&buffer);
+  if (warnings[0] != '\0')
+    reason_string = xstrprintf ("%s (%d), %s",
+				strerror (err), err, warnings);
+  else
+    reason_string = xstrprintf ("%s (%d)",
+				strerror (err), err);
+  xfree (warnings);
+  return reason_string;
+}
- /* If we fail to attach to a process, report an error. */
- buffer_init (&buffer);
- linux_ptrace_attach_warnings (lwpid, &buffer);
- buffer_grow_str0 (&buffer, "");
- error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
- lwpid, strerror (errno), errno);
- }
+/* Attach to an inferior process. */
- if (initial)
- /* If lwp is the tgid, we handle adding existing threads later.
- Otherwise we just add lwp without bothering about any other
- threads. */
- ptid = ptid_build (lwpid, lwpid, 0);
- else
- {
- /* Note that extracting the pid from the current inferior is
- safe, since we're always called in the context of the same
- process as this new thread. */
- int pid = pid_of (get_thread_lwp (current_inferior));
- ptid = ptid_build (pid, lwpid, 0);
- }
+int
+linux_attach_lwp (ptid_t ptid)
+{
+ struct lwp_info *new_lwp;
+ int lwpid = ptid_get_lwp (ptid);
- new_lwp = (struct lwp_info *) add_lwp (ptid);
- add_thread (ptid, new_lwp);
+ if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
+ != 0)
+ return errno;
+
+ new_lwp = add_lwp (ptid);
/* We need to wait for SIGSTOP before being able to make the next
ptrace call on this LWP. */
if (linux_proc_pid_is_stopped (lwpid))
{
if (debug_threads)
- fprintf (stderr,
- "Attached to a stopped process\n");
+ debug_printf ("Attached to a stopped process\n");
/* The process is definitely stopped. It is in a job control
stop, unless the kernel predates the TASK_STOPPED /
/* Finally, resume the stopped process. This will deliver the
SIGSTOP (or a higher priority signal, just like normal
PTRACE_ATTACH), which we'll catch later on. */
- ptrace (PTRACE_CONT, lwpid, 0, 0);
+ ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
}
/* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
end of the list, and so the new thread has not yet reached
wait_for_sigstop (but will). */
new_lwp->stop_expected = 1;
-}
-void
-linux_attach_lwp (unsigned long lwpid)
-{
- linux_attach_lwp_1 (lwpid, 0);
+ return 0;
}
/* Attach to PID. If PID is the tgid, attach to it and all
of its threads. */
-int
+static int
linux_attach (unsigned long pid)
{
+ ptid_t ptid = ptid_build (pid, pid, 0);
+ int err;
+
/* Attach to PID. We will check for other threads
soon. */
- linux_attach_lwp_1 (pid, 1);
+ err = linux_attach_lwp (ptid);
+ if (err != 0)
+ error ("Cannot attach to process %ld: %s",
+ pid, linux_attach_fail_reason_string (ptid, err));
+
linux_add_process (pid, 1);
if (!non_stop)
{
/* At this point we attached to the tgid. Scan the task for
existing threads. */
- unsigned long lwp;
int new_threads_found;
int iterations = 0;
- struct dirent *dp;
while (iterations < 2)
{
+ struct dirent *dp;
+
new_threads_found = 0;
/* Add all the other threads. While we go through the
threads, new threads may be spawned. Cycle through
finding new threads. */
while ((dp = readdir (dir)) != NULL)
{
+ unsigned long lwp;
+ ptid_t ptid;
+
/* Fetch one lwp. */
lwp = strtoul (dp->d_name, NULL, 10);
+ ptid = ptid_build (pid, lwp, 0);
+
/* Is this a new thread? */
- if (lwp
- && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
+ if (lwp != 0 && find_thread_ptid (ptid) == NULL)
{
- linux_attach_lwp_1 (lwp, 0);
- new_threads_found++;
+ int err;
if (debug_threads)
- fprintf (stderr, "\
-Found and attached to new lwp %ld\n", lwp);
+ debug_printf ("Found new lwp %ld\n", lwp);
+
+ err = linux_attach_lwp (ptid);
+ if (err != 0)
+ warning ("Cannot attach to lwp %ld: %s",
+ lwp,
+ linux_attach_fail_reason_string (ptid, err));
+
+ new_threads_found++;
}
}
}
static int
-last_thread_of_process_p (struct thread_info *thread)
+last_thread_of_process_p (int pid)
{
- ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
- int pid = ptid_get_pid (ptid);
struct counter counter = { pid , 0 };
return (find_inferior (&all_threads,
static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
- int pid = lwpid_of (lwp);
+ struct thread_info *thr = get_lwp_thread (lwp);
+ int pid = lwpid_of (thr);
/* PTRACE_KILL is unreliable. After stepping into a signal handler,
there is no signal context, and ptrace(PTRACE_KILL) (or
errno = 0;
kill (pid, SIGKILL);
if (debug_threads)
- fprintf (stderr,
- "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
- target_pid_to_str (ptid_of (lwp)),
- errno ? strerror (errno) : "OK");
+ debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
+ target_pid_to_str (ptid_of (thr)),
+ errno ? strerror (errno) : "OK");
errno = 0;
- ptrace (PTRACE_KILL, pid, 0, 0);
+ ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
if (debug_threads)
- fprintf (stderr,
- "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
- target_pid_to_str (ptid_of (lwp)),
- errno ? strerror (errno) : "OK");
+ debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
+ target_pid_to_str (ptid_of (thr)),
+ errno ? strerror (errno) : "OK");
}
/* Callback for `find_inferior'. Kills an lwp of a given process,
the children get a chance to be reaped, it will remain a zombie
forever. */
- if (lwpid_of (lwp) == pid)
+ if (lwpid_of (thread) == pid)
{
if (debug_threads)
- fprintf (stderr, "lkop: is last of process %s\n",
- target_pid_to_str (entry->id));
+ debug_printf ("lkop: is last of process %s\n",
+ target_pid_to_str (entry->id));
return 0;
}
linux_kill_one_lwp (lwp);
/* Make sure it died. The loop is most likely unnecessary. */
- pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
+ pid = linux_wait_for_event (thread->entry.id, &wstat, __WALL);
} while (pid > 0 && WIFSTOPPED (wstat));
return 0;
if (lwp == NULL)
{
if (debug_threads)
- fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
- lwpid_of (lwp), pid);
+ debug_printf ("lk_1: cannot find lwp for pid: %d\n",
+ pid);
}
else
{
+ struct thread_info *thr = get_lwp_thread (lwp);
+
if (debug_threads)
- fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
- lwpid_of (lwp), pid);
+ debug_printf ("lk_1: killing lwp %ld, for pid: %d\n",
+ lwpid_of (thr), pid);
do
{
linux_kill_one_lwp (lwp);
/* Make sure it died. The loop is most likely unnecessary. */
- lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
+ lwpid = linux_wait_for_event (thr->entry.id, &wstat, __WALL);
} while (lwpid > 0 && WIFSTOPPED (wstat));
}
static int
get_detach_signal (struct thread_info *thread)
{
- enum target_signal signo = TARGET_SIGNAL_0;
+ enum gdb_signal signo = GDB_SIGNAL_0;
int status;
struct lwp_info *lp = get_thread_lwp (thread);
cleanly, then it'll have stopped with SIGSTOP. But we don't
want to deliver that SIGSTOP. */
if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
- || thread->last_status.value.sig == TARGET_SIGNAL_0)
+ || thread->last_status.value.sig == GDB_SIGNAL_0)
return 0;
/* Otherwise, we may need to deliver the signal we
if (!WIFSTOPPED (status))
{
if (debug_threads)
- fprintf (stderr,
- "GPS: lwp %s hasn't stopped: no pending signal\n",
- target_pid_to_str (ptid_of (lp)));
+ debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
+ target_pid_to_str (ptid_of (thread)));
return 0;
}
if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
{
if (debug_threads)
- fprintf (stderr,
- "GPS: lwp %s had stopped with extended "
- "status: no pending signal\n",
- target_pid_to_str (ptid_of (lp)));
+ debug_printf ("GPS: lwp %s had stopped with extended "
+ "status: no pending signal\n",
+ target_pid_to_str (ptid_of (thread)));
return 0;
}
- signo = target_signal_from_host (WSTOPSIG (status));
+ signo = gdb_signal_from_host (WSTOPSIG (status));
if (program_signals_p && !program_signals[signo])
{
if (debug_threads)
- fprintf (stderr,
- "GPS: lwp %s had signal %s, but it is in nopass state\n",
- target_pid_to_str (ptid_of (lp)),
- target_signal_to_string (signo));
+ debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
+ target_pid_to_str (ptid_of (thread)),
+ gdb_signal_to_string (signo));
return 0;
}
else if (!program_signals_p
/* If we have no way to know which signals GDB does not
want to have passed to the program, assume
SIGTRAP/SIGINT, which is GDB's default. */
- && (signo == TARGET_SIGNAL_TRAP || signo == TARGET_SIGNAL_INT))
+ && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
{
if (debug_threads)
- fprintf (stderr,
- "GPS: lwp %s had signal %s, "
- "but we don't know if we should pass it. Default to not.\n",
- target_pid_to_str (ptid_of (lp)),
- target_signal_to_string (signo));
+ debug_printf ("GPS: lwp %s had signal %s, "
+ "but we don't know if we should pass it. "
+ "Default to not.\n",
+ target_pid_to_str (ptid_of (thread)),
+ gdb_signal_to_string (signo));
return 0;
}
else
{
if (debug_threads)
- fprintf (stderr,
- "GPS: lwp %s has pending signal %s: delivering it.\n",
- target_pid_to_str (ptid_of (lp)),
- target_signal_to_string (signo));
+ debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
+ target_pid_to_str (ptid_of (thread)),
+ gdb_signal_to_string (signo));
return WSTOPSIG (status);
}
if (lwp->stop_expected)
{
if (debug_threads)
- fprintf (stderr,
- "Sending SIGCONT to %s\n",
- target_pid_to_str (ptid_of (lwp)));
+ debug_printf ("Sending SIGCONT to %s\n",
+ target_pid_to_str (ptid_of (thread)));
- kill_lwp (lwpid_of (lwp), SIGCONT);
+ kill_lwp (lwpid_of (thread), SIGCONT);
lwp->stop_expected = 0;
}
/* Flush any pending changes to the process's registers. */
- regcache_invalidate_one ((struct inferior_list_entry *)
- get_lwp_thread (lwp));
+ regcache_invalidate_thread (thread);
/* Pass on any pending signal for this thread. */
sig = get_detach_signal (thread);
/* Finally, let it resume. */
if (the_low_target.prepare_to_resume != NULL)
the_low_target.prepare_to_resume (lwp);
- if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, sig) < 0)
+ if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ (PTRACE_TYPE_ARG4) (long) sig) < 0)
error (_("Can't detach %s: %s"),
- target_pid_to_str (ptid_of (lwp)),
+ target_pid_to_str (ptid_of (thread)),
strerror (errno));
delete_lwp (lwp);
static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
struct process_info *process = proc;
- if (pid_of (lwp) == pid_of (process))
+ if (pid_of (thread) == pid_of (process))
delete_lwp (lwp);
return 0;
thread_db_mourn (process);
#endif
- find_inferior (&all_lwps, delete_lwp_callback, process);
+ find_inferior (&all_threads, delete_lwp_callback, process);
/* Freeing all private data. */
priv = process->private;
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
ptid_t ptid = * (ptid_t *) arg;
- struct thread_info *thread;
/* Check if we're only interested in events from a specific process
or its lwps. */
if (!ptid_equal (minus_one_ptid, ptid)
- && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
+ && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
return 0;
- thread = get_lwp_thread (lwp);
-
/* If we got a `vCont;t', but we haven't reported a stop yet, do
report any status pending the LWP may have. */
if (thread->last_resume_kind == resume_stop
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
- return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
-}
-
-static struct lwp_info *
-linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
-{
- int ret;
- int to_wait_for = -1;
- struct lwp_info *child = NULL;
-
- if (debug_threads)
- fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
+ struct inferior_list_entry *thread
+ = find_inferior (&all_threads, same_lwp, &ptid);
- if (ptid_equal (ptid, minus_one_ptid))
- to_wait_for = -1; /* any child */
- else
- to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
-
- options |= __WALL;
-
-retry:
-
- ret = my_waitpid (to_wait_for, wstatp, options);
- if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
+ if (thread == NULL)
return NULL;
- else if (ret == -1)
- perror_with_name ("waitpid");
- if (debug_threads
- && (!WIFSTOPPED (*wstatp)
- || (WSTOPSIG (*wstatp) != 32
- && WSTOPSIG (*wstatp) != 33)))
- fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
-
- child = find_lwp_pid (pid_to_ptid (ret));
-
- /* If we didn't find a process, one of two things presumably happened:
- - A process we started and then detached from has exited. Ignore it.
- - A process we are controlling has forked and the new child's stop
- was reported to us by the kernel. Save its PID. */
- if (child == NULL && WIFSTOPPED (*wstatp))
- {
- add_to_pid_list (&stopped_pids, ret, *wstatp);
- goto retry;
- }
- else if (child == NULL)
- goto retry;
+ return get_thread_lwp ((struct thread_info *) thread);
+}
- child->stopped = 1;
+/* Return the number of known LWPs in the tgid given by PID.  */
-  child->last_status = *wstatp;
+static int
+num_lwps (int pid)
+{
+  struct inferior_list_entry *inf, *tmp;
+  int count = 0;
-  /* Architecture-specific setup after inferior is running.
-     This needs to happen after we have attached to the inferior
-     and it is stopped for the first time, but before we access
-     any inferior registers.  */
-  if (new_inferior)
+  ALL_INFERIORS (&all_threads, inf, tmp)
    {
-      the_low_target.arch_setup ();
-#ifdef HAVE_LINUX_REGSETS
-      memset (disabled_regsets, 0, num_regsets);
-#endif
-      new_inferior = 0;
+      /* all_threads holds one entry per LWP (see add_lwp), so counting
+	 entries with a matching pid component counts LWPs.  */
+      if (ptid_get_pid (inf->id) == pid)
+	count++;
    }
-  /* Fetch the possibly triggered data watchpoint info and store it in
-     CHILD.
+  return count;
+}
- On some archs, like x86, that use debug registers to set
- watchpoints, it's possible that the way to know which watched
- address trapped, is to check the register that is used to select
- which address to watch. Problem is, between setting the
- watchpoint and reading back which data address trapped, the user
- may change the set of watchpoints, and, as a consequence, GDB
- changes the debug registers in the inferior. To avoid reading
- back a stale stopped-data-address when that happens, we cache in
- LP the fact that a watchpoint trapped, and the corresponding data
- address, as soon as we see CHILD stop with a SIGTRAP. If GDB
- changes the debug registers meanwhile, we have the cached data we
- can rely on. */
+/* Detect zombie thread group leaders, and "exit" them. We can't reap
+ their exits until all other threads in the group have exited. */
- if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
+static void
+check_zombie_leaders (void)
+{
+ struct process_info *proc, *tmp;
+
+ ALL_PROCESSES (proc, tmp)
{
- if (the_low_target.stopped_by_watchpoint == NULL)
- {
- child->stopped_by_watchpoint = 0;
- }
- else
- {
- struct thread_info *saved_inferior;
+ pid_t leader_pid = pid_of (proc);
+ struct lwp_info *leader_lp;
- saved_inferior = current_inferior;
- current_inferior = get_lwp_thread (child);
+ leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
- child->stopped_by_watchpoint
- = the_low_target.stopped_by_watchpoint ();
+ if (debug_threads)
+ debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
+ "num_lwps=%d, zombie=%d\n",
+ leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
+ linux_proc_pid_is_zombie (leader_pid));
+
+ if (leader_lp != NULL
+ /* Check if there are other threads in the group, as we may
+ have raced with the inferior simply exiting. */
+ && !last_thread_of_process_p (leader_pid)
+ && linux_proc_pid_is_zombie (leader_pid))
+ {
+ /* A leader zombie can mean one of two things:
+
+ - It exited, and there's an exit status pending
+ available, or only the leader exited (not the whole
+ program). In the latter case, we can't waitpid the
+ leader's exit status until all other threads are gone.
+
+ - There are 3 or more threads in the group, and a thread
+ other than the leader exec'd. On an exec, the Linux
+ kernel destroys all other threads (except the execing
+ one) in the thread group, and resets the execing thread's
+ tid to the tgid. No exit notification is sent for the
+ execing thread -- from the ptracer's perspective, it
+ appears as though the execing thread just vanishes.
+ Until we reap all other threads except the leader and the
+ execing thread, the leader will be zombie, and the
+ execing thread will be in `D (disc sleep)'. As soon as
+ all other threads are reaped, the execing thread changes
+ it's tid to the tgid, and the previous (zombie) leader
+ vanishes, giving place to the "new" leader. We could try
+ distinguishing the exit and exec cases, by waiting once
+ more, and seeing if something comes out, but it doesn't
+ sound useful. The previous leader _does_ go away, and
+ we'll re-add the new one once we see the exec event
+ (which is just the same as what would happen if the
+ previous leader did exit voluntarily before some other
+ thread execs). */
- if (child->stopped_by_watchpoint)
- {
- if (the_low_target.stopped_data_address != NULL)
- child->stopped_data_address
- = the_low_target.stopped_data_address ();
- else
- child->stopped_data_address = 0;
- }
+	  /* Use debug_printf for consistency with the other debug
+	     output converted by this change (e.g. the CZL trace above),
+	     instead of a raw fprintf to stderr.  */
+	  if (debug_threads)
+	    debug_printf ("CZL: Thread group leader %d zombie "
+			  "(it exited, or another thread execd).\n",
+			  leader_pid);
- current_inferior = saved_inferior;
+ delete_lwp (leader_lp);
}
}
+}
- /* Store the STOP_PC, with adjustment applied. This depends on the
- architecture being defined already (so that CHILD has a valid
- regcache), and on LAST_STATUS being set (to check for SIGTRAP or
- not). */
- if (WIFSTOPPED (*wstatp))
- child->stop_pc = get_stop_pc (child);
+/* Callback for `find_inferior'.  Returns the first LWP that is not
+   stopped.  ARG is a PTID filter.  */
-  if (debug_threads
-      && WIFSTOPPED (*wstatp)
-      && the_low_target.get_pc != NULL)
-    {
-      struct thread_info *saved_inferior = current_inferior;
-      struct regcache *regcache;
-      CORE_ADDR pc;
+static int
+not_stopped_callback (struct inferior_list_entry *entry, void *arg)
+{
+  struct thread_info *thr = (struct thread_info *) entry;
+  struct lwp_info *lwp;
+  ptid_t filter = *(ptid_t *) arg;
-      current_inferior = get_lwp_thread (child);
-      regcache = get_thread_regcache (current_inferior, 1);
-      pc = (*the_low_target.get_pc) (regcache);
-      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
-      current_inferior = saved_inferior;
-    }
+  /* Skip threads outside the requested PTID scope.  */
+  if (!ptid_match (ptid_of (thr), filter))
+    return 0;
-  return child;
+  lwp = get_thread_lwp (thr);
+  if (!lwp->stopped)
+    return 1;
+
+  return 0;
}
/* This function should only be called if the LWP got a SIGTRAP.
if (tpoint_related_event)
{
if (debug_threads)
- fprintf (stderr, "got a tracepoint event\n");
+ debug_printf ("got a tracepoint event\n");
return 1;
}
struct fast_tpoint_collect_status *status)
{
CORE_ADDR thread_area;
+ struct thread_info *thread = get_lwp_thread (lwp);
if (the_low_target.get_thread_area == NULL)
return 0;
thread is which when tracing with the in-process agent library.
We don't read anything from the address, and treat it as opaque;
it's the address itself that we assume is unique per-thread. */
- if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
+ if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
return 0;
return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
int r;
if (debug_threads)
- fprintf (stderr, "\
-Checking whether LWP %ld needs to move out of the jump pad.\n",
- lwpid_of (lwp));
+ debug_printf ("Checking whether LWP %ld needs to move out of the "
+ "jump pad.\n",
+ lwpid_of (current_inferior));
r = linux_fast_tracepoint_collecting (lwp, &status);
}
if (debug_threads)
- fprintf (stderr, "\
-Checking whether LWP %ld needs to move out of the jump pad...it does\n",
- lwpid_of (lwp));
+ debug_printf ("Checking whether LWP %ld needs to move out of "
+ "the jump pad...it does\n",
+ lwpid_of (current_inferior));
current_inferior = saved_inferior;
return 1;
|| WSTOPSIG (*wstat) == SIGFPE
|| WSTOPSIG (*wstat) == SIGBUS
|| WSTOPSIG (*wstat) == SIGSEGV)
- && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
+ && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
+ (PTRACE_TYPE_ARG3) 0, &info) == 0
/* Final check just to make sure we don't clobber
the siginfo of non-kernel-sent signals. */
&& (uintptr_t) info.si_addr == lwp->stop_pc)
{
info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
- ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
+ (PTRACE_TYPE_ARG3) 0, &info);
}
- regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
+ regcache = get_thread_regcache (current_inferior, 1);
(*the_low_target.set_pc) (regcache, status.tpoint_addr);
lwp->stop_pc = status.tpoint_addr;
if (lwp->exit_jump_pad_bkpt != NULL)
{
if (debug_threads)
- fprintf (stderr,
- "Cancelling fast exit-jump-pad: removing bkpt. "
- "stopping all threads momentarily.\n");
+ debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
+ "stopping all threads momentarily.\n");
stop_all_lwps (1, lwp);
cancel_breakpoints ();
}
if (debug_threads)
- fprintf (stderr, "\
-Checking whether LWP %ld needs to move out of the jump pad...no\n",
- lwpid_of (lwp));
+ debug_printf ("Checking whether LWP %ld needs to move out of the "
+ "jump pad...no\n",
+ lwpid_of (current_inferior));
current_inferior = saved_inferior;
return 0;
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
struct pending_signals *p_sig;
+ struct thread_info *thread = get_lwp_thread (lwp);
if (debug_threads)
- fprintf (stderr, "\
-Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
+ debug_printf ("Deferring signal %d for LWP %ld.\n",
+ WSTOPSIG (*wstat), lwpid_of (thread));
if (debug_threads)
{
for (sig = lwp->pending_signals_to_report;
sig != NULL;
sig = sig->prev)
- fprintf (stderr,
- " Already queued %d\n",
- sig->signal);
+ debug_printf (" Already queued %d\n",
+ sig->signal);
- fprintf (stderr, " (no more currently queued signals)\n");
+ debug_printf (" (no more currently queued signals)\n");
}
/* Don't enqueue non-RT signals if they are already in the deferred
if (sig->signal == WSTOPSIG (*wstat))
{
if (debug_threads)
- fprintf (stderr,
- "Not requeuing already queued non-RT signal %d"
- " for LWP %ld\n",
- sig->signal,
- lwpid_of (lwp));
+ debug_printf ("Not requeuing already queued non-RT signal %d"
+ " for LWP %ld\n",
+ sig->signal,
+ lwpid_of (thread));
return;
}
}
p_sig->prev = lwp->pending_signals_to_report;
p_sig->signal = WSTOPSIG (*wstat);
memset (&p_sig->info, 0, sizeof (siginfo_t));
- ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
+ ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &p_sig->info);
lwp->pending_signals_to_report = p_sig;
}
static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
+ struct thread_info *thread = get_lwp_thread (lwp);
+
if (lwp->pending_signals_to_report != NULL)
{
struct pending_signals **p_sig;
*wstat = W_STOPCODE ((*p_sig)->signal);
if ((*p_sig)->info.si_signo != 0)
- ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &(*p_sig)->info);
free (*p_sig);
*p_sig = NULL;
if (debug_threads)
- fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
- WSTOPSIG (*wstat), lwpid_of (lwp));
+ debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
+ WSTOPSIG (*wstat), lwpid_of (thread));
if (debug_threads)
{
for (sig = lwp->pending_signals_to_report;
sig != NULL;
sig = sig->prev)
- fprintf (stderr,
- " Still queued %d\n",
- sig->signal);
+ debug_printf (" Still queued %d\n",
+ sig->signal);
- fprintf (stderr, " (no more queued signals)\n");
+ debug_printf (" (no more queued signals)\n");
}
return 1;
{
struct thread_info *saved_inferior;
- /* There's nothing to do if we don't support breakpoints. */
- if (!supports_breakpoints ())
- return 0;
+ /* There's nothing to do if we don't support breakpoints. */
+ if (!supports_breakpoints ())
+ return 0;
+
+ /* breakpoint_at reads from current inferior. */
+ saved_inferior = current_inferior;
+ current_inferior = get_lwp_thread (lwp);
+
+ if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
+ {
+ if (debug_threads)
+ debug_printf ("CB: Push back breakpoint for %s\n",
+ target_pid_to_str (ptid_of (current_inferior)));
+
+ /* Back up the PC if necessary. */
+ if (the_low_target.decr_pc_after_break)
+ {
+ struct regcache *regcache
+ = get_thread_regcache (current_inferior, 1);
+ (*the_low_target.set_pc) (regcache, lwp->stop_pc);
+ }
+
+ current_inferior = saved_inferior;
+ return 1;
+ }
+ else
+ {
+ if (debug_threads)
+ debug_printf ("CB: No breakpoint found at %s for [%s]\n",
+ paddress (lwp->stop_pc),
+ target_pid_to_str (ptid_of (current_inferior)));
+ }
+
+ current_inferior = saved_inferior;
+ return 0;
+}
+
+/* Do low-level handling of the event, and check if we should go on
+ and pass it to caller code. Return the affected lwp if so, or
+ NULL otherwise. */
+
+static struct lwp_info *
+linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
+{
+ struct lwp_info *child;
+ struct thread_info *thread;
+
+ child = find_lwp_pid (pid_to_ptid (lwpid));
+
+ /* If we didn't find a process, one of two things presumably happened:
+ - A process we started and then detached from has exited. Ignore it.
+ - A process we are controlling has forked and the new child's stop
+ was reported to us by the kernel. Save its PID. */
+ if (child == NULL && WIFSTOPPED (wstat))
+ {
+ add_to_pid_list (&stopped_pids, lwpid, wstat);
+ return NULL;
+ }
+ else if (child == NULL)
+ return NULL;
+
+ thread = get_lwp_thread (child);
+
+ child->stopped = 1;
+
+ child->last_status = wstat;
+
+ if (WIFSTOPPED (wstat))
+ {
+ struct process_info *proc;
+
+ /* Architecture-specific setup after inferior is running. This
+ needs to happen after we have attached to the inferior and it
+ is stopped for the first time, but before we access any
+ inferior registers. */
+ proc = find_process_pid (pid_of (thread));
+ if (proc->private->new_inferior)
+ {
+ struct thread_info *saved_inferior;
+
+ saved_inferior = current_inferior;
+ current_inferior = thread;
+
+ the_low_target.arch_setup ();
+
+ current_inferior = saved_inferior;
+
+ proc->private->new_inferior = 0;
+ }
+ }
+
+ /* Store the STOP_PC, with adjustment applied. This depends on the
+ architecture being defined already (so that CHILD has a valid
+ regcache), and on LAST_STATUS being set (to check for SIGTRAP or
+ not). */
+ if (WIFSTOPPED (wstat))
+ {
+ if (debug_threads
+ && the_low_target.get_pc != NULL)
+ {
+ struct thread_info *saved_inferior;
+ struct regcache *regcache;
+ CORE_ADDR pc;
+
+ saved_inferior = current_inferior;
+ current_inferior = thread;
+ regcache = get_thread_regcache (current_inferior, 1);
+ pc = (*the_low_target.get_pc) (regcache);
+ debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
+ current_inferior = saved_inferior;
+ }
+
+ child->stop_pc = get_stop_pc (child);
+ }
+
+ /* Fetch the possibly triggered data watchpoint info and store it in
+ CHILD.
+
+ On some archs, like x86, that use debug registers to set
+ watchpoints, it's possible that the way to know which watched
+ address trapped, is to check the register that is used to select
+ which address to watch. Problem is, between setting the
+ watchpoint and reading back which data address trapped, the user
+ may change the set of watchpoints, and, as a consequence, GDB
+ changes the debug registers in the inferior. To avoid reading
+ back a stale stopped-data-address when that happens, we cache in
+ LP the fact that a watchpoint trapped, and the corresponding data
+ address, as soon as we see CHILD stop with a SIGTRAP. If GDB
+ changes the debug registers meanwhile, we have the cached data we
+ can rely on. */
+
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
+ {
+ if (the_low_target.stopped_by_watchpoint == NULL)
+ {
+ child->stopped_by_watchpoint = 0;
+ }
+ else
+ {
+ struct thread_info *saved_inferior;
+
+ saved_inferior = current_inferior;
+ current_inferior = thread;
+
+ child->stopped_by_watchpoint
+ = the_low_target.stopped_by_watchpoint ();
+
+ if (child->stopped_by_watchpoint)
+ {
+ if (the_low_target.stopped_data_address != NULL)
+ child->stopped_data_address
+ = the_low_target.stopped_data_address ();
+ else
+ child->stopped_data_address = 0;
+ }
+
+ current_inferior = saved_inferior;
+ }
+ }
- /* breakpoint_at reads from current inferior. */
- saved_inferior = current_inferior;
- current_inferior = get_lwp_thread (lwp);
+ if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
+ {
+ linux_enable_event_reporting (lwpid);
+ child->must_set_ptrace_flags = 0;
+ }
- if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
+ && wstat >> 16 != 0)
+ {
+ handle_extended_wait (child, wstat);
+ return NULL;
+ }
+
+ if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
+ && child->stop_expected)
{
if (debug_threads)
- fprintf (stderr,
- "CB: Push back breakpoint for %s\n",
- target_pid_to_str (ptid_of (lwp)));
+ debug_printf ("Expected stop.\n");
+ child->stop_expected = 0;
- /* Back up the PC if necessary. */
- if (the_low_target.decr_pc_after_break)
+ if (thread->last_resume_kind == resume_stop)
{
- struct regcache *regcache
- = get_thread_regcache (current_inferior, 1);
- (*the_low_target.set_pc) (regcache, lwp->stop_pc);
+ /* We want to report the stop to the core. Treat the
+ SIGSTOP as a normal event. */
+ }
+ else if (stopping_threads != NOT_STOPPING_THREADS)
+ {
+ /* Stopping threads. We don't want this SIGSTOP to end up
+ pending in the FILTER_PTID handling below. */
+ return NULL;
}
+ else
+ {
+ /* Filter out the event. */
+ linux_resume_one_lwp (child, child->stepping, 0, NULL);
+ return NULL;
+ }
+ }
- current_inferior = saved_inferior;
- return 1;
+ /* Check if the thread has exited. */
+ if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
+ && num_lwps (pid_of (thread)) > 1)
+ {
+ if (debug_threads)
+ debug_printf ("LLW: %d exited.\n", lwpid);
+
+ /* If there is at least one more LWP, then the exit signal
+ was not the end of the debugged application and should be
+ ignored. */
+ delete_lwp (child);
+ return NULL;
}
- else
+
+ if (!ptid_match (ptid_of (thread), filter_ptid))
{
if (debug_threads)
- fprintf (stderr,
- "CB: No breakpoint found at %s for [%s]\n",
- paddress (lwp->stop_pc),
- target_pid_to_str (ptid_of (lwp)));
+ debug_printf ("LWP %d got an event %06x, leaving pending.\n",
+ lwpid, wstat);
+
+ if (WIFSTOPPED (wstat))
+ {
+ child->status_pending_p = 1;
+ child->status_pending = wstat;
+
+ if (WSTOPSIG (wstat) != SIGSTOP)
+ {
+ /* Cancel breakpoint hits. The breakpoint may be
+ removed before we fetch events from this process to
+ report to the core. It is best not to assume the
+ moribund breakpoints heuristic always handles these
+ cases --- it could be too many events go through to
+ the core before this one is handled. All-stop always
+ cancels breakpoint hits in all threads. */
+ if (non_stop
+ && WSTOPSIG (wstat) == SIGTRAP
+ && cancel_breakpoint (child))
+ {
+ /* Throw away the SIGTRAP. */
+ child->status_pending_p = 0;
+
+ if (debug_threads)
+ debug_printf ("LLW: LWP %d hit a breakpoint while"
+ " waiting for another process;"
+ " cancelled it\n", lwpid);
+ }
+ }
+ }
+ else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
+ {
+ if (debug_threads)
+ debug_printf ("LLWE: process %d exited while fetching "
+ "event from another LWP\n", lwpid);
+
+ /* This was the last lwp in the process. Since events are
+ serialized to GDB core, and we can't report this one
+ right now, but GDB core and the other target layers will
+ want to be notified about the exit code/signal, leave the
+ status pending for the next time we're able to report
+ it. */
+ mark_lwp_dead (child, wstat);
+ }
+
+ return NULL;
}
- current_inferior = saved_inferior;
- return 0;
+ return child;
}
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
ptid_t step_over_bkpt;
-/* Wait for an event from child PID. If PID is -1, wait for any
- child. Store the stop status through the status pointer WSTAT.
- OPTIONS is passed to the waitpid call. Return 0 if no child stop
- event was found and OPTIONS contains WNOHANG. Return the PID of
- the stopped child otherwise. */
+/* Wait for an event from child(ren) WAIT_PTID, and return any that
+ match FILTER_PTID (leaving others pending). The PTIDs can be:
+ minus_one_ptid, to specify any child; a pid PTID, specifying all
+ lwps of a thread group; or a PTID representing a single lwp. Store
+ the stop status through the status pointer WSTAT. OPTIONS is
+ passed to the waitpid call. Return 0 if no event was found and
+ OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
+ were found. Return the PID of the stopped child otherwise. */
static int
-linux_wait_for_event (ptid_t ptid, int *wstat, int options)
+linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
+ int *wstatp, int options)
{
+ struct thread_info *event_thread;
struct lwp_info *event_child, *requested_child;
- ptid_t wait_ptid;
+ sigset_t block_mask, prev_mask;
+ retry:
+ /* N.B. event_thread points to the thread_info struct that contains
+ event_child. Keep them in sync. */
+ event_thread = NULL;
event_child = NULL;
requested_child = NULL;
/* Check for a lwp with a pending status. */
- if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
+ if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
{
- event_child = (struct lwp_info *)
- find_inferior (&all_lwps, status_pending_p_callback, &ptid);
- if (debug_threads && event_child)
- fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
+ event_thread = (struct thread_info *)
+ find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
+ if (event_thread != NULL)
+ event_child = get_thread_lwp (event_thread);
+ if (debug_threads && event_thread)
+ debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
}
- else
+ else if (!ptid_equal (filter_ptid, null_ptid))
{
- requested_child = find_lwp_pid (ptid);
+ requested_child = find_lwp_pid (filter_ptid);
- if (!stopping_threads
+ if (stopping_threads == NOT_STOPPING_THREADS
&& requested_child->status_pending_p
&& requested_child->collecting_fast_tracepoint)
{
fatal ("requesting an event out of a suspended child?");
if (requested_child->status_pending_p)
- event_child = requested_child;
+ {
+ event_child = requested_child;
+ event_thread = get_lwp_thread (event_child);
+ }
}
if (event_child != NULL)
{
if (debug_threads)
- fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
- lwpid_of (event_child), event_child->status_pending);
- *wstat = event_child->status_pending;
+ debug_printf ("Got an event from pending child %ld (%04x)\n",
+ lwpid_of (event_thread), event_child->status_pending);
+ *wstatp = event_child->status_pending;
event_child->status_pending_p = 0;
event_child->status_pending = 0;
- current_inferior = get_lwp_thread (event_child);
- return lwpid_of (event_child);
+ current_inferior = event_thread;
+ return lwpid_of (event_thread);
}
- if (ptid_is_pid (ptid))
- {
- /* A request to wait for a specific tgid. This is not possible
- with waitpid, so instead, we wait for any child, and leave
- children we're not interested in right now with a pending
- status to report later. */
- wait_ptid = minus_one_ptid;
- }
- else
- wait_ptid = ptid;
+ /* But if we don't find a pending event, we'll have to wait.
+
+ We only enter this loop if no process has a pending wait status.
+ Thus any action taken in response to a wait status inside this
+ loop is responding as soon as we detect the status, not after any
+ pending events. */
- /* We only enter this loop if no process has a pending wait status. Thus
- any action taken in response to a wait status inside this loop is
- responding as soon as we detect the status, not after any pending
- events. */
- while (1)
+ /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
+ all signals while here. */
+ sigfillset (&block_mask);
+ sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
+
+ while (event_child == NULL)
{
- event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
+ pid_t ret = 0;
- if ((options & WNOHANG) && event_child == NULL)
- {
- if (debug_threads)
- fprintf (stderr, "WNOHANG set, no event found\n");
- return 0;
- }
+ /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
+ quirks:
- if (event_child == NULL)
- error ("event from unknown child");
+ - If the thread group leader exits while other threads in the
+ thread group still exist, waitpid(TGID, ...) hangs. That
+ waitpid won't return an exit status until the other threads
+ in the group are reaped.
- if (ptid_is_pid (ptid)
- && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
- {
- if (! WIFSTOPPED (*wstat))
- mark_lwp_dead (event_child, *wstat);
- else
- {
- event_child->status_pending_p = 1;
- event_child->status_pending = *wstat;
- }
- continue;
- }
+ - When a non-leader thread execs, that thread just vanishes
+ without reporting an exit (so we'd hang if we waited for it
+ explicitly in that case). The exec event is reported to
+ the TGID pid (although we don't currently enable exec
+ events). */
+ errno = 0;
+ ret = my_waitpid (-1, wstatp, options | WNOHANG);
- current_inferior = get_lwp_thread (event_child);
+ if (debug_threads)
+ debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
+ ret, errno ? strerror (errno) : "ERRNO-OK");
- /* Check for thread exit. */
- if (! WIFSTOPPED (*wstat))
+ if (ret > 0)
{
if (debug_threads)
- fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
-
- /* If the last thread is exiting, just return. */
- if (last_thread_of_process_p (current_inferior))
- {
- if (debug_threads)
- fprintf (stderr, "LWP %ld is last lwp of process\n",
- lwpid_of (event_child));
- return lwpid_of (event_child);
- }
-
- if (!non_stop)
- {
- current_inferior = (struct thread_info *) all_threads.head;
- if (debug_threads)
- fprintf (stderr, "Current inferior is now %ld\n",
- lwpid_of (get_thread_lwp (current_inferior)));
- }
- else
{
- current_inferior = NULL;
- if (debug_threads)
- fprintf (stderr, "Current inferior is now <NULL>\n");
+ debug_printf ("LLW: waitpid %ld received %s\n",
+ (long) ret, status_to_str (*wstatp));
}
- /* If we were waiting for this particular child to do something...
- well, it did something. */
- if (requested_child != NULL)
+ event_child = linux_low_filter_event (filter_ptid,
+ ret, *wstatp);
+ if (event_child != NULL)
{
- int lwpid = lwpid_of (event_child);
-
- /* Cancel the step-over operation --- the thread that
- started it is gone. */
- if (finish_step_over (event_child))
- unstop_all_lwps (1, event_child);
- delete_lwp (event_child);
- return lwpid;
+ /* We got an event to report to the core. */
+ event_thread = get_lwp_thread (event_child);
+ break;
}
- delete_lwp (event_child);
-
- /* Wait for a more interesting event. */
+ /* Retry until nothing comes out of waitpid. A single
+ SIGCHLD can indicate more than one child stopped. */
continue;
}
- if (event_child->must_set_ptrace_flags)
+ /* Check for zombie thread group leaders. Those can't be reaped
+ until all other threads in the thread group are. */
+ check_zombie_leaders ();
+
+ /* If there are no resumed children left in the set of LWPs we
+ want to wait for, bail. We can't just block in
+ waitpid/sigsuspend, because lwps might have been left stopped
+ in trace-stop state, and we'd be stuck forever waiting for
+ their status to change (which would only happen if we resumed
+ them). Even if WNOHANG is set, this return code is preferred
+ over 0 (below), as it is more detailed. */
+ if ((find_inferior (&all_threads,
+ not_stopped_callback,
+ &wait_ptid) == NULL))
{
- linux_enable_event_reporting (lwpid_of (event_child));
- event_child->must_set_ptrace_flags = 0;
+ if (debug_threads)
+ debug_printf ("LLW: exit (no unwaited-for LWP)\n");
+ sigprocmask (SIG_SETMASK, &prev_mask, NULL);
+ return -1;
}
- if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
- && *wstat >> 16 != 0)
+ /* No interesting event to report to the caller. */
+ if ((options & WNOHANG))
{
- handle_extended_wait (event_child, *wstat);
- continue;
+ if (debug_threads)
+ debug_printf ("WNOHANG set, no event found\n");
+
+ sigprocmask (SIG_SETMASK, &prev_mask, NULL);
+ return 0;
}
- if (WIFSTOPPED (*wstat)
- && WSTOPSIG (*wstat) == SIGSTOP
- && event_child->stop_expected)
- {
- int should_stop;
+ /* Block until we get an event reported with SIGCHLD. */
+ if (debug_threads)
+ debug_printf ("sigsuspend'ing\n");
- if (debug_threads)
- fprintf (stderr, "Expected stop.\n");
- event_child->stop_expected = 0;
+ sigsuspend (&prev_mask);
+ sigprocmask (SIG_SETMASK, &prev_mask, NULL);
+ goto retry;
+ }
- should_stop = (current_inferior->last_resume_kind == resume_stop
- || stopping_threads);
+ sigprocmask (SIG_SETMASK, &prev_mask, NULL);
- if (!should_stop)
- {
- linux_resume_one_lwp (event_child,
- event_child->stepping, 0, NULL);
- continue;
- }
- }
+ current_inferior = event_thread;
+
+ /* Check for thread exit. */
+ if (! WIFSTOPPED (*wstatp))
+ {
+ gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
- return lwpid_of (event_child);
+ if (debug_threads)
+ debug_printf ("LWP %d is the last lwp of process. "
+ "Process %ld exiting.\n",
+ pid_of (event_thread), lwpid_of (event_thread));
+ return lwpid_of (event_thread);
}
- /* NOTREACHED */
- return 0;
+ return lwpid_of (event_thread);
+}
+
+/* Wait for an event from child(ren) PTID. PTIDs can be:
+ minus_one_ptid, to specify any child; a pid PTID, specifying all
+ lwps of a thread group; or a PTID representing a single lwp. Store
+ the stop status through the status pointer WSTAT. OPTIONS is
+ passed to the waitpid call. Return 0 if no event was found and
+ OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
+ were found. Return the PID of the stopped child otherwise. */
+
+static int
+linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
+{
+ return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Count the LWP's that have had events. */
static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lp = get_thread_lwp (thread);
int *count = data;
gdb_assert (count != NULL);
static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lp = get_thread_lwp (thread);
if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
&& thread->last_resume_kind == resume_step
static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lp = get_thread_lwp (thread);
int *selector = data;
gdb_assert (selector != NULL);
static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lp = get_thread_lwp (thread);
struct lwp_info *event_lp = data;
/* Leave the LWP that has been elected to receive a SIGTRAP alone. */
static void
linux_cancel_breakpoints (void)
{
- find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
+ find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
}
/* Select one LWP out of those that have events pending. */
{
int num_events = 0;
int random_selector;
- struct lwp_info *event_lp;
+ struct thread_info *event_thread;
/* Give preference to any LWP that is being single-stepped. */
- event_lp
- = (struct lwp_info *) find_inferior (&all_lwps,
- select_singlestep_lwp_callback, NULL);
- if (event_lp != NULL)
+ event_thread
+ = (struct thread_info *) find_inferior (&all_threads,
+ select_singlestep_lwp_callback,
+ NULL);
+ if (event_thread != NULL)
{
if (debug_threads)
- fprintf (stderr,
- "SEL: Select single-step %s\n",
- target_pid_to_str (ptid_of (event_lp)));
+ debug_printf ("SEL: Select single-step %s\n",
+ target_pid_to_str (ptid_of (event_thread)));
}
else
{
which have had SIGTRAP events. */
/* First see how many SIGTRAP events we have. */
- find_inferior (&all_lwps, count_events_callback, &num_events);
+ find_inferior (&all_threads, count_events_callback, &num_events);
/* Now randomly pick a LWP out of those that have had a SIGTRAP. */
random_selector = (int)
((num_events * (double) rand ()) / (RAND_MAX + 1.0));
if (debug_threads && num_events > 1)
- fprintf (stderr,
- "SEL: Found %d SIGTRAP events, selecting #%d\n",
- num_events, random_selector);
+ debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
+ num_events, random_selector);
- event_lp = (struct lwp_info *) find_inferior (&all_lwps,
- select_event_lwp_callback,
- &random_selector);
+ event_thread
+ = (struct thread_info *) find_inferior (&all_threads,
+ select_event_lwp_callback,
+ &random_selector);
}
- if (event_lp != NULL)
+ if (event_thread != NULL)
{
+ struct lwp_info *event_lp = get_thread_lwp (event_thread);
+
/* Switch the event LWP. */
*orig_lp = event_lp;
}
static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
/* Ignore EXCEPT. */
if (lwp == except)
static void
unsuspend_all_lwps (struct lwp_info *except)
{
- find_inferior (&all_lwps, unsuspend_one_lwp, except);
+ find_inferior (&all_threads, unsuspend_one_lwp, except);
}
static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
linux_stabilize_threads (void)
{
struct thread_info *save_inferior;
- struct lwp_info *lwp_stuck;
+ struct thread_info *thread_stuck;
- lwp_stuck
- = (struct lwp_info *) find_inferior (&all_lwps,
- stuck_in_jump_pad_callback, NULL);
- if (lwp_stuck != NULL)
+ thread_stuck
+ = (struct thread_info *) find_inferior (&all_threads,
+ stuck_in_jump_pad_callback,
+ NULL);
+ if (thread_stuck != NULL)
{
if (debug_threads)
- fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
- lwpid_of (lwp_stuck));
+ debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
+ lwpid_of (thread_stuck));
return;
}
stabilizing_threads = 1;
/* Kick 'em all. */
- for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
+ for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
/* Loop until all are stopped out of the jump pads. */
- while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
+ while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
{
struct target_waitstatus ourstatus;
struct lwp_info *lwp;
/* Lock it. */
lwp->suspended++;
- if (ourstatus.value.sig != TARGET_SIGNAL_0
+ if (ourstatus.value.sig != GDB_SIGNAL_0
|| current_inferior->last_resume_kind == resume_stop)
{
- wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
+ wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
enqueue_one_deferred_signal (lwp, &wstat);
}
}
}
- find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
+ find_inferior (&all_threads, unsuspend_one_lwp, NULL);
stabilizing_threads = 0;
if (debug_threads)
{
- lwp_stuck
- = (struct lwp_info *) find_inferior (&all_lwps,
- stuck_in_jump_pad_callback, NULL);
- if (lwp_stuck != NULL)
- fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
- lwpid_of (lwp_stuck));
+ thread_stuck
+ = (struct thread_info *) find_inferior (&all_threads,
+ stuck_in_jump_pad_callback,
+ NULL);
+ if (thread_stuck != NULL)
+ debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
+ lwpid_of (thread_stuck));
}
}
int maybe_internal_trap;
int report_to_gdb;
int trace_event;
+ int in_step_range;
+
+ if (debug_threads)
+ {
+ debug_enter ();
+ debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
+ }
/* Translate generic target options into linux options. */
options = __WALL;
retry:
bp_explains_trap = 0;
trace_event = 0;
+ in_step_range = 0;
ourstatus->kind = TARGET_WAITKIND_IGNORE;
/* If we were only supposed to resume one thread, only wait for
else
{
if (debug_threads)
- fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
- target_pid_to_str (step_over_bkpt));
+ debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
+ target_pid_to_str (step_over_bkpt));
pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
}
- if (pid == 0) /* only if TARGET_WNOHANG */
- return null_ptid;
+ if (pid == 0)
+ {
+ gdb_assert (target_options & TARGET_WNOHANG);
- event_child = get_thread_lwp (current_inferior);
+ if (debug_threads)
+ {
+ debug_printf ("linux_wait_1 ret = null_ptid, "
+ "TARGET_WAITKIND_IGNORE\n");
+ debug_exit ();
+ }
+
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ return null_ptid;
+ }
+ else if (pid == -1)
+ {
+ if (debug_threads)
+ {
+ debug_printf ("linux_wait_1 ret = null_ptid, "
+ "TARGET_WAITKIND_NO_RESUMED\n");
+ debug_exit ();
+ }
- /* If we are waiting for a particular child, and it exited,
- linux_wait_for_event will return its exit status. Similarly if
- the last child exited. If this is not the last child, however,
- do not report it as exited until there is a 'thread exited' response
- available in the remote protocol. Instead, just wait for another event.
- This should be safe, because if the thread crashed we will already
- have reported the termination signal to GDB; that should stop any
- in-progress stepping operations, etc.
+ ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
+ return null_ptid;
+ }
- Report the exit status of the last thread to exit. This matches
- LinuxThreads' behavior. */
+ event_child = get_thread_lwp (current_inferior);
- if (last_thread_of_process_p (current_inferior))
+ /* linux_wait_for_event only returns an exit status for the last
+ child of a process. Report it. */
+ if (WIFEXITED (w) || WIFSIGNALED (w))
{
- if (WIFEXITED (w) || WIFSIGNALED (w))
+ if (WIFEXITED (w))
{
- if (WIFEXITED (w))
- {
- ourstatus->kind = TARGET_WAITKIND_EXITED;
- ourstatus->value.integer = WEXITSTATUS (w);
+ ourstatus->kind = TARGET_WAITKIND_EXITED;
+ ourstatus->value.integer = WEXITSTATUS (w);
- if (debug_threads)
- fprintf (stderr,
- "\nChild exited with retcode = %x \n",
- WEXITSTATUS (w));
- }
- else
+ if (debug_threads)
{
- ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
- ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
-
- if (debug_threads)
- fprintf (stderr,
- "\nChild terminated with signal = %x \n",
- WTERMSIG (w));
-
+ debug_printf ("linux_wait_1 ret = %s, exited with "
+ "retcode %d\n",
+ target_pid_to_str (ptid_of (current_inferior)),
+ WEXITSTATUS (w));
+ debug_exit ();
}
+ }
+ else
+ {
+ ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
+ ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
- return ptid_of (event_child);
+ if (debug_threads)
+ {
+ debug_printf ("linux_wait_1 ret = %s, terminated with "
+ "signal %d\n",
+ target_pid_to_str (ptid_of (current_inferior)),
+ WTERMSIG (w));
+ debug_exit ();
+ }
}
- }
- else
- {
- if (!WIFSTOPPED (w))
- goto retry;
+
+ return ptid_of (current_inferior);
}
/* If this event was not handled before, and is not a SIGTRAP, we
already handled it. So next time we resume (from this
PC), we should step over it. */
if (debug_threads)
- fprintf (stderr, "Hit a gdbserver breakpoint.\n");
+ debug_printf ("Hit a gdbserver breakpoint.\n");
if (breakpoint_here (event_child->stop_pc))
event_child->need_step_over = 1;
&& agent_loaded_p ())
{
if (debug_threads)
- fprintf (stderr,
- "Got signal %d for LWP %ld. Check if we need "
- "to defer or adjust it.\n",
- WSTOPSIG (w), lwpid_of (event_child));
+ debug_printf ("Got signal %d for LWP %ld. Check if we need "
+ "to defer or adjust it.\n",
+ WSTOPSIG (w), lwpid_of (current_inferior));
/* Allow debugging the jump pad itself. */
if (current_inferior->last_resume_kind != resume_step
enqueue_one_deferred_signal (event_child, &w);
if (debug_threads)
- fprintf (stderr,
- "Signal %d for LWP %ld deferred (in jump pad)\n",
- WSTOPSIG (w), lwpid_of (event_child));
+ debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
+ WSTOPSIG (w), lwpid_of (current_inferior));
linux_resume_one_lwp (event_child, 0, 0, NULL);
goto retry;
if (event_child->collecting_fast_tracepoint)
{
if (debug_threads)
- fprintf (stderr, "\
-LWP %ld was trying to move out of the jump pad (%d). \
-Check if we're already there.\n",
- lwpid_of (event_child),
- event_child->collecting_fast_tracepoint);
+ debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
+ "Check if we're already there.\n",
+ lwpid_of (current_inferior),
+ event_child->collecting_fast_tracepoint);
trace_event = 1;
if (event_child->exit_jump_pad_bkpt != NULL)
{
if (debug_threads)
- fprintf (stderr,
- "No longer need exit-jump-pad bkpt; removing it."
- "stopping all threads momentarily.\n");
+ debug_printf ("No longer need exit-jump-pad bkpt; removing it."
+ "stopping all threads momentarily.\n");
/* Other running threads could hit this breakpoint.
We don't handle moribund locations like GDB does,
if (event_child->collecting_fast_tracepoint == 0)
{
if (debug_threads)
- fprintf (stderr,
- "fast tracepoint finished "
- "collecting successfully.\n");
+ debug_printf ("fast tracepoint finished "
+ "collecting successfully.\n");
/* We may have a deferred signal to report. */
if (dequeue_one_deferred_signal (event_child, &w))
{
if (debug_threads)
- fprintf (stderr, "dequeued one signal.\n");
+ debug_printf ("dequeued one signal.\n");
}
else
{
if (debug_threads)
- fprintf (stderr, "no deferred signals.\n");
+ debug_printf ("no deferred signals.\n");
if (stabilizing_threads)
{
ourstatus->kind = TARGET_WAITKIND_STOPPED;
- ourstatus->value.sig = TARGET_SIGNAL_0;
- return ptid_of (event_child);
+ ourstatus->value.sig = GDB_SIGNAL_0;
+
+ if (debug_threads)
+ {
+ debug_printf ("linux_wait_1 ret = %s, stopped "
+ "while stabilizing threads\n",
+ target_pid_to_str (ptid_of (current_inferior)));
+ debug_exit ();
+ }
+
+ return ptid_of (current_inferior);
}
}
}
|| WSTOPSIG (w) == __SIGRTMIN + 1))
||
#endif
- (pass_signals[target_signal_from_host (WSTOPSIG (w))]
+ (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
&& !(WSTOPSIG (w) == SIGSTOP
&& current_inferior->last_resume_kind == resume_stop))))
{
siginfo_t info, *info_p;
if (debug_threads)
- fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
- WSTOPSIG (w), lwpid_of (event_child));
+ debug_printf ("Ignored signal %d for LWP %ld.\n",
+ WSTOPSIG (w), lwpid_of (current_inferior));
- if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
+ if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
+ (PTRACE_TYPE_ARG3) 0, &info) == 0)
info_p = &info;
else
info_p = NULL;
goto retry;
}
- /* If GDB wanted this thread to single step, we always want to
- report the SIGTRAP, and let GDB handle it. Watchpoints should
- always be reported. So should signals we can't explain. A
- SIGTRAP we can't explain could be a GDB breakpoint --- we may or
- not support Z0 breakpoints. If we do, we're be able to handle
- GDB breakpoints on top of internal breakpoints, by handling the
- internal breakpoint and still reporting the event to GDB. If we
- don't, we're out of luck, GDB won't see the breakpoint hit. */
+ /* Note that all addresses are always "out of the step range" when
+ there's no range to begin with. */
+ in_step_range = lwp_in_step_range (event_child);
+
+ /* If GDB wanted this thread to single step, and the thread is out
+ of the step range, we always want to report the SIGTRAP, and let
+ GDB handle it. Watchpoints should always be reported. So should
+ signals we can't explain. A SIGTRAP we can't explain could be a
+ GDB breakpoint --- we may or may not support Z0 breakpoints. If we
+ do, we'll be able to handle GDB breakpoints on top of internal
+ breakpoints, by handling the internal breakpoint and still
+ reporting the event to GDB. If we don't, we're out of luck, GDB
+ won't see the breakpoint hit. */
report_to_gdb = (!maybe_internal_trap
- || current_inferior->last_resume_kind == resume_step
+ || (current_inferior->last_resume_kind == resume_step
+ && !in_step_range)
|| event_child->stopped_by_watchpoint
- || (!step_over_finished
+ || (!step_over_finished && !in_step_range
&& !bp_explains_trap && !trace_event)
|| (gdb_breakpoint_here (event_child->stop_pc)
- && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
+ && gdb_condition_true_at_breakpoint (event_child->stop_pc)
+ && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
+
+ run_breakpoint_commands (event_child->stop_pc);
/* We found no reason GDB would want us to stop. We either hit one
of our own breakpoints, or finished an internal step GDB
if (debug_threads)
{
if (bp_explains_trap)
- fprintf (stderr, "Hit a gdbserver breakpoint.\n");
+ debug_printf ("Hit a gdbserver breakpoint.\n");
if (step_over_finished)
- fprintf (stderr, "Step-over finished.\n");
+ debug_printf ("Step-over finished.\n");
if (trace_event)
- fprintf (stderr, "Tracepoint event.\n");
+ debug_printf ("Tracepoint event.\n");
+ if (lwp_in_step_range (event_child))
+ debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
+ paddress (event_child->stop_pc),
+ paddress (event_child->step_range_start),
+ paddress (event_child->step_range_end));
}
/* We're not reporting this breakpoint to GDB, so apply the
if (the_low_target.set_pc != NULL)
{
struct regcache *regcache
- = get_thread_regcache (get_lwp_thread (event_child), 1);
+ = get_thread_regcache (current_inferior, 1);
(*the_low_target.set_pc) (regcache, event_child->stop_pc);
}
going to keep waiting, so use proceed, which handles stepping
over the next breakpoint. */
if (debug_threads)
- fprintf (stderr, "proceeding all threads.\n");
+ debug_printf ("proceeding all threads.\n");
if (step_over_finished)
unsuspend_all_lwps (event_child);
if (debug_threads)
{
if (current_inferior->last_resume_kind == resume_step)
- fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
+ {
+ if (event_child->step_range_start == event_child->step_range_end)
+ debug_printf ("GDB wanted to single-step, reporting event.\n");
+ else if (!lwp_in_step_range (event_child))
+ debug_printf ("Out of step range, reporting event.\n");
+ }
if (event_child->stopped_by_watchpoint)
- fprintf (stderr, "Stopped by watchpoint.\n");
+ debug_printf ("Stopped by watchpoint.\n");
if (gdb_breakpoint_here (event_child->stop_pc))
- fprintf (stderr, "Stopped by GDB breakpoint.\n");
+ debug_printf ("Stopped by GDB breakpoint.\n");
if (debug_threads)
- fprintf (stderr, "Hit a non-gdbserver trap event.\n");
+ debug_printf ("Hit a non-gdbserver trap event.\n");
}
/* Alright, we're going to report a stop. */
select_event_lwp (&event_child);
+ /* current_inferior and event_child must stay in sync. */
+ current_inferior = get_lwp_thread (event_child);
+
event_child->status_pending_p = 0;
w = event_child->status_pending;
}
breakpoints in other LWPs that have hit a GDB breakpoint.
See the comment in cancel_breakpoints_callback to find out
why. */
- find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
+ find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
/* If we were going a step-over, all other threads but the stepping one
had been paused in start_step_over, with their suspend counts
/* A thread that has been requested to stop by GDB with vCont;t,
and it stopped cleanly, so report as SIG0. The use of
SIGSTOP is an implementation detail. */
- ourstatus->value.sig = TARGET_SIGNAL_0;
+ ourstatus->value.sig = GDB_SIGNAL_0;
}
else if (current_inferior->last_resume_kind == resume_stop
&& WSTOPSIG (w) != SIGSTOP)
{
/* A thread that has been requested to stop by GDB with vCont;t,
but, it stopped for other reasons. */
- ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
+ ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
}
else
{
- ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
+ ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
}
gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
if (debug_threads)
- fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
- target_pid_to_str (ptid_of (event_child)),
- ourstatus->kind,
- ourstatus->value.sig);
+ {
+ debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
+ target_pid_to_str (ptid_of (current_inferior)),
+ ourstatus->kind, ourstatus->value.sig);
+ debug_exit ();
+ }
- return ptid_of (event_child);
+ return ptid_of (current_inferior);
}
/* Get rid of any pending event in the pipe. */
{
ptid_t event_ptid;
- if (debug_threads)
- fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
-
/* Flush the async file first. */
if (target_is_async_p ())
async_file_flush ();
{
int pid;
- pid = lwpid_of (lwp);
+ pid = lwpid_of (get_lwp_thread (lwp));
/* If we already have a pending stop signal for this process, don't
send another. */
if (lwp->stop_expected)
{
if (debug_threads)
- fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
+ debug_printf ("Have pending sigstop for lwp %d\n", pid);
return;
}
if (debug_threads)
- fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
+ debug_printf ("Sending sigstop to lwp %d\n", pid);
lwp->stop_expected = 1;
kill_lwp (pid, SIGSTOP);
static int
send_sigstop_callback (struct inferior_list_entry *entry, void *except)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
/* Ignore EXCEPT. */
if (lwp == except)
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
void *except)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
/* Ignore EXCEPT. */
if (lwp == except)
lwp->stop_expected = 0;
}
+/* Wait for all children to stop for the SIGSTOPs we just queued. */
+
static void
-wait_for_sigstop (struct inferior_list_entry *entry)
+wait_for_sigstop (void)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
struct thread_info *saved_inferior;
- int wstat;
ptid_t saved_tid;
- ptid_t ptid;
- int pid;
-
- if (lwp->stopped)
- {
- if (debug_threads)
- fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
- lwpid_of (lwp));
- return;
- }
+ int wstat;
+ int ret;
saved_inferior = current_inferior;
if (saved_inferior != NULL)
- saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
+ saved_tid = saved_inferior->entry.id;
else
saved_tid = null_ptid; /* avoid bogus unused warning */
- ptid = lwp->head.id;
-
if (debug_threads)
- fprintf (stderr, "wait_for_sigstop: pulling one event\n");
-
- pid = linux_wait_for_event (ptid, &wstat, __WALL);
-
- /* If we stopped with a non-SIGSTOP signal, save it for later
- and record the pending SIGSTOP. If the process exited, just
- return. */
- if (WIFSTOPPED (wstat))
- {
- if (debug_threads)
- fprintf (stderr, "LWP %ld stopped with signal %d\n",
- lwpid_of (lwp), WSTOPSIG (wstat));
-
- if (WSTOPSIG (wstat) != SIGSTOP)
- {
- if (debug_threads)
- fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
- lwpid_of (lwp), wstat);
-
- lwp->status_pending_p = 1;
- lwp->status_pending = wstat;
- }
- }
- else
- {
- if (debug_threads)
- fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
+ debug_printf ("wait_for_sigstop: pulling events\n");
- lwp = find_lwp_pid (pid_to_ptid (pid));
- if (lwp)
- {
- /* Leave this status pending for the next time we're able to
- report it. In the mean time, we'll report this lwp as
- dead to GDB, so GDB doesn't try to read registers and
- memory from it. This can only happen if this was the
- last thread of the process; otherwise, PID is removed
- from the thread tables before linux_wait_for_event
- returns. */
- mark_lwp_dead (lwp, wstat);
- }
- }
+ /* Passing NULL_PTID as filter indicates we want all events to be
+ left pending. Eventually this returns when there are no
+ unwaited-for children left. */
+ ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
+ &wstat, __WALL);
+ gdb_assert (ret == -1);
if (saved_inferior == NULL || linux_thread_alive (saved_tid))
current_inferior = saved_inferior;
else
{
if (debug_threads)
- fprintf (stderr, "Previously current thread died.\n");
+ debug_printf ("Previously current thread died.\n");
if (non_stop)
{
static int
stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lwp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
gdb_assert (lwp->suspended == 0);
gdb_assert (lwp->stopped);
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
- struct thread_info *thread = get_lwp_thread (lwp);
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
int *wstat;
gdb_assert (lwp->suspended == 0);
&& maybe_move_out_of_jump_pad (lwp, wstat))
{
if (debug_threads)
- fprintf (stderr,
- "LWP %ld needs stabilizing (in jump pad)\n",
- lwpid_of (lwp));
+ debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
+ lwpid_of (thread));
if (wstat)
{
enqueue_one_deferred_signal (lwp, wstat);
if (debug_threads)
- fprintf (stderr,
- "Signal %d for LWP %ld deferred "
- "(in jump pad)\n",
- WSTOPSIG (*wstat), lwpid_of (lwp));
+ debug_printf ("Signal %d for LWP %ld deferred "
+ "(in jump pad)\n",
+ WSTOPSIG (*wstat), lwpid_of (thread));
}
linux_resume_one_lwp (lwp, 0, 0, NULL);
static int
lwp_running (struct inferior_list_entry *entry, void *data)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp->dead)
return 0;
static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
- stopping_threads = 1;
+ /* Should not be called recursively. */
+ gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
+
+ if (debug_threads)
+ {
+ debug_enter ();
+ debug_printf ("stop_all_lwps (%s, except=%s)\n",
+ suspend ? "stop-and-suspend" : "stop",
+ except != NULL
+ ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
+ : "none");
+ }
+
+ stopping_threads = (suspend
+ ? STOPPING_AND_SUSPENDING_THREADS
+ : STOPPING_THREADS);
if (suspend)
- find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
+ find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
else
- find_inferior (&all_lwps, send_sigstop_callback, except);
- for_each_inferior (&all_lwps, wait_for_sigstop);
- stopping_threads = 0;
+ find_inferior (&all_threads, send_sigstop_callback, except);
+ wait_for_sigstop ();
+ stopping_threads = NOT_STOPPING_THREADS;
+
+ if (debug_threads)
+ {
+ debug_printf ("stop_all_lwps done, setting stopping_threads "
+ "back to !stopping\n");
+ debug_exit ();
+ }
}
/* Resume execution of the inferior process.
linux_resume_one_lwp (struct lwp_info *lwp,
int step, int signal, siginfo_t *info)
{
+ struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_inferior;
int fast_tp_collecting;
{
/* Collecting 'while-stepping' actions doesn't make sense
anymore. */
- release_while_stepping_state_list (get_lwp_thread (lwp));
+ release_while_stepping_state_list (thread);
}
/* If we have pending signals or status, and a new signal, enqueue the
if (lwp->status_pending_p)
{
if (debug_threads)
- fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
- " has pending status\n",
- lwpid_of (lwp), step ? "step" : "continue", signal,
- lwp->stop_expected ? "expected" : "not expected");
+ debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
+ " has pending status\n",
+ lwpid_of (thread), step ? "step" : "continue", signal,
+ lwp->stop_expected ? "expected" : "not expected");
return;
}
saved_inferior = current_inferior;
- current_inferior = get_lwp_thread (lwp);
+ current_inferior = thread;
if (debug_threads)
- fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
- lwpid_of (lwp), step ? "step" : "continue", signal,
- lwp->stop_expected ? "expected" : "not expected");
+ debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
+ lwpid_of (thread), step ? "step" : "continue", signal,
+ lwp->stop_expected ? "expected" : "not expected");
/* This bit needs some thinking about. If we get a signal that
we must report while a single-step reinsert is still pending,
if (lwp->bp_reinsert != 0)
{
if (debug_threads)
- fprintf (stderr, " pending reinsert at 0x%s\n",
- paddress (lwp->bp_reinsert));
+ debug_printf (" pending reinsert at 0x%s\n",
+ paddress (lwp->bp_reinsert));
- if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
+ if (can_hardware_single_step ())
{
if (fast_tp_collecting == 0)
{
if (fast_tp_collecting == 1)
{
if (debug_threads)
- fprintf (stderr, "\
-lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
- lwpid_of (lwp));
+ debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
+ " (exit-jump-pad-bkpt)\n",
+ lwpid_of (thread));
/* Postpone any pending signal. It was enqueued above. */
signal = 0;
else if (fast_tp_collecting == 2)
{
if (debug_threads)
- fprintf (stderr, "\
-lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
- lwpid_of (lwp));
+ debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
+ " single-stepping\n",
+ lwpid_of (thread));
if (can_hardware_single_step ())
step = 1;
address, continue, and carry on catching this while-stepping
action only when that breakpoint is hit. A future
enhancement. */
- if (get_lwp_thread (lwp)->while_stepping != NULL
+ if (thread->while_stepping != NULL
&& can_hardware_single_step ())
{
if (debug_threads)
- fprintf (stderr,
- "lwp %ld has a while-stepping action -> forcing step.\n",
- lwpid_of (lwp));
+ debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
+ lwpid_of (thread));
step = 1;
}
{
struct regcache *regcache = get_thread_regcache (current_inferior, 1);
CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
- fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
+ debug_printf (" resuming from pc 0x%lx\n", (long) pc);
}
/* If we have pending signals, consume one unless we are trying to
signal = (*p_sig)->signal;
if ((*p_sig)->info.si_signo != 0)
- ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &(*p_sig)->info);
free (*p_sig);
*p_sig = NULL;
if (the_low_target.prepare_to_resume != NULL)
the_low_target.prepare_to_resume (lwp);
- regcache_invalidate_one ((struct inferior_list_entry *)
- get_lwp_thread (lwp));
+ regcache_invalidate_thread (thread);
errno = 0;
lwp->stopped = 0;
lwp->stopped_by_watchpoint = 0;
lwp->stepping = step;
- ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
+ ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
+ (PTRACE_TYPE_ARG3) 0,
/* Coerce to a uintptr_t first to avoid potential gcc warning
of coercing an 8 byte integer to a 4 byte pointer. */
- (PTRACE_ARG4_TYPE) (uintptr_t) signal);
+ (PTRACE_TYPE_ARG4) (uintptr_t) signal);
current_inferior = saved_inferior;
if (errno)
size_t n;
};
-/* This function is called once per thread. We look up the thread
- in RESUME_PTR, and mark the thread with a pointer to the appropriate
- resume request.
+/* This function is called once per thread via find_inferior.
+ ARG is a pointer to a thread_resume_array struct.
+ We look up the thread specified by ENTRY in ARG, and mark the thread
+ with a pointer to the appropriate resume request.
This algorithm is O(threads * resume elements), but resume elements
is small (and will remain small at least until GDB supports thread
suspension). */
+
static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
- struct lwp_info *lwp;
- struct thread_info *thread;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
int ndx;
struct thread_resume_array *r;
- thread = (struct thread_info *) entry;
- lwp = get_thread_lwp (thread);
r = arg;
for (ndx = 0; ndx < r->n; ndx++)
ptid_t ptid = r->resume[ndx].thread;
if (ptid_equal (ptid, minus_one_ptid)
|| ptid_equal (ptid, entry->id)
- || (ptid_is_pid (ptid)
- && (ptid_get_pid (ptid) == pid_of (lwp)))
- || (ptid_get_lwp (ptid) == -1
- && (ptid_get_pid (ptid) == pid_of (lwp))))
+ /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
+ of PID'. */
+ || (ptid_get_pid (ptid) == pid_of (thread)
+ && (ptid_is_pid (ptid)
+ || ptid_get_lwp (ptid) == -1)))
{
if (r->resume[ndx].kind == resume_stop
&& thread->last_resume_kind == resume_stop)
{
if (debug_threads)
- fprintf (stderr, "already %s LWP %ld at GDB's request\n",
- thread->last_status.kind == TARGET_WAITKIND_STOPPED
- ? "stopped"
- : "stopping",
- lwpid_of (lwp));
+ debug_printf ("already %s LWP %ld at GDB's request\n",
+ (thread->last_status.kind
+ == TARGET_WAITKIND_STOPPED)
+ ? "stopped"
+ : "stopping",
+ lwpid_of (thread));
continue;
}
lwp->resume = &r->resume[ndx];
thread->last_resume_kind = lwp->resume->kind;
+ lwp->step_range_start = lwp->resume->step_range_start;
+ lwp->step_range_end = lwp->resume->step_range_end;
+
/* If we had a deferred signal to report, dequeue one now.
This can happen if LWP gets more than one signal while
trying to get out of a jump pad. */
lwp->status_pending_p = 1;
if (debug_threads)
- fprintf (stderr,
- "Dequeueing deferred signal %d for LWP %ld, "
- "leaving status pending.\n",
- WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
+ debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
+ "leaving status pending.\n",
+ WSTOPSIG (lwp->status_pending),
+ lwpid_of (thread));
}
return 0;
return 0;
}
+/* find_inferior callback for linux_resume.
+ Set *FLAG_P if this lwp has an interesting status pending. */
-/* Set *FLAG_P if this lwp has an interesting status pending. */
static int
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
/* LWPs which will not be resumed are not interesting, because
we might not wait for them next time through linux_wait. */
static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
- struct thread_info *thread;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
struct thread_info *saved_inferior;
CORE_ADDR pc;
if (!lwp->stopped)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? Ignoring, not stopped\n",
- lwpid_of (lwp));
+ debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
+ lwpid_of (thread));
return 0;
}
- thread = get_lwp_thread (lwp);
-
if (thread->last_resume_kind == resume_stop)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
- lwpid_of (lwp));
+ debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
+ " stopped\n",
+ lwpid_of (thread));
return 0;
}
if (lwp->suspended)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? Ignoring, suspended\n",
- lwpid_of (lwp));
+ debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
+ lwpid_of (thread));
return 0;
}
if (!lwp->need_step_over)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
+ debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
}
if (lwp->status_pending_p)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? Ignoring, has pending status.\n",
- lwpid_of (lwp));
+ debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
+ " status.\n",
+ lwpid_of (thread));
return 0;
}
if (pc != lwp->stop_pc)
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? Cancelling, PC was changed. "
- "Old stop_pc was 0x%s, PC is now 0x%s\n",
- lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
+ debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
+ "Old stop_pc was 0x%s, PC is now 0x%s\n",
+ lwpid_of (thread),
+ paddress (lwp->stop_pc), paddress (pc));
lwp->need_step_over = 0;
return 0;
though. If the condition is being evaluated on the target's side
and it evaluate to false, step over this breakpoint as well. */
if (gdb_breakpoint_here (pc)
- && gdb_condition_true_at_breakpoint (pc))
+ && gdb_condition_true_at_breakpoint (pc)
+ && gdb_no_commands_at_breakpoint (pc))
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? yes, but found"
- " GDB breakpoint at 0x%s; skipping step over\n",
- lwpid_of (lwp), paddress (pc));
+ debug_printf ("Need step over [LWP %ld]? yes, but found"
+ " GDB breakpoint at 0x%s; skipping step over\n",
+ lwpid_of (thread), paddress (pc));
current_inferior = saved_inferior;
return 0;
else
{
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? yes, "
- "found breakpoint at 0x%s\n",
- lwpid_of (lwp), paddress (pc));
+ debug_printf ("Need step over [LWP %ld]? yes, "
+ "found breakpoint at 0x%s\n",
+ lwpid_of (thread), paddress (pc));
/* We've found an lwp that needs stepping over --- return 1 so
that find_inferior stops looking. */
current_inferior = saved_inferior;
if (debug_threads)
- fprintf (stderr,
- "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
- lwpid_of (lwp), paddress (pc));
+ debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
+ " at 0x%s\n",
+ lwpid_of (thread), paddress (pc));
return 0;
}
static int
start_step_over (struct lwp_info *lwp)
{
+ struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_inferior;
CORE_ADDR pc;
int step;
if (debug_threads)
- fprintf (stderr,
- "Starting step-over on LWP %ld. Stopping all threads\n",
- lwpid_of (lwp));
+ debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
+ lwpid_of (thread));
stop_all_lwps (1, lwp);
gdb_assert (lwp->suspended == 0);
if (debug_threads)
- fprintf (stderr, "Done stopping all threads for step-over.\n");
+ debug_printf ("Done stopping all threads for step-over.\n");
/* Note, we should always reach here with an already adjusted PC,
either by GDB (if we're resuming due to GDB's request), or by our
pc = get_pc (lwp);
saved_inferior = current_inferior;
- current_inferior = get_lwp_thread (lwp);
+ current_inferior = thread;
lwp->bp_reinsert = pc;
uninsert_breakpoints_at (pc);
linux_resume_one_lwp (lwp, step, 0, NULL);
/* Require next event from this LWP. */
- step_over_bkpt = lwp->head.id;
+ step_over_bkpt = thread->entry.id;
return 1;
}
if (lwp->bp_reinsert != 0)
{
if (debug_threads)
- fprintf (stderr, "Finished step over.\n");
+ debug_printf ("Finished step over.\n");
/* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
may be no breakpoint to reinsert there by now. */
static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
- struct lwp_info *lwp;
- struct thread_info *thread;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
int step;
int leave_all_stopped = * (int *) arg;
int leave_pending;
- thread = (struct thread_info *) entry;
- lwp = get_thread_lwp (thread);
-
if (lwp->resume == NULL)
return 0;
if (lwp->resume->kind == resume_stop)
{
if (debug_threads)
- fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
+ debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
if (!lwp->stopped)
{
if (debug_threads)
- fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
+ debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
/* Stop the thread, and wait for the event asynchronously,
through the event loop. */
else
{
if (debug_threads)
- fprintf (stderr, "already stopped LWP %ld\n",
- lwpid_of (lwp));
+ debug_printf ("already stopped LWP %ld\n",
+ lwpid_of (thread));
/* The LWP may have been stopped in an internal event that
was not meant to be notified back to GDB (e.g., gdbserver
if (!leave_pending)
{
if (debug_threads)
- fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
+ debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
step = (lwp->resume->kind == resume_step);
linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
else
{
if (debug_threads)
- fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
+ debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
/* If we have a new signal, enqueue the signal. */
if (lwp->resume->sig != 0)
PTRACE_SETSIGINFO. */
if (WIFSTOPPED (lwp->last_status)
&& WSTOPSIG (lwp->last_status) == lwp->resume->sig)
- ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
+ ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &p_sig->info);
lwp->pending_signals = p_sig;
}
linux_resume (struct thread_resume *resume_info, size_t n)
{
struct thread_resume_array array = { resume_info, n };
- struct lwp_info *need_step_over = NULL;
+ struct thread_info *need_step_over = NULL;
int any_pending;
int leave_all_stopped;
+ if (debug_threads)
+ {
+ debug_enter ();
+ debug_printf ("linux_resume:\n");
+ }
+
find_inferior (&all_threads, linux_set_resume_request, &array);
/* If there is a thread which would otherwise be resumed, which has
before considering to start a step-over (in all-stop). */
any_pending = 0;
if (!non_stop)
- find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
+ find_inferior (&all_threads, resume_status_pending_p, &any_pending);
/* If there is a thread which would otherwise be resumed, which is
stopped at a breakpoint that needs stepping over, then don't
queued. */
if (!any_pending && supports_breakpoints ())
need_step_over
- = (struct lwp_info *) find_inferior (&all_lwps,
- need_step_over_p, NULL);
+ = (struct thread_info *) find_inferior (&all_threads,
+ need_step_over_p, NULL);
leave_all_stopped = (need_step_over != NULL || any_pending);
if (debug_threads)
{
if (need_step_over != NULL)
- fprintf (stderr, "Not resuming all, need step over\n");
+ debug_printf ("Not resuming all, need step over\n");
else if (any_pending)
- fprintf (stderr,
- "Not resuming, all-stop and found "
- "an LWP with pending status\n");
+ debug_printf ("Not resuming, all-stop and found "
+ "an LWP with pending status\n");
else
- fprintf (stderr, "Resuming, no pending status or step over needed\n");
+ debug_printf ("Resuming, no pending status or step over needed\n");
}
/* Even if we're leaving threads stopped, queue all signals we'd
find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
if (need_step_over)
- start_step_over (need_step_over);
+ start_step_over (get_thread_lwp (need_step_over));
+
+ if (debug_threads)
+ {
+ debug_printf ("linux_resume done\n");
+ debug_exit ();
+ }
}
/* This function is called once per thread. We check the thread's
static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
- struct thread_info *thread;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
int step;
if (lwp == except)
return 0;
if (debug_threads)
- fprintf (stderr,
- "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
+ debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
if (!lwp->stopped)
{
if (debug_threads)
- fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
+ debug_printf (" LWP %ld already running\n", lwpid_of (thread));
return 0;
}
- thread = get_lwp_thread (lwp);
-
if (thread->last_resume_kind == resume_stop
&& thread->last_status.kind != TARGET_WAITKIND_IGNORE)
{
if (debug_threads)
- fprintf (stderr, " client wants LWP to remain %ld stopped\n",
- lwpid_of (lwp));
+ debug_printf (" client wants LWP to remain %ld stopped\n",
+ lwpid_of (thread));
return 0;
}
if (lwp->status_pending_p)
{
if (debug_threads)
- fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
- lwpid_of (lwp));
+ debug_printf (" LWP %ld has pending status, leaving stopped\n",
+ lwpid_of (thread));
return 0;
}
if (lwp->suspended)
{
if (debug_threads)
- fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
+ debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
return 0;
}
pending, this is a no-op. */
if (debug_threads)
- fprintf (stderr,
- "Client wants LWP %ld to stop. "
- "Making sure it has a SIGSTOP pending\n",
- lwpid_of (lwp));
+ debug_printf ("Client wants LWP %ld to stop. "
+ "Making sure it has a SIGSTOP pending\n",
+ lwpid_of (thread));
send_sigstop (lwp);
}
static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
- struct lwp_info *lwp = (struct lwp_info *) entry;
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp == except)
return 0;
static void
proceed_all_lwps (void)
{
- struct lwp_info *need_step_over;
+ struct thread_info *need_step_over;
/* If there is a thread which would otherwise be resumed, which is
stopped at a breakpoint that needs stepping over, then don't
if (supports_breakpoints ())
{
need_step_over
- = (struct lwp_info *) find_inferior (&all_lwps,
- need_step_over_p, NULL);
+ = (struct thread_info *) find_inferior (&all_threads,
+ need_step_over_p, NULL);
if (need_step_over != NULL)
{
if (debug_threads)
- fprintf (stderr, "proceed_all_lwps: found "
- "thread %ld needing a step-over\n",
- lwpid_of (need_step_over));
+ debug_printf ("proceed_all_lwps: found "
+ "thread %ld needing a step-over\n",
+ lwpid_of (need_step_over));
- start_step_over (need_step_over);
+ start_step_over (get_thread_lwp (need_step_over));
return;
}
}
if (debug_threads)
- fprintf (stderr, "Proceeding, no step-over needed\n");
+ debug_printf ("Proceeding, no step-over needed\n");
- find_inferior (&all_lwps, proceed_one_lwp, NULL);
+ find_inferior (&all_threads, proceed_one_lwp, NULL);
}
/* Stopped LWPs that the client wanted to be running, that don't have
{
if (debug_threads)
{
+ debug_enter ();
if (except)
- fprintf (stderr,
- "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
+ debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
+ lwpid_of (get_lwp_thread (except)));
else
- fprintf (stderr,
- "unstopping all lwps\n");
+ debug_printf ("unstopping all lwps\n");
}
if (unsuspend)
- find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
+ find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
else
- find_inferior (&all_lwps, proceed_one_lwp, except);
+ find_inferior (&all_threads, proceed_one_lwp, except);
+
+ if (debug_threads)
+ {
+ debug_printf ("unstop_all_lwps done\n");
+ debug_exit ();
+ }
}
#define use_linux_regsets 1
+/* Returns true if REGSET has been disabled. */
+
+static int
+regset_disabled (struct regsets_info *info, struct regset_info *regset)
+{
+  /* DISABLED_REGSETS is allocated lazily by disable_regset; a NULL
+     array therefore means no regset has been disabled yet.  REGSET is
+     assumed to point into INFO->regsets, so the pointer difference is
+     its index.  */
+  return (info->disabled_regsets != NULL
+	  && info->disabled_regsets[regset - info->regsets]);
+}
+
+/* Disable REGSET. */
+
+static void
+disable_regset (struct regsets_info *info, struct regset_info *regset)
+{
+  int dr_offset;
+
+  /* REGSET points into INFO->regsets; its index is the pointer
+     difference.  */
+  dr_offset = regset - info->regsets;
+  /* Allocate the per-regset disabled flags lazily, on first disable.
+     xcalloc zero-fills, so all other regsets remain enabled.  */
+  if (info->disabled_regsets == NULL)
+    info->disabled_regsets = xcalloc (1, info->num_regsets);
+  info->disabled_regsets[dr_offset] = 1;
+}
+
static int
-regsets_fetch_inferior_registers (struct regcache *regcache)
+regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
+ struct regcache *regcache)
{
struct regset_info *regset;
int saw_general_regs = 0;
int pid;
struct iovec iov;
- regset = target_regsets;
+ regset = regsets_info->regsets;
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
while (regset->size >= 0)
{
void *buf, *data;
int nt_type, res;
- if (regset->size == 0 || disabled_regsets[regset - target_regsets])
+ if (regset->size == 0 || regset_disabled (regsets_info, regset))
{
regset ++;
continue;
data = buf;
#ifndef __sparc__
- res = ptrace (regset->get_request, pid, nt_type, data);
+ res = ptrace (regset->get_request, pid,
+ (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
res = ptrace (regset->get_request, pid, data, nt_type);
#endif
if (errno == EIO)
{
/* If we get EIO on a regset, do not try it again for
- this process. */
- disabled_regsets[regset - target_regsets] = 1;
+ this process mode. */
+ disable_regset (regsets_info, regset);
free (buf);
continue;
}
}
static int
-regsets_store_inferior_registers (struct regcache *regcache)
+regsets_store_inferior_registers (struct regsets_info *regsets_info,
+ struct regcache *regcache)
{
struct regset_info *regset;
int saw_general_regs = 0;
int pid;
struct iovec iov;
- regset = target_regsets;
+ regset = regsets_info->regsets;
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
while (regset->size >= 0)
{
void *buf, *data;
int nt_type, res;
- if (regset->size == 0 || disabled_regsets[regset - target_regsets])
+ if (regset->size == 0 || regset_disabled (regsets_info, regset))
{
regset ++;
continue;
data = buf;
#ifndef __sparc__
- res = ptrace (regset->get_request, pid, nt_type, data);
+ res = ptrace (regset->get_request, pid,
+ (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
- res = ptrace (regset->get_request, pid, &iov, data);
+ res = ptrace (regset->get_request, pid, data, nt_type);
#endif
if (res == 0)
/* Only now do we write the register set. */
#ifndef __sparc__
- res = ptrace (regset->set_request, pid, nt_type, data);
+ res = ptrace (regset->set_request, pid,
+ (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
res = ptrace (regset->set_request, pid, data, nt_type);
#endif
if (errno == EIO)
{
/* If we get EIO on a regset, do not try it again for
- this process. */
- disabled_regsets[regset - target_regsets] = 1;
+ this process mode. */
+ disable_regset (regsets_info, regset);
free (buf);
continue;
}
#else /* !HAVE_LINUX_REGSETS */
#define use_linux_regsets 0
-#define regsets_fetch_inferior_registers(regcache) 1
-#define regsets_store_inferior_registers(regcache) 1
+#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
+#define regsets_store_inferior_registers(regsets_info, regcache) 1
#endif
calls or 0 if it has to be transferred individually. */
static int
-linux_register_in_regsets (int regno)
+linux_register_in_regsets (const struct regs_info *regs_info, int regno)
{
unsigned char mask = 1 << (regno % 8);
size_t index = regno / 8;
return (use_linux_regsets
- && (the_low_target.regset_bitmap == NULL
- || (the_low_target.regset_bitmap[index] & mask) != 0));
+ && (regs_info->regset_bitmap == NULL
+ || (regs_info->regset_bitmap[index] & mask) != 0));
}
#ifdef HAVE_LINUX_USRREGS
int
-register_addr (int regnum)
+register_addr (const struct usrregs_info *usrregs, int regnum)
{
int addr;
- if (regnum < 0 || regnum >= the_low_target.num_regs)
+ if (regnum < 0 || regnum >= usrregs->num_regs)
error ("Invalid register number %d.", regnum);
- addr = the_low_target.regmap[regnum];
+ addr = usrregs->regmap[regnum];
return addr;
}
/* Fetch one register. */
static void
-fetch_register (struct regcache *regcache, int regno)
+fetch_register (const struct usrregs_info *usrregs,
+ struct regcache *regcache, int regno)
{
CORE_ADDR regaddr;
int i, size;
char *buf;
int pid;
- if (regno >= the_low_target.num_regs)
+ if (regno >= usrregs->num_regs)
return;
if ((*the_low_target.cannot_fetch_register) (regno))
return;
- regaddr = register_addr (regno);
+ regaddr = register_addr (usrregs, regno);
if (regaddr == -1)
return;
- size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
+ size = ((register_size (regcache->tdesc, regno)
+ + sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
buf = alloca (size);
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
{
errno = 0;
ptrace (PTRACE_PEEKUSER, pid,
/* Coerce to a uintptr_t first to avoid potential gcc warning
of coercing an 8 byte integer to a 4 byte pointer. */
- (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
+ (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
regaddr += sizeof (PTRACE_XFER_TYPE);
if (errno != 0)
error ("reading register %d: %s", regno, strerror (errno));
/* Store one register. */
static void
-store_register (struct regcache *regcache, int regno)
+store_register (const struct usrregs_info *usrregs,
+ struct regcache *regcache, int regno)
{
CORE_ADDR regaddr;
int i, size;
char *buf;
int pid;
- if (regno >= the_low_target.num_regs)
+ if (regno >= usrregs->num_regs)
return;
if ((*the_low_target.cannot_store_register) (regno))
return;
- regaddr = register_addr (regno);
+ regaddr = register_addr (usrregs, regno);
if (regaddr == -1)
return;
- size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
+ size = ((register_size (regcache->tdesc, regno)
+ + sizeof (PTRACE_XFER_TYPE) - 1)
& -sizeof (PTRACE_XFER_TYPE));
buf = alloca (size);
memset (buf, 0, size);
else
collect_register (regcache, regno, buf);
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
{
errno = 0;
ptrace (PTRACE_POKEUSER, pid,
/* Coerce to a uintptr_t first to avoid potential gcc warning
about coercing an 8 byte integer to a 4 byte pointer. */
- (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
- (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
+ (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
+ (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
if (errno != 0)
{
/* At this point, ESRCH should mean the process is
unless ALL is non-zero.
Otherwise, REGNO specifies which register (so we can save time). */
static void
-usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
+usr_fetch_inferior_registers (const struct regs_info *regs_info,
+ struct regcache *regcache, int regno, int all)
{
+ struct usrregs_info *usr = regs_info->usrregs;
+
if (regno == -1)
{
- for (regno = 0; regno < the_low_target.num_regs; regno++)
- if (all || !linux_register_in_regsets (regno))
- fetch_register (regcache, regno);
+ for (regno = 0; regno < usr->num_regs; regno++)
+ if (all || !linux_register_in_regsets (regs_info, regno))
+ fetch_register (usr, regcache, regno);
}
else
- fetch_register (regcache, regno);
+ fetch_register (usr, regcache, regno);
}
/* Store our register values back into the inferior.
unless ALL is non-zero.
Otherwise, REGNO specifies which register (so we can save time). */
static void
-usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
+usr_store_inferior_registers (const struct regs_info *regs_info,
+ struct regcache *regcache, int regno, int all)
{
+ struct usrregs_info *usr = regs_info->usrregs;
+
if (regno == -1)
{
- for (regno = 0; regno < the_low_target.num_regs; regno++)
- if (all || !linux_register_in_regsets (regno))
- store_register (regcache, regno);
+ for (regno = 0; regno < usr->num_regs; regno++)
+ if (all || !linux_register_in_regsets (regs_info, regno))
+ store_register (usr, regcache, regno);
}
else
- store_register (regcache, regno);
+ store_register (usr, regcache, regno);
}
#else /* !HAVE_LINUX_USRREGS */
-#define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
-#define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
+#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
+#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#endif
{
int use_regsets;
int all = 0;
+ const struct regs_info *regs_info = (*the_low_target.regs_info) ();
if (regno == -1)
{
- all = regsets_fetch_inferior_registers (regcache);
- usr_fetch_inferior_registers (regcache, regno, all);
+ if (the_low_target.fetch_register != NULL
+ && regs_info->usrregs != NULL)
+ for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
+ (*the_low_target.fetch_register) (regcache, regno);
+
+ all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
+ if (regs_info->usrregs != NULL)
+ usr_fetch_inferior_registers (regs_info, regcache, -1, all);
}
else
{
- use_regsets = linux_register_in_regsets (regno);
+ if (the_low_target.fetch_register != NULL
+ && (*the_low_target.fetch_register) (regcache, regno))
+ return;
+
+ use_regsets = linux_register_in_regsets (regs_info, regno);
if (use_regsets)
- all = regsets_fetch_inferior_registers (regcache);
- if (!use_regsets || all)
- usr_fetch_inferior_registers (regcache, regno, 1);
+ all = regsets_fetch_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if ((!use_regsets || all) && regs_info->usrregs != NULL)
+ usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
}
}
{
int use_regsets;
int all = 0;
+ const struct regs_info *regs_info = (*the_low_target.regs_info) ();
if (regno == -1)
{
- all = regsets_store_inferior_registers (regcache);
- usr_store_inferior_registers (regcache, regno, all);
+ all = regsets_store_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if (regs_info->usrregs != NULL)
+ usr_store_inferior_registers (regs_info, regcache, regno, all);
}
else
{
- use_regsets = linux_register_in_regsets (regno);
+ use_regsets = linux_register_in_regsets (regs_info, regno);
if (use_regsets)
- all = regsets_store_inferior_registers (regcache);
- if (!use_regsets || all)
- usr_store_inferior_registers (regcache, regno, 1);
+ all = regsets_store_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if ((!use_regsets || all) && regs_info->usrregs != NULL)
+ usr_store_inferior_registers (regs_info, regcache, regno, 1);
}
}
static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
+ int pid = lwpid_of (current_inferior);
+ register PTRACE_XFER_TYPE *buffer;
+ register CORE_ADDR addr;
+ register int count;
+ char filename[64];
register int i;
- /* Round starting address down to longword boundary. */
- register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
- /* Round ending address up; get number of longwords that makes. */
- register int count
- = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
- / sizeof (PTRACE_XFER_TYPE);
- /* Allocate buffer of that many longwords. */
- register PTRACE_XFER_TYPE *buffer
- = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
+ int ret;
int fd;
- char filename[64];
- int pid = lwpid_of (get_thread_lwp (current_inferior));
/* Try using /proc. Don't bother for one word. */
if (len >= 3 * sizeof (long))
{
+ int bytes;
+
/* We could keep this file open and cache it - possibly one per
thread. That requires some juggling, but is even faster. */
sprintf (filename, "/proc/%d/mem", pid);
32-bit platforms (for instance, SPARC debugging a SPARC64
application). */
#ifdef HAVE_PREAD64
- if (pread64 (fd, myaddr, len, memaddr) != len)
+ bytes = pread64 (fd, myaddr, len, memaddr);
#else
- if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
+ bytes = -1;
+ if (lseek (fd, memaddr, SEEK_SET) != -1)
+ bytes = read (fd, myaddr, len);
#endif
- {
- close (fd);
- goto no_proc;
- }
close (fd);
- return 0;
+ if (bytes == len)
+ return 0;
+
+ /* Some data was read, we'll try to get the rest with ptrace. */
+ if (bytes > 0)
+ {
+ memaddr += bytes;
+ myaddr += bytes;
+ len -= bytes;
+ }
}
no_proc:
+ /* Round starting address down to longword boundary. */
+ addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
+ /* Round ending address up; get number of longwords that makes. */
+ count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
+ / sizeof (PTRACE_XFER_TYPE));
+ /* Allocate buffer of that many longwords. */
+ buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
+
/* Read all the longwords */
+ errno = 0;
for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
{
- errno = 0;
/* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
about coercing an 8 byte integer to a 4 byte pointer. */
buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
- (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
+ (PTRACE_TYPE_ARG3) (uintptr_t) addr,
+ (PTRACE_TYPE_ARG4) 0);
if (errno)
- return errno;
+ break;
}
+ ret = errno;
/* Copy appropriate bytes out of the buffer. */
- memcpy (myaddr,
- (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
- len);
+ if (i > 0)
+ {
+ i *= sizeof (PTRACE_XFER_TYPE);
+ i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
+ memcpy (myaddr,
+ (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
+ i < len ? i : len);
+ }
- return 0;
+ return ret;
}
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
memory at MEMADDR. On failure (cannot write to the inferior)
- returns the value of errno. */
+ returns the value of errno. Always succeeds if LEN is zero. */
static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
alloca (count * sizeof (PTRACE_XFER_TYPE));
- int pid = lwpid_of (get_thread_lwp (current_inferior));
+ int pid = lwpid_of (current_inferior);
+
+ if (len == 0)
+ {
+ /* Zero length write always succeeds. */
+ return 0;
+ }
if (debug_threads)
{
val = val & 0xffff;
else if (len == 3)
val = val & 0xffffff;
- fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
- val, (long)memaddr);
+ debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
+ val, (long)memaddr);
}
/* Fill start and end extra bytes of buffer with existing memory data. */
/* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
about coercing an 8 byte integer to a 4 byte pointer. */
buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
- (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
+ (PTRACE_TYPE_ARG3) (uintptr_t) addr,
+ (PTRACE_TYPE_ARG4) 0);
if (errno)
return errno;
= ptrace (PTRACE_PEEKTEXT, pid,
/* Coerce to a uintptr_t first to avoid potential gcc warning
about coercing an 8 byte integer to a 4 byte pointer. */
- (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
+ (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
* sizeof (PTRACE_XFER_TYPE)),
- 0);
+ (PTRACE_TYPE_ARG4) 0);
if (errno)
return errno;
}
ptrace (PTRACE_POKETEXT, pid,
/* Coerce to a uintptr_t first to avoid potential gcc warning
about coercing an 8 byte integer to a 4 byte pointer. */
- (PTRACE_ARG3_TYPE) (uintptr_t) addr,
- (PTRACE_ARG4_TYPE) buffer[i]);
+ (PTRACE_TYPE_ARG3) (uintptr_t) addr,
+ (PTRACE_TYPE_ARG4) buffer[i]);
if (errno)
return errno;
}
return 0;
}
-/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
-static int linux_supports_tracefork_flag;
-
-static void
-linux_enable_event_reporting (int pid)
-{
- if (!linux_supports_tracefork_flag)
- return;
-
- ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
-}
-
-/* Helper functions for linux_test_for_tracefork, called via clone (). */
-
-static int
-linux_tracefork_grandchild (void *arg)
-{
- _exit (0);
-}
-
-#define STACK_SIZE 4096
-
-static int
-linux_tracefork_child (void *arg)
-{
- ptrace (PTRACE_TRACEME, 0, 0, 0);
- kill (getpid (), SIGSTOP);
-
-#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
-
- if (fork () == 0)
- linux_tracefork_grandchild (NULL);
-
-#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-
-#ifdef __ia64__
- __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
- CLONE_VM | SIGCHLD, NULL);
-#else
- clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
- CLONE_VM | SIGCHLD, NULL);
-#endif
-
-#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-
- _exit (0);
-}
-
-/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
- sure that we can enable the option, and that it had the desired
- effect. */
-
-static void
-linux_test_for_tracefork (void)
-{
- int child_pid, ret, status;
- long second_pid;
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
- char *stack = xmalloc (STACK_SIZE * 4);
-#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-
- linux_supports_tracefork_flag = 0;
-
-#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
-
- child_pid = fork ();
- if (child_pid == 0)
- linux_tracefork_child (NULL);
-
-#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-
- /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
-#ifdef __ia64__
- child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
- CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
-#else /* !__ia64__ */
- child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
- CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
-#endif /* !__ia64__ */
-
-#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-
- if (child_pid == -1)
- perror_with_name ("clone");
-
- ret = my_waitpid (child_pid, &status, 0);
- if (ret == -1)
- perror_with_name ("waitpid");
- else if (ret != child_pid)
- error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
- if (! WIFSTOPPED (status))
- error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
-
- ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
- (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
- if (ret != 0)
- {
- ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
- if (ret != 0)
- {
- warning ("linux_test_for_tracefork: failed to kill child");
- return;
- }
-
- ret = my_waitpid (child_pid, &status, 0);
- if (ret != child_pid)
- warning ("linux_test_for_tracefork: failed to wait for killed child");
- else if (!WIFSIGNALED (status))
- warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
- "killed child", status);
-
- return;
- }
-
- ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
- if (ret != 0)
- warning ("linux_test_for_tracefork: failed to resume child");
-
- ret = my_waitpid (child_pid, &status, 0);
-
- if (ret == child_pid && WIFSTOPPED (status)
- && status >> 16 == PTRACE_EVENT_FORK)
- {
- second_pid = 0;
- ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
- if (ret == 0 && second_pid != 0)
- {
- int second_status;
-
- linux_supports_tracefork_flag = 1;
- my_waitpid (second_pid, &second_status, 0);
- ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
- if (ret != 0)
- warning ("linux_test_for_tracefork: failed to kill second child");
- my_waitpid (second_pid, &status, 0);
- }
- }
- else
- warning ("linux_test_for_tracefork: unexpected result from waitpid "
- "(%d, status 0x%x)", ret, status);
-
- do
- {
- ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
- if (ret != 0)
- warning ("linux_test_for_tracefork: failed to kill child");
- my_waitpid (child_pid, &status, 0);
- }
- while (WIFSTOPPED (status));
-
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
- free (stack);
-#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
-}
-
-
static void
linux_look_up_symbols (void)
{
if (proc->private->thread_db != NULL)
return;
- /* If the kernel supports tracing forks then it also supports tracing
- clones, and then we don't need to use the magic thread event breakpoint
- to learn about threads. */
- thread_db_init (!linux_supports_tracefork_flag);
+ /* If the kernel supports tracing clones, then we don't need to
+ use the magic thread event breakpoint to learn about
+ threads. */
+ thread_db_init (!linux_supports_traceclone ());
#endif
}
if (!ptid_equal (cont_thread, null_ptid)
&& !ptid_equal (cont_thread, minus_one_ptid))
{
- struct lwp_info *lwp;
int lwpid;
- lwp = get_thread_lwp (current_inferior);
- lwpid = lwpid_of (lwp);
+ lwpid = lwpid_of (current_inferior);
kill_lwp (lwpid, SIGINT);
}
else
{
char filename[PATH_MAX];
int fd, n;
- int pid = lwpid_of (get_thread_lwp (current_inferior));
+ int pid = lwpid_of (current_inferior);
xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
corresponding function. */
+/* Returns true if the low target supports Z breakpoint packets of
+   type Z_TYPE; false when the backend provides no
+   supports_z_point_type hook.  */
static int
-linux_insert_point (char type, CORE_ADDR addr, int len)
+linux_supports_z_point_type (char z_type)
+{
+  return (the_low_target.supports_z_point_type != NULL
+	  && the_low_target.supports_z_point_type (z_type));
+}
+
+static int
+linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
+ int size, struct raw_breakpoint *bp)
{
if (the_low_target.insert_point != NULL)
- return the_low_target.insert_point (type, addr, len);
+ return the_low_target.insert_point (type, addr, size, bp);
else
/* Unsupported (see target.h). */
return 1;
}
static int
-linux_remove_point (char type, CORE_ADDR addr, int len)
+linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
+ int size, struct raw_breakpoint *bp)
{
if (the_low_target.remove_point != NULL)
- return the_low_target.remove_point (type, addr, len);
+ return the_low_target.remove_point (type, addr, size, bp);
else
/* Unsupported (see target.h). */
return 1;
return lwp->stopped_data_address;
}
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
-#if defined(__mcoldfire__)
-/* These should really be defined in the kernel's ptrace.h header. */
-#define PT_TEXT_ADDR 49*4
-#define PT_DATA_ADDR 50*4
-#define PT_TEXT_END_ADDR 51*4
-#elif defined(BFIN)
-#define PT_TEXT_ADDR 220
-#define PT_TEXT_END_ADDR 224
-#define PT_DATA_ADDR 228
-#elif defined(__TMS320C6X__)
-#define PT_TEXT_ADDR (0x10000*4)
-#define PT_DATA_ADDR (0x10004*4)
-#define PT_TEXT_END_ADDR (0x10008*4)
-#endif
+#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
+ && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
+ && defined(PT_TEXT_END_ADDR)
+
+/* This is only used for targets that define PT_TEXT_ADDR,
+ PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
+ the target has different ways of acquiring this information, like
+ loadmaps. */
/* Under uClinux, programs are loaded at non-zero offsets, which we need
to tell gdb about. */
static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
-#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
unsigned long text, text_end, data;
int pid = lwpid_of (get_thread_lwp (current_inferior));
errno = 0;
- text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
- text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
- data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
+ text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
+ (PTRACE_TYPE_ARG4) 0);
+ text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
+ (PTRACE_TYPE_ARG4) 0);
+ data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
+ (PTRACE_TYPE_ARG4) 0);
if (errno == 0)
{
return 1;
}
-#endif
return 0;
}
#endif
if (current_inferior == NULL)
return -1;
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
if (debug_threads)
- fprintf (stderr, "%s siginfo for lwp %d.\n",
- readbuf != NULL ? "Reading" : "Writing",
- pid);
+ debug_printf ("%s siginfo for lwp %d.\n",
+ readbuf != NULL ? "Reading" : "Writing",
+ pid);
if (offset >= sizeof (siginfo))
return -1;
- if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
+ if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
return -1;
/* When GDBSERVER is built as a 64-bit application, ptrace writes into
/* Convert back to ptrace layout before flushing it out. */
siginfo_fixup (&siginfo, inf_siginfo, 1);
- if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
+ if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
return -1;
}
int previous = (linux_event_pipe[0] != -1);
if (debug_threads)
- fprintf (stderr, "linux_async (%d), previous=%d\n",
- enable, previous);
+ debug_printf ("linux_async (%d), previous=%d\n",
+ enable, previous);
if (previous != enable)
{
return 1;
}
+static int
+linux_supports_range_stepping (void)
+{
+ if (*the_low_target.supports_range_stepping == NULL)
+ return 0;
+
+ return (*the_low_target.supports_range_stepping) ();
+}
+
/* Enumerate spufs IDs for process PID. */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
unsigned const char *writebuf,
CORE_ADDR offset, int len)
{
- long pid = lwpid_of (get_thread_lwp (current_inferior));
+ long pid = lwpid_of (current_inferior);
char buf[128];
int fd = 0;
int ret = 0;
linux_read_loadmap (const char *annex, CORE_ADDR offset,
unsigned char *myaddr, unsigned int len)
{
- int pid = lwpid_of (get_thread_lwp (current_inferior));
+ int pid = lwpid_of (current_inferior);
int addr = -1;
struct target_loadmap *data = NULL;
unsigned int actual_length, copy_length;
}
/* Return &_r_debug in the inferior, or -1 if not present. Return value
- can be 0 if the inferior does not yet have the library list initialized. */
+ can be 0 if the inferior does not yet have the library list initialized.
+ We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
+ DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
CORE_ADDR dynamic_memaddr;
const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
+ CORE_ADDR map = -1;
dynamic_memaddr = get_dynamic (pid, is_elf64);
if (dynamic_memaddr == 0)
- return (CORE_ADDR) -1;
+ return map;
while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
{
if (is_elf64)
{
Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
+#ifdef DT_MIPS_RLD_MAP
+ union
+ {
+ Elf64_Xword map;
+ unsigned char buf[sizeof (Elf64_Xword)];
+ }
+ rld_map;
+
+ if (dyn->d_tag == DT_MIPS_RLD_MAP)
+ {
+ if (linux_read_memory (dyn->d_un.d_val,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP */
- if (dyn->d_tag == DT_DEBUG)
- return dyn->d_un.d_val;
+ if (dyn->d_tag == DT_DEBUG && map == -1)
+ map = dyn->d_un.d_val;
if (dyn->d_tag == DT_NULL)
break;
else
{
Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
+#ifdef DT_MIPS_RLD_MAP
+ union
+ {
+ Elf32_Word map;
+ unsigned char buf[sizeof (Elf32_Word)];
+ }
+ rld_map;
+
+ if (dyn->d_tag == DT_MIPS_RLD_MAP)
+ {
+ if (linux_read_memory (dyn->d_un.d_val,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP */
- if (dyn->d_tag == DT_DEBUG)
- return dyn->d_un.d_val;
+ if (dyn->d_tag == DT_DEBUG && map == -1)
+ map = dyn->d_un.d_val;
if (dyn->d_tag == DT_NULL)
break;
dynamic_memaddr += dyn_size;
}
- return (CORE_ADDR) -1;
+ return map;
}
/* Read one pointer from MEMADDR in the inferior. */
int l_prev_offset;
};
-/* Construct qXfer:libraries:read reply. */
+/* Construct qXfer:libraries-svr4:read reply. */
static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
32 /* l_prev offset in link_map. */
};
const struct link_map_offsets *lmo;
+ unsigned int machine;
+ int ptr_size;
+ CORE_ADDR lm_addr = 0, lm_prev = 0;
+ int allocated = 1024;
+ char *p;
+ CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
+ int header_done = 0;
if (writebuf != NULL)
return -2;
if (readbuf == NULL)
return -1;
- pid = lwpid_of (get_thread_lwp (current_inferior));
+ pid = lwpid_of (current_inferior);
xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
- is_elf64 = elf_64_file_p (filename);
+ is_elf64 = elf_64_file_p (filename, &machine);
lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
+ ptr_size = is_elf64 ? 8 : 4;
- if (priv->r_debug == 0)
- priv->r_debug = get_r_debug (pid, is_elf64);
-
- if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
+ while (annex[0] != '\0')
{
- document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
+ const char *sep;
+ CORE_ADDR *addrp;
+ int len;
+
+ sep = strchr (annex, '=');
+ if (sep == NULL)
+ break;
+
+ len = sep - annex;
+ if (len == 5 && strncmp (annex, "start", 5) == 0)
+ addrp = &lm_addr;
+ else if (len == 4 && strncmp (annex, "prev", 4) == 0)
+ addrp = &lm_prev;
+ else
+ {
+ annex = strchr (sep, ';');
+ if (annex == NULL)
+ break;
+ annex++;
+ continue;
+ }
+
+ annex = decode_address_to_semicolon (addrp, sep + 1);
}
- else
+
+ if (lm_addr == 0)
{
- int allocated = 1024;
- char *p;
- const int ptr_size = is_elf64 ? 8 : 4;
- CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
- int r_version, header_done = 0;
-
- document = xmalloc (allocated);
- strcpy (document, "<library-list-svr4 version=\"1.0\"");
- p = document + strlen (document);
-
- r_version = 0;
- if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
- (unsigned char *) &r_version,
- sizeof (r_version)) != 0
- || r_version != 1)
+ int r_version = 0;
+
+ if (priv->r_debug == 0)
+ priv->r_debug = get_r_debug (pid, is_elf64);
+
+ /* We failed to find DT_DEBUG. Such situation will not change
+ for this inferior - do not retry it. Report it to GDB as
+ E01, see for the reasons at the GDB solib-svr4.c side. */
+ if (priv->r_debug == (CORE_ADDR) -1)
+ return -1;
+
+ if (priv->r_debug != 0)
{
- warning ("unexpected r_debug version %d", r_version);
- goto done;
+ if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
+ (unsigned char *) &r_version,
+ sizeof (r_version)) != 0
+ || r_version != 1)
+ {
+ warning ("unexpected r_debug version %d", r_version);
+ }
+ else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
+ &lm_addr, ptr_size) != 0)
+ {
+ warning ("unable to read r_map from 0x%lx",
+ (long) priv->r_debug + lmo->r_map_offset);
+ }
}
+ }
+
+ document = xmalloc (allocated);
+ strcpy (document, "<library-list-svr4 version=\"1.0\"");
+ p = document + strlen (document);
+
+ while (lm_addr
+ && read_one_ptr (lm_addr + lmo->l_name_offset,
+ &l_name, ptr_size) == 0
+ && read_one_ptr (lm_addr + lmo->l_addr_offset,
+ &l_addr, ptr_size) == 0
+ && read_one_ptr (lm_addr + lmo->l_ld_offset,
+ &l_ld, ptr_size) == 0
+ && read_one_ptr (lm_addr + lmo->l_prev_offset,
+ &l_prev, ptr_size) == 0
+ && read_one_ptr (lm_addr + lmo->l_next_offset,
+ &l_next, ptr_size) == 0)
+ {
+ unsigned char libname[PATH_MAX];
- if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
- &lm_addr, ptr_size) != 0)
+ if (lm_prev != l_prev)
{
- warning ("unable to read r_map from 0x%lx",
- (long) priv->r_debug + lmo->r_map_offset);
- goto done;
+ warning ("Corrupted shared library list: 0x%lx != 0x%lx",
+ (long) lm_prev, (long) l_prev);
+ break;
}
- lm_prev = 0;
- while (read_one_ptr (lm_addr + lmo->l_name_offset,
- &l_name, ptr_size) == 0
- && read_one_ptr (lm_addr + lmo->l_addr_offset,
- &l_addr, ptr_size) == 0
- && read_one_ptr (lm_addr + lmo->l_ld_offset,
- &l_ld, ptr_size) == 0
- && read_one_ptr (lm_addr + lmo->l_prev_offset,
- &l_prev, ptr_size) == 0
- && read_one_ptr (lm_addr + lmo->l_next_offset,
- &l_next, ptr_size) == 0)
+ /* Ignore the first entry even if it has valid name as the first entry
+ corresponds to the main executable. The first entry should not be
+ skipped if the dynamic loader was loaded late by a static executable
+ (see solib-svr4.c parameter ignore_first). But in such case the main
+ executable does not have PT_DYNAMIC present and this function already
+ exited above due to failed get_r_debug. */
+ if (lm_prev == 0)
+ {
+ sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
+ p = p + strlen (p);
+ }
+ else
{
- unsigned char libname[PATH_MAX];
-
- if (lm_prev != l_prev)
- {
- warning ("Corrupted shared library list: 0x%lx != 0x%lx",
- (long) lm_prev, (long) l_prev);
- break;
- }
-
/* Not checking for error because reading may stop before
we've got PATH_MAX worth of characters. */
libname[0] = '\0';
name = xml_escape_text ((char *) libname);
p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
- "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
+ "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
name, (unsigned long) lm_addr,
(unsigned long) l_addr, (unsigned long) l_ld);
free (name);
}
- else if (lm_prev == 0)
- {
- sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
- p = p + strlen (p);
- }
+ }
- if (l_next == 0)
- break;
+ lm_prev = lm_addr;
+ lm_addr = l_next;
+ }
- lm_prev = lm_addr;
- lm_addr = l_next;
- }
- done:
- strcpy (p, "</library-list-svr4>");
+ if (!header_done)
+ {
+ /* Empty list; terminate `<library-list-svr4'. */
+ strcpy (p, "/>");
}
+ else
+ strcpy (p, "</library-list-svr4>");
document_len = strlen (document);
if (offset < document_len)
return len;
}
#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  Enable branch tracing for
   PTID; on success, also record the inferior's pointer width (in
   bits) in the returned trace info so trace addresses can be
   formatted correctly.  Returns NULL on failure.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      /* Register 0's size gives the target's pointer width in bytes.  */
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  Returns 0 on success, -1
   otherwise.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  Append the branch trace for
   TINFO to BUFFER as a "btrace" XML document.  On failure, store an
   "E.*" error string in BUFFER and return -1; return 0 on success.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */
+
static struct target_ops linux_target_ops = {
linux_create_inferior,
linux_attach,
linux_look_up_symbols,
linux_request_interrupt,
linux_read_auxv,
+ linux_supports_z_point_type,
linux_insert_point,
linux_remove_point,
linux_stopped_by_watchpoint,
linux_stopped_data_address,
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
+#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
+ && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
+ && defined(PT_TEXT_END_ADDR)
linux_read_offsets,
#else
NULL,
linux_get_min_fast_tracepoint_insn_len,
linux_qxfer_libraries_svr4,
linux_supports_agent,
+#ifdef HAVE_LINUX_BTRACE
+ linux_supports_btrace,
+ linux_low_enable_btrace,
+ linux_low_disable_btrace,
+ linux_low_read_btrace,
+#else
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+#endif
+ linux_supports_range_stepping,
};
static void
#endif
}
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regset table (terminated by an entry
   with a negative size) and cache the count in INFO->num_regsets.
   Called once per regsets_info at arch-initialization time.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
+
void
initialize_low (void)
{
set_breakpoint_data (the_low_target.breakpoint,
the_low_target.breakpoint_len);
linux_init_signals ();
- linux_test_for_tracefork ();
-#ifdef HAVE_LINUX_REGSETS
- for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
- ;
- disabled_regsets = xmalloc (num_regsets);
-#endif
+ linux_ptrace_init_warnings ();
sigchld_action.sa_handler = sigchld_handler;
sigemptyset (&sigchld_action.sa_mask);
sigchld_action.sa_flags = SA_RESTART;
sigaction (SIGCHLD, &sigchld_action, NULL);
+
+ initialize_low_arch ();
}