/* Low level interface to ptrace, for the remote server for GDB.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2017 Free Software Foundation, Inc.
This file is part of GDB.
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
-
+#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
+#include "common-inferior.h"
+#include "nat/fork-inferior.h"
+#include "environ.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
then ELFMAG0 will have been defined. If it didn't get included by
return lwp->stop_reason;
}
+/* See nat/linux-nat.h.  Return non-zero if LWP is currently
+   single-stepping, i.e. its "stepping" flag is set.  */
+
+int
+lwp_is_stepping (struct lwp_info *lwp)
+{
+  return lwp->stepping;
+}
+
/* A list of all unknown processes which receive stop signals. Some
other process will presumably claim each of these as forked
children momentarily. */
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
-static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);
+static int proceed_one_lwp (thread_info *thread, void *except);
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
debug_printf ("deleting %ld\n", lwpid_of (thr));
remove_thread (thr);
- free (lwp->arch_private);
+
+ if (the_low_target.delete_thread != NULL)
+ the_low_target.delete_thread (lwp->arch_private);
+ else
+ gdb_assert (lwp->arch_private == NULL);
+
free (lwp);
}
&& can_software_single_step ()
&& event == PTRACE_EVENT_VFORK)
{
- /* If we leave reinsert breakpoints there, child will
- hit it, so uninsert reinsert breakpoints from parent
+ /* If we leave single-step breakpoints there, child will
+ hit it, so uninsert single-step breakpoints from parent
(and child). Once vfork child is done, reinsert
them back to parent. */
- uninsert_reinsert_breakpoints (event_thr);
+ uninsert_single_step_breakpoints (event_thr);
}
clone_all_breakpoints (child_thr, event_thr);
- tdesc = XNEW (struct target_desc);
+ tdesc = allocate_target_description ();
copy_target_description (tdesc, parent_proc->tdesc);
child_proc->tdesc = tdesc;
event_lwp->status_pending_p = 1;
event_lwp->status_pending = wstat;
- /* If the parent thread is doing step-over with reinsert
- breakpoints, the list of reinsert breakpoints are cloned
+ /* Link the threads until the parent event is passed on to
+ higher layers. */
+ event_lwp->fork_relative = child_lwp;
+ child_lwp->fork_relative = event_lwp;
+
+ /* If the parent thread is doing step-over with single-step
+ breakpoints, the list of single-step breakpoints are cloned
from the parent's. Remove them from the child process.
In case of vfork, we'll reinsert them back once vforked
child is done. */
/* The child process is forked and stopped, so it is safe
to access its memory without stopping all other threads
from other processes. */
- delete_reinsert_breakpoints (child_thr);
+ delete_single_step_breakpoints (child_thr);
- gdb_assert (has_reinsert_breakpoints (event_thr));
- gdb_assert (!has_reinsert_breakpoints (child_thr));
+ gdb_assert (has_single_step_breakpoints (event_thr));
+ gdb_assert (!has_single_step_breakpoints (child_thr));
}
/* Report the event. */
new_lwp->status_pending = status;
}
+ thread_db_notice_clone (event_thr, ptid);
+
/* Don't report the event. */
return 1;
}
if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
{
- reinsert_reinsert_breakpoints (event_thr);
+ reinsert_single_step_breakpoints (event_thr);
- gdb_assert (has_reinsert_breakpoints (event_thr));
+ gdb_assert (has_single_step_breakpoints (event_thr));
}
/* Report the event. */
else if (event == PTRACE_EVENT_EXEC && report_exec_events)
{
struct process_info *proc;
- VEC (int) *syscalls_to_catch;
+ std::vector<int> syscalls_to_catch;
ptid_t event_ptid;
pid_t event_pid;
/* Save the syscall list from the execing process. */
proc = get_thread_process (event_thr);
- syscalls_to_catch = proc->syscalls_to_catch;
- proc->syscalls_to_catch = NULL;
+ syscalls_to_catch = std::move (proc->syscalls_to_catch);
/* Delete the execing process and all its threads. */
linux_mourn (proc);
/* Restore the list to catch. Don't rely on the client, which is free
to avoid sending a new list when the architecture doesn't change.
Also, for ANY_SYSCALL, the architecture doesn't really matter. */
- proc->syscalls_to_catch = syscalls_to_catch;
+ proc->syscalls_to_catch = std::move (syscalls_to_catch);
/* Report the event. */
*orig_event_lwp = event_lwp;
return lwp;
}
+/* Callback to be used when calling fork_inferior, responsible for
+   actually initiating the tracing of the inferior.  The TRACEME and
+   setpgid calls indicate this runs in the new child, before exec.  */
+
+static void
+linux_ptrace_fun ()
+{
+  /* Ask to be traced by the parent (GDBserver); any failure here is
+     fatal to the inferior start-up.  */
+  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
+	      (PTRACE_TYPE_ARG4) 0) < 0)
+    trace_start_error_with_name ("ptrace");
+
+  /* Move the inferior into its own process group.  NOTE(review):
+     presumably so terminal signals don't also hit GDBserver --
+     confirm.  */
+  if (setpgid (0, 0) < 0)
+    trace_start_error_with_name ("setpgid");
+
+  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
+     stdout to stderr so that inferior i/o doesn't corrupt the connection.
+     Also, redirect stdin to /dev/null.  */
+  if (remote_connection_is_stdio ())
+    {
+      /* Closing fd 0 first makes the subsequent open reuse it, so
+	 /dev/null becomes the inferior's stdin.  */
+      if (close (0) < 0)
+	trace_start_error_with_name ("close");
+      if (open ("/dev/null", O_RDONLY) < 0)
+	trace_start_error_with_name ("open");
+      if (dup2 (2, 1) < 0)
+	trace_start_error_with_name ("dup2");
+      /* Best-effort notice on stderr; a failed write is harmless.  */
+      if (write (2, "stdin/stdout redirected\n",
+		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
+	{
+	  /* Errors ignored.  */;
+	}
+    }
+}
+
/* Start an inferior process and return its pid.
-   ALLARGS is a vector of program-name and args. */
+   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
+   are its arguments. */
static int
-linux_create_inferior (char *program, char **allargs)
+linux_create_inferior (const char *program,
+		       const std::vector<char *> &program_args)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Honor "set disable-randomization"; the cleanup restores the
     original personality afterwards.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);
+  /* fork_inferior wants the arguments as a single string.  */
+  std::string str_program_args = stringify_argv (program_args);
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
-  pid = vfork ();
-#else
-  pid = fork ();
-#endif
-  if (pid < 0)
-    perror_with_name ("fork");
-
-  if (pid == 0)
-    {
-      close_most_fds ();
-      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
-
-      setpgid (0, 0);
-
-      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
-	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
-	 Also, redirect stdin to /dev/null.  */
-      if (remote_connection_is_stdio ())
-	{
-	  close (0);
-	  open ("/dev/null", O_RDONLY);
-	  dup2 (2, 1);
-	  if (write (2, "stdin/stdout redirected\n",
-		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
-	    {
-	      /* Errors ignored.  */;
-	    }
-	}
-
-      execv (program, allargs);
-      if (errno == ENOENT)
-	execvp (program, allargs);
-
-      fprintf (stderr, "Cannot exec %s: %s.\n", program,
-	       strerror (errno));
-      fflush (stderr);
-      _exit (0177);
-    }
+  /* The shared fork_inferior routine replaces the hand-rolled
+     fork/exec above; linux_ptrace_fun does the ptrace/stdio setup in
+     the child.  */
+  pid = fork_inferior (program,
+		       str_program_args.c_str (),
+		       get_environ ()->envp (), linux_ptrace_fun,
+		       NULL, NULL, NULL, NULL);
  do_cleanups (restore_personality);
  /* NOTE(review): PTID appears to be assigned by code elided from
     this hunk -- confirm against the full file.  */
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;
+  /* Wait for and handle the initial stop of the freshly started
+     inferior.  */
+  post_fork_inferior (pid, program);
+
  return pid;
}
};
static int
-second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
+second_thread_of_pid_p (thread_info *thread, void *args)
{
struct counter *counter = (struct counter *) args;
- if (ptid_get_pid (entry->id) == counter->pid)
+ if (thread->id.pid () == counter->pid)
{
if (++counter->count > 1)
return 1;
except the leader. */
static int
-kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
+kill_one_lwp_callback (thread_info *thread, void *args)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
int pid = * (int *) args;
- if (ptid_get_pid (entry->id) != pid)
+ if (thread->id.pid () != pid)
return 0;
/* We avoid killing the first thread here, because of a Linux kernel (at
{
if (debug_threads)
debug_printf ("lkop: is last of process %s\n",
- target_pid_to_str (entry->id));
+ target_pid_to_str (thread->id));
return 0;
}
given process. */
static int
-linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
+linux_detach_lwp_callback (thread_info *thread, void *args)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
int pid = *(int *) args;
int lwpid = lwpid_of (thread);
/* Skip other processes. */
- if (ptid_get_pid (entry->id) != pid)
+ if (thread->id.pid () != pid)
return 0;
/* We don't actually detach from the thread group leader just yet.
If the thread group exits, we must reap the zombie clone lwps
before we're able to reap the leader. */
- if (ptid_get_pid (entry->id) == lwpid)
+ if (thread->id.pid () == lwpid)
return 0;
linux_detach_one_lwp (lwp);
/* Remove all LWPs that belong to process PROC from the lwp list. */
static int
-delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
+delete_lwp_callback (thread_info *thread, void *proc)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
struct process_info *process = (struct process_info *) proc;
/* Freeing all private data. */
priv = process->priv;
- free (priv->arch_private);
+ if (the_low_target.delete_process != NULL)
+ the_low_target.delete_process (priv->arch_private);
+ else
+ gdb_assert (priv->arch_private == NULL);
free (priv);
process->priv = NULL;
/* Return 1 if this lwp has an interesting status pending. */
static int
-status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
+status_pending_p_callback (thread_info *thread, void *arg)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
ptid_t ptid = * (ptid_t *) arg;
}
static int
-same_lwp (struct inferior_list_entry *entry, void *data)
+same_lwp (thread_info *thread, void *data)
{
ptid_t ptid = *(ptid_t *) data;
int lwp;
else
lwp = ptid_get_pid (ptid);
- if (ptid_get_lwp (entry->id) == lwp)
+ if (thread->id.lwp () == lwp)
return 1;
return 0;
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
-  struct inferior_list_entry *thread
-    = find_inferior (&all_threads, same_lwp, &ptid);
+  /* same_lwp matches on PTID's LWP id, falling back to PTID's pid
+     when it carries no LWP id (see same_lwp above).  */
+  thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);
  if (thread == NULL)
    return NULL;
-  return get_thread_lwp ((struct thread_info *) thread);
+  /* Translate the matching thread into its LWP record.  */
+  return get_thread_lwp (thread);
}
/* Return the number of known LWPs in the tgid given by PID.  */
static int
num_lwps (int pid)
{
-  struct inferior_list_entry *inf, *tmp;
  int count = 0;
-  ALL_INFERIORS (&all_threads, inf, tmp)
+  /* This for_each_thread overload iterates only over threads of
+     process PID (the old explicit pid check is now implicit), so
+     every visit increments the count.  */
+  for_each_thread (pid, [&] (thread_info *thread)
    {
-      if (ptid_get_pid (inf->id) == pid)
-	count++;
-    }
+      count++;
+    });
  return count;
}
-/* The arguments passed to iterate_over_lwps. */
-
-struct iterate_over_lwps_args
-{
- /* The FILTER argument passed to iterate_over_lwps. */
- ptid_t filter;
-
- /* The CALLBACK argument passed to iterate_over_lwps. */
- iterate_over_lwps_ftype *callback;
-
- /* The DATA argument passed to iterate_over_lwps. */
- void *data;
-};
-
-/* Callback for find_inferior used by iterate_over_lwps to filter
- calls to the callback supplied to that function. Returning a
- nonzero value causes find_inferiors to stop iterating and return
- the current inferior_list_entry. Returning zero indicates that
- find_inferiors should continue iterating. */
-
-static int
-iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
-{
- struct iterate_over_lwps_args *args
- = (struct iterate_over_lwps_args *) args_p;
-
- if (ptid_match (entry->id, args->filter))
- {
- struct thread_info *thr = (struct thread_info *) entry;
- struct lwp_info *lwp = get_thread_lwp (thr);
-
- return (*args->callback) (lwp, args->data);
- }
-
- return 0;
-}
-
/* See nat/linux-nat.h. */
struct lwp_info *
iterate_over_lwps_ftype callback,
void *data)
{
- struct iterate_over_lwps_args args = {filter, callback, data};
- struct inferior_list_entry *entry;
+ thread_info *thread = find_thread (filter, [&] (thread_info *thread)
+ {
+ lwp_info *lwp = get_thread_lwp (thread);
- entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
- if (entry == NULL)
+ return callback (lwp, data);
+ });
+
+ if (thread == NULL)
return NULL;
- return get_thread_lwp ((struct thread_info *) entry);
+ return get_thread_lwp (thread);
}
/* Detect zombie thread group leaders, and "exit" them. We can't reap
static void
check_zombie_leaders (void)
{
-  struct process_info *proc, *tmp;
+  for_each_process ([] (process_info *proc) {
+    pid_t leader_pid = pid_of (proc);
+    struct lwp_info *leader_lp;
-  ALL_PROCESSES (proc, tmp)
-    {
-      pid_t leader_pid = pid_of (proc);
-      struct lwp_info *leader_lp;
+    leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
-      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
+    if (debug_threads)
+      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
+		    "num_lwps=%d, zombie=%d\n",
+		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
+		    linux_proc_pid_is_zombie (leader_pid));
-      if (debug_threads)
-	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
-		      "num_lwps=%d, zombie=%d\n",
-		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
-		      linux_proc_pid_is_zombie (leader_pid));
-
-      if (leader_lp != NULL && !leader_lp->stopped
-	  /* Check if there are other threads in the group, as we may
-	     have raced with the inferior simply exiting.  */
-	  && !last_thread_of_process_p (leader_pid)
-	  && linux_proc_pid_is_zombie (leader_pid))
-	{
-	  /* A leader zombie can mean one of two things:
-
-	     - It exited, and there's an exit status pending
-	     available, or only the leader exited (not the whole
-	     program).  In the latter case, we can't waitpid the
-	     leader's exit status until all other threads are gone.
-
-	     - There are 3 or more threads in the group, and a thread
-	     other than the leader exec'd.  On an exec, the Linux
-	     kernel destroys all other threads (except the execing
-	     one) in the thread group, and resets the execing thread's
-	     tid to the tgid.  No exit notification is sent for the
-	     execing thread -- from the ptracer's perspective, it
-	     appears as though the execing thread just vanishes.
-	     Until we reap all other threads except the leader and the
-	     execing thread, the leader will be zombie, and the
-	     execing thread will be in `D (disc sleep)'.  As soon as
-	     all other threads are reaped, the execing thread changes
-	     its tid to the tgid, and the previous (zombie) leader
-	     vanishes, giving place to the "new" leader.  We could try
-	     distinguishing the exit and exec cases, by waiting once
-	     more, and seeing if something comes out, but it doesn't
-	     sound useful.  The previous leader _does_ go away, and
-	     we'll re-add the new one once we see the exec event
-	     (which is just the same as what would happen if the
-	     previous leader did exit voluntarily before some other
-	     thread execs).  */
-
-	  if (debug_threads)
-	    fprintf (stderr,
-		     "CZL: Thread group leader %d zombie "
-		     "(it exited, or another thread execd).\n",
-		     leader_pid);
-
-	  delete_lwp (leader_lp);
-	}
-    }
+    if (leader_lp != NULL && !leader_lp->stopped
+	/* Check if there are other threads in the group, as we may
+	   have raced with the inferior simply exiting.  */
+	&& !last_thread_of_process_p (leader_pid)
+	&& linux_proc_pid_is_zombie (leader_pid))
+      {
+	/* A leader zombie can mean one of two things:
+
+	   - It exited, and there's an exit status pending
+	   available, or only the leader exited (not the whole
+	   program).  In the latter case, we can't waitpid the
+	   leader's exit status until all other threads are gone.
+
+	   - There are 3 or more threads in the group, and a thread
+	   other than the leader exec'd.  On an exec, the Linux
+	   kernel destroys all other threads (except the execing
+	   one) in the thread group, and resets the execing thread's
+	   tid to the tgid.  No exit notification is sent for the
+	   execing thread -- from the ptracer's perspective, it
+	   appears as though the execing thread just vanishes.
+	   Until we reap all other threads except the leader and the
+	   execing thread, the leader will be zombie, and the
+	   execing thread will be in `D (disc sleep)'.  As soon as
+	   all other threads are reaped, the execing thread changes
+	   its tid to the tgid, and the previous (zombie) leader
+	   vanishes, giving place to the "new" leader.  We could try
+	   distinguishing the exit and exec cases, by waiting once
+	   more, and seeing if something comes out, but it doesn't
+	   sound useful.  The previous leader _does_ go away, and
+	   we'll re-add the new one once we see the exec event
+	   (which is just the same as what would happen if the
+	   previous leader did exit voluntarily before some other
+	   thread execs).  */
+
+	if (debug_threads)
+	  debug_printf ("CZL: Thread group leader %d zombie "
+			"(it exited, or another thread execd).\n",
+			leader_pid);
+
+	delete_lwp (leader_lp);
+      }
+  });
}
/* Callback for `find_inferior'. Returns the first LWP that is not
stopped. ARG is a PTID filter. */
static int
-not_stopped_callback (struct inferior_list_entry *entry, void *arg)
+not_stopped_callback (thread_info *thread, void *arg)
{
- struct thread_info *thr = (struct thread_info *) entry;
struct lwp_info *lwp;
ptid_t filter = *(ptid_t *) arg;
- if (!ptid_match (ptid_of (thr), filter))
+ if (!ptid_match (ptid_of (thread), filter))
return 0;
- lwp = get_thread_lwp (thr);
+ lwp = get_thread_lwp (thread);
if (!lwp->stopped)
return 1;
lwp_suspended_decr (lwp);
gdb_assert (lwp->suspended == 0);
- gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
+ gdb_assert (!stabilizing_threads
+ || (lwp->collecting_fast_tracepoint
+ != fast_tpoint_collect_result::not_collecting));
if (tpoint_related_event)
{
return 0;
}
-/* Convenience wrapper. Returns true if LWP is presently collecting a
- fast tracepoint. */
+/* Convenience wrapper. Returns information about LWP's fast tracepoint
+ collection status. */
-static int
+static fast_tpoint_collect_result
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
struct fast_tpoint_collect_status *status)
{
struct thread_info *thread = get_lwp_thread (lwp);
if (the_low_target.get_thread_area == NULL)
- return 0;
+ return fast_tpoint_collect_result::not_collecting;
/* Get the thread area address. This is used to recognize which
thread is which when tracing with the in-process agent library.
We don't read anything from the address, and treat it as opaque;
it's the address itself that we assume is unique per-thread. */
if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
- return 0;
+ return fast_tpoint_collect_result::not_collecting;
return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
&& agent_loaded_p ())
{
struct fast_tpoint_collect_status status;
- int r;
if (debug_threads)
debug_printf ("Checking whether LWP %ld needs to move out of the "
"jump pad.\n",
lwpid_of (current_thread));
- r = linux_fast_tracepoint_collecting (lwp, &status);
+ fast_tpoint_collect_result r
+ = linux_fast_tracepoint_collecting (lwp, &status);
if (wstat == NULL
|| (WSTOPSIG (*wstat) != SIGILL
{
lwp->collecting_fast_tracepoint = r;
- if (r != 0)
+ if (r != fast_tpoint_collect_result::not_collecting)
{
- if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
+ if (r == fast_tpoint_collect_result::before_insn
+ && lwp->exit_jump_pad_bkpt == NULL)
{
/* Haven't executed the original instruction yet.
Set breakpoint there, and wait till it's hit,
reporting to GDB. Otherwise, it's an IPA lib bug: just
report the signal to GDB, and pray for the best. */
- lwp->collecting_fast_tracepoint = 0;
+ lwp->collecting_fast_tracepoint
+ = fast_tpoint_collect_result::not_collecting;
- if (r != 0
+ if (r != fast_tpoint_collect_result::not_collecting
&& (status.adjusted_insn_addr <= lwp->stop_pc
&& lwp->stop_pc < status.adjusted_insn_addr_end))
{
return 1;
else
{
- /* GDBserver must insert reinsert breakpoint for software
+ /* GDBserver must insert single-step breakpoint for software
single step. */
- gdb_assert (has_reinsert_breakpoints (thread));
+ gdb_assert (has_single_step_breakpoints (thread));
return 0;
}
}
to report, but are resumed from the core's perspective. */
static void
-resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
+resume_stopped_resumed_lwps (thread_info *thread)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
if (lp->stopped
if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
{
event_thread = (struct thread_info *)
- find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
+ find_inferior_in_random (&all_threads, status_pending_p_callback,
+ &filter_ptid);
if (event_thread != NULL)
event_child = get_thread_lwp (event_thread);
if (debug_threads && event_thread)
if (stopping_threads == NOT_STOPPING_THREADS
&& requested_child->status_pending_p
- && requested_child->collecting_fast_tracepoint)
+ && (requested_child->collecting_fast_tracepoint
+ != fast_tpoint_collect_result::not_collecting))
{
enqueue_one_deferred_signal (requested_child,
&requested_child->status_pending);
/* ... and find an LWP with a status to report to the core, if
any. */
event_thread = (struct thread_info *)
- find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
+ find_inferior_in_random (&all_threads, status_pending_p_callback,
+ &filter_ptid);
if (event_thread != NULL)
{
event_child = get_thread_lwp (event_thread);
/* Count the LWP's that have had events. */
static int
-count_events_callback (struct inferior_list_entry *entry, void *data)
+count_events_callback (thread_info *thread, void *data)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
int *count = (int *) data;
/* Select the LWP (if any) that is currently being single-stepped. */
static int
-select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
+select_singlestep_lwp_callback (thread_info *thread, void *data)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
/* Select the Nth LWP that has had an event. */
static int
-select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
+select_event_lwp_callback (thread_info *thread, void *data)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lp = get_thread_lwp (thread);
int *selector = (int *) data;
}
}
-/* Decrement the suspend count of an LWP. */
-
-static int
-unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
-{
- struct thread_info *thread = (struct thread_info *) entry;
- struct lwp_info *lwp = get_thread_lwp (thread);
-
- /* Ignore EXCEPT. */
- if (lwp == except)
- return 0;
-
- lwp_suspended_decr (lwp);
- return 0;
-}
-
/* Decrement the suspend count of every LWP except EXCEPT; EXCEPT may
   be NULL, in which case every LWP is decremented.  */
static void
unsuspend_all_lwps (struct lwp_info *except)
{
-  find_inferior (&all_threads, unsuspend_one_lwp, except);
+  for_each_thread ([&] (thread_info *thread)
+    {
+      lwp_info *lwp = get_thread_lwp (thread);
+
+      /* Skip the excepted LWP; everyone else drops one suspend
+	 reference.  */
+      if (lwp != except)
+	lwp_suspended_decr (lwp);
+    });
}
-static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
-static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
- void *data);
-static int lwp_running (struct inferior_list_entry *entry, void *data);
+static void move_out_of_jump_pad_callback (thread_info *thread);
+static bool stuck_in_jump_pad_callback (thread_info *thread);
+static int lwp_running (thread_info *thread, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
struct target_waitstatus *ourstatus,
int target_options);
static void
linux_stabilize_threads (void)
{
- struct thread_info *saved_thread;
- struct thread_info *thread_stuck;
+ thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
- thread_stuck
- = (struct thread_info *) find_inferior (&all_threads,
- stuck_in_jump_pad_callback,
- NULL);
if (thread_stuck != NULL)
{
if (debug_threads)
return;
}
- saved_thread = current_thread;
+ thread_info *saved_thread = current_thread;
stabilizing_threads = 1;
if (debug_threads)
{
- thread_stuck
- = (struct thread_info *) find_inferior (&all_threads,
- stuck_in_jump_pad_callback,
- NULL);
+ thread_stuck = find_thread (stuck_in_jump_pad_callback);
+
if (thread_stuck != NULL)
debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
lwpid_of (thread_stuck));
struct thread_info *thread = get_lwp_thread (event_child);
struct process_info *proc = get_thread_process (thread);
- return !VEC_empty (int, proc->syscalls_to_catch);
+ return !proc->syscalls_to_catch.empty ();
}
/* Returns 1 if GDB is interested in the event_child syscall.
static int
gdb_catch_this_syscall_p (struct lwp_info *event_child)
{
- int i, iter;
int sysno;
struct thread_info *thread = get_lwp_thread (event_child);
struct process_info *proc = get_thread_process (thread);
- if (VEC_empty (int, proc->syscalls_to_catch))
+ if (proc->syscalls_to_catch.empty ())
return 0;
- if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
+ if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
return 1;
get_syscall_trapinfo (event_child, &sysno);
- for (i = 0;
- VEC_iterate (int, proc->syscalls_to_catch, i, iter);
- i++)
+
+ for (int iter : proc->syscalls_to_catch)
if (iter == sysno)
return 1;
the breakpoint address.
So in the case of the hardware single step advance the PC manually
past the breakpoint and in the case of software single step advance only
- if it's not the reinsert_breakpoint we are hitting.
+ if it's not the single_step_breakpoint we are hitting.
This avoids that a program would keep trapping a permanent breakpoint
forever. */
if (!ptid_equal (step_over_bkpt, null_ptid)
&& event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
&& (event_child->stepping
- || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
+ || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
{
int increment_pc = 0;
int breakpoint_kind = 0;
/* We have a SIGTRAP, possibly a step-over dance has just
finished. If so, tweak the state machine accordingly,
- reinsert breakpoints and delete any reinsert (software
- single-step) breakpoints. */
+ reinsert breakpoints and delete any single-step
+ breakpoints. */
step_over_finished = finish_step_over (event_child);
/* Now invoke the callbacks of any internal breakpoints there. */
linux_resume_one_lwp (event_child, 0, 0, NULL);
+ if (debug_threads)
+ debug_exit ();
return ignore_event (ourstatus);
}
}
- if (event_child->collecting_fast_tracepoint)
+ if (event_child->collecting_fast_tracepoint
+ != fast_tpoint_collect_result::not_collecting)
{
if (debug_threads)
debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
"Check if we're already there.\n",
lwpid_of (current_thread),
- event_child->collecting_fast_tracepoint);
+ (int) event_child->collecting_fast_tracepoint);
trace_event = 1;
event_child->collecting_fast_tracepoint
= linux_fast_tracepoint_collecting (event_child, NULL);
- if (event_child->collecting_fast_tracepoint != 1)
+ if (event_child->collecting_fast_tracepoint
+ != fast_tpoint_collect_result::before_insn)
{
/* No longer need this breakpoint. */
if (event_child->exit_jump_pad_bkpt != NULL)
}
}
- if (event_child->collecting_fast_tracepoint == 0)
+ if (event_child->collecting_fast_tracepoint
+ == fast_tpoint_collect_result::not_collecting)
{
if (debug_threads)
debug_printf ("fast tracepoint finished "
linux_resume_one_lwp (event_child, event_child->stepping,
0, NULL);
+
+ if (debug_threads)
+ debug_exit ();
return ignore_event (ourstatus);
}
linux_resume_one_lwp (event_child, event_child->stepping,
WSTOPSIG (w), info_p);
}
+
+ if (debug_threads)
+ debug_exit ();
+
return ignore_event (ourstatus);
}
(*the_low_target.set_pc) (regcache, event_child->stop_pc);
}
- /* We may have finished stepping over a breakpoint. If so,
- we've stopped and suspended all LWPs momentarily except the
- stepping one. This is where we resume them all again. We're
- going to keep waiting, so use proceed, which handles stepping
- over the next breakpoint. */
+ if (step_over_finished)
+ {
+ /* If we have finished stepping over a breakpoint, we've
+ stopped and suspended all LWPs momentarily except the
+ stepping one. This is where we resume them all again.
+ We're going to keep waiting, so use proceed, which
+ handles stepping over the next breakpoint. */
+ unsuspend_all_lwps (event_child);
+ }
+ else
+ {
+ /* Remove the single-step breakpoints if any. Note that
+ there isn't single-step breakpoint if we finished stepping
+ over. */
+ if (can_software_single_step ()
+ && has_single_step_breakpoints (current_thread))
+ {
+ stop_all_lwps (0, event_child);
+ delete_single_step_breakpoints (current_thread);
+ unstop_all_lwps (0, event_child);
+ }
+ }
+
if (debug_threads)
debug_printf ("proceeding all threads.\n");
+ proceed_all_lwps ();
- if (step_over_finished)
- unsuspend_all_lwps (event_child);
+ if (debug_threads)
+ debug_exit ();
- proceed_all_lwps ();
return ignore_event (ourstatus);
}
{
if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
- char *str;
+ std::string str
+ = target_waitstatus_to_string (&event_child->waitstatus);
- str = target_waitstatus_to_string (&event_child->waitstatus);
debug_printf ("LWP %ld: extended event with waitstatus %s\n",
- lwpid_of (get_lwp_thread (event_child)), str);
- xfree (str);
+ lwpid_of (get_lwp_thread (event_child)), str.c_str ());
}
if (current_thread->last_resume_kind == resume_step)
{
/* Alright, we're going to report a stop. */
- /* Remove reinsert breakpoints. */
+ /* Remove single-step breakpoints. */
if (can_software_single_step ())
{
- /* Remove reinsert breakpoints or not. It it is true, stop all
+    /* Remove single-step breakpoints or not.  If it is true, stop all
lwps, so that other threads won't hit the breakpoint in the
       stale memory. */
- int remove_reinsert_breakpoints_p = 0;
+ int remove_single_step_breakpoints_p = 0;
if (non_stop)
{
- remove_reinsert_breakpoints_p
- = has_reinsert_breakpoints (current_thread);
+ remove_single_step_breakpoints_p
+ = has_single_step_breakpoints (current_thread);
}
else
{
/* In all-stop, a stop reply cancels all previous resume
- requests. Delete all reinsert breakpoints. */
- struct inferior_list_entry *inf, *tmp;
+ requests. Delete all single-step breakpoints. */
- ALL_INFERIORS (&all_threads, inf, tmp)
- {
- struct thread_info *thread = (struct thread_info *) inf;
+ find_thread ([&] (thread_info *thread) {
+ if (has_single_step_breakpoints (thread))
+ {
+ remove_single_step_breakpoints_p = 1;
+ return true;
+ }
- if (has_reinsert_breakpoints (thread))
- {
- remove_reinsert_breakpoints_p = 1;
- break;
- }
- }
+ return false;
+ });
}
- if (remove_reinsert_breakpoints_p)
+ if (remove_single_step_breakpoints_p)
{
- /* If we remove reinsert breakpoints from memory, stop all lwps,
+ /* If we remove single-step breakpoints from memory, stop all lwps,
+	  so that other threads won't hit the breakpoint in the stale
memory. */
stop_all_lwps (0, event_child);
if (non_stop)
{
- gdb_assert (has_reinsert_breakpoints (current_thread));
- delete_reinsert_breakpoints (current_thread);
+ gdb_assert (has_single_step_breakpoints (current_thread));
+ delete_single_step_breakpoints (current_thread);
}
else
{
- struct inferior_list_entry *inf, *tmp;
-
- ALL_INFERIORS (&all_threads, inf, tmp)
- {
- struct thread_info *thread = (struct thread_info *) inf;
-
- if (has_reinsert_breakpoints (thread))
- delete_reinsert_breakpoints (thread);
- }
+ for_each_thread ([] (thread_info *thread){
+ if (has_single_step_breakpoints (thread))
+ delete_single_step_breakpoints (thread);
+ });
}
unstop_all_lwps (0, event_child);
if (!non_stop)
stop_all_lwps (0, NULL);
- /* If we're not waiting for a specific LWP, choose an event LWP
- from among those that have had events. Giving equal priority
- to all LWPs that have had events helps prevent
- starvation. */
- if (ptid_equal (ptid, minus_one_ptid))
- {
- event_child->status_pending_p = 1;
- event_child->status_pending = w;
-
- select_event_lwp (&event_child);
-
- /* current_thread and event_child must stay in sync. */
- current_thread = get_lwp_thread (event_child);
-
- event_child->status_pending_p = 0;
- w = event_child->status_pending;
- }
-
if (step_over_finished)
{
if (!non_stop)
}
}
+ /* If we're not waiting for a specific LWP, choose an event LWP
+ from among those that have had events. Giving equal priority
+ to all LWPs that have had events helps prevent
+ starvation. */
+ if (ptid_equal (ptid, minus_one_ptid))
+ {
+ event_child->status_pending_p = 1;
+ event_child->status_pending = w;
+
+ select_event_lwp (&event_child);
+
+ /* current_thread and event_child must stay in sync. */
+ current_thread = get_lwp_thread (event_child);
+
+ event_child->status_pending_p = 0;
+ w = event_child->status_pending;
+ }
+
+
/* Stabilize threads (move out of jump pads). */
if (!non_stop)
stabilize_threads ();
{
/* If the reported event is an exit, fork, vfork or exec, let
GDB know. */
+
+ /* Break the unreported fork relationship chain. */
+ if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
+ || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
+ {
+ event_child->fork_relative->fork_relative = NULL;
+ event_child->fork_relative = NULL;
+ }
+
*ourstatus = event_child->waitstatus;
/* Clear the event lwp's waitstatus since we handled it already. */
event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
}
static int
-send_sigstop_callback (struct inferior_list_entry *entry, void *except)
+send_sigstop_callback (thread_info *thread, void *except)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
/* Ignore EXCEPT. */
/* Increment the suspend count of an LWP, and stop it, if not stopped
yet. */
static int
-suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
- void *except)
+suspend_and_send_sigstop_callback (thread_info *thread, void *except)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
/* Ignore EXCEPT. */
lwp_suspended_inc (lwp);
- return send_sigstop_callback (entry, except);
+ return send_sigstop_callback (thread, except);
}
static void
saved_thread = current_thread;
if (saved_thread != NULL)
- saved_tid = saved_thread->entry.id;
+ saved_tid = saved_thread->id;
else
saved_tid = null_ptid; /* avoid bogus unused warning */
}
}
-/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
+/* Returns true if THREAD is stopped in a jump pad, and we can't
move it out, because we need to report the stop event to GDB. For
example, if the user puts a breakpoint in the jump pad, it's
because she wants to debug it. */
-static int
-stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
+static bool
+stuck_in_jump_pad_callback (thread_info *thread)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp->suspended != 0)
&& (gdb_breakpoint_here (lwp->stop_pc)
|| lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
|| thread->last_resume_kind == resume_step)
- && linux_fast_tracepoint_collecting (lwp, NULL));
+ && (linux_fast_tracepoint_collecting (lwp, NULL)
+ != fast_tpoint_collect_result::not_collecting));
}
static void
-move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
+move_out_of_jump_pad_callback (thread_info *thread)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct thread_info *saved_thread;
struct lwp_info *lwp = get_thread_lwp (thread);
int *wstat;
}
static int
-lwp_running (struct inferior_list_entry *entry, void *data)
+lwp_running (thread_info *thread, void *data)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp_is_marked_dead (lwp))
static void
install_software_single_step_breakpoints (struct lwp_info *lwp)
{
- int i;
- CORE_ADDR pc;
struct thread_info *thread = get_lwp_thread (lwp);
struct regcache *regcache = get_thread_regcache (thread, 1);
- VEC (CORE_ADDR) *next_pcs = NULL;
struct cleanup *old_chain = make_cleanup_restore_current_thread ();
- make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
-
current_thread = thread;
- next_pcs = (*the_low_target.get_next_pcs) (regcache);
+ std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
- for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
- set_reinsert_breakpoint (pc, current_ptid);
+ for (CORE_ADDR pc : next_pcs)
+ set_single_step_breakpoint (pc, current_ptid);
do_cleanups (old_chain);
}
static int
lwp_signal_can_be_delivered (struct lwp_info *lwp)
{
- return !lwp->collecting_fast_tracepoint;
+ return (lwp->collecting_fast_tracepoint
+ == fast_tpoint_collect_result::not_collecting);
}
/* Resume execution of LWP. If STEP is nonzero, single-step it. If
{
struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_thread;
- int fast_tp_collecting;
int ptrace_request;
struct process_info *proc = get_thread_process (thread);
gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
- fast_tp_collecting = lwp->collecting_fast_tracepoint;
+ fast_tpoint_collect_result fast_tp_collecting
+ = lwp->collecting_fast_tracepoint;
- gdb_assert (!stabilizing_threads || fast_tp_collecting);
+ gdb_assert (!stabilizing_threads
+ || (fast_tp_collecting
+ != fast_tpoint_collect_result::not_collecting));
/* Cancel actions that rely on GDB not changing the PC (e.g., the
user used the "jump" command, or "set $pc = foo"). */
if (can_hardware_single_step ())
{
- if (fast_tp_collecting == 0)
+ if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
{
if (step == 0)
- fprintf (stderr, "BAD - reinserting but not stepping.\n");
+ warning ("BAD - reinserting but not stepping.");
if (lwp->suspended)
- fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
- lwp->suspended);
+ warning ("BAD - reinserting and suspended(%d).",
+ lwp->suspended);
}
}
step = maybe_hw_step (thread);
}
- if (fast_tp_collecting == 1)
+ if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
{
if (debug_threads)
debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
" (exit-jump-pad-bkpt)\n",
lwpid_of (thread));
}
- else if (fast_tp_collecting == 2)
+ else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
{
if (debug_threads)
debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
suspension). */
static int
-linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
+linux_set_resume_request (thread_info *thread, void *arg)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
int ndx;
struct thread_resume_array *r;
{
ptid_t ptid = r->resume[ndx].thread;
if (ptid_equal (ptid, minus_one_ptid)
- || ptid_equal (ptid, entry->id)
+ || ptid == thread->id
/* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
of PID'. */
|| (ptid_get_pid (ptid) == pid_of (thread)
continue;
}
+ /* Ignore (wildcard) resume requests for already-resumed
+ threads. */
+ if (r->resume[ndx].kind != resume_stop
+ && thread->last_resume_kind != resume_stop)
+ {
+ if (debug_threads)
+ debug_printf ("already %s LWP %ld at GDB's request\n",
+ (thread->last_resume_kind
+ == resume_step)
+ ? "stepping"
+ : "continuing",
+ lwpid_of (thread));
+ continue;
+ }
+
+ /* Don't let wildcard resumes resume fork children that GDB
+ does not yet know are new fork children. */
+ if (lwp->fork_relative != NULL)
+ {
+ struct lwp_info *rel = lwp->fork_relative;
+
+ if (rel->status_pending_p
+ && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
+ || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
+ {
+ if (debug_threads)
+ debug_printf ("not resuming LWP %ld: has queued stop reply\n",
+ lwpid_of (thread));
+ continue;
+ }
+ }
+
+ /* If the thread has a pending event that has already been
+ reported to GDBserver core, but GDB has not pulled the
+ event out of the vStopped queue yet, likewise, ignore the
+ (wildcard) resume request. */
+ if (in_queued_stop_replies (thread->id))
+ {
+ if (debug_threads)
+ debug_printf ("not resuming LWP %ld: has queued stop reply\n",
+ lwpid_of (thread));
+ continue;
+ }
+
lwp->resume = &r->resume[ndx];
thread->last_resume_kind = lwp->resume->kind;
Set *FLAG_P if this lwp has an interesting status pending. */
static int
-resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
+resume_status_pending_p (thread_info *thread, void *flag_p)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
/* LWPs which will not be resumed are not interesting, because
inferior's regcache. */
static int
-need_step_over_p (struct inferior_list_entry *entry, void *dummy)
+need_step_over_p (thread_info *thread, void *dummy)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
struct thread_info *saved_thread;
CORE_ADDR pc;
linux_resume_one_lwp (lwp, step, 0, NULL);
/* Require next event from this LWP. */
- step_over_bkpt = thread->entry.id;
+ step_over_bkpt = thread->id;
return 1;
}
/* Finish a step-over. Reinsert the breakpoint we had uninserted in
- start_step_over, if still there, and delete any reinsert
+ start_step_over, if still there, and delete any single-step
breakpoints we've set, on non hardware single-step targets. */
static int
lwp->bp_reinsert = 0;
- /* Delete any software-single-step reinsert breakpoints. No
- longer needed. We don't have to worry about other threads
- hitting this trap, and later not being able to explain it,
- because we were stepping over a breakpoint, and we hold all
- threads but LWP stopped while doing that. */
+ /* Delete any single-step breakpoints. No longer needed. We
+ don't have to worry about other threads hitting this trap,
+ and later not being able to explain it, because we were
+ stepping over a breakpoint, and we hold all threads but
+ LWP stopped while doing that. */
if (!can_hardware_single_step ())
{
- gdb_assert (has_reinsert_breakpoints (current_thread));
- delete_reinsert_breakpoints (current_thread);
+ gdb_assert (has_single_step_breakpoints (current_thread));
+ delete_single_step_breakpoints (current_thread);
}
step_over_bkpt = null_ptid;
they should be re-issued if necessary. */
static int
-linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
+linux_resume_one_thread (thread_info *thread, void *arg)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
int leave_all_stopped = * (int *) arg;
int leave_pending;
if (debug_threads)
debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
- proceed_one_lwp (entry, NULL);
+ proceed_one_lwp (thread, NULL);
}
else
{
on that particular thread, and leave all others stopped. */
static int
-proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+proceed_one_lwp (thread_info *thread, void *except)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
int step;
if (thread->last_resume_kind == resume_stop
&& lwp->pending_signals_to_report == NULL
- && lwp->collecting_fast_tracepoint == 0)
+ && (lwp->collecting_fast_tracepoint
+ == fast_tpoint_collect_result::not_collecting))
{
/* We haven't reported this LWP as stopped yet (otherwise, the
last_status.kind check above would catch it, and we wouldn't
debug_printf (" stepping LWP %ld, client wants it stepping\n",
lwpid_of (thread));
- /* If resume_step is requested by GDB, install reinsert
+ /* If resume_step is requested by GDB, install single-step
breakpoints when the thread is about to be actually resumed if
- the reinsert breakpoints weren't removed. */
- if (can_software_single_step () && !has_reinsert_breakpoints (thread))
+ the single-step breakpoints weren't removed. */
+ if (can_software_single_step ()
+ && !has_single_step_breakpoints (thread))
install_software_single_step_breakpoints (lwp);
step = maybe_hw_step (thread);
}
static int
-unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
{
- struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp == except)
lwp_suspended_decr (lwp);
- return proceed_one_lwp (entry, except);
+ return proceed_one_lwp (thread, except);
}
/* When we finish a step-over, set threads running again. If there's
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
int pid = lwpid_of (current_thread);
- register PTRACE_XFER_TYPE *buffer;
- register CORE_ADDR addr;
- register int count;
+ PTRACE_XFER_TYPE *buffer;
+ CORE_ADDR addr;
+ int count;
char filename[64];
- register int i;
+ int i;
int ret;
int fd;
static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
- register int i;
+ int i;
/* Round starting address down to longword boundary. */
- register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
+ CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
/* Round ending address up; get number of longwords that makes. */
- register int count
+ int count
= (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
/ sizeof (PTRACE_XFER_TYPE);
/* Allocate buffer of that many longwords. */
- register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
+ PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
int pid = lwpid_of (current_thread);
static void
linux_request_interrupt (void)
{
- extern unsigned long signal_pid;
-
/* Send a SIGINT to the process group. This acts just like the user
typed a ^C on the controlling terminal. */
kill (-signal_pid, SIGINT);
return linux_supports_traceexec ();
}
-/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
- options for the specified lwp. */
-
-static int
-reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
- void *args)
-{
- struct thread_info *thread = (struct thread_info *) entry;
- struct lwp_info *lwp = get_thread_lwp (thread);
-
- if (!lwp->stopped)
- {
- /* Stop the lwp so we can modify its ptrace options. */
- lwp->must_set_ptrace_flags = 1;
- linux_stop_lwp (lwp);
- }
- else
- {
- /* Already stopped; go ahead and set the ptrace options. */
- struct process_info *proc = find_process_pid (pid_of (thread));
- int options = linux_low_ptrace_options (proc->attached);
-
- linux_enable_event_reporting (lwpid_of (thread), options);
- lwp->must_set_ptrace_flags = 0;
- }
-
- return 0;
-}
-
/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
ptrace flags for all inferiors. This is in case the new GDB connection
doesn't support the same set of events that the previous one did. */
static void
linux_handle_new_gdb_connection (void)
{
- pid_t pid;
-
/* Request that all the lwps reset their ptrace options. */
- find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
+ for_each_thread ([] (thread_info *thread)
+ {
+ struct lwp_info *lwp = get_thread_lwp (thread);
+
+ if (!lwp->stopped)
+ {
+ /* Stop the lwp so we can modify its ptrace options. */
+ lwp->must_set_ptrace_flags = 1;
+ linux_stop_lwp (lwp);
+ }
+ else
+ {
+ /* Already stopped; go ahead and set the ptrace options. */
+ struct process_info *proc = find_process_pid (pid_of (thread));
+ int options = linux_low_ptrace_options (proc->attached);
+
+ linux_enable_event_reporting (lwpid_of (thread), options);
+ lwp->must_set_ptrace_flags = 0;
+ }
+ });
}
static int
static int
linux_supports_range_stepping (void)
{
+ if (can_software_single_step ())
+ return 1;
if (*the_low_target.supports_range_stepping == NULL)
return 0;
{
/* 6x the size for xml_escape_text below. */
size_t len = 6 * strlen ((char *) libname);
- char *name;
if (!header_done)
{
p = document + document_len;
}
- name = xml_escape_text ((char *) libname);
+ std::string name = xml_escape_text ((char *) libname);
p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
"l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
- name, (unsigned long) lm_addr,
+ name.c_str (), (unsigned long) lm_addr,
(unsigned long) l_addr, (unsigned long) l_ld);
- free (name);
}
}
linux_supports_software_single_step,
linux_supports_catch_syscall,
linux_get_ipa_tdesc_idx,
+#if USE_THREAD_DB
+ thread_db_thread_handle,
+#else
+ NULL,
+#endif
};
#ifdef HAVE_LINUX_REGSETS