1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "linux-osdata.h"
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include "linux-ptrace.h"
28 #include "linux-procfs.h"
30 #include <sys/ioctl.h>
36 #include <sys/syscall.h>
40 #include <sys/types.h>
46 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
47 then ELFMAG0 will have been defined. If it didn't get included by
48 gdb_proc_service.h then including it will likely introduce a duplicate
49 definition of elf_fpregset_t. */
54 #define SPUFS_MAGIC 0x23c9b64e
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
/* Build a waitpid-style "stopped by signal SIG" status word.  Provide a
   fallback for C libraries that do not define W_STOPCODE.  */
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* uClibc targets without an MMU need vfork-based process creation;
   flag them here so the fork/vfork choice below can test one macro.
   NOTE(review): reconstructed guard — confirm against original tree.  */
#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif
84 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
85 representation of the thread ID.
87 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
88 the same as the LWP ID.
90 ``all_processes'' is keyed by the "overall process ID", which
91 GNU/Linux calls tgid, "thread group ID". */
93 struct inferior_list all_lwps
;
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of not-yet-claimed stopped PIDs.  */
struct simple_pid_list *stopped_pids;
112 /* Trivial list manipulation functions to keep track of a list of new
113 stopped processes. */
116 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
118 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
121 new_pid
->status
= status
;
122 new_pid
->next
= *listp
;
/* Search the list headed by *LISTP for PID.  If found, store its
   waitpid status in *STATUSP, unlink and free the node, and return 1.
   Return 0 if PID is not on the list.  */

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	free (*p);
	*p = next;
	return 1;
      }

  return 0;
}
/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;
163 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
164 int step
, int signal
, siginfo_t
*info
);
165 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
166 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
167 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
168 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
169 static void *add_lwp (ptid_t ptid
);
170 static int linux_stopped_by_watchpoint (void);
171 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
172 static void proceed_all_lwps (void);
173 static int finish_step_over (struct lwp_info
*lwp
);
174 static CORE_ADDR
get_stop_pc (struct lwp_info
*lwp
);
175 static int kill_lwp (unsigned long lwpid
, int signo
);
176 static void linux_enable_event_reporting (int pid
);
178 /* True if the low target can hardware single-step. Such targets
179 don't need a BREAKPOINT_REINSERT_ADDR callback. */
182 can_hardware_single_step (void)
184 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
187 /* True if the low target supports memory breakpoints. If so, we'll
188 have a GET_PC implementation. */
191 supports_breakpoints (void)
193 return (the_low_target
.get_pc
!= NULL
);
196 /* Returns true if this target can support fast tracepoints. This
197 does not mean that the in-process agent has been loaded in the
201 supports_fast_tracepoints (void)
203 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
/* A signal queued for later delivery to an LWP.  Entries are chained
   through PREV, newest first.
   NOTE(review): the SIGNAL/INFO members were dropped by the
   extraction; reconstructed from upstream — confirm against tree.  */
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
/* Types to pass as the addr/data arguments of ptrace, and the type
   returned by a PTRACE_PEEK* transfer.  */
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long
#ifdef HAVE_LINUX_REGSETS
/* Per-regset "transfer failed, don't retry" flags, and the number of
   regsets in the target's regset list.  */
static char *disabled_regsets;
static int num_regsets;
#endif
/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both slots are -1 while async mode is off.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);
232 /* Return non-zero if HEADER is a 64-bit ELF file. */
235 elf_64_header_p (const Elf64_Ehdr
*header
)
237 return (header
->e_ident
[EI_MAG0
] == ELFMAG0
238 && header
->e_ident
[EI_MAG1
] == ELFMAG1
239 && header
->e_ident
[EI_MAG2
] == ELFMAG2
240 && header
->e_ident
[EI_MAG3
] == ELFMAG3
241 && header
->e_ident
[EI_CLASS
] == ELFCLASS64
);
244 /* Return non-zero if FILE is a 64-bit ELF file,
245 zero if the file is not a 64-bit ELF file,
246 and -1 if the file is not accessible or doesn't exist. */
249 elf_64_file_p (const char *file
)
254 fd
= open (file
, O_RDONLY
);
258 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
265 return elf_64_header_p (&header
);
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */
/* Return non-zero if the executable PID is running is a 64-bit ELF
   file, zero/-1 as for elf_64_file_p.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

  /* Use snprintf so the /proc path can never overrun FILE.  */
  snprintf (file, sizeof (file), "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}
/* Remove LWP and its thread from our bookkeeping lists and release
   its storage (including any arch-private data).  */

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}
289 /* Add a process to the common process list, and set its private
292 static struct process_info
*
293 linux_add_process (int pid
, int attached
)
295 struct process_info
*proc
;
297 /* Is this the first process? If so, then set the arch. */
298 if (all_processes
.head
== NULL
)
301 proc
= add_process (pid
, attached
);
302 proc
->private = xcalloc (1, sizeof (*proc
->private));
304 if (the_low_target
.new_process
!= NULL
)
305 proc
->private->arch_private
= the_low_target
.new_process ();
310 /* Wrapper function for waitpid which handles EINTR, and emulates
311 __WALL for systems where that is not available. */
314 my_waitpid (int pid
, int *status
, int flags
)
319 fprintf (stderr
, "my_waitpid (%d, 0x%x)\n", pid
, flags
);
323 sigset_t block_mask
, org_mask
, wake_mask
;
326 wnohang
= (flags
& WNOHANG
) != 0;
327 flags
&= ~(__WALL
| __WCLONE
);
330 /* Block all signals while here. This avoids knowing about
331 LinuxThread's signals. */
332 sigfillset (&block_mask
);
333 sigprocmask (SIG_BLOCK
, &block_mask
, &org_mask
);
335 /* ... except during the sigsuspend below. */
336 sigemptyset (&wake_mask
);
340 /* Since all signals are blocked, there's no need to check
342 ret
= waitpid (pid
, status
, flags
);
345 if (ret
== -1 && out_errno
!= ECHILD
)
350 if (flags
& __WCLONE
)
352 /* We've tried both flavors now. If WNOHANG is set,
353 there's nothing else to do, just bail out. */
358 fprintf (stderr
, "blocking\n");
360 /* Block waiting for signals. */
361 sigsuspend (&wake_mask
);
367 sigprocmask (SIG_SETMASK
, &org_mask
, NULL
);
372 ret
= waitpid (pid
, status
, flags
);
373 while (ret
== -1 && errno
== EINTR
);
378 fprintf (stderr
, "my_waitpid (%d, 0x%x): status(%x), %d\n",
379 pid
, flags
, status
? *status
: -1, ret
);
385 /* Handle a GNU/Linux extended wait response. If we see a clone
386 event, we need to add the new LWP to our list (and not report the
387 trap to higher layers). */
390 handle_extended_wait (struct lwp_info
*event_child
, int wstat
)
392 int event
= wstat
>> 16;
393 struct lwp_info
*new_lwp
;
395 if (event
== PTRACE_EVENT_CLONE
)
398 unsigned long new_pid
;
401 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_child
), 0, &new_pid
);
403 /* If we haven't already seen the new PID stop, wait for it now. */
404 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
406 /* The new child has a pending SIGSTOP. We can't affect it until it
407 hits the SIGSTOP, but we're already attached. */
409 ret
= my_waitpid (new_pid
, &status
, __WALL
);
412 perror_with_name ("waiting for new child");
413 else if (ret
!= new_pid
)
414 warning ("wait returned unexpected PID %d", ret
);
415 else if (!WIFSTOPPED (status
))
416 warning ("wait returned unexpected status 0x%x", status
);
419 linux_enable_event_reporting (new_pid
);
421 ptid
= ptid_build (pid_of (event_child
), new_pid
, 0);
422 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
423 add_thread (ptid
, new_lwp
);
425 /* Either we're going to immediately resume the new thread
426 or leave it stopped. linux_resume_one_lwp is a nop if it
427 thinks the thread is currently running, so set this first
428 before calling linux_resume_one_lwp. */
429 new_lwp
->stopped
= 1;
431 /* Normally we will get the pending SIGSTOP. But in some cases
432 we might get another signal delivered to the group first.
433 If we do get another signal, be sure not to lose it. */
434 if (WSTOPSIG (status
) == SIGSTOP
)
436 if (stopping_threads
)
437 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
439 linux_resume_one_lwp (new_lwp
, 0, 0, NULL
);
443 new_lwp
->stop_expected
= 1;
445 if (stopping_threads
)
447 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
448 new_lwp
->status_pending_p
= 1;
449 new_lwp
->status_pending
= status
;
452 /* Pass the signal on. This is what GDB does - except
453 shouldn't we really report it instead? */
454 linux_resume_one_lwp (new_lwp
, 0, WSTOPSIG (status
), NULL
);
457 /* Always resume the current thread. If we are stopping
458 threads, it will have a pending SIGSTOP; we may as well
460 linux_resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  Returns 0 if the target has no get_pc callback.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  /* Temporarily switch current_inferior so the regcache lookup
     targets LWP's thread; restore it before returning.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}
490 /* This function should only be called if LWP got a SIGTRAP.
491 The SIGTRAP could mean several things.
493 On i386, where decr_pc_after_break is non-zero:
494 If we were single-stepping this process using PTRACE_SINGLESTEP,
495 we will get only the one SIGTRAP (even if the instruction we
496 stepped over was a breakpoint). The value of $eip will be the
498 If we continue the process using PTRACE_CONT, we will get a
499 SIGTRAP when we hit a breakpoint. The value of $eip will be
500 the instruction after the breakpoint (i.e. needs to be
501 decremented). If we report the SIGTRAP to GDB, we must also
502 report the undecremented PC. If we cancel the SIGTRAP, we
503 must resume at the decremented PC.
505 (Presumably, not yet tested) On a non-decr_pc_after_break machine
506 with hardware or kernel single-step:
507 If we single-step over a breakpoint instruction, our PC will
508 point at the following instruction. If we continue and hit a
509 breakpoint instruction, our PC will point at the breakpoint
513 get_stop_pc (struct lwp_info
*lwp
)
517 if (the_low_target
.get_pc
== NULL
)
520 stop_pc
= get_pc (lwp
);
522 if (WSTOPSIG (lwp
->last_status
) == SIGTRAP
524 && !lwp
->stopped_by_watchpoint
525 && lwp
->last_status
>> 16 == 0)
526 stop_pc
-= the_low_target
.decr_pc_after_break
;
529 fprintf (stderr
, "stop pc is 0x%lx\n", (long) stop_pc
);
/* Allocate and zero a new lwp_info for PTID, let the low target
   attach its arch-private data, and link it on all_lwps.  Returns
   the new lwp_info.  */

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}
552 /* Start an inferior process and returns its pid.
553 ALLARGS is a vector of program-name and args. */
556 linux_create_inferior (char *program
, char **allargs
)
558 #ifdef HAVE_PERSONALITY
559 int personality_orig
= 0, personality_set
= 0;
561 struct lwp_info
*new_lwp
;
565 #ifdef HAVE_PERSONALITY
566 if (disable_randomization
)
569 personality_orig
= personality (0xffffffff);
570 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
573 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
575 if (errno
!= 0 || (personality_set
576 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
577 warning ("Error disabling address space randomization: %s",
582 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
588 perror_with_name ("fork");
592 ptrace (PTRACE_TRACEME
, 0, 0, 0);
594 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
595 signal (__SIGRTMIN
+ 1, SIG_DFL
);
600 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
601 stdout to stderr so that inferior i/o doesn't corrupt the connection.
602 Also, redirect stdin to /dev/null. */
603 if (remote_connection_is_stdio ())
606 open ("/dev/null", O_RDONLY
);
608 if (write (2, "stdin/stdout redirected\n",
609 sizeof ("stdin/stdout redirected\n") - 1) < 0)
610 /* Errors ignored. */;
613 execv (program
, allargs
);
615 execvp (program
, allargs
);
617 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
623 #ifdef HAVE_PERSONALITY
627 personality (personality_orig
);
629 warning ("Error restoring address space randomization: %s",
634 linux_add_process (pid
, 0);
636 ptid
= ptid_build (pid
, pid
, 0);
637 new_lwp
= add_lwp (ptid
);
638 add_thread (ptid
, new_lwp
);
639 new_lwp
->must_set_ptrace_flags
= 1;
644 /* Attach to an inferior process. */
647 linux_attach_lwp_1 (unsigned long lwpid
, int initial
)
650 struct lwp_info
*new_lwp
;
652 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) != 0)
656 /* If we fail to attach to an LWP, just warn. */
657 fprintf (stderr
, "Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
658 strerror (errno
), errno
);
663 /* If we fail to attach to a process, report an error. */
664 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
665 strerror (errno
), errno
);
669 /* If lwp is the tgid, we handle adding existing threads later.
670 Otherwise we just add lwp without bothering about any other
672 ptid
= ptid_build (lwpid
, lwpid
, 0);
675 /* Note that extracting the pid from the current inferior is
676 safe, since we're always called in the context of the same
677 process as this new thread. */
678 int pid
= pid_of (get_thread_lwp (current_inferior
));
679 ptid
= ptid_build (pid
, lwpid
, 0);
682 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
683 add_thread (ptid
, new_lwp
);
685 /* We need to wait for SIGSTOP before being able to make the next
686 ptrace call on this LWP. */
687 new_lwp
->must_set_ptrace_flags
= 1;
689 if (linux_proc_pid_is_stopped (lwpid
))
693 "Attached to a stopped process\n");
695 /* The process is definitely stopped. It is in a job control
696 stop, unless the kernel predates the TASK_STOPPED /
697 TASK_TRACED distinction, in which case it might be in a
698 ptrace stop. Make sure it is in a ptrace stop; from there we
699 can kill it, signal it, et cetera.
701 First make sure there is a pending SIGSTOP. Since we are
702 already attached, the process can not transition from stopped
703 to running without a PTRACE_CONT; so we know this signal will
704 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
705 probably already in the queue (unless this kernel is old
706 enough to use TASK_STOPPED for ptrace stops); but since
707 SIGSTOP is not an RT signal, it can only be queued once. */
708 kill_lwp (lwpid
, SIGSTOP
);
710 /* Finally, resume the stopped process. This will deliver the
711 SIGSTOP (or a higher priority signal, just like normal
712 PTRACE_ATTACH), which we'll catch later on. */
713 ptrace (PTRACE_CONT
, lwpid
, 0, 0);
716 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
719 There are several cases to consider here:
721 1) gdbserver has already attached to the process and is being notified
722 of a new thread that is being created.
723 In this case we should ignore that SIGSTOP and resume the
724 process. This is handled below by setting stop_expected = 1,
725 and the fact that add_thread sets last_resume_kind ==
728 2) This is the first thread (the process thread), and we're attaching
729 to it via attach_inferior.
730 In this case we want the process thread to stop.
731 This is handled by having linux_attach set last_resume_kind ==
732 resume_stop after we return.
734 If the pid we are attaching to is also the tgid, we attach to and
735 stop all the existing threads. Otherwise, we attach to pid and
736 ignore any other threads in the same group as this pid.
738 3) GDB is connecting to gdbserver and is requesting an enumeration of all
740 In this case we want the thread to stop.
741 FIXME: This case is currently not properly handled.
742 We should wait for the SIGSTOP but don't. Things work apparently
743 because enough time passes between when we ptrace (ATTACH) and when
744 gdb makes the next ptrace call on the thread.
746 On the other hand, if we are currently trying to stop all threads, we
747 should treat the new thread as if we had sent it a SIGSTOP. This works
748 because we are guaranteed that the add_lwp call above added us to the
749 end of the list, and so the new thread has not yet reached
750 wait_for_sigstop (but will). */
751 new_lwp
->stop_expected
= 1;
/* Public wrapper: attach to LWPID as a non-initial thread.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
760 /* Attach to PID. If PID is the tgid, attach to it and all
764 linux_attach (unsigned long pid
)
766 /* Attach to PID. We will check for other threads
768 linux_attach_lwp_1 (pid
, 1);
769 linux_add_process (pid
, 1);
773 struct thread_info
*thread
;
775 /* Don't ignore the initial SIGSTOP if we just attached to this
776 process. It will be collected by wait shortly. */
777 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
778 thread
->last_resume_kind
= resume_stop
;
781 if (linux_proc_get_tgid (pid
) == pid
)
786 sprintf (pathname
, "/proc/%ld/task", pid
);
788 dir
= opendir (pathname
);
792 fprintf (stderr
, "Could not open /proc/%ld/task.\n", pid
);
797 /* At this point we attached to the tgid. Scan the task for
800 int new_threads_found
;
804 while (iterations
< 2)
806 new_threads_found
= 0;
807 /* Add all the other threads. While we go through the
808 threads, new threads may be spawned. Cycle through
809 the list of threads until we have done two iterations without
810 finding new threads. */
811 while ((dp
= readdir (dir
)) != NULL
)
814 lwp
= strtoul (dp
->d_name
, NULL
, 10);
816 /* Is this a new thread? */
818 && find_thread_ptid (ptid_build (pid
, lwp
, 0)) == NULL
)
820 linux_attach_lwp_1 (lwp
, 0);
825 Found and attached to new lwp %ld\n", lwp
);
829 if (!new_threads_found
)
850 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
852 struct counter
*counter
= args
;
854 if (ptid_get_pid (entry
->id
) == counter
->pid
)
856 if (++counter
->count
> 1)
864 last_thread_of_process_p (struct thread_info
*thread
)
866 ptid_t ptid
= ((struct inferior_list_entry
*)thread
)->id
;
867 int pid
= ptid_get_pid (ptid
);
868 struct counter counter
= { pid
, 0 };
870 return (find_inferior (&all_threads
,
871 second_thread_of_pid_p
, &counter
) == NULL
);
877 linux_kill_one_lwp (struct lwp_info
*lwp
)
879 int pid
= lwpid_of (lwp
);
881 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
882 there is no signal context, and ptrace(PTRACE_KILL) (or
883 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
884 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
885 alternative is to kill with SIGKILL. We only need one SIGKILL
886 per process, not one for each thread. But since we still support
887 linuxthreads, and we also support debugging programs using raw
888 clone without CLONE_THREAD, we send one for each thread. For
889 years, we used PTRACE_KILL only, so we're being a bit paranoid
890 about some old kernels where PTRACE_KILL might work better
891 (dubious if there are any such, but that's why it's paranoia), so
892 we try SIGKILL first, PTRACE_KILL second, and so we're fine
899 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
900 target_pid_to_str (ptid_of (lwp
)),
901 errno
? strerror (errno
) : "OK");
904 ptrace (PTRACE_KILL
, pid
, 0, 0);
907 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
908 target_pid_to_str (ptid_of (lwp
)),
909 errno
? strerror (errno
) : "OK");
912 /* Callback for `find_inferior'. Kills an lwp of a given process,
913 except the leader. */
916 kill_one_lwp_callback (struct inferior_list_entry
*entry
, void *args
)
918 struct thread_info
*thread
= (struct thread_info
*) entry
;
919 struct lwp_info
*lwp
= get_thread_lwp (thread
);
921 int pid
= * (int *) args
;
923 if (ptid_get_pid (entry
->id
) != pid
)
926 /* We avoid killing the first thread here, because of a Linux kernel (at
927 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
928 the children get a chance to be reaped, it will remain a zombie
931 if (lwpid_of (lwp
) == pid
)
934 fprintf (stderr
, "lkop: is last of process %s\n",
935 target_pid_to_str (entry
->id
));
941 linux_kill_one_lwp (lwp
);
943 /* Make sure it died. The loop is most likely unnecessary. */
944 pid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
945 } while (pid
> 0 && WIFSTOPPED (wstat
));
953 struct process_info
*process
;
954 struct lwp_info
*lwp
;
958 process
= find_process_pid (pid
);
962 /* If we're killing a running inferior, make sure it is stopped
963 first, as PTRACE_KILL will not work otherwise. */
964 stop_all_lwps (0, NULL
);
966 find_inferior (&all_threads
, kill_one_lwp_callback
, &pid
);
968 /* See the comment in linux_kill_one_lwp. We did not kill the first
969 thread in the list, so do so now. */
970 lwp
= find_lwp_pid (pid_to_ptid (pid
));
975 fprintf (stderr
, "lk_1: cannot find lwp %ld, for pid: %d\n",
976 lwpid_of (lwp
), pid
);
981 fprintf (stderr
, "lk_1: killing lwp %ld, for pid: %d\n",
982 lwpid_of (lwp
), pid
);
986 linux_kill_one_lwp (lwp
);
988 /* Make sure it died. The loop is most likely unnecessary. */
989 lwpid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
990 } while (lwpid
> 0 && WIFSTOPPED (wstat
));
993 the_target
->mourn (process
);
995 /* Since we presently can only stop all lwps of all processes, we
996 need to unstop lwps of other processes. */
997 unstop_all_lwps (0, NULL
);
1002 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
1004 struct thread_info
*thread
= (struct thread_info
*) entry
;
1005 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1006 int pid
= * (int *) args
;
1008 if (ptid_get_pid (entry
->id
) != pid
)
1011 /* If this process is stopped but is expecting a SIGSTOP, then make
1012 sure we take care of that now. This isn't absolutely guaranteed
1013 to collect the SIGSTOP, but is fairly likely to. */
1014 if (lwp
->stop_expected
)
1017 /* Clear stop_expected, so that the SIGSTOP will be reported. */
1018 lwp
->stop_expected
= 0;
1019 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
1020 linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
1023 /* Flush any pending changes to the process's registers. */
1024 regcache_invalidate_one ((struct inferior_list_entry
*)
1025 get_lwp_thread (lwp
));
1027 /* Finally, let it resume. */
1028 if (the_low_target
.prepare_to_resume
!= NULL
)
1029 the_low_target
.prepare_to_resume (lwp
);
1030 ptrace (PTRACE_DETACH
, lwpid_of (lwp
), 0, 0);
/* Detach from process PID and all of its LWPs, then mourn it.
   Returns -1 if PID is unknown, 0 on success.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
/* Mourn PROCESS: drop its LWPs, free its private data, and remove it
   from the process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
1103 linux_join (int pid
)
1108 ret
= my_waitpid (pid
, &status
, 0);
1109 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1111 } while (ret
!= -1 || errno
!= ECHILD
);
1114 /* Return nonzero if the given thread is still alive. */
1116 linux_thread_alive (ptid_t ptid
)
1118 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1120 /* We assume we always know if a thread exits. If a whole process
1121 exited but we still haven't been able to report it to GDB, we'll
1122 hold on to the last lwp of the dead process. */
1129 /* Return 1 if this lwp has an interesting status pending. */
1131 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
1133 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
1134 ptid_t ptid
= * (ptid_t
*) arg
;
1135 struct thread_info
*thread
;
1137 /* Check if we're only interested in events from a specific process
1139 if (!ptid_equal (minus_one_ptid
, ptid
)
1140 && ptid_get_pid (ptid
) != ptid_get_pid (lwp
->head
.id
))
1143 thread
= get_lwp_thread (lwp
);
1145 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1146 report any status pending the LWP may have. */
1147 if (thread
->last_resume_kind
== resume_stop
1148 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
1151 return lwp
->status_pending_p
;
1155 same_lwp (struct inferior_list_entry
*entry
, void *data
)
1157 ptid_t ptid
= *(ptid_t
*) data
;
1160 if (ptid_get_lwp (ptid
) != 0)
1161 lwp
= ptid_get_lwp (ptid
);
1163 lwp
= ptid_get_pid (ptid
);
1165 if (ptid_get_lwp (entry
->id
) == lwp
)
1172 find_lwp_pid (ptid_t ptid
)
1174 return (struct lwp_info
*) find_inferior (&all_lwps
, same_lwp
, &ptid
);
1177 static struct lwp_info
*
1178 linux_wait_for_lwp (ptid_t ptid
, int *wstatp
, int options
)
1181 int to_wait_for
= -1;
1182 struct lwp_info
*child
= NULL
;
1185 fprintf (stderr
, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid
));
1187 if (ptid_equal (ptid
, minus_one_ptid
))
1188 to_wait_for
= -1; /* any child */
1190 to_wait_for
= ptid_get_lwp (ptid
); /* this lwp only */
1196 ret
= my_waitpid (to_wait_for
, wstatp
, options
);
1197 if (ret
== 0 || (ret
== -1 && errno
== ECHILD
&& (options
& WNOHANG
)))
1200 perror_with_name ("waitpid");
1203 && (!WIFSTOPPED (*wstatp
)
1204 || (WSTOPSIG (*wstatp
) != 32
1205 && WSTOPSIG (*wstatp
) != 33)))
1206 fprintf (stderr
, "Got an event from %d (%x)\n", ret
, *wstatp
);
1208 child
= find_lwp_pid (pid_to_ptid (ret
));
1210 /* If we didn't find a process, one of two things presumably happened:
1211 - A process we started and then detached from has exited. Ignore it.
1212 - A process we are controlling has forked and the new child's stop
1213 was reported to us by the kernel. Save its PID. */
1214 if (child
== NULL
&& WIFSTOPPED (*wstatp
))
1216 add_to_pid_list (&stopped_pids
, ret
, *wstatp
);
1219 else if (child
== NULL
)
1224 child
->last_status
= *wstatp
;
1226 /* Architecture-specific setup after inferior is running.
1227 This needs to happen after we have attached to the inferior
1228 and it is stopped for the first time, but before we access
1229 any inferior registers. */
1232 the_low_target
.arch_setup ();
1233 #ifdef HAVE_LINUX_REGSETS
1234 memset (disabled_regsets
, 0, num_regsets
);
1239 /* Fetch the possibly triggered data watchpoint info and store it in
1242 On some archs, like x86, that use debug registers to set
1243 watchpoints, it's possible that the way to know which watched
1244 address trapped, is to check the register that is used to select
1245 which address to watch. Problem is, between setting the
1246 watchpoint and reading back which data address trapped, the user
1247 may change the set of watchpoints, and, as a consequence, GDB
1248 changes the debug registers in the inferior. To avoid reading
1249 back a stale stopped-data-address when that happens, we cache in
1250 LP the fact that a watchpoint trapped, and the corresponding data
1251 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1252 changes the debug registers meanwhile, we have the cached data we
1255 if (WIFSTOPPED (*wstatp
) && WSTOPSIG (*wstatp
) == SIGTRAP
)
1257 if (the_low_target
.stopped_by_watchpoint
== NULL
)
1259 child
->stopped_by_watchpoint
= 0;
1263 struct thread_info
*saved_inferior
;
1265 saved_inferior
= current_inferior
;
1266 current_inferior
= get_lwp_thread (child
);
1268 child
->stopped_by_watchpoint
1269 = the_low_target
.stopped_by_watchpoint ();
1271 if (child
->stopped_by_watchpoint
)
1273 if (the_low_target
.stopped_data_address
!= NULL
)
1274 child
->stopped_data_address
1275 = the_low_target
.stopped_data_address ();
1277 child
->stopped_data_address
= 0;
1280 current_inferior
= saved_inferior
;
1284 /* Store the STOP_PC, with adjustment applied. This depends on the
1285 architecture being defined already (so that CHILD has a valid
1286 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1288 if (WIFSTOPPED (*wstatp
))
1289 child
->stop_pc
= get_stop_pc (child
);
1292 && WIFSTOPPED (*wstatp
)
1293 && the_low_target
.get_pc
!= NULL
)
1295 struct thread_info
*saved_inferior
= current_inferior
;
1296 struct regcache
*regcache
;
1299 current_inferior
= get_lwp_thread (child
);
1300 regcache
= get_thread_regcache (current_inferior
, 1);
1301 pc
= (*the_low_target
.get_pc
) (regcache
);
1302 fprintf (stderr
, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc
);
1303 current_inferior
= saved_inferior
;
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1355 /* Convenience wrapper. Returns true if LWP is presently collecting a
1359 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
1360 struct fast_tpoint_collect_status
*status
)
1362 CORE_ADDR thread_area
;
1364 if (the_low_target
.get_thread_area
== NULL
)
1367 /* Get the thread area address. This is used to recognize which
1368 thread is which when tracing with the in-process agent library.
1369 We don't read anything from the address, and treat it as opaque;
1370 it's the address itself that we assume is unique per-thread. */
1371 if ((*the_low_target
.get_thread_area
) (lwpid_of (lwp
), &thread_area
) == -1)
1374 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1377 /* The reason we resume in the caller, is because we want to be able
1378 to pass lwp->status_pending as WSTAT, and we need to clear
1379 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1380 refuses to resume. */
1383 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
1385 struct thread_info
*saved_inferior
;
1387 saved_inferior
= current_inferior
;
1388 current_inferior
= get_lwp_thread (lwp
);
1391 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1392 && supports_fast_tracepoints ()
1393 && in_process_agent_loaded ())
1395 struct fast_tpoint_collect_status status
;
1400 Checking whether LWP %ld needs to move out of the jump pad.\n",
1403 r
= linux_fast_tracepoint_collecting (lwp
, &status
);
1406 || (WSTOPSIG (*wstat
) != SIGILL
1407 && WSTOPSIG (*wstat
) != SIGFPE
1408 && WSTOPSIG (*wstat
) != SIGSEGV
1409 && WSTOPSIG (*wstat
) != SIGBUS
))
1411 lwp
->collecting_fast_tracepoint
= r
;
1415 if (r
== 1 && lwp
->exit_jump_pad_bkpt
== NULL
)
1417 /* Haven't executed the original instruction yet.
1418 Set breakpoint there, and wait till it's hit,
1419 then single-step until exiting the jump pad. */
1420 lwp
->exit_jump_pad_bkpt
1421 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1426 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1428 current_inferior
= saved_inferior
;
1435 /* If we get a synchronous signal while collecting, *and*
1436 while executing the (relocated) original instruction,
1437 reset the PC to point at the tpoint address, before
1438 reporting to GDB. Otherwise, it's an IPA lib bug: just
1439 report the signal to GDB, and pray for the best. */
1441 lwp
->collecting_fast_tracepoint
= 0;
1444 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
1445 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
1448 struct regcache
*regcache
;
1450 /* The si_addr on a few signals references the address
1451 of the faulting instruction. Adjust that as
1453 if ((WSTOPSIG (*wstat
) == SIGILL
1454 || WSTOPSIG (*wstat
) == SIGFPE
1455 || WSTOPSIG (*wstat
) == SIGBUS
1456 || WSTOPSIG (*wstat
) == SIGSEGV
)
1457 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), 0, &info
) == 0
1458 /* Final check just to make sure we don't clobber
1459 the siginfo of non-kernel-sent signals. */
1460 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
1462 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
1463 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), 0, &info
);
1466 regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
1467 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
1468 lwp
->stop_pc
= status
.tpoint_addr
;
1470 /* Cancel any fast tracepoint lock this thread was
1472 force_unlock_trace_buffer ();
1475 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
1479 "Cancelling fast exit-jump-pad: removing bkpt. "
1480 "stopping all threads momentarily.\n");
1482 stop_all_lwps (1, lwp
);
1483 cancel_breakpoints ();
1485 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
1486 lwp
->exit_jump_pad_bkpt
= NULL
;
1488 unstop_all_lwps (1, lwp
);
1490 gdb_assert (lwp
->suspended
>= 0);
1497 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1500 current_inferior
= saved_inferior
;
1504 /* Enqueue one signal in the "signals to report later when out of the
1508 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1510 struct pending_signals
*p_sig
;
1514 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat
), lwpid_of (lwp
));
1518 struct pending_signals
*sig
;
1520 for (sig
= lwp
->pending_signals_to_report
;
1524 " Already queued %d\n",
1527 fprintf (stderr
, " (no more currently queued signals)\n");
1530 /* Don't enqueue non-RT signals if they are already in the deferred
1531 queue. (SIGSTOP being the easiest signal to see ending up here
1533 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
1535 struct pending_signals
*sig
;
1537 for (sig
= lwp
->pending_signals_to_report
;
1541 if (sig
->signal
== WSTOPSIG (*wstat
))
1545 "Not requeuing already queued non-RT signal %d"
1554 p_sig
= xmalloc (sizeof (*p_sig
));
1555 p_sig
->prev
= lwp
->pending_signals_to_report
;
1556 p_sig
->signal
= WSTOPSIG (*wstat
);
1557 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
1558 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), 0, &p_sig
->info
);
1560 lwp
->pending_signals_to_report
= p_sig
;
1563 /* Dequeue one signal from the "signals to report later when out of
1564 the jump pad" list. */
1567 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1569 if (lwp
->pending_signals_to_report
!= NULL
)
1571 struct pending_signals
**p_sig
;
1573 p_sig
= &lwp
->pending_signals_to_report
;
1574 while ((*p_sig
)->prev
!= NULL
)
1575 p_sig
= &(*p_sig
)->prev
;
1577 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
1578 if ((*p_sig
)->info
.si_signo
!= 0)
1579 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), 0, &(*p_sig
)->info
);
1584 fprintf (stderr
, "Reporting deferred signal %d for LWP %ld.\n",
1585 WSTOPSIG (*wstat
), lwpid_of (lwp
));
1589 struct pending_signals
*sig
;
1591 for (sig
= lwp
->pending_signals_to_report
;
1595 " Still queued %d\n",
1598 fprintf (stderr
, " (no more queued signals)\n");
1607 /* Arrange for a breakpoint to be hit again later. We don't keep the
1608 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1609 will handle the current event, eventually we will resume this LWP,
1610 and this breakpoint will trap again. */
1613 cancel_breakpoint (struct lwp_info
*lwp
)
1615 struct thread_info
*saved_inferior
;
1617 /* There's nothing to do if we don't support breakpoints. */
1618 if (!supports_breakpoints ())
1621 /* breakpoint_at reads from current inferior. */
1622 saved_inferior
= current_inferior
;
1623 current_inferior
= get_lwp_thread (lwp
);
1625 if ((*the_low_target
.breakpoint_at
) (lwp
->stop_pc
))
1629 "CB: Push back breakpoint for %s\n",
1630 target_pid_to_str (ptid_of (lwp
)));
1632 /* Back up the PC if necessary. */
1633 if (the_low_target
.decr_pc_after_break
)
1635 struct regcache
*regcache
1636 = get_thread_regcache (current_inferior
, 1);
1637 (*the_low_target
.set_pc
) (regcache
, lwp
->stop_pc
);
1640 current_inferior
= saved_inferior
;
1647 "CB: No breakpoint found at %s for [%s]\n",
1648 paddress (lwp
->stop_pc
),
1649 target_pid_to_str (ptid_of (lwp
)));
1652 current_inferior
= saved_inferior
;
1656 /* When the event-loop is doing a step-over, this points at the thread
1658 ptid_t step_over_bkpt
;
1660 /* Wait for an event from child PID. If PID is -1, wait for any
1661 child. Store the stop status through the status pointer WSTAT.
1662 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1663 event was found and OPTIONS contains WNOHANG. Return the PID of
1664 the stopped child otherwise. */
1667 linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
)
1669 struct lwp_info
*event_child
, *requested_child
;
1673 requested_child
= NULL
;
1675 /* Check for a lwp with a pending status. */
1677 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
1679 event_child
= (struct lwp_info
*)
1680 find_inferior (&all_lwps
, status_pending_p_callback
, &ptid
);
1681 if (debug_threads
&& event_child
)
1682 fprintf (stderr
, "Got a pending child %ld\n", lwpid_of (event_child
));
1686 requested_child
= find_lwp_pid (ptid
);
1688 if (!stopping_threads
1689 && requested_child
->status_pending_p
1690 && requested_child
->collecting_fast_tracepoint
)
1692 enqueue_one_deferred_signal (requested_child
,
1693 &requested_child
->status_pending
);
1694 requested_child
->status_pending_p
= 0;
1695 requested_child
->status_pending
= 0;
1696 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
1699 if (requested_child
->suspended
1700 && requested_child
->status_pending_p
)
1701 fatal ("requesting an event out of a suspended child?");
1703 if (requested_child
->status_pending_p
)
1704 event_child
= requested_child
;
1707 if (event_child
!= NULL
)
1710 fprintf (stderr
, "Got an event from pending child %ld (%04x)\n",
1711 lwpid_of (event_child
), event_child
->status_pending
);
1712 *wstat
= event_child
->status_pending
;
1713 event_child
->status_pending_p
= 0;
1714 event_child
->status_pending
= 0;
1715 current_inferior
= get_lwp_thread (event_child
);
1716 return lwpid_of (event_child
);
1719 if (ptid_is_pid (ptid
))
1721 /* A request to wait for a specific tgid. This is not possible
1722 with waitpid, so instead, we wait for any child, and leave
1723 children we're not interested in right now with a pending
1724 status to report later. */
1725 wait_ptid
= minus_one_ptid
;
1730 /* We only enter this loop if no process has a pending wait status. Thus
1731 any action taken in response to a wait status inside this loop is
1732 responding as soon as we detect the status, not after any pending
1736 event_child
= linux_wait_for_lwp (wait_ptid
, wstat
, options
);
1738 if ((options
& WNOHANG
) && event_child
== NULL
)
1741 fprintf (stderr
, "WNOHANG set, no event found\n");
1745 if (event_child
== NULL
)
1746 error ("event from unknown child");
1748 if (ptid_is_pid (ptid
)
1749 && ptid_get_pid (ptid
) != ptid_get_pid (ptid_of (event_child
)))
1751 if (! WIFSTOPPED (*wstat
))
1752 mark_lwp_dead (event_child
, *wstat
);
1755 event_child
->status_pending_p
= 1;
1756 event_child
->status_pending
= *wstat
;
1761 current_inferior
= get_lwp_thread (event_child
);
1763 /* Check for thread exit. */
1764 if (! WIFSTOPPED (*wstat
))
1767 fprintf (stderr
, "LWP %ld exiting\n", lwpid_of (event_child
));
1769 /* If the last thread is exiting, just return. */
1770 if (last_thread_of_process_p (current_inferior
))
1773 fprintf (stderr
, "LWP %ld is last lwp of process\n",
1774 lwpid_of (event_child
));
1775 return lwpid_of (event_child
);
1780 current_inferior
= (struct thread_info
*) all_threads
.head
;
1782 fprintf (stderr
, "Current inferior is now %ld\n",
1783 lwpid_of (get_thread_lwp (current_inferior
)));
1787 current_inferior
= NULL
;
1789 fprintf (stderr
, "Current inferior is now <NULL>\n");
1792 /* If we were waiting for this particular child to do something...
1793 well, it did something. */
1794 if (requested_child
!= NULL
)
1796 int lwpid
= lwpid_of (event_child
);
1798 /* Cancel the step-over operation --- the thread that
1799 started it is gone. */
1800 if (finish_step_over (event_child
))
1801 unstop_all_lwps (1, event_child
);
1802 delete_lwp (event_child
);
1806 delete_lwp (event_child
);
1808 /* Wait for a more interesting event. */
1812 if (event_child
->must_set_ptrace_flags
)
1814 linux_enable_event_reporting (lwpid_of (event_child
));
1815 event_child
->must_set_ptrace_flags
= 0;
1818 if (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) == SIGTRAP
1819 && *wstat
>> 16 != 0)
1821 handle_extended_wait (event_child
, *wstat
);
1825 if (WIFSTOPPED (*wstat
)
1826 && WSTOPSIG (*wstat
) == SIGSTOP
1827 && event_child
->stop_expected
)
1832 fprintf (stderr
, "Expected stop.\n");
1833 event_child
->stop_expected
= 0;
1835 should_stop
= (current_inferior
->last_resume_kind
== resume_stop
1836 || stopping_threads
);
1840 linux_resume_one_lwp (event_child
,
1841 event_child
->stepping
, 0, NULL
);
1846 return lwpid_of (event_child
);
1853 /* Count the LWP's that have had events. */
1856 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
1858 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1859 struct thread_info
*thread
= get_lwp_thread (lp
);
1862 gdb_assert (count
!= NULL
);
1864 /* Count only resumed LWPs that have a SIGTRAP event pending that
1865 should be reported to GDB. */
1866 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1867 && thread
->last_resume_kind
!= resume_stop
1868 && lp
->status_pending_p
1869 && WIFSTOPPED (lp
->status_pending
)
1870 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1871 && !breakpoint_inserted_here (lp
->stop_pc
))
1877 /* Select the LWP (if any) that is currently being single-stepped. */
1880 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1882 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1883 struct thread_info
*thread
= get_lwp_thread (lp
);
1885 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1886 && thread
->last_resume_kind
== resume_step
1887 && lp
->status_pending_p
)
1893 /* Select the Nth LWP that has had a SIGTRAP event that should be
1897 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1899 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1900 struct thread_info
*thread
= get_lwp_thread (lp
);
1901 int *selector
= data
;
1903 gdb_assert (selector
!= NULL
);
1905 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1906 if (thread
->last_resume_kind
!= resume_stop
1907 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1908 && lp
->status_pending_p
1909 && WIFSTOPPED (lp
->status_pending
)
1910 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1911 && !breakpoint_inserted_here (lp
->stop_pc
))
1912 if ((*selector
)-- == 0)
1919 cancel_breakpoints_callback (struct inferior_list_entry
*entry
, void *data
)
1921 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1922 struct thread_info
*thread
= get_lwp_thread (lp
);
1923 struct lwp_info
*event_lp
= data
;
1925 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1929 /* If a LWP other than the LWP that we're reporting an event for has
1930 hit a GDB breakpoint (as opposed to some random trap signal),
1931 then just arrange for it to hit it again later. We don't keep
1932 the SIGTRAP status and don't forward the SIGTRAP signal to the
1933 LWP. We will handle the current event, eventually we will resume
1934 all LWPs, and this one will get its breakpoint trap again.
1936 If we do not do this, then we run the risk that the user will
1937 delete or disable the breakpoint, but the LWP will have already
1940 if (thread
->last_resume_kind
!= resume_stop
1941 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1942 && lp
->status_pending_p
1943 && WIFSTOPPED (lp
->status_pending
)
1944 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1946 && !lp
->stopped_by_watchpoint
1947 && cancel_breakpoint (lp
))
1948 /* Throw away the SIGTRAP. */
1949 lp
->status_pending_p
= 0;
1955 linux_cancel_breakpoints (void)
1957 find_inferior (&all_lwps
, cancel_breakpoints_callback
, NULL
);
1960 /* Select one LWP out of those that have events pending. */
1963 select_event_lwp (struct lwp_info
**orig_lp
)
1966 int random_selector
;
1967 struct lwp_info
*event_lp
;
1969 /* Give preference to any LWP that is being single-stepped. */
1971 = (struct lwp_info
*) find_inferior (&all_lwps
,
1972 select_singlestep_lwp_callback
, NULL
);
1973 if (event_lp
!= NULL
)
1977 "SEL: Select single-step %s\n",
1978 target_pid_to_str (ptid_of (event_lp
)));
1982 /* No single-stepping LWP. Select one at random, out of those
1983 which have had SIGTRAP events. */
1985 /* First see how many SIGTRAP events we have. */
1986 find_inferior (&all_lwps
, count_events_callback
, &num_events
);
1988 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1989 random_selector
= (int)
1990 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
1992 if (debug_threads
&& num_events
> 1)
1994 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1995 num_events
, random_selector
);
1997 event_lp
= (struct lwp_info
*) find_inferior (&all_lwps
,
1998 select_event_lwp_callback
,
2002 if (event_lp
!= NULL
)
2004 /* Switch the event LWP. */
2005 *orig_lp
= event_lp
;
2009 /* Decrement the suspend count of an LWP. */
2012 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2014 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2016 /* Ignore EXCEPT. */
2022 gdb_assert (lwp
->suspended
>= 0);
2026 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2030 unsuspend_all_lwps (struct lwp_info
*except
)
2032 find_inferior (&all_lwps
, unsuspend_one_lwp
, except
);
2035 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2036 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2038 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2039 static ptid_t
linux_wait_1 (ptid_t ptid
,
2040 struct target_waitstatus
*ourstatus
,
2041 int target_options
);
2043 /* Stabilize threads (move out of jump pads).
2045 If a thread is midway collecting a fast tracepoint, we need to
2046 finish the collection and move it out of the jump pad before
2047 reporting the signal.
2049 This avoids recursion while collecting (when a signal arrives
2050 midway, and the signal handler itself collects), which would trash
2051 the trace buffer. In case the user set a breakpoint in a signal
2052 handler, this avoids the backtrace showing the jump pad, etc..
2053 Most importantly, there are certain things we can't do safely if
2054 threads are stopped in a jump pad (or in its callee's). For
2057 - starting a new trace run. A thread still collecting the
2058 previous run, could trash the trace buffer when resumed. The trace
2059 buffer control structures would have been reset but the thread had
2060 no way to tell. The thread could even midway memcpy'ing to the
2061 buffer, which would mean that when resumed, it would clobber the
2062 trace buffer that had been set for a new run.
2064 - we can't rewrite/reuse the jump pads for new tracepoints
2065 safely. Say you do tstart while a thread is stopped midway while
2066 collecting. When the thread is later resumed, it finishes the
2067 collection, and returns to the jump pad, to execute the original
2068 instruction that was under the tracepoint jump at the time the
2069 older run had been started. If the jump pad had been rewritten
2070 since for something else in the new run, the thread would now
2071 execute the wrong / random instructions. */
2074 linux_stabilize_threads (void)
2076 struct thread_info
*save_inferior
;
2077 struct lwp_info
*lwp_stuck
;
2080 = (struct lwp_info
*) find_inferior (&all_lwps
,
2081 stuck_in_jump_pad_callback
, NULL
);
2082 if (lwp_stuck
!= NULL
)
2085 fprintf (stderr
, "can't stabilize, LWP %ld is stuck in jump pad\n",
2086 lwpid_of (lwp_stuck
));
2090 save_inferior
= current_inferior
;
2092 stabilizing_threads
= 1;
2095 for_each_inferior (&all_lwps
, move_out_of_jump_pad_callback
);
2097 /* Loop until all are stopped out of the jump pads. */
2098 while (find_inferior (&all_lwps
, lwp_running
, NULL
) != NULL
)
2100 struct target_waitstatus ourstatus
;
2101 struct lwp_info
*lwp
;
2104 /* Note that we go through the full wait even loop. While
2105 moving threads out of jump pad, we need to be able to step
2106 over internal breakpoints and such. */
2107 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2109 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2111 lwp
= get_thread_lwp (current_inferior
);
2116 if (ourstatus
.value
.sig
!= TARGET_SIGNAL_0
2117 || current_inferior
->last_resume_kind
== resume_stop
)
2119 wstat
= W_STOPCODE (target_signal_to_host (ourstatus
.value
.sig
));
2120 enqueue_one_deferred_signal (lwp
, &wstat
);
2125 find_inferior (&all_lwps
, unsuspend_one_lwp
, NULL
);
2127 stabilizing_threads
= 0;
2129 current_inferior
= save_inferior
;
2134 = (struct lwp_info
*) find_inferior (&all_lwps
,
2135 stuck_in_jump_pad_callback
, NULL
);
2136 if (lwp_stuck
!= NULL
)
2137 fprintf (stderr
, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2138 lwpid_of (lwp_stuck
));
2142 /* Wait for process, returns status. */
2145 linux_wait_1 (ptid_t ptid
,
2146 struct target_waitstatus
*ourstatus
, int target_options
)
2149 struct lwp_info
*event_child
;
2152 int step_over_finished
;
2153 int bp_explains_trap
;
2154 int maybe_internal_trap
;
2158 /* Translate generic target options into linux options. */
2160 if (target_options
& TARGET_WNOHANG
)
2164 bp_explains_trap
= 0;
2166 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2168 /* If we were only supposed to resume one thread, only wait for
2169 that thread - if it's still alive. If it died, however - which
2170 can happen if we're coming from the thread death case below -
2171 then we need to make sure we restart the other threads. We could
2172 pick a thread at random or restart all; restarting all is less
2175 && !ptid_equal (cont_thread
, null_ptid
)
2176 && !ptid_equal (cont_thread
, minus_one_ptid
))
2178 struct thread_info
*thread
;
2180 thread
= (struct thread_info
*) find_inferior_id (&all_threads
,
2183 /* No stepping, no signal - unless one is pending already, of course. */
2186 struct thread_resume resume_info
;
2187 resume_info
.thread
= minus_one_ptid
;
2188 resume_info
.kind
= resume_continue
;
2189 resume_info
.sig
= 0;
2190 linux_resume (&resume_info
, 1);
2196 if (ptid_equal (step_over_bkpt
, null_ptid
))
2197 pid
= linux_wait_for_event (ptid
, &w
, options
);
2201 fprintf (stderr
, "step_over_bkpt set [%s], doing a blocking wait\n",
2202 target_pid_to_str (step_over_bkpt
));
2203 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2206 if (pid
== 0) /* only if TARGET_WNOHANG */
2209 event_child
= get_thread_lwp (current_inferior
);
2211 /* If we are waiting for a particular child, and it exited,
2212 linux_wait_for_event will return its exit status. Similarly if
2213 the last child exited. If this is not the last child, however,
2214 do not report it as exited until there is a 'thread exited' response
2215 available in the remote protocol. Instead, just wait for another event.
2216 This should be safe, because if the thread crashed we will already
2217 have reported the termination signal to GDB; that should stop any
2218 in-progress stepping operations, etc.
2220 Report the exit status of the last thread to exit. This matches
2221 LinuxThreads' behavior. */
2223 if (last_thread_of_process_p (current_inferior
))
2225 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2229 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2230 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2234 "\nChild exited with retcode = %x \n",
2239 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2240 ourstatus
->value
.sig
= target_signal_from_host (WTERMSIG (w
));
2244 "\nChild terminated with signal = %x \n",
2249 return ptid_of (event_child
);
2254 if (!WIFSTOPPED (w
))
2258 /* If this event was not handled before, and is not a SIGTRAP, we
2259 report it. SIGILL and SIGSEGV are also treated as traps in case
2260 a breakpoint is inserted at the current PC. If this target does
2261 not support internal breakpoints at all, we also report the
2262 SIGTRAP without further processing; it's of no concern to us. */
2264 = (supports_breakpoints ()
2265 && (WSTOPSIG (w
) == SIGTRAP
2266 || ((WSTOPSIG (w
) == SIGILL
2267 || WSTOPSIG (w
) == SIGSEGV
)
2268 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2270 if (maybe_internal_trap
)
2272 /* Handle anything that requires bookkeeping before deciding to
2273 report the event or continue waiting. */
2275 /* First check if we can explain the SIGTRAP with an internal
2276 breakpoint, or if we should possibly report the event to GDB.
2277 Do this before anything that may remove or insert a
2279 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2281 /* We have a SIGTRAP, possibly a step-over dance has just
2282 finished. If so, tweak the state machine accordingly,
2283 reinsert breakpoints and delete any reinsert (software
2284 single-step) breakpoints. */
2285 step_over_finished
= finish_step_over (event_child
);
2287 /* Now invoke the callbacks of any internal breakpoints there. */
2288 check_breakpoints (event_child
->stop_pc
);
2290 /* Handle tracepoint data collecting. This may overflow the
2291 trace buffer, and cause a tracing stop, removing
2293 trace_event
= handle_tracepoints (event_child
);
2295 if (bp_explains_trap
)
2297 /* If we stepped or ran into an internal breakpoint, we've
2298 already handled it. So next time we resume (from this
2299 PC), we should step over it. */
2301 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
2303 if (breakpoint_here (event_child
->stop_pc
))
2304 event_child
->need_step_over
= 1;
2309 /* We have some other signal, possibly a step-over dance was in
2310 progress, and it should be cancelled too. */
2311 step_over_finished
= finish_step_over (event_child
);
2314 /* We have all the data we need. Either report the event to GDB, or
2315 resume threads and keep waiting for more. */
2317 /* If we're collecting a fast tracepoint, finish the collection and
2318 move out of the jump pad before delivering a signal. See
2319 linux_stabilize_threads. */
2322 && WSTOPSIG (w
) != SIGTRAP
2323 && supports_fast_tracepoints ()
2324 && in_process_agent_loaded ())
2328 "Got signal %d for LWP %ld. Check if we need "
2329 "to defer or adjust it.\n",
2330 WSTOPSIG (w
), lwpid_of (event_child
));
2332 /* Allow debugging the jump pad itself. */
2333 if (current_inferior
->last_resume_kind
!= resume_step
2334 && maybe_move_out_of_jump_pad (event_child
, &w
))
2336 enqueue_one_deferred_signal (event_child
, &w
);
2340 "Signal %d for LWP %ld deferred (in jump pad)\n",
2341 WSTOPSIG (w
), lwpid_of (event_child
));
2343 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2348 if (event_child
->collecting_fast_tracepoint
)
2352 LWP %ld was trying to move out of the jump pad (%d). \
2353 Check if we're already there.\n",
2354 lwpid_of (event_child
),
2355 event_child
->collecting_fast_tracepoint
);
2359 event_child
->collecting_fast_tracepoint
2360 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2362 if (event_child
->collecting_fast_tracepoint
!= 1)
2364 /* No longer need this breakpoint. */
2365 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
2369 "No longer need exit-jump-pad bkpt; removing it."
2370 "stopping all threads momentarily.\n");
2372 /* Other running threads could hit this breakpoint.
2373 We don't handle moribund locations like GDB does,
2374 instead we always pause all threads when removing
2375 breakpoints, so that any step-over or
2376 decr_pc_after_break adjustment is always taken
2377 care of while the breakpoint is still
2379 stop_all_lwps (1, event_child
);
2380 cancel_breakpoints ();
2382 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
2383 event_child
->exit_jump_pad_bkpt
= NULL
;
2385 unstop_all_lwps (1, event_child
);
2387 gdb_assert (event_child
->suspended
>= 0);
2391 if (event_child
->collecting_fast_tracepoint
== 0)
2395 "fast tracepoint finished "
2396 "collecting successfully.\n");
2398 /* We may have a deferred signal to report. */
2399 if (dequeue_one_deferred_signal (event_child
, &w
))
2402 fprintf (stderr
, "dequeued one signal.\n");
2407 fprintf (stderr
, "no deferred signals.\n");
2409 if (stabilizing_threads
)
2411 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2412 ourstatus
->value
.sig
= TARGET_SIGNAL_0
;
2413 return ptid_of (event_child
);
2419 /* Check whether GDB would be interested in this event. */
2421 /* If GDB is not interested in this signal, don't stop other
2422 threads, and don't report it to GDB. Just resume the inferior
2423 right away. We do this for threading-related signals as well as
2424 any that GDB specifically requested we ignore. But never ignore
2425 SIGSTOP if we sent it ourselves, and do not ignore signals when
2426 stepping - they may require special handling to skip the signal
2428 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2431 && current_inferior
->last_resume_kind
!= resume_step
2433 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2434 (current_process ()->private->thread_db
!= NULL
2435 && (WSTOPSIG (w
) == __SIGRTMIN
2436 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
2439 (pass_signals
[target_signal_from_host (WSTOPSIG (w
))]
2440 && !(WSTOPSIG (w
) == SIGSTOP
2441 && current_inferior
->last_resume_kind
== resume_stop
))))
2443 siginfo_t info
, *info_p
;
2446 fprintf (stderr
, "Ignored signal %d for LWP %ld.\n",
2447 WSTOPSIG (w
), lwpid_of (event_child
));
2449 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (event_child
), 0, &info
) == 0)
2453 linux_resume_one_lwp (event_child
, event_child
->stepping
,
2454 WSTOPSIG (w
), info_p
);
2458 /* If GDB wanted this thread to single step, we always want to
2459 report the SIGTRAP, and let GDB handle it. Watchpoints should
2460 always be reported. So should signals we can't explain. A
2461 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2462 not support Z0 breakpoints. If we do, we're be able to handle
2463 GDB breakpoints on top of internal breakpoints, by handling the
2464 internal breakpoint and still reporting the event to GDB. If we
2465 don't, we're out of luck, GDB won't see the breakpoint hit. */
2466 report_to_gdb
= (!maybe_internal_trap
2467 || current_inferior
->last_resume_kind
== resume_step
2468 || event_child
->stopped_by_watchpoint
2469 || (!step_over_finished
2470 && !bp_explains_trap
&& !trace_event
)
2471 || (gdb_breakpoint_here (event_child
->stop_pc
)
2472 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)));
2474 /* We found no reason GDB would want us to stop. We either hit one
2475 of our own breakpoints, or finished an internal step GDB
2476 shouldn't know about. */
2481 if (bp_explains_trap
)
2482 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
2483 if (step_over_finished
)
2484 fprintf (stderr
, "Step-over finished.\n");
2486 fprintf (stderr
, "Tracepoint event.\n");
2489 /* We're not reporting this breakpoint to GDB, so apply the
2490 decr_pc_after_break adjustment to the inferior's regcache
2493 if (the_low_target
.set_pc
!= NULL
)
2495 struct regcache
*regcache
2496 = get_thread_regcache (get_lwp_thread (event_child
), 1);
2497 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2500 /* We may have finished stepping over a breakpoint. If so,
2501 we've stopped and suspended all LWPs momentarily except the
2502 stepping one. This is where we resume them all again. We're
2503 going to keep waiting, so use proceed, which handles stepping
2504 over the next breakpoint. */
2506 fprintf (stderr
, "proceeding all threads.\n");
2508 if (step_over_finished
)
2509 unsuspend_all_lwps (event_child
);
2511 proceed_all_lwps ();
2517 if (current_inferior
->last_resume_kind
== resume_step
)
2518 fprintf (stderr
, "GDB wanted to single-step, reporting event.\n");
2519 if (event_child
->stopped_by_watchpoint
)
2520 fprintf (stderr
, "Stopped by watchpoint.\n");
2521 if (gdb_breakpoint_here (event_child
->stop_pc
))
2522 fprintf (stderr
, "Stopped by GDB breakpoint.\n");
2524 fprintf (stderr
, "Hit a non-gdbserver trap event.\n");
2527 /* Alright, we're going to report a stop. */
2529 if (!non_stop
&& !stabilizing_threads
)
2531 /* In all-stop, stop all threads. */
2532 stop_all_lwps (0, NULL
);
2534 /* If we're not waiting for a specific LWP, choose an event LWP
2535 from among those that have had events. Giving equal priority
2536 to all LWPs that have had events helps prevent
2538 if (ptid_equal (ptid
, minus_one_ptid
))
2540 event_child
->status_pending_p
= 1;
2541 event_child
->status_pending
= w
;
2543 select_event_lwp (&event_child
);
2545 event_child
->status_pending_p
= 0;
2546 w
= event_child
->status_pending
;
2549 /* Now that we've selected our final event LWP, cancel any
2550 breakpoints in other LWPs that have hit a GDB breakpoint.
2551 See the comment in cancel_breakpoints_callback to find out
2553 find_inferior (&all_lwps
, cancel_breakpoints_callback
, event_child
);
2555 /* If we were going a step-over, all other threads but the stepping one
2556 had been paused in start_step_over, with their suspend counts
2557 incremented. We don't want to do a full unstop/unpause, because we're
2558 in all-stop mode (so we want threads stopped), but we still need to
2559 unsuspend the other threads, to decrement their `suspended' count
2561 if (step_over_finished
)
2562 unsuspend_all_lwps (event_child
);
2564 /* Stabilize threads (move out of jump pads). */
2565 stabilize_threads ();
2569 /* If we just finished a step-over, then all threads had been
2570 momentarily paused. In all-stop, that's fine, we want
2571 threads stopped by now anyway. In non-stop, we need to
2572 re-resume threads that GDB wanted to be running. */
2573 if (step_over_finished
)
2574 unstop_all_lwps (1, event_child
);
2577 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2579 if (current_inferior
->last_resume_kind
== resume_stop
2580 && WSTOPSIG (w
) == SIGSTOP
)
2582 /* A thread that has been requested to stop by GDB with vCont;t,
2583 and it stopped cleanly, so report as SIG0. The use of
2584 SIGSTOP is an implementation detail. */
2585 ourstatus
->value
.sig
= TARGET_SIGNAL_0
;
2587 else if (current_inferior
->last_resume_kind
== resume_stop
2588 && WSTOPSIG (w
) != SIGSTOP
)
2590 /* A thread that has been requested to stop by GDB with vCont;t,
2591 but, it stopped for other reasons. */
2592 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
2596 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
2599 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
2602 fprintf (stderr
, "linux_wait ret = %s, %d, %d\n",
2603 target_pid_to_str (ptid_of (event_child
)),
2605 ourstatus
->value
.sig
);
2607 return ptid_of (event_child
);
2610 /* Get rid of any pending event in the pipe. */
2612 async_file_flush (void)
2618 ret
= read (linux_event_pipe
[0], &buf
, 1);
2619 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
2622 /* Put something in the pipe, so the event loop wakes up. */
2624 async_file_mark (void)
2628 async_file_flush ();
2631 ret
= write (linux_event_pipe
[1], "+", 1);
2632 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
2634 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2635 be awakened anyway. */
2639 linux_wait (ptid_t ptid
,
2640 struct target_waitstatus
*ourstatus
, int target_options
)
2645 fprintf (stderr
, "linux_wait: [%s]\n", target_pid_to_str (ptid
));
2647 /* Flush the async file first. */
2648 if (target_is_async_p ())
2649 async_file_flush ();
2651 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
2653 /* If at least one stop was reported, there may be more. A single
2654 SIGCHLD can signal more than one child stop. */
2655 if (target_is_async_p ()
2656 && (target_options
& TARGET_WNOHANG
) != 0
2657 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  Returns 0 on success, -1 with errno set
   on failure, like kill(2).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	/* Anything but ENOSYS means the kernel knows tkill; return its
	   result (success or a real error such as ESRCH).  */
	if (errno != ENOSYS)
	  return ret;
	/* tkill is unavailable; remember that and fall back to kill.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
/* Target hook: request that LWP stop.  Implemented by queueing a
   SIGSTOP for it.  */
static void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2698 send_sigstop (struct lwp_info
*lwp
)
2702 pid
= lwpid_of (lwp
);
2704 /* If we already have a pending stop signal for this process, don't
2706 if (lwp
->stop_expected
)
2709 fprintf (stderr
, "Have pending sigstop for lwp %d\n", pid
);
2715 fprintf (stderr
, "Sending sigstop to lwp %d\n", pid
);
2717 lwp
->stop_expected
= 1;
2718 kill_lwp (pid
, SIGSTOP
);
2722 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
2724 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2726 /* Ignore EXCEPT. */
2737 /* Increment the suspend count of an LWP, and stop it, if not stopped
2740 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
2743 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2745 /* Ignore EXCEPT. */
2751 return send_sigstop_callback (entry
, except
);
2755 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
2757 /* It's dead, really. */
2760 /* Store the exit status for later. */
2761 lwp
->status_pending_p
= 1;
2762 lwp
->status_pending
= wstat
;
2764 /* Prevent trying to stop it. */
2767 /* No further stops are expected from a dead lwp. */
2768 lwp
->stop_expected
= 0;
2772 wait_for_sigstop (struct inferior_list_entry
*entry
)
2774 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2775 struct thread_info
*saved_inferior
;
2784 fprintf (stderr
, "wait_for_sigstop: LWP %ld already stopped\n",
2789 saved_inferior
= current_inferior
;
2790 if (saved_inferior
!= NULL
)
2791 saved_tid
= ((struct inferior_list_entry
*) saved_inferior
)->id
;
2793 saved_tid
= null_ptid
; /* avoid bogus unused warning */
2795 ptid
= lwp
->head
.id
;
2798 fprintf (stderr
, "wait_for_sigstop: pulling one event\n");
2800 pid
= linux_wait_for_event (ptid
, &wstat
, __WALL
);
2802 /* If we stopped with a non-SIGSTOP signal, save it for later
2803 and record the pending SIGSTOP. If the process exited, just
2805 if (WIFSTOPPED (wstat
))
2808 fprintf (stderr
, "LWP %ld stopped with signal %d\n",
2809 lwpid_of (lwp
), WSTOPSIG (wstat
));
2811 if (WSTOPSIG (wstat
) != SIGSTOP
)
2814 fprintf (stderr
, "LWP %ld stopped with non-sigstop status %06x\n",
2815 lwpid_of (lwp
), wstat
);
2817 lwp
->status_pending_p
= 1;
2818 lwp
->status_pending
= wstat
;
2824 fprintf (stderr
, "Process %d exited while stopping LWPs\n", pid
);
2826 lwp
= find_lwp_pid (pid_to_ptid (pid
));
2829 /* Leave this status pending for the next time we're able to
2830 report it. In the mean time, we'll report this lwp as
2831 dead to GDB, so GDB doesn't try to read registers and
2832 memory from it. This can only happen if this was the
2833 last thread of the process; otherwise, PID is removed
2834 from the thread tables before linux_wait_for_event
2836 mark_lwp_dead (lwp
, wstat
);
2840 if (saved_inferior
== NULL
|| linux_thread_alive (saved_tid
))
2841 current_inferior
= saved_inferior
;
2845 fprintf (stderr
, "Previously current thread died.\n");
2849 /* We can't change the current inferior behind GDB's back,
2850 otherwise, a subsequent command may apply to the wrong
2852 current_inferior
= NULL
;
2856 /* Set a valid thread as current. */
2857 set_desired_inferior (0);
2862 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2863 move it out, because we need to report the stop event to GDB. For
2864 example, if the user puts a breakpoint in the jump pad, it's
2865 because she wants to debug it. */
2868 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
2870 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2871 struct thread_info
*thread
= get_lwp_thread (lwp
);
2873 gdb_assert (lwp
->suspended
== 0);
2874 gdb_assert (lwp
->stopped
);
2876 /* Allow debugging the jump pad, gdb_collect, etc.. */
2877 return (supports_fast_tracepoints ()
2878 && in_process_agent_loaded ()
2879 && (gdb_breakpoint_here (lwp
->stop_pc
)
2880 || lwp
->stopped_by_watchpoint
2881 || thread
->last_resume_kind
== resume_step
)
2882 && linux_fast_tracepoint_collecting (lwp
, NULL
));
2886 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
2888 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2889 struct thread_info
*thread
= get_lwp_thread (lwp
);
2892 gdb_assert (lwp
->suspended
== 0);
2893 gdb_assert (lwp
->stopped
);
2895 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
2897 /* Allow debugging the jump pad, gdb_collect, etc. */
2898 if (!gdb_breakpoint_here (lwp
->stop_pc
)
2899 && !lwp
->stopped_by_watchpoint
2900 && thread
->last_resume_kind
!= resume_step
2901 && maybe_move_out_of_jump_pad (lwp
, wstat
))
2905 "LWP %ld needs stabilizing (in jump pad)\n",
2910 lwp
->status_pending_p
= 0;
2911 enqueue_one_deferred_signal (lwp
, wstat
);
2915 "Signal %d for LWP %ld deferred "
2917 WSTOPSIG (*wstat
), lwpid_of (lwp
));
2920 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
2927 lwp_running (struct inferior_list_entry
*entry
, void *data
)
2929 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2938 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2939 If SUSPEND, then also increase the suspend count of every LWP,
2943 stop_all_lwps (int suspend
, struct lwp_info
*except
)
2945 stopping_threads
= 1;
2948 find_inferior (&all_lwps
, suspend_and_send_sigstop_callback
, except
);
2950 find_inferior (&all_lwps
, send_sigstop_callback
, except
);
2951 for_each_inferior (&all_lwps
, wait_for_sigstop
);
2952 stopping_threads
= 0;
2955 /* Resume execution of the inferior process.
2956 If STEP is nonzero, single-step it.
2957 If SIGNAL is nonzero, give it that signal. */
2960 linux_resume_one_lwp (struct lwp_info
*lwp
,
2961 int step
, int signal
, siginfo_t
*info
)
2963 struct thread_info
*saved_inferior
;
2964 int fast_tp_collecting
;
2966 if (lwp
->stopped
== 0)
2969 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
2971 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
2973 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2974 user used the "jump" command, or "set $pc = foo"). */
2975 if (lwp
->stop_pc
!= get_pc (lwp
))
2977 /* Collecting 'while-stepping' actions doesn't make sense
2979 release_while_stepping_state_list (get_lwp_thread (lwp
));
2982 /* If we have pending signals or status, and a new signal, enqueue the
2983 signal. Also enqueue the signal if we are waiting to reinsert a
2984 breakpoint; it will be picked up again below. */
2986 && (lwp
->status_pending_p
2987 || lwp
->pending_signals
!= NULL
2988 || lwp
->bp_reinsert
!= 0
2989 || fast_tp_collecting
))
2991 struct pending_signals
*p_sig
;
2992 p_sig
= xmalloc (sizeof (*p_sig
));
2993 p_sig
->prev
= lwp
->pending_signals
;
2994 p_sig
->signal
= signal
;
2996 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
2998 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
2999 lwp
->pending_signals
= p_sig
;
3002 if (lwp
->status_pending_p
)
3005 fprintf (stderr
, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3006 " has pending status\n",
3007 lwpid_of (lwp
), step
? "step" : "continue", signal
,
3008 lwp
->stop_expected
? "expected" : "not expected");
3012 saved_inferior
= current_inferior
;
3013 current_inferior
= get_lwp_thread (lwp
);
3016 fprintf (stderr
, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3017 lwpid_of (lwp
), step
? "step" : "continue", signal
,
3018 lwp
->stop_expected
? "expected" : "not expected");
3020 /* This bit needs some thinking about. If we get a signal that
3021 we must report while a single-step reinsert is still pending,
3022 we often end up resuming the thread. It might be better to
3023 (ew) allow a stack of pending events; then we could be sure that
3024 the reinsert happened right away and not lose any signals.
3026 Making this stack would also shrink the window in which breakpoints are
3027 uninserted (see comment in linux_wait_for_lwp) but not enough for
3028 complete correctness, so it won't solve that problem. It may be
3029 worthwhile just to solve this one, however. */
3030 if (lwp
->bp_reinsert
!= 0)
3033 fprintf (stderr
, " pending reinsert at 0x%s\n",
3034 paddress (lwp
->bp_reinsert
));
3036 if (lwp
->bp_reinsert
!= 0 && can_hardware_single_step ())
3038 if (fast_tp_collecting
== 0)
3041 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3043 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3050 /* Postpone any pending signal. It was enqueued above. */
3054 if (fast_tp_collecting
== 1)
3058 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3061 /* Postpone any pending signal. It was enqueued above. */
3064 else if (fast_tp_collecting
== 2)
3068 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3071 if (can_hardware_single_step ())
3074 fatal ("moving out of jump pad single-stepping"
3075 " not implemented on this target");
3077 /* Postpone any pending signal. It was enqueued above. */
3081 /* If we have while-stepping actions in this thread set it stepping.
3082 If we have a signal to deliver, it may or may not be set to
3083 SIG_IGN, we don't know. Assume so, and allow collecting
3084 while-stepping into a signal handler. A possible smart thing to
3085 do would be to set an internal breakpoint at the signal return
3086 address, continue, and carry on catching this while-stepping
3087 action only when that breakpoint is hit. A future
3089 if (get_lwp_thread (lwp
)->while_stepping
!= NULL
3090 && can_hardware_single_step ())
3094 "lwp %ld has a while-stepping action -> forcing step.\n",
3099 if (debug_threads
&& the_low_target
.get_pc
!= NULL
)
3101 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 1);
3102 CORE_ADDR pc
= (*the_low_target
.get_pc
) (regcache
);
3103 fprintf (stderr
, " resuming from pc 0x%lx\n", (long) pc
);
3106 /* If we have pending signals, consume one unless we are trying to
3107 reinsert a breakpoint or we're trying to finish a fast tracepoint
3109 if (lwp
->pending_signals
!= NULL
3110 && lwp
->bp_reinsert
== 0
3111 && fast_tp_collecting
== 0)
3113 struct pending_signals
**p_sig
;
3115 p_sig
= &lwp
->pending_signals
;
3116 while ((*p_sig
)->prev
!= NULL
)
3117 p_sig
= &(*p_sig
)->prev
;
3119 signal
= (*p_sig
)->signal
;
3120 if ((*p_sig
)->info
.si_signo
!= 0)
3121 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), 0, &(*p_sig
)->info
);
3127 if (the_low_target
.prepare_to_resume
!= NULL
)
3128 the_low_target
.prepare_to_resume (lwp
);
3130 regcache_invalidate_one ((struct inferior_list_entry
*)
3131 get_lwp_thread (lwp
));
3134 lwp
->stopped_by_watchpoint
= 0;
3135 lwp
->stepping
= step
;
3136 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (lwp
), 0,
3137 /* Coerce to a uintptr_t first to avoid potential gcc warning
3138 of coercing an 8 byte integer to a 4 byte pointer. */
3139 (PTRACE_ARG4_TYPE
) (uintptr_t) signal
);
3141 current_inferior
= saved_inferior
;
3144 /* ESRCH from ptrace either means that the thread was already
3145 running (an error) or that it is gone (a race condition). If
3146 it's gone, we will get a notification the next time we wait,
3147 so we can ignore the error. We could differentiate these
3148 two, but it's tricky without waiting; the thread still exists
3149 as a zombie, so sending it signal 0 would succeed. So just
3154 perror_with_name ("ptrace");
/* Bundle of the resume requests GDB sent, passed through
   find_inferior to linux_set_resume_request.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume actions.  */
  size_t n;			/* Number of elements in RESUME.  */
};
3164 /* This function is called once per thread. We look up the thread
3165 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3168 This algorithm is O(threads * resume elements), but resume elements
3169 is small (and will remain small at least until GDB supports thread
3172 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3174 struct lwp_info
*lwp
;
3175 struct thread_info
*thread
;
3177 struct thread_resume_array
*r
;
3179 thread
= (struct thread_info
*) entry
;
3180 lwp
= get_thread_lwp (thread
);
3183 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3185 ptid_t ptid
= r
->resume
[ndx
].thread
;
3186 if (ptid_equal (ptid
, minus_one_ptid
)
3187 || ptid_equal (ptid
, entry
->id
)
3188 || (ptid_is_pid (ptid
)
3189 && (ptid_get_pid (ptid
) == pid_of (lwp
)))
3190 || (ptid_get_lwp (ptid
) == -1
3191 && (ptid_get_pid (ptid
) == pid_of (lwp
))))
3193 if (r
->resume
[ndx
].kind
== resume_stop
3194 && thread
->last_resume_kind
== resume_stop
)
3197 fprintf (stderr
, "already %s LWP %ld at GDB's request\n",
3198 thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
3206 lwp
->resume
= &r
->resume
[ndx
];
3207 thread
->last_resume_kind
= lwp
->resume
->kind
;
3209 /* If we had a deferred signal to report, dequeue one now.
3210 This can happen if LWP gets more than one signal while
3211 trying to get out of a jump pad. */
3213 && !lwp
->status_pending_p
3214 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
3216 lwp
->status_pending_p
= 1;
3220 "Dequeueing deferred signal %d for LWP %ld, "
3221 "leaving status pending.\n",
3222 WSTOPSIG (lwp
->status_pending
), lwpid_of (lwp
));
3229 /* No resume action for this thread. */
3236 /* Set *FLAG_P if this lwp has an interesting status pending. */
3238 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
3240 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3242 /* LWPs which will not be resumed are not interesting, because
3243 we might not wait for them next time through linux_wait. */
3244 if (lwp
->resume
== NULL
)
3247 if (lwp
->status_pending_p
)
3248 * (int *) flag_p
= 1;
3253 /* Return 1 if this lwp that GDB wants running is stopped at an
3254 internal breakpoint that we need to step over. It assumes that any
3255 required STOP_PC adjustment has already been propagated to the
3256 inferior's regcache. */
3259 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
3261 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3262 struct thread_info
*thread
;
3263 struct thread_info
*saved_inferior
;
3266 /* LWPs which will not be resumed are not interesting, because we
3267 might not wait for them next time through linux_wait. */
3273 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3278 thread
= get_lwp_thread (lwp
);
3280 if (thread
->last_resume_kind
== resume_stop
)
3284 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3289 gdb_assert (lwp
->suspended
>= 0);
3295 "Need step over [LWP %ld]? Ignoring, suspended\n",
3300 if (!lwp
->need_step_over
)
3304 "Need step over [LWP %ld]? No\n", lwpid_of (lwp
));
3307 if (lwp
->status_pending_p
)
3311 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3316 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3320 /* If the PC has changed since we stopped, then don't do anything,
3321 and let the breakpoint/tracepoint be hit. This happens if, for
3322 instance, GDB handled the decr_pc_after_break subtraction itself,
3323 GDB is OOL stepping this thread, or the user has issued a "jump"
3324 command, or poked thread's registers herself. */
3325 if (pc
!= lwp
->stop_pc
)
3329 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3330 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3331 lwpid_of (lwp
), paddress (lwp
->stop_pc
), paddress (pc
));
3333 lwp
->need_step_over
= 0;
3337 saved_inferior
= current_inferior
;
3338 current_inferior
= thread
;
3340 /* We can only step over breakpoints we know about. */
3341 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
3343 /* Don't step over a breakpoint that GDB expects to hit
3344 though. If the condition is being evaluated on the target's side
3345 and it evaluate to false, step over this breakpoint as well. */
3346 if (gdb_breakpoint_here (pc
)
3347 && gdb_condition_true_at_breakpoint (pc
))
3351 "Need step over [LWP %ld]? yes, but found"
3352 " GDB breakpoint at 0x%s; skipping step over\n",
3353 lwpid_of (lwp
), paddress (pc
));
3355 current_inferior
= saved_inferior
;
3362 "Need step over [LWP %ld]? yes, "
3363 "found breakpoint at 0x%s\n",
3364 lwpid_of (lwp
), paddress (pc
));
3366 /* We've found an lwp that needs stepping over --- return 1 so
3367 that find_inferior stops looking. */
3368 current_inferior
= saved_inferior
;
3370 /* If the step over is cancelled, this is set again. */
3371 lwp
->need_step_over
= 0;
3376 current_inferior
= saved_inferior
;
3380 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3381 lwpid_of (lwp
), paddress (pc
));
3386 /* Start a step-over operation on LWP. When LWP stopped at a
3387 breakpoint, to make progress, we need to remove the breakpoint out
3388 of the way. If we let other threads run while we do that, they may
3389 pass by the breakpoint location and miss hitting it. To avoid
3390 that, a step-over momentarily stops all threads while LWP is
3391 single-stepped while the breakpoint is temporarily uninserted from
3392 the inferior. When the single-step finishes, we reinsert the
3393 breakpoint, and let all threads that are supposed to be running,
3396 On targets that don't support hardware single-step, we don't
3397 currently support full software single-stepping. Instead, we only
3398 support stepping over the thread event breakpoint, by asking the
3399 low target where to place a reinsert breakpoint. Since this
3400 routine assumes the breakpoint being stepped over is a thread event
3401 breakpoint, it usually assumes the return address of the current
3402 function is a good enough place to set the reinsert breakpoint. */
3405 start_step_over (struct lwp_info
*lwp
)
3407 struct thread_info
*saved_inferior
;
3413 "Starting step-over on LWP %ld. Stopping all threads\n",
3416 stop_all_lwps (1, lwp
);
3417 gdb_assert (lwp
->suspended
== 0);
3420 fprintf (stderr
, "Done stopping all threads for step-over.\n");
3422 /* Note, we should always reach here with an already adjusted PC,
3423 either by GDB (if we're resuming due to GDB's request), or by our
3424 caller, if we just finished handling an internal breakpoint GDB
3425 shouldn't care about. */
3428 saved_inferior
= current_inferior
;
3429 current_inferior
= get_lwp_thread (lwp
);
3431 lwp
->bp_reinsert
= pc
;
3432 uninsert_breakpoints_at (pc
);
3433 uninsert_fast_tracepoint_jumps_at (pc
);
3435 if (can_hardware_single_step ())
3441 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
3442 set_reinsert_breakpoint (raddr
);
3446 current_inferior
= saved_inferior
;
3448 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
3450 /* Require next event from this LWP. */
3451 step_over_bkpt
= lwp
->head
.id
;
3455 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3456 start_step_over, if still there, and delete any reinsert
3457 breakpoints we've set, on non hardware single-step targets. */
3460 finish_step_over (struct lwp_info
*lwp
)
3462 if (lwp
->bp_reinsert
!= 0)
3465 fprintf (stderr
, "Finished step over.\n");
3467 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3468 may be no breakpoint to reinsert there by now. */
3469 reinsert_breakpoints_at (lwp
->bp_reinsert
);
3470 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
3472 lwp
->bp_reinsert
= 0;
3474 /* Delete any software-single-step reinsert breakpoints. No
3475 longer needed. We don't have to worry about other threads
3476 hitting this trap, and later not being able to explain it,
3477 because we were stepping over a breakpoint, and we hold all
3478 threads but LWP stopped while doing that. */
3479 if (!can_hardware_single_step ())
3480 delete_reinsert_breakpoints ();
3482 step_over_bkpt
= null_ptid
;
3489 /* This function is called once per thread. We check the thread's resume
3490 request, which will tell us whether to resume, step, or leave the thread
3491 stopped; and what signal, if any, it should be sent.
3493 For threads which we aren't explicitly told otherwise, we preserve
3494 the stepping flag; this is used for stepping over gdbserver-placed
3497 If pending_flags was set in any thread, we queue any needed
3498 signals, since we won't actually resume. We already have a pending
3499 event to report, so we don't need to preserve any step requests;
3500 they should be re-issued if necessary. */
3503 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
3505 struct lwp_info
*lwp
;
3506 struct thread_info
*thread
;
3508 int leave_all_stopped
= * (int *) arg
;
3511 thread
= (struct thread_info
*) entry
;
3512 lwp
= get_thread_lwp (thread
);
3514 if (lwp
->resume
== NULL
)
3517 if (lwp
->resume
->kind
== resume_stop
)
3520 fprintf (stderr
, "resume_stop request for LWP %ld\n", lwpid_of (lwp
));
3525 fprintf (stderr
, "stopping LWP %ld\n", lwpid_of (lwp
));
3527 /* Stop the thread, and wait for the event asynchronously,
3528 through the event loop. */
3534 fprintf (stderr
, "already stopped LWP %ld\n",
3537 /* The LWP may have been stopped in an internal event that
3538 was not meant to be notified back to GDB (e.g., gdbserver
3539 breakpoint), so we should be reporting a stop event in
3542 /* If the thread already has a pending SIGSTOP, this is a
3543 no-op. Otherwise, something later will presumably resume
3544 the thread and this will cause it to cancel any pending
3545 operation, due to last_resume_kind == resume_stop. If
3546 the thread already has a pending status to report, we
3547 will still report it the next time we wait - see
3548 status_pending_p_callback. */
3550 /* If we already have a pending signal to report, then
3551 there's no need to queue a SIGSTOP, as this means we're
3552 midway through moving the LWP out of the jumppad, and we
3553 will report the pending signal as soon as that is
3555 if (lwp
->pending_signals_to_report
== NULL
)
3559 /* For stop requests, we're done. */
3561 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3565 /* If this thread which is about to be resumed has a pending status,
3566 then don't resume any threads - we can just report the pending
3567 status. Make sure to queue any signals that would otherwise be
3568 sent. In all-stop mode, we do this decision based on if *any*
3569 thread has a pending status. If there's a thread that needs the
3570 step-over-breakpoint dance, then don't resume any other thread
3571 but that particular one. */
3572 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
3577 fprintf (stderr
, "resuming LWP %ld\n", lwpid_of (lwp
));
3579 step
= (lwp
->resume
->kind
== resume_step
);
3580 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
3585 fprintf (stderr
, "leaving LWP %ld stopped\n", lwpid_of (lwp
));
3587 /* If we have a new signal, enqueue the signal. */
3588 if (lwp
->resume
->sig
!= 0)
3590 struct pending_signals
*p_sig
;
3591 p_sig
= xmalloc (sizeof (*p_sig
));
3592 p_sig
->prev
= lwp
->pending_signals
;
3593 p_sig
->signal
= lwp
->resume
->sig
;
3594 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3596 /* If this is the same signal we were previously stopped by,
3597 make sure to queue its siginfo. We can ignore the return
3598 value of ptrace; if it fails, we'll skip
3599 PTRACE_SETSIGINFO. */
3600 if (WIFSTOPPED (lwp
->last_status
)
3601 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
3602 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), 0, &p_sig
->info
);
3604 lwp
->pending_signals
= p_sig
;
3608 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3614 linux_resume (struct thread_resume
*resume_info
, size_t n
)
3616 struct thread_resume_array array
= { resume_info
, n
};
3617 struct lwp_info
*need_step_over
= NULL
;
3619 int leave_all_stopped
;
3621 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
3623 /* If there is a thread which would otherwise be resumed, which has
3624 a pending status, then don't resume any threads - we can just
3625 report the pending status. Make sure to queue any signals that
3626 would otherwise be sent. In non-stop mode, we'll apply this
3627 logic to each thread individually. We consume all pending events
3628 before considering to start a step-over (in all-stop). */
3631 find_inferior (&all_lwps
, resume_status_pending_p
, &any_pending
);
3633 /* If there is a thread which would otherwise be resumed, which is
3634 stopped at a breakpoint that needs stepping over, then don't
3635 resume any threads - have it step over the breakpoint with all
3636 other threads stopped, then resume all threads again. Make sure
3637 to queue any signals that would otherwise be delivered or
3639 if (!any_pending
&& supports_breakpoints ())
3641 = (struct lwp_info
*) find_inferior (&all_lwps
,
3642 need_step_over_p
, NULL
);
3644 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
3648 if (need_step_over
!= NULL
)
3649 fprintf (stderr
, "Not resuming all, need step over\n");
3650 else if (any_pending
)
3652 "Not resuming, all-stop and found "
3653 "an LWP with pending status\n");
3655 fprintf (stderr
, "Resuming, no pending status or step over needed\n");
3658 /* Even if we're leaving threads stopped, queue all signals we'd
3659 otherwise deliver. */
3660 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
3663 start_step_over (need_step_over
);
3666 /* This function is called once per thread. We check the thread's
3667 last resume request, which will tell us whether to resume, step, or
3668 leave the thread stopped. Any signal the client requested to be
3669 delivered has already been enqueued at this point.
3671 If any thread that GDB wants running is stopped at an internal
3672 breakpoint that needs stepping over, we start a step-over operation
3673 on that particular thread, and leave all others stopped. */
3676 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
3678 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3679 struct thread_info
*thread
;
3687 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp
));
3692 fprintf (stderr
, " LWP %ld already running\n", lwpid_of (lwp
));
3696 thread
= get_lwp_thread (lwp
);
3698 if (thread
->last_resume_kind
== resume_stop
3699 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
3702 fprintf (stderr
, " client wants LWP to remain %ld stopped\n",
3707 if (lwp
->status_pending_p
)
3710 fprintf (stderr
, " LWP %ld has pending status, leaving stopped\n",
3715 gdb_assert (lwp
->suspended
>= 0);
3720 fprintf (stderr
, " LWP %ld is suspended\n", lwpid_of (lwp
));
3724 if (thread
->last_resume_kind
== resume_stop
3725 && lwp
->pending_signals_to_report
== NULL
3726 && lwp
->collecting_fast_tracepoint
== 0)
3728 /* We haven't reported this LWP as stopped yet (otherwise, the
3729 last_status.kind check above would catch it, and we wouldn't
3730 reach here. This LWP may have been momentarily paused by a
3731 stop_all_lwps call while handling for example, another LWP's
3732 step-over. In that case, the pending expected SIGSTOP signal
3733 that was queued at vCont;t handling time will have already
3734 been consumed by wait_for_sigstop, and so we need to requeue
3735 another one here. Note that if the LWP already has a SIGSTOP
3736 pending, this is a no-op. */
3740 "Client wants LWP %ld to stop. "
3741 "Making sure it has a SIGSTOP pending\n",
3747 step
= thread
->last_resume_kind
== resume_step
;
3748 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
3753 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
3755 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3761 gdb_assert (lwp
->suspended
>= 0);
3763 return proceed_one_lwp (entry
, except
);
3766 /* When we finish a step-over, set threads running again. If there's
3767 another thread that may need a step-over, now's the time to start
3768 it. Eventually, we'll move all threads past their breakpoints. */
3771 proceed_all_lwps (void)
3773 struct lwp_info
*need_step_over
;
3775 /* If there is a thread which would otherwise be resumed, which is
3776 stopped at a breakpoint that needs stepping over, then don't
3777 resume any threads - have it step over the breakpoint with all
3778 other threads stopped, then resume all threads again. */
3780 if (supports_breakpoints ())
3783 = (struct lwp_info
*) find_inferior (&all_lwps
,
3784 need_step_over_p
, NULL
);
3786 if (need_step_over
!= NULL
)
3789 fprintf (stderr
, "proceed_all_lwps: found "
3790 "thread %ld needing a step-over\n",
3791 lwpid_of (need_step_over
));
3793 start_step_over (need_step_over
);
3799 fprintf (stderr
, "Proceeding, no step-over needed\n");
3801 find_inferior (&all_lwps
, proceed_one_lwp
, NULL
);
3804 /* Stopped LWPs that the client wanted to be running, that don't have
3805 pending statuses, are set to run again, except for EXCEPT, if not
3806 NULL. This undoes a stop_all_lwps call. */
3809 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
3815 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except
));
3818 "unstopping all lwps\n");
3822 find_inferior (&all_lwps
, unsuspend_and_proceed_one_lwp
, except
);
3824 find_inferior (&all_lwps
, proceed_one_lwp
, except
);
3828 #ifdef HAVE_LINUX_REGSETS
3830 #define use_linux_regsets 1
3833 regsets_fetch_inferior_registers (struct regcache
*regcache
)
3835 struct regset_info
*regset
;
3836 int saw_general_regs
= 0;
3840 regset
= target_regsets
;
3842 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3843 while (regset
->size
>= 0)
3848 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
3854 buf
= xmalloc (regset
->size
);
3856 nt_type
= regset
->nt_type
;
3860 iov
.iov_len
= regset
->size
;
3861 data
= (void *) &iov
;
3867 res
= ptrace (regset
->get_request
, pid
, nt_type
, data
);
3869 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
3875 /* If we get EIO on a regset, do not try it again for
3877 disabled_regsets
[regset
- target_regsets
] = 1;
3884 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3889 else if (regset
->type
== GENERAL_REGS
)
3890 saw_general_regs
= 1;
3891 regset
->store_function (regcache
, buf
);
3895 if (saw_general_regs
)
3902 regsets_store_inferior_registers (struct regcache
*regcache
)
3904 struct regset_info
*regset
;
3905 int saw_general_regs
= 0;
3909 regset
= target_regsets
;
3911 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3912 while (regset
->size
>= 0)
3917 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
3923 buf
= xmalloc (regset
->size
);
3925 /* First fill the buffer with the current register set contents,
3926 in case there are any items in the kernel's regset that are
3927 not in gdbserver's regcache. */
3929 nt_type
= regset
->nt_type
;
3933 iov
.iov_len
= regset
->size
;
3934 data
= (void *) &iov
;
3940 res
= ptrace (regset
->get_request
, pid
, nt_type
, data
);
3942 res
= ptrace (regset
->get_request
, pid
, &iov
, data
);
3947 /* Then overlay our cached registers on that. */
3948 regset
->fill_function (regcache
, buf
);
3950 /* Only now do we write the register set. */
3952 res
= ptrace (regset
->set_request
, pid
, nt_type
, data
);
3954 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
3962 /* If we get EIO on a regset, do not try it again for
3964 disabled_regsets
[regset
- target_regsets
] = 1;
3968 else if (errno
== ESRCH
)
3970 /* At this point, ESRCH should mean the process is
3971 already gone, in which case we simply ignore attempts
3972 to change its registers. See also the related
3973 comment in linux_resume_one_lwp. */
3979 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3982 else if (regset
->type
== GENERAL_REGS
)
3983 saw_general_regs
= 1;
3987 if (saw_general_regs
)
3993 #else /* !HAVE_LINUX_REGSETS */
3995 #define use_linux_regsets 0
3996 #define regsets_fetch_inferior_registers(regcache) 1
3997 #define regsets_store_inferior_registers(regcache) 1
4001 /* Return 1 if register REGNO is supported by one of the regset ptrace
4002 calls or 0 if it has to be transferred individually. */
4005 linux_register_in_regsets (int regno
)
4007 unsigned char mask
= 1 << (regno
% 8);
4008 size_t index
= regno
/ 8;
4010 return (use_linux_regsets
4011 && (the_low_target
.regset_bitmap
== NULL
4012 || (the_low_target
.regset_bitmap
[index
] & mask
) != 0));
4015 #ifdef HAVE_LINUX_USRREGS
4018 register_addr (int regnum
)
4022 if (regnum
< 0 || regnum
>= the_low_target
.num_regs
)
4023 error ("Invalid register number %d.", regnum
);
4025 addr
= the_low_target
.regmap
[regnum
];
4030 /* Fetch one register. */
4032 fetch_register (struct regcache
*regcache
, int regno
)
4039 if (regno
>= the_low_target
.num_regs
)
4041 if ((*the_low_target
.cannot_fetch_register
) (regno
))
4044 regaddr
= register_addr (regno
);
4048 size
= ((register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4049 & -sizeof (PTRACE_XFER_TYPE
));
4050 buf
= alloca (size
);
4052 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4053 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4056 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
4057 ptrace (PTRACE_PEEKUSER
, pid
,
4058 /* Coerce to a uintptr_t first to avoid potential gcc warning
4059 of coercing an 8 byte integer to a 4 byte pointer. */
4060 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
, 0);
4061 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4063 error ("reading register %d: %s", regno
, strerror (errno
));
4066 if (the_low_target
.supply_ptrace_register
)
4067 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
4069 supply_register (regcache
, regno
, buf
);
4072 /* Store one register. */
4074 store_register (struct regcache
*regcache
, int regno
)
4081 if (regno
>= the_low_target
.num_regs
)
4083 if ((*the_low_target
.cannot_store_register
) (regno
))
4086 regaddr
= register_addr (regno
);
4090 size
= ((register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4091 & -sizeof (PTRACE_XFER_TYPE
));
4092 buf
= alloca (size
);
4093 memset (buf
, 0, size
);
4095 if (the_low_target
.collect_ptrace_register
)
4096 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
4098 collect_register (regcache
, regno
, buf
);
4100 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4101 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4104 ptrace (PTRACE_POKEUSER
, pid
,
4105 /* Coerce to a uintptr_t first to avoid potential gcc warning
4106 about coercing an 8 byte integer to a 4 byte pointer. */
4107 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
,
4108 (PTRACE_ARG4_TYPE
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
4111 /* At this point, ESRCH should mean the process is
4112 already gone, in which case we simply ignore attempts
4113 to change its registers. See also the related
4114 comment in linux_resume_one_lwp. */
4118 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
4119 error ("writing register %d: %s", regno
, strerror (errno
));
4121 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4125 /* Fetch all registers, or just one, from the child process.
4126 If REGNO is -1, do this for all registers, skipping any that are
4127 assumed to have been retrieved by regsets_fetch_inferior_registers,
4128 unless ALL is non-zero.
4129 Otherwise, REGNO specifies which register (so we can save time). */
4131 usr_fetch_inferior_registers (struct regcache
*regcache
, int regno
, int all
)
4135 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
4136 if (all
|| !linux_register_in_regsets (regno
))
4137 fetch_register (regcache
, regno
);
4140 fetch_register (regcache
, regno
);
4143 /* Store our register values back into the inferior.
4144 If REGNO is -1, do this for all registers, skipping any that are
4145 assumed to have been saved by regsets_store_inferior_registers,
4146 unless ALL is non-zero.
4147 Otherwise, REGNO specifies which register (so we can save time). */
4149 usr_store_inferior_registers (struct regcache
*regcache
, int regno
, int all
)
4153 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
4154 if (all
|| !linux_register_in_regsets (regno
))
4155 store_register (regcache
, regno
);
4158 store_register (regcache
, regno
);
4161 #else /* !HAVE_LINUX_USRREGS */
4163 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4164 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
/* Target op: fetch register REGNO (or all registers when -1) into
   REGCACHE, preferring regsets and falling back to PTRACE_PEEKUSER
   for anything the regsets did not cover.  */

static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;

  if (regno == -1)
    {
      all = regsets_fetch_inferior_registers (regcache);
      usr_fetch_inferior_registers (regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regcache);
      if (!use_regsets || all)
	usr_fetch_inferior_registers (regcache, regno, 1);
    }
}
/* Target op: write register REGNO (or all registers when -1) from
   REGCACHE back to the inferior, preferring regsets and falling back
   to PTRACE_POKEUSER for anything the regsets did not cover.  */

static void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regcache);
      usr_store_inferior_registers (regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regcache);
      if (!use_regsets || all)
	usr_store_inferior_registers (regcache, regno, 1);
    }
}
4212 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4213 to debugger memory starting at MYADDR. */
4216 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
4219 /* Round starting address down to longword boundary. */
4220 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4221 /* Round ending address up; get number of longwords that makes. */
4223 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4224 / sizeof (PTRACE_XFER_TYPE
);
4225 /* Allocate buffer of that many longwords. */
4226 register PTRACE_XFER_TYPE
*buffer
4227 = (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4230 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4232 /* Try using /proc. Don't bother for one word. */
4233 if (len
>= 3 * sizeof (long))
4235 /* We could keep this file open and cache it - possibly one per
4236 thread. That requires some juggling, but is even faster. */
4237 sprintf (filename
, "/proc/%d/mem", pid
);
4238 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
4242 /* If pread64 is available, use it. It's faster if the kernel
4243 supports it (only one syscall), and it's 64-bit safe even on
4244 32-bit platforms (for instance, SPARC debugging a SPARC64
4247 if (pread64 (fd
, myaddr
, len
, memaddr
) != len
)
4249 if (lseek (fd
, memaddr
, SEEK_SET
) == -1 || read (fd
, myaddr
, len
) != len
)
4261 /* Read all the longwords */
4262 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4265 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4266 about coercing an 8 byte integer to a 4 byte pointer. */
4267 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
4268 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
4273 /* Copy appropriate bytes out of the buffer. */
4275 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4281 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4282 memory at MEMADDR. On failure (cannot write to the inferior)
4283 returns the value of errno. */
4286 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
4289 /* Round starting address down to longword boundary. */
4290 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4291 /* Round ending address up; get number of longwords that makes. */
4293 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4294 / sizeof (PTRACE_XFER_TYPE
);
4296 /* Allocate buffer of that many longwords. */
4297 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*)
4298 alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4300 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4304 /* Dump up to four bytes. */
4305 unsigned int val
= * (unsigned int *) myaddr
;
4311 val
= val
& 0xffffff;
4312 fprintf (stderr
, "Writing %0*x to 0x%08lx\n", 2 * ((len
< 4) ? len
: 4),
4313 val
, (long)memaddr
);
4316 /* Fill start and end extra bytes of buffer with existing memory data. */
4319 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4320 about coercing an 8 byte integer to a 4 byte pointer. */
4321 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
4322 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
4330 = ptrace (PTRACE_PEEKTEXT
, pid
,
4331 /* Coerce to a uintptr_t first to avoid potential gcc warning
4332 about coercing an 8 byte integer to a 4 byte pointer. */
4333 (PTRACE_ARG3_TYPE
) (uintptr_t) (addr
+ (count
- 1)
4334 * sizeof (PTRACE_XFER_TYPE
)),
4340 /* Copy data to be written over corresponding part of buffer. */
4342 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4345 /* Write the entire buffer. */
4347 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4350 ptrace (PTRACE_POKETEXT
, pid
,
4351 /* Coerce to a uintptr_t first to avoid potential gcc warning
4352 about coercing an 8 byte integer to a 4 byte pointer. */
4353 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
,
4354 (PTRACE_ARG4_TYPE
) buffer
[i
]);
4362 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4363 static int linux_supports_tracefork_flag
;
4366 linux_enable_event_reporting (int pid
)
4368 if (!linux_supports_tracefork_flag
)
4371 ptrace (PTRACE_SETOPTIONS
, pid
, 0, (PTRACE_ARG4_TYPE
) PTRACE_O_TRACECLONE
);
4374 /* Helper functions for linux_test_for_tracefork, called via clone (). */
/* Helper function for linux_test_for_tracefork, called via clone ().
   The grandchild just exits immediately.  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
4382 #define STACK_SIZE 4096
/* Helper function for linux_test_for_tracefork: the traced child.
   It stops itself, then forks (or clones on no-MMU systems) so the
   parent can observe whether a PTRACE_EVENT_FORK is reported.  */

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
4410 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4411 sure that we can enable the option, and that it had the desired
4415 linux_test_for_tracefork (void)
4417 int child_pid
, ret
, status
;
4419 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4420 char *stack
= xmalloc (STACK_SIZE
* 4);
4421 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4423 linux_supports_tracefork_flag
= 0;
4425 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4427 child_pid
= fork ();
4429 linux_tracefork_child (NULL
);
4431 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4433 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4435 child_pid
= __clone2 (linux_tracefork_child
, stack
, STACK_SIZE
,
4436 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
4437 #else /* !__ia64__ */
4438 child_pid
= clone (linux_tracefork_child
, stack
+ STACK_SIZE
,
4439 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
4440 #endif /* !__ia64__ */
4442 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4444 if (child_pid
== -1)
4445 perror_with_name ("clone");
4447 ret
= my_waitpid (child_pid
, &status
, 0);
4449 perror_with_name ("waitpid");
4450 else if (ret
!= child_pid
)
4451 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret
);
4452 if (! WIFSTOPPED (status
))
4453 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status
);
4455 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
4456 (PTRACE_ARG4_TYPE
) PTRACE_O_TRACEFORK
);
4459 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
4462 warning ("linux_test_for_tracefork: failed to kill child");
4466 ret
= my_waitpid (child_pid
, &status
, 0);
4467 if (ret
!= child_pid
)
4468 warning ("linux_test_for_tracefork: failed to wait for killed child");
4469 else if (!WIFSIGNALED (status
))
4470 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4471 "killed child", status
);
4476 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
4478 warning ("linux_test_for_tracefork: failed to resume child");
4480 ret
= my_waitpid (child_pid
, &status
, 0);
4482 if (ret
== child_pid
&& WIFSTOPPED (status
)
4483 && status
>> 16 == PTRACE_EVENT_FORK
)
4486 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
4487 if (ret
== 0 && second_pid
!= 0)
4491 linux_supports_tracefork_flag
= 1;
4492 my_waitpid (second_pid
, &second_status
, 0);
4493 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
4495 warning ("linux_test_for_tracefork: failed to kill second child");
4496 my_waitpid (second_pid
, &status
, 0);
4500 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4501 "(%d, status 0x%x)", ret
, status
);
4505 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
4507 warning ("linux_test_for_tracefork: failed to kill child");
4508 my_waitpid (child_pid
, &status
, 0);
4510 while (WIFSTOPPED (status
));
4512 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4514 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
/* Target op: hook up libthread_db for the current process, once.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
4535 linux_request_interrupt (void)
4537 extern unsigned long signal_pid
;
4539 if (!ptid_equal (cont_thread
, null_ptid
)
4540 && !ptid_equal (cont_thread
, minus_one_ptid
))
4542 struct lwp_info
*lwp
;
4545 lwp
= get_thread_lwp (current_inferior
);
4546 lwpid
= lwpid_of (lwp
);
4547 kill_lwp (lwpid
, SIGINT
);
4550 kill_lwp (signal_pid
, SIGINT
);
4553 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4554 to debugger memory starting at MYADDR. */
4557 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
4559 char filename
[PATH_MAX
];
4561 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4563 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
4565 fd
= open (filename
, O_RDONLY
);
4569 if (offset
!= (CORE_ADDR
) 0
4570 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4573 n
= read (fd
, myaddr
, len
);
4580 /* These breakpoint and watchpoint related wrapper functions simply
4581 pass on the function call if the target has registered a
4582 corresponding function. */
4585 linux_insert_point (char type
, CORE_ADDR addr
, int len
)
4587 if (the_low_target
.insert_point
!= NULL
)
4588 return the_low_target
.insert_point (type
, addr
, len
);
4590 /* Unsupported (see target.h). */
4595 linux_remove_point (char type
, CORE_ADDR addr
, int len
)
4597 if (the_low_target
.remove_point
!= NULL
)
4598 return the_low_target
.remove_point (type
, addr
, len
);
4600 /* Unsupported (see target.h). */
4605 linux_stopped_by_watchpoint (void)
4607 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
4609 return lwp
->stopped_by_watchpoint
;
4613 linux_stopped_data_address (void)
4615 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
4617 return lwp
->stopped_data_address
;
4620 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4621 #if defined(__mcoldfire__)
4622 /* These should really be defined in the kernel's ptrace.h header. */
4623 #define PT_TEXT_ADDR 49*4
4624 #define PT_DATA_ADDR 50*4
4625 #define PT_TEXT_END_ADDR 51*4
4627 #define PT_TEXT_ADDR 220
4628 #define PT_TEXT_END_ADDR 224
4629 #define PT_DATA_ADDR 228
4630 #elif defined(__TMS320C6X__)
4631 #define PT_TEXT_ADDR (0x10000*4)
4632 #define PT_DATA_ADDR (0x10004*4)
4633 #define PT_TEXT_END_ADDR (0x10008*4)
4636 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4637 to tell gdb about. */
4640 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
4642 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4643 unsigned long text
, text_end
, data
;
4644 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4648 text
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_TEXT_ADDR
, 0);
4649 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_TEXT_END_ADDR
, 0);
4650 data
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_DATA_ADDR
, 0);
4654 /* Both text and data offsets produced at compile-time (and so
4655 used by gdb) are relative to the beginning of the program,
4656 with the data segment immediately following the text segment.
4657 However, the actual runtime layout in memory may put the data
4658 somewhere else, so when we send gdb a data base-address, we
4659 use the real data base address and subtract the compile-time
4660 data base-address from it (which is just the length of the
4661 text segment). BSS immediately follows data in both
4664 *data_p
= data
- (text_end
- text
);
4674 linux_qxfer_osdata (const char *annex
,
4675 unsigned char *readbuf
, unsigned const char *writebuf
,
4676 CORE_ADDR offset
, int len
)
4678 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4681 /* Convert a native/host siginfo object, into/from the siginfo in the
4682 layout of the inferiors' architecture. */
4685 siginfo_fixup (struct siginfo
*siginfo
, void *inf_siginfo
, int direction
)
4689 if (the_low_target
.siginfo_fixup
!= NULL
)
4690 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
4692 /* If there was no callback, or the callback didn't do anything,
4693 then just do a straight memcpy. */
4697 memcpy (siginfo
, inf_siginfo
, sizeof (struct siginfo
));
4699 memcpy (inf_siginfo
, siginfo
, sizeof (struct siginfo
));
4704 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
4705 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
4708 struct siginfo siginfo
;
4709 char inf_siginfo
[sizeof (struct siginfo
)];
4711 if (current_inferior
== NULL
)
4714 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4717 fprintf (stderr
, "%s siginfo for lwp %d.\n",
4718 readbuf
!= NULL
? "Reading" : "Writing",
4721 if (offset
>= sizeof (siginfo
))
4724 if (ptrace (PTRACE_GETSIGINFO
, pid
, 0, &siginfo
) != 0)
4727 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4728 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4729 inferior with a 64-bit GDBSERVER should look the same as debugging it
4730 with a 32-bit GDBSERVER, we need to convert it. */
4731 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
4733 if (offset
+ len
> sizeof (siginfo
))
4734 len
= sizeof (siginfo
) - offset
;
4736 if (readbuf
!= NULL
)
4737 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
4740 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
4742 /* Convert back to ptrace layout before flushing it out. */
4743 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
4745 if (ptrace (PTRACE_SETSIGINFO
, pid
, 0, &siginfo
) != 0)
4752 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4753 so we notice when children change state; as the handler for the
4754 sigsuspend in my_waitpid. */
4757 sigchld_handler (int signo
)
4759 int old_errno
= errno
;
4765 /* fprintf is not async-signal-safe, so call write
4767 if (write (2, "sigchld_handler\n",
4768 sizeof ("sigchld_handler\n") - 1) < 0)
4769 break; /* just ignore */
4773 if (target_is_async_p ())
4774 async_file_mark (); /* trigger a linux_wait */
4780 linux_supports_non_stop (void)
4786 linux_async (int enable
)
4788 int previous
= (linux_event_pipe
[0] != -1);
4791 fprintf (stderr
, "linux_async (%d), previous=%d\n",
4794 if (previous
!= enable
)
4797 sigemptyset (&mask
);
4798 sigaddset (&mask
, SIGCHLD
);
4800 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4804 if (pipe (linux_event_pipe
) == -1)
4805 fatal ("creating event pipe failed.");
4807 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4808 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4810 /* Register the event loop handler. */
4811 add_file_handler (linux_event_pipe
[0],
4812 handle_target_event
, NULL
);
4814 /* Always trigger a linux_wait. */
4819 delete_file_handler (linux_event_pipe
[0]);
4821 close (linux_event_pipe
[0]);
4822 close (linux_event_pipe
[1]);
4823 linux_event_pipe
[0] = -1;
4824 linux_event_pipe
[1] = -1;
4827 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Target op: enter or leave non-stop mode.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4842 linux_supports_multi_process (void)
/* Target op: address-space-randomization can be disabled only when
   personality(2) support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4857 /* Enumerate spufs IDs for process PID. */
4859 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
4865 struct dirent
*entry
;
4867 sprintf (path
, "/proc/%ld/fd", pid
);
4868 dir
= opendir (path
);
4873 while ((entry
= readdir (dir
)) != NULL
)
4879 fd
= atoi (entry
->d_name
);
4883 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
4884 if (stat (path
, &st
) != 0)
4886 if (!S_ISDIR (st
.st_mode
))
4889 if (statfs (path
, &stfs
) != 0)
4891 if (stfs
.f_type
!= SPUFS_MAGIC
)
4894 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4896 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
4906 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4907 object type, using the /proc file system. */
4909 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
4910 unsigned const char *writebuf
,
4911 CORE_ADDR offset
, int len
)
4913 long pid
= lwpid_of (get_thread_lwp (current_inferior
));
4918 if (!writebuf
&& !readbuf
)
4926 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
4929 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
4930 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
4935 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4942 ret
= write (fd
, writebuf
, (size_t) len
);
4944 ret
= read (fd
, readbuf
, (size_t) len
);
/* NOTE(review): the member declaration lines of these structs were lost
   in extraction; only the comments and a few fields survive.  Restore
   the field types from the upstream file before relying on sizeof of
   these structures.  */
4950 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4951 struct target_loadseg
4953 /* Core address to which the segment is mapped. */
4955 /* VMA recorded in the program header. */
4957 /* Size of this segment in memory. */
4961 # if defined PT_GETDSBT
4962 struct target_loadmap
4964 /* Protocol version number, must be zero. */
4966 /* Pointer to the DSBT table, its size, and the DSBT index. */
4967 unsigned *dsbt_table
;
4968 unsigned dsbt_size
, dsbt_index
;
4969 /* Number of segments in this map. */
4971 /* The actual memory map. */
4972 struct target_loadseg segs
[/*nsegs*/];
4974 # define LINUX_LOADMAP PT_GETDSBT
4975 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4976 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4978 struct target_loadmap
4980 /* Protocol version number, must be zero. */
4982 /* Number of segments in this map. */
4984 /* The actual memory map. */
4985 struct target_loadseg segs
[/*nsegs*/];
4987 # define LINUX_LOADMAP PTRACE_GETFDPIC
4988 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4989 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4993 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
4994 unsigned char *myaddr
, unsigned int len
)
4996 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4998 struct target_loadmap
*data
= NULL
;
4999 unsigned int actual_length
, copy_length
;
5001 if (strcmp (annex
, "exec") == 0)
5002 addr
= (int) LINUX_LOADMAP_EXEC
;
5003 else if (strcmp (annex
, "interp") == 0)
5004 addr
= (int) LINUX_LOADMAP_INTERP
;
5008 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
5014 actual_length
= sizeof (struct target_loadmap
)
5015 + sizeof (struct target_loadseg
) * data
->nsegs
;
5017 if (offset
< 0 || offset
> actual_length
)
5020 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
5021 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
5025 # define linux_read_loadmap NULL
5026 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5029 linux_process_qsupported (const char *query
)
5031 if (the_low_target
.process_qsupported
!= NULL
)
5032 the_low_target
.process_qsupported (query
);
5036 linux_supports_tracepoints (void)
5038 if (*the_low_target
.supports_tracepoints
== NULL
)
5041 return (*the_low_target
.supports_tracepoints
) ();
5045 linux_read_pc (struct regcache
*regcache
)
5047 if (the_low_target
.get_pc
== NULL
)
5050 return (*the_low_target
.get_pc
) (regcache
);
5054 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
5056 gdb_assert (the_low_target
.set_pc
!= NULL
);
5058 (*the_low_target
.set_pc
) (regcache
, pc
);
5062 linux_thread_stopped (struct thread_info
*thread
)
5064 return get_thread_lwp (thread
)->stopped
;
5067 /* This exposes stop-all-threads functionality to other modules. */
5070 linux_pause_all (int freeze
)
5072 stop_all_lwps (freeze
, NULL
);
5075 /* This exposes unstop-all-threads functionality to other gdbserver
5079 linux_unpause_all (int unfreeze
)
5081 unstop_all_lwps (unfreeze
, NULL
);
/* Target op: freeze all LWPs before a memory access.  */

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  linux_pause_all (1);
  return 0;
}
/* Target op: thaw all LWPs after a memory access.  */

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  linux_unpause_all (1);
}
5104 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
5105 CORE_ADDR collector
,
5108 CORE_ADDR
*jump_entry
,
5109 CORE_ADDR
*trampoline
,
5110 ULONGEST
*trampoline_size
,
5111 unsigned char *jjump_pad_insn
,
5112 ULONGEST
*jjump_pad_insn_size
,
5113 CORE_ADDR
*adjusted_insn_addr
,
5114 CORE_ADDR
*adjusted_insn_addr_end
,
5117 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
5118 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
5119 jump_entry
, trampoline
, trampoline_size
,
5120 jjump_pad_insn
, jjump_pad_insn_size
,
5121 adjusted_insn_addr
, adjusted_insn_addr_end
,
5125 static struct emit_ops
*
5126 linux_emit_ops (void)
5128 if (the_low_target
.emit_ops
!= NULL
)
5129 return (*the_low_target
.emit_ops
) ();
5135 linux_get_min_fast_tracepoint_insn_len (void)
5137 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
5140 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5143 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
5144 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
5146 char filename
[PATH_MAX
];
5148 const int auxv_size
= is_elf64
5149 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
5150 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
5152 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5154 fd
= open (filename
, O_RDONLY
);
5160 while (read (fd
, buf
, auxv_size
) == auxv_size
5161 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
5165 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
5167 switch (aux
->a_type
)
5170 *phdr_memaddr
= aux
->a_un
.a_val
;
5173 *num_phdr
= aux
->a_un
.a_val
;
5179 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
5181 switch (aux
->a_type
)
5184 *phdr_memaddr
= aux
->a_un
.a_val
;
5187 *num_phdr
= aux
->a_un
.a_val
;
5195 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
5197 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5198 "phdr_memaddr = %ld, phdr_num = %d",
5199 (long) *phdr_memaddr
, *num_phdr
);
5206 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5209 get_dynamic (const int pid
, const int is_elf64
)
5211 CORE_ADDR phdr_memaddr
, relocation
;
5213 unsigned char *phdr_buf
;
5214 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
5216 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
5219 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
5220 phdr_buf
= alloca (num_phdr
* phdr_size
);
5222 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
5225 /* Compute relocation: it is expected to be 0 for "regular" executables,
5226 non-zero for PIE ones. */
5228 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
5231 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5233 if (p
->p_type
== PT_PHDR
)
5234 relocation
= phdr_memaddr
- p
->p_vaddr
;
5238 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5240 if (p
->p_type
== PT_PHDR
)
5241 relocation
= phdr_memaddr
- p
->p_vaddr
;
5244 if (relocation
== -1)
5246 warning ("Unexpected missing PT_PHDR");
5250 for (i
= 0; i
< num_phdr
; i
++)
5254 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5256 if (p
->p_type
== PT_DYNAMIC
)
5257 return p
->p_vaddr
+ relocation
;
5261 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5263 if (p
->p_type
== PT_DYNAMIC
)
5264 return p
->p_vaddr
+ relocation
;
5271 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5272 can be 0 if the inferior does not yet have the library list initialized. */
5275 get_r_debug (const int pid
, const int is_elf64
)
5277 CORE_ADDR dynamic_memaddr
;
5278 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
5279 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
5281 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
5282 if (dynamic_memaddr
== 0)
5283 return (CORE_ADDR
) -1;
5285 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
5289 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
5291 if (dyn
->d_tag
== DT_DEBUG
)
5292 return dyn
->d_un
.d_val
;
5294 if (dyn
->d_tag
== DT_NULL
)
5299 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
5301 if (dyn
->d_tag
== DT_DEBUG
)
5302 return dyn
->d_un
.d_val
;
5304 if (dyn
->d_tag
== DT_NULL
)
5308 dynamic_memaddr
+= dyn_size
;
5311 return (CORE_ADDR
) -1;
5314 /* Read one pointer from MEMADDR in the inferior. */
5317 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
5320 return linux_read_memory (memaddr
, (unsigned char *) ptr
, ptr_size
);
/* Byte offsets of the fields gdbserver needs inside the inferior's
   SVR4 `struct r_debug' and `struct link_map', for one pointer size.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5347 /* Construct qXfer:libraries:read reply. */
5350 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
5351 unsigned const char *writebuf
,
5352 CORE_ADDR offset
, int len
)
5355 unsigned document_len
;
5356 struct process_info_private
*const priv
= current_process ()->private;
5357 char filename
[PATH_MAX
];
5360 static const struct link_map_offsets lmo_32bit_offsets
=
5362 0, /* r_version offset. */
5363 4, /* r_debug.r_map offset. */
5364 0, /* l_addr offset in link_map. */
5365 4, /* l_name offset in link_map. */
5366 8, /* l_ld offset in link_map. */
5367 12, /* l_next offset in link_map. */
5368 16 /* l_prev offset in link_map. */
5371 static const struct link_map_offsets lmo_64bit_offsets
=
5373 0, /* r_version offset. */
5374 8, /* r_debug.r_map offset. */
5375 0, /* l_addr offset in link_map. */
5376 8, /* l_name offset in link_map. */
5377 16, /* l_ld offset in link_map. */
5378 24, /* l_next offset in link_map. */
5379 32 /* l_prev offset in link_map. */
5381 const struct link_map_offsets
*lmo
;
5383 if (writebuf
!= NULL
)
5385 if (readbuf
== NULL
)
5388 pid
= lwpid_of (get_thread_lwp (current_inferior
));
5389 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
5390 is_elf64
= elf_64_file_p (filename
);
5391 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
5393 if (priv
->r_debug
== 0)
5394 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
5396 if (priv
->r_debug
== (CORE_ADDR
) -1 || priv
->r_debug
== 0)
5398 document
= xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5402 int allocated
= 1024;
5404 const int ptr_size
= is_elf64
? 8 : 4;
5405 CORE_ADDR lm_addr
, lm_prev
, l_name
, l_addr
, l_ld
, l_next
, l_prev
;
5406 int r_version
, header_done
= 0;
5408 document
= xmalloc (allocated
);
5409 strcpy (document
, "<library-list-svr4 version=\"1.0\"");
5410 p
= document
+ strlen (document
);
5413 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
5414 (unsigned char *) &r_version
,
5415 sizeof (r_version
)) != 0
5418 warning ("unexpected r_debug version %d", r_version
);
5422 if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
5423 &lm_addr
, ptr_size
) != 0)
5425 warning ("unable to read r_map from 0x%lx",
5426 (long) priv
->r_debug
+ lmo
->r_map_offset
);
5431 while (read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
5432 &l_name
, ptr_size
) == 0
5433 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
5434 &l_addr
, ptr_size
) == 0
5435 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
5436 &l_ld
, ptr_size
) == 0
5437 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
5438 &l_prev
, ptr_size
) == 0
5439 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
5440 &l_next
, ptr_size
) == 0)
5442 unsigned char libname
[PATH_MAX
];
5444 if (lm_prev
!= l_prev
)
5446 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5447 (long) lm_prev
, (long) l_prev
);
5451 /* Not checking for error because reading may stop before
5452 we've got PATH_MAX worth of characters. */
5454 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
5455 libname
[sizeof (libname
) - 1] = '\0';
5456 if (libname
[0] != '\0')
5458 /* 6x the size for xml_escape_text below. */
5459 size_t len
= 6 * strlen ((char *) libname
);
5464 /* Terminate `<library-list-svr4'. */
5469 while (allocated
< p
- document
+ len
+ 200)
5471 /* Expand to guarantee sufficient storage. */
5472 uintptr_t document_len
= p
- document
;
5474 document
= xrealloc (document
, 2 * allocated
);
5476 p
= document
+ document_len
;
5479 name
= xml_escape_text ((char *) libname
);
5480 p
+= sprintf (p
, "<library name=\"%s\" lm=\"0x%lx\" "
5481 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5482 name
, (unsigned long) lm_addr
,
5483 (unsigned long) l_addr
, (unsigned long) l_ld
);
5486 else if (lm_prev
== 0)
5488 sprintf (p
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
5499 strcpy (p
, "</library-list-svr4>");
5502 document_len
= strlen (document
);
5503 if (offset
< document_len
)
5504 document_len
-= offset
;
5507 if (len
> document_len
)
5510 memcpy (readbuf
, document
+ offset
, len
);
5516 static struct target_ops linux_target_ops
= {
5517 linux_create_inferior
,
5526 linux_fetch_registers
,
5527 linux_store_registers
,
5528 linux_prepare_to_access_memory
,
5529 linux_done_accessing_memory
,
5532 linux_look_up_symbols
,
5533 linux_request_interrupt
,
5537 linux_stopped_by_watchpoint
,
5538 linux_stopped_data_address
,
5539 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5544 #ifdef USE_THREAD_DB
5545 thread_db_get_tls_address
,
5550 hostio_last_error_from_errno
,
5553 linux_supports_non_stop
,
5555 linux_start_non_stop
,
5556 linux_supports_multi_process
,
5557 #ifdef USE_THREAD_DB
5558 thread_db_handle_monitor_command
,
5562 linux_common_core_of_thread
,
5564 linux_process_qsupported
,
5565 linux_supports_tracepoints
,
5568 linux_thread_stopped
,
5572 linux_cancel_breakpoints
,
5573 linux_stabilize_threads
,
5574 linux_install_fast_tracepoint_jump_pad
,
5576 linux_supports_disable_randomization
,
5577 linux_get_min_fast_tracepoint_insn_len
,
5578 linux_qxfer_libraries_svr4
,
/* Arrange for the LinuxThreads cancel signal to be ignored so it does
   not terminate gdbserver.  Declared with (void): the original empty
   parameter list () is an obsolescent non-prototype declaration in C
   and disables argument checking at call sites.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
5592 initialize_low (void)
5594 struct sigaction sigchld_action
;
5595 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
5596 set_target_ops (&linux_target_ops
);
5597 set_breakpoint_data (the_low_target
.breakpoint
,
5598 the_low_target
.breakpoint_len
);
5599 linux_init_signals ();
5600 linux_test_for_tracefork ();
5601 #ifdef HAVE_LINUX_REGSETS
5602 for (num_regsets
= 0; target_regsets
[num_regsets
].size
>= 0; num_regsets
++)
5604 disabled_regsets
= xmalloc (num_regsets
);
5607 sigchld_action
.sa_handler
= sigchld_handler
;
5608 sigemptyset (&sigchld_action
.sa_mask
);
5609 sigchld_action
.sa_flags
= SA_RESTART
;
5610 sigaction (SIGCHLD
, &sigchld_action
, NULL
);