1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19#include "server.h"
20#include "linux-low.h"
21#include "nat/linux-osdata.h"
22#include "agent.h"
23#include "tdesc.h"
24#include "rsp-low.h"
25
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
28#include "gdb_wait.h"
29#include "nat/gdb_ptrace.h"
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
32#include "nat/linux-personality.h"
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
36#include <unistd.h>
37#include <sys/syscall.h>
38#include <sched.h>
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
43#include <sys/stat.h>
44#include <sys/vfs.h>
45#include <sys/uio.h>
46#include "filestuff.h"
47#include "tracepoint.h"
48#include "hostio.h"
49#include <inttypes.h>
50#ifndef ELFMAG0
51/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55#include <elf.h>
56#endif
57#include "nat/linux-namespaces.h"
58
59#ifndef SPUFS_MAGIC
60#define SPUFS_MAGIC 0x23c9b64e
61#endif
62
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
73
74/* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77#if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80#if defined(__mcoldfire__)
81/* These are still undefined in 3.10 kernels. */
82#define PT_TEXT_ADDR 49*4
83#define PT_DATA_ADDR 50*4
84#define PT_TEXT_END_ADDR 51*4
 85/* BFIN has defined these since at least 2.6.32 kernels. */
86#elif defined(BFIN)
87#define PT_TEXT_ADDR 220
88#define PT_TEXT_END_ADDR 224
89#define PT_DATA_ADDR 228
90/* These are still undefined in 3.10 kernels. */
91#elif defined(__TMS320C6X__)
92#define PT_TEXT_ADDR (0x10000*4)
93#define PT_DATA_ADDR (0x10004*4)
94#define PT_TEXT_END_ADDR (0x10008*4)
95#endif
96#endif
97
98#ifdef HAVE_LINUX_BTRACE
99# include "nat/linux-btrace.h"
100# include "btrace-common.h"
101#endif
102
103#ifndef HAVE_ELF32_AUXV_T
104/* Copied from glibc's elf.h. */
105typedef struct
106{
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
 111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115} Elf32_auxv_t;
116#endif
117
118#ifndef HAVE_ELF64_AUXV_T
119/* Copied from glibc's elf.h. */
120typedef struct
121{
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
 126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130} Elf64_auxv_t;
131#endif
132
133/* Does the current host support PTRACE_GETREGSET? */
134int have_ptrace_getregset = -1;
135
136/* LWP accessors. */
137
138/* See nat/linux-nat.h. */
139
140ptid_t
141ptid_of_lwp (struct lwp_info *lwp)
142{
143 return ptid_of (get_lwp_thread (lwp));
144}
145
146/* See nat/linux-nat.h. */
147
148void
149lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151{
152 lwp->arch_private = info;
153}
154
155/* See nat/linux-nat.h. */
156
157struct arch_lwp_info *
158lwp_arch_private_info (struct lwp_info *lwp)
159{
160 return lwp->arch_private;
161}
162
163/* See nat/linux-nat.h. */
164
165int
166lwp_is_stopped (struct lwp_info *lwp)
167{
168 return lwp->stopped;
169}
170
171/* See nat/linux-nat.h. */
172
173enum target_stop_reason
174lwp_stop_reason (struct lwp_info *lwp)
175{
176 return lwp->stop_reason;
177}
178
179/* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183struct simple_pid_list
184{
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193};
194struct simple_pid_list *stopped_pids;
195
196/* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199static void
200add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201{
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208}
209
210static int
211pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212{
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226}
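/* A minimal sketch (hypothetical example_* names, kept under #if 0)
   of how this list is meant to be used: a wait loop that collects a
   stop for a pid it has no lwp for yet parks the status with
   add_to_pid_list, and the code that later learns about the child
   (e.g. handle_extended_wait below, on a PTRACE_EVENT_CLONE) claims
   it with pull_pid_from_list instead of calling waitpid again.  */
#if 0
static void
example_record_unknown_stop (int pid, int wstat)
{
  /* Stop reported for a pid we have no lwp for yet; save it.  */
  add_to_pid_list (&stopped_pids, pid, wstat);
}

static int
example_claim_child_status (int new_pid, int *statusp)
{
  /* Returns 1 and fills *STATUSP if the child's initial stop was
     already collected; 0 if we still need to wait for it.  */
  return pull_pid_from_list (&stopped_pids, new_pid, statusp);
}
#endif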
227
228enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240/* This is set while stop_all_lwps is in effect. */
241enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243/* FIXME make into a target method? */
244int using_threads = 1;
245
246/* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248static int stabilizing_threads;
249
250static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252static void linux_resume (struct thread_resume *resume_info, size_t n);
253static void stop_all_lwps (int suspend, struct lwp_info *except);
254static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255static void unsuspend_all_lwps (struct lwp_info *except);
256static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259static struct lwp_info *add_lwp (ptid_t ptid);
260static void linux_mourn (struct process_info *process);
261static int linux_stopped_by_watchpoint (void);
262static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263static int lwp_is_marked_dead (struct lwp_info *lwp);
264static void proceed_all_lwps (void);
265static int finish_step_over (struct lwp_info *lwp);
266static int kill_lwp (unsigned long lwpid, int signo);
267static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268static void complete_ongoing_step_over (void);
269static int linux_low_ptrace_options (int attached);
270static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
271static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);
272
273/* When the event-loop is doing a step-over, this points at the thread
274 being stepped. */
275ptid_t step_over_bkpt;
276
277/* True if the low target can hardware single-step. */
278
279static int
280can_hardware_single_step (void)
281{
282 if (the_low_target.supports_hardware_single_step != NULL)
283 return the_low_target.supports_hardware_single_step ();
284 else
285 return 0;
286}
287
288/* True if the low target can software single-step. Such targets
289 implement the GET_NEXT_PCS callback. */
290
291static int
292can_software_single_step (void)
293{
294 return (the_low_target.get_next_pcs != NULL);
295}
296
297/* True if the low target supports memory breakpoints. If so, we'll
298 have a GET_PC implementation. */
299
300static int
301supports_breakpoints (void)
302{
303 return (the_low_target.get_pc != NULL);
304}
305
306/* Returns true if this target can support fast tracepoints. This
307 does not mean that the in-process agent has been loaded in the
308 inferior. */
309
310static int
311supports_fast_tracepoints (void)
312{
313 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
314}
315
316/* True if LWP is stopped in its stepping range. */
317
318static int
319lwp_in_step_range (struct lwp_info *lwp)
320{
321 CORE_ADDR pc = lwp->stop_pc;
322
323 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
324}
325
326struct pending_signals
327{
328 int signal;
329 siginfo_t info;
330 struct pending_signals *prev;
331};
332
333/* The read/write ends of the pipe registered as waitable file in the
334 event loop. */
335static int linux_event_pipe[2] = { -1, -1 };
336
337/* True if we're currently in async mode. */
338#define target_is_async_p() (linux_event_pipe[0] != -1)
339
340static void send_sigstop (struct lwp_info *lwp);
341static void wait_for_sigstop (void);
342
 343/* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, and -1 if not ELF. */
344
345static int
346elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
347{
348 if (header->e_ident[EI_MAG0] == ELFMAG0
349 && header->e_ident[EI_MAG1] == ELFMAG1
350 && header->e_ident[EI_MAG2] == ELFMAG2
351 && header->e_ident[EI_MAG3] == ELFMAG3)
352 {
353 *machine = header->e_machine;
354 return header->e_ident[EI_CLASS] == ELFCLASS64;
355
356 }
357 *machine = EM_NONE;
358 return -1;
359}
360
 361/* Return 1 if FILE is a 64-bit ELF file,
362 zero if the file is not a 64-bit ELF file,
363 and -1 if the file is not accessible or doesn't exist. */
364
365static int
366elf_64_file_p (const char *file, unsigned int *machine)
367{
368 Elf64_Ehdr header;
369 int fd;
370
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
374
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
376 {
377 close (fd);
378 return 0;
379 }
380 close (fd);
381
382 return elf_64_header_p (&header, machine);
383}
384
 385/* Accepts an integer PID; returns true if the executable that PID
 386 is running is a 64-bit ELF file. */
387
388int
389linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
390{
391 char file[PATH_MAX];
392
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
395}
396
397static void
398delete_lwp (struct lwp_info *lwp)
399{
400 struct thread_info *thr = get_lwp_thread (lwp);
401
402 if (debug_threads)
403 debug_printf ("deleting %ld\n", lwpid_of (thr));
404
405 remove_thread (thr);
406 free (lwp->arch_private);
407 free (lwp);
408}
409
410/* Add a process to the common process list, and set its private
411 data. */
412
413static struct process_info *
414linux_add_process (int pid, int attached)
415{
416 struct process_info *proc;
417
418 proc = add_process (pid, attached);
419 proc->priv = XCNEW (struct process_info_private);
420
421 if (the_low_target.new_process != NULL)
422 proc->priv->arch_private = the_low_target.new_process ();
423
424 return proc;
425}
426
427static CORE_ADDR get_pc (struct lwp_info *lwp);
428
429/* Call the target arch_setup function on the current thread. */
430
431static void
432linux_arch_setup (void)
433{
434 the_low_target.arch_setup ();
435}
436
437/* Call the target arch_setup function on THREAD. */
438
439static void
440linux_arch_setup_thread (struct thread_info *thread)
441{
442 struct thread_info *saved_thread;
443
444 saved_thread = current_thread;
445 current_thread = thread;
446
447 linux_arch_setup ();
448
449 current_thread = saved_thread;
450}
451
452/* Handle a GNU/Linux extended wait response. If we see a clone,
453 fork, or vfork event, we need to add the new LWP to our list
454 (and return 0 so as not to report the trap to higher layers).
455 If we see an exec event, we will modify ORIG_EVENT_LWP to point
456 to a new LWP representing the new program. */
457
458static int
459handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
460{
461 struct lwp_info *event_lwp = *orig_event_lwp;
462 int event = linux_ptrace_get_extended_event (wstat);
463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
464 struct lwp_info *new_lwp;
465
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
475 {
476 ptid_t ptid;
477 unsigned long new_pid;
478 int ret, status;
479
480 /* Get the pid of the new lwp. */
481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
482 &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
490 ret = my_waitpid (new_pid, &status, __WALL);
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
496 else if (!WIFSTOPPED (status))
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
505 struct thread_info *child_thr;
506 struct target_desc *tdesc;
507
508 ptid = ptid_build (new_pid, new_pid, 0);
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
514 ptid_get_lwp (ptid_of (event_thr)),
515 ptid_get_pid (ptid));
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
 553 /* If we leave reinsert breakpoints there, the child will
 554 hit them, so uninsert reinsert breakpoints from the parent
 555 (and child). Once the vfork child is done, reinsert
 556 them back in the parent. */
557 uninsert_reinsert_breakpoints (event_thr);
558 }
559
560 clone_all_breakpoints (child_thr, event_thr);
561
562 tdesc = XNEW (struct target_desc);
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
565
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
570 /* Save fork info in the parent thread. */
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
576 event_lwp->waitstatus.value.related_pid = ptid;
577
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
584 /* If the parent thread is doing step-over with reinsert
 585 breakpoints, the list of reinsert breakpoints is cloned
586 from the parent's. Remove them from the child process.
587 In case of vfork, we'll reinsert them back once vforked
588 child is done. */
589 if (event_lwp->bp_reinsert != 0
590 && can_software_single_step ())
591 {
592 /* The child process is forked and stopped, so it is safe
593 to access its memory without stopping all other threads
594 from other processes. */
595 delete_reinsert_breakpoints (child_thr);
596
597 gdb_assert (has_reinsert_breakpoints (event_thr));
598 gdb_assert (!has_reinsert_breakpoints (child_thr));
599 }
600
601 /* Report the event. */
602 return 0;
603 }
604
605 if (debug_threads)
606 debug_printf ("HEW: Got clone event "
607 "from LWP %ld, new child is LWP %ld\n",
608 lwpid_of (event_thr), new_pid);
609
610 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
611 new_lwp = add_lwp (ptid);
612
613 /* Either we're going to immediately resume the new thread
614 or leave it stopped. linux_resume_one_lwp is a nop if it
615 thinks the thread is currently running, so set this first
616 before calling linux_resume_one_lwp. */
617 new_lwp->stopped = 1;
618
619 /* If we're suspending all threads, leave this one suspended
620 too. If the fork/clone parent is stepping over a breakpoint,
621 all other threads have been suspended already. Leave the
622 child suspended too. */
623 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
624 || event_lwp->bp_reinsert != 0)
625 new_lwp->suspended = 1;
626
627 /* Normally we will get the pending SIGSTOP. But in some cases
628 we might get another signal delivered to the group first.
629 If we do get another signal, be sure not to lose it. */
630 if (WSTOPSIG (status) != SIGSTOP)
631 {
632 new_lwp->stop_expected = 1;
633 new_lwp->status_pending_p = 1;
634 new_lwp->status_pending = status;
635 }
636 else if (report_thread_events)
637 {
638 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
639 new_lwp->status_pending_p = 1;
640 new_lwp->status_pending = status;
641 }
642
643 /* Don't report the event. */
644 return 1;
645 }
646 else if (event == PTRACE_EVENT_VFORK_DONE)
647 {
648 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
649
650 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
651 {
652 reinsert_reinsert_breakpoints (event_thr);
653
654 gdb_assert (has_reinsert_breakpoints (event_thr));
655 }
656
657 /* Report the event. */
658 return 0;
659 }
660 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
661 {
662 struct process_info *proc;
663 VEC (int) *syscalls_to_catch;
664 ptid_t event_ptid;
665 pid_t event_pid;
666
667 if (debug_threads)
668 {
669 debug_printf ("HEW: Got exec event from LWP %ld\n",
670 lwpid_of (event_thr));
671 }
672
673 /* Get the event ptid. */
674 event_ptid = ptid_of (event_thr);
675 event_pid = ptid_get_pid (event_ptid);
676
677 /* Save the syscall list from the execing process. */
678 proc = get_thread_process (event_thr);
679 syscalls_to_catch = proc->syscalls_to_catch;
680 proc->syscalls_to_catch = NULL;
681
682 /* Delete the execing process and all its threads. */
683 linux_mourn (proc);
684 current_thread = NULL;
685
686 /* Create a new process/lwp/thread. */
687 proc = linux_add_process (event_pid, 0);
688 event_lwp = add_lwp (event_ptid);
689 event_thr = get_lwp_thread (event_lwp);
690 gdb_assert (current_thread == event_thr);
691 linux_arch_setup_thread (event_thr);
692
693 /* Set the event status. */
694 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
695 event_lwp->waitstatus.value.execd_pathname
696 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
697
698 /* Mark the exec status as pending. */
699 event_lwp->stopped = 1;
700 event_lwp->status_pending_p = 1;
701 event_lwp->status_pending = wstat;
702 event_thr->last_resume_kind = resume_continue;
703 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
704
705 /* Update syscall state in the new lwp, effectively mid-syscall too. */
706 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
707
708 /* Restore the list to catch. Don't rely on the client, which is free
709 to avoid sending a new list when the architecture doesn't change.
710 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
711 proc->syscalls_to_catch = syscalls_to_catch;
712
713 /* Report the event. */
714 *orig_event_lwp = event_lwp;
715 return 0;
716 }
717
718 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
719}
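/* The extended events dispatched above only arrive if the matching
   ptrace options were set on the tracee beforehand (gdbserver does
   that via linux_low_ptrace_options and linux_enable_event_reporting,
   defined elsewhere).  A minimal stand-alone sketch, assuming a
   kernel that supports these PTRACE_O_* flags; hypothetical example_
   name, kept under #if 0.  */
#if 0
static void
example_enable_extended_events (pid_t pid)
{
  long options = (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
		  | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC
		  | PTRACE_O_TRACEVFORKDONE);

  /* Error handling (e.g. ESRCH if the tracee is gone) elided.  */
  ptrace (PTRACE_SETOPTIONS, pid, (void *) 0, (void *) options);
}
#endif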
720
721/* Return the PC as read from the regcache of LWP, without any
722 adjustment. */
723
724static CORE_ADDR
725get_pc (struct lwp_info *lwp)
726{
727 struct thread_info *saved_thread;
728 struct regcache *regcache;
729 CORE_ADDR pc;
730
731 if (the_low_target.get_pc == NULL)
732 return 0;
733
734 saved_thread = current_thread;
735 current_thread = get_lwp_thread (lwp);
736
737 regcache = get_thread_regcache (current_thread, 1);
738 pc = (*the_low_target.get_pc) (regcache);
739
740 if (debug_threads)
741 debug_printf ("pc is 0x%lx\n", (long) pc);
742
743 current_thread = saved_thread;
744 return pc;
745}
746
747/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
748 Fill *SYSNO with the syscall nr trapped. */
749
750static void
751get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
752{
753 struct thread_info *saved_thread;
754 struct regcache *regcache;
755
756 if (the_low_target.get_syscall_trapinfo == NULL)
757 {
758 /* If we cannot get the syscall trapinfo, report an unknown
759 system call number. */
760 *sysno = UNKNOWN_SYSCALL;
761 return;
762 }
763
764 saved_thread = current_thread;
765 current_thread = get_lwp_thread (lwp);
766
767 regcache = get_thread_regcache (current_thread, 1);
768 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
769
770 if (debug_threads)
771 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
772
773 current_thread = saved_thread;
774}
775
776static int check_stopped_by_watchpoint (struct lwp_info *child);
777
778/* Called when the LWP stopped for a signal/trap. If it stopped for a
779 trap check what caused it (breakpoint, watchpoint, trace, etc.),
780 and save the result in the LWP's stop_reason field. If it stopped
781 for a breakpoint, decrement the PC if necessary on the lwp's
782 architecture. Returns true if we now have the LWP's stop PC. */
783
784static int
785save_stop_reason (struct lwp_info *lwp)
786{
787 CORE_ADDR pc;
788 CORE_ADDR sw_breakpoint_pc;
789 struct thread_info *saved_thread;
790#if USE_SIGTRAP_SIGINFO
791 siginfo_t siginfo;
792#endif
793
794 if (the_low_target.get_pc == NULL)
795 return 0;
796
797 pc = get_pc (lwp);
798 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
799
800 /* breakpoint_at reads from the current thread. */
801 saved_thread = current_thread;
802 current_thread = get_lwp_thread (lwp);
803
804#if USE_SIGTRAP_SIGINFO
805 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
806 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
807 {
808 if (siginfo.si_signo == SIGTRAP)
809 {
810 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
811 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
812 {
813 /* The si_code is ambiguous on this arch -- check debug
814 registers. */
815 if (!check_stopped_by_watchpoint (lwp))
816 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
817 }
818 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
819 {
820 /* If we determine the LWP stopped for a SW breakpoint,
821 trust it. Particularly don't check watchpoint
822 registers, because at least on s390, we'd find
823 stopped-by-watchpoint as long as there's a watchpoint
824 set. */
825 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
826 }
827 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
828 {
829 /* This can indicate either a hardware breakpoint or
830 hardware watchpoint. Check debug registers. */
831 if (!check_stopped_by_watchpoint (lwp))
832 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
833 }
834 else if (siginfo.si_code == TRAP_TRACE)
835 {
836 /* We may have single stepped an instruction that
837 triggered a watchpoint. In that case, on some
838 architectures (such as x86), instead of TRAP_HWBKPT,
839 si_code indicates TRAP_TRACE, and we need to check
840 the debug registers separately. */
841 if (!check_stopped_by_watchpoint (lwp))
842 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
843 }
844 }
845 }
846#else
847 /* We may have just stepped a breakpoint instruction. E.g., in
848 non-stop mode, GDB first tells the thread A to step a range, and
849 then the user inserts a breakpoint inside the range. In that
850 case we need to report the breakpoint PC. */
851 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
852 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
854
855 if (hardware_breakpoint_inserted_here (pc))
856 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
857
858 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
859 check_stopped_by_watchpoint (lwp);
860#endif
861
862 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
863 {
864 if (debug_threads)
865 {
866 struct thread_info *thr = get_lwp_thread (lwp);
867
868 debug_printf ("CSBB: %s stopped by software breakpoint\n",
869 target_pid_to_str (ptid_of (thr)));
870 }
871
872 /* Back up the PC if necessary. */
873 if (pc != sw_breakpoint_pc)
874 {
875 struct regcache *regcache
876 = get_thread_regcache (current_thread, 1);
877 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
878 }
879
880 /* Update this so we record the correct stop PC below. */
881 pc = sw_breakpoint_pc;
882 }
883 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
884 {
885 if (debug_threads)
886 {
887 struct thread_info *thr = get_lwp_thread (lwp);
888
889 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
890 target_pid_to_str (ptid_of (thr)));
891 }
892 }
893 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
894 {
895 if (debug_threads)
896 {
897 struct thread_info *thr = get_lwp_thread (lwp);
898
899 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
900 target_pid_to_str (ptid_of (thr)));
901 }
902 }
903 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
904 {
905 if (debug_threads)
906 {
907 struct thread_info *thr = get_lwp_thread (lwp);
908
909 debug_printf ("CSBB: %s stopped by trace\n",
910 target_pid_to_str (ptid_of (thr)));
911 }
912 }
913
914 lwp->stop_pc = pc;
915 current_thread = saved_thread;
916 return 1;
917}
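/* A condensed, stand-alone version of the SIGTRAP triage above,
   assuming the Linux TRAP_BRKPT/TRAP_TRACE si_code values; the real
   code goes through the GDB_ARCH_IS_TRAP_* macros because some
   architectures report these codes ambiguously.  Hypothetical
   example_ name, kept under #if 0.  */
#if 0
static const char *
example_classify_sigtrap (pid_t tid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, &si) != 0
      || si.si_signo != SIGTRAP)
    return "not a SIGTRAP ptrace-stop";
  if (si.si_code == TRAP_BRKPT)
    return "software breakpoint";
  if (si.si_code == TRAP_TRACE)
    return "single-step (may still be a watchpoint on some arches)";
  return "other SIGTRAP (e.g. hardware breakpoint/watchpoint)";
}
#endif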
918
919static struct lwp_info *
920add_lwp (ptid_t ptid)
921{
922 struct lwp_info *lwp;
923
924 lwp = XCNEW (struct lwp_info);
925
926 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
927
928 if (the_low_target.new_thread != NULL)
929 the_low_target.new_thread (lwp);
930
931 lwp->thread = add_thread (ptid, lwp);
932
933 return lwp;
934}
935
 936/* Start an inferior process and return its pid.
937 ALLARGS is a vector of program-name and args. */
938
939static int
940linux_create_inferior (char *program, char **allargs)
941{
942 struct lwp_info *new_lwp;
943 int pid;
944 ptid_t ptid;
945 struct cleanup *restore_personality
946 = maybe_disable_address_space_randomization (disable_randomization);
947
948#if defined(__UCLIBC__) && defined(HAS_NOMMU)
949 pid = vfork ();
950#else
951 pid = fork ();
952#endif
953 if (pid < 0)
954 perror_with_name ("fork");
955
956 if (pid == 0)
957 {
958 close_most_fds ();
959 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
960
961 setpgid (0, 0);
962
963 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
964 stdout to stderr so that inferior i/o doesn't corrupt the connection.
965 Also, redirect stdin to /dev/null. */
966 if (remote_connection_is_stdio ())
967 {
968 close (0);
969 open ("/dev/null", O_RDONLY);
970 dup2 (2, 1);
971 if (write (2, "stdin/stdout redirected\n",
972 sizeof ("stdin/stdout redirected\n") - 1) < 0)
973 {
974 /* Errors ignored. */;
975 }
976 }
977
978 execv (program, allargs);
979 if (errno == ENOENT)
980 execvp (program, allargs);
981
982 fprintf (stderr, "Cannot exec %s: %s.\n", program,
983 strerror (errno));
984 fflush (stderr);
985 _exit (0177);
986 }
987
988 do_cleanups (restore_personality);
989
990 linux_add_process (pid, 0);
991
992 ptid = ptid_build (pid, pid, 0);
993 new_lwp = add_lwp (ptid);
994 new_lwp->must_set_ptrace_flags = 1;
995
996 return pid;
997}
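/* The function above elaborates the classic spawn-under-ptrace
   pattern: fork, have the child request tracing with PTRACE_TRACEME,
   then exec; the exec stops the child with a SIGTRAP before any user
   code runs.  A minimal sketch with the gdbserver specifics and most
   error handling stripped; hypothetical example_ name, under #if 0.  */
#if 0
static pid_t
example_spawn_traced (char *program, char **argv)
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      /* Child: request tracing, then exec the target.  */
      ptrace (PTRACE_TRACEME, 0, (void *) 0, (void *) 0);
      execv (program, argv);
      _exit (0177);		/* Only reached if exec failed.  */
    }
  /* Parent: the initial stop is collected later with waitpid.  */
  return pid;
}
#endif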
998
999/* Implement the post_create_inferior target_ops method. */
1000
1001static void
1002linux_post_create_inferior (void)
1003{
1004 struct lwp_info *lwp = get_thread_lwp (current_thread);
1005
1006 linux_arch_setup ();
1007
1008 if (lwp->must_set_ptrace_flags)
1009 {
1010 struct process_info *proc = current_process ();
1011 int options = linux_low_ptrace_options (proc->attached);
1012
1013 linux_enable_event_reporting (lwpid_of (current_thread), options);
1014 lwp->must_set_ptrace_flags = 0;
1015 }
1016}
1017
1018/* Attach to an inferior process. Returns 0 on success, ERRNO on
1019 error. */
1020
1021int
1022linux_attach_lwp (ptid_t ptid)
1023{
1024 struct lwp_info *new_lwp;
1025 int lwpid = ptid_get_lwp (ptid);
1026
1027 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1028 != 0)
1029 return errno;
1030
1031 new_lwp = add_lwp (ptid);
1032
1033 /* We need to wait for SIGSTOP before being able to make the next
1034 ptrace call on this LWP. */
1035 new_lwp->must_set_ptrace_flags = 1;
1036
1037 if (linux_proc_pid_is_stopped (lwpid))
1038 {
1039 if (debug_threads)
1040 debug_printf ("Attached to a stopped process\n");
1041
1042 /* The process is definitely stopped. It is in a job control
1043 stop, unless the kernel predates the TASK_STOPPED /
1044 TASK_TRACED distinction, in which case it might be in a
1045 ptrace stop. Make sure it is in a ptrace stop; from there we
1046 can kill it, signal it, et cetera.
1047
1048 First make sure there is a pending SIGSTOP. Since we are
1049 already attached, the process can not transition from stopped
1050 to running without a PTRACE_CONT; so we know this signal will
1051 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1052 probably already in the queue (unless this kernel is old
1053 enough to use TASK_STOPPED for ptrace stops); but since
1054 SIGSTOP is not an RT signal, it can only be queued once. */
1055 kill_lwp (lwpid, SIGSTOP);
1056
1057 /* Finally, resume the stopped process. This will deliver the
1058 SIGSTOP (or a higher priority signal, just like normal
1059 PTRACE_ATTACH), which we'll catch later on. */
1060 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1061 }
1062
1063 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1064 brings it to a halt.
1065
1066 There are several cases to consider here:
1067
1068 1) gdbserver has already attached to the process and is being notified
1069 of a new thread that is being created.
1070 In this case we should ignore that SIGSTOP and resume the
1071 process. This is handled below by setting stop_expected = 1,
1072 and the fact that add_thread sets last_resume_kind ==
1073 resume_continue.
1074
1075 2) This is the first thread (the process thread), and we're attaching
1076 to it via attach_inferior.
1077 In this case we want the process thread to stop.
1078 This is handled by having linux_attach set last_resume_kind ==
1079 resume_stop after we return.
1080
1081 If the pid we are attaching to is also the tgid, we attach to and
1082 stop all the existing threads. Otherwise, we attach to pid and
1083 ignore any other threads in the same group as this pid.
1084
1085 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1086 existing threads.
1087 In this case we want the thread to stop.
1088 FIXME: This case is currently not properly handled.
1089 We should wait for the SIGSTOP but don't. Things work apparently
1090 because enough time passes between when we ptrace (ATTACH) and when
1091 gdb makes the next ptrace call on the thread.
1092
1093 On the other hand, if we are currently trying to stop all threads, we
1094 should treat the new thread as if we had sent it a SIGSTOP. This works
1095 because we are guaranteed that the add_lwp call above added us to the
1096 end of the list, and so the new thread has not yet reached
1097 wait_for_sigstop (but will). */
1098 new_lwp->stop_expected = 1;
1099
1100 return 0;
1101}
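/* The bare attach handshake that the function above splits across
   attach time and the later wait: PTRACE_ATTACH queues a SIGSTOP for
   the target, and waitpid with __WALL (needed for clone threads)
   collects the resulting ptrace-stop.  Minimal sketch, error paths
   simplified; hypothetical example_ name, under #if 0.  */
#if 0
static int
example_attach_and_wait (pid_t tid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, tid, (void *) 0, (void *) 0) != 0)
    return errno;
  if (waitpid (tid, &status, __WALL) != tid || !WIFSTOPPED (status))
    return -1;			/* E.g., we raced with thread exit.  */
  return 0;
}
#endif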
1102
1103/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1104 already attached. Returns true if a new LWP is found, false
1105 otherwise. */
1106
1107static int
1108attach_proc_task_lwp_callback (ptid_t ptid)
1109{
1110 /* Is this a new thread? */
1111 if (find_thread_ptid (ptid) == NULL)
1112 {
1113 int lwpid = ptid_get_lwp (ptid);
1114 int err;
1115
1116 if (debug_threads)
1117 debug_printf ("Found new lwp %d\n", lwpid);
1118
1119 err = linux_attach_lwp (ptid);
1120
1121 /* Be quiet if we simply raced with the thread exiting. EPERM
1122 is returned if the thread's task still exists, and is marked
1123 as exited or zombie, as well as other conditions, so in that
1124 case, confirm the status in /proc/PID/status. */
1125 if (err == ESRCH
1126 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1127 {
1128 if (debug_threads)
1129 {
1130 debug_printf ("Cannot attach to lwp %d: "
1131 "thread is gone (%d: %s)\n",
1132 lwpid, err, strerror (err));
1133 }
1134 }
1135 else if (err != 0)
1136 {
1137 warning (_("Cannot attach to lwp %d: %s"),
1138 lwpid,
1139 linux_ptrace_attach_fail_reason_string (ptid, err));
1140 }
1141
1142 return 1;
1143 }
1144 return 0;
1145}
1146
1147static void async_file_mark (void);
1148
1149/* Attach to PID. If PID is the tgid, attach to it and all
1150 of its threads. */
1151
1152static int
1153linux_attach (unsigned long pid)
1154{
1155 struct process_info *proc;
1156 struct thread_info *initial_thread;
1157 ptid_t ptid = ptid_build (pid, pid, 0);
1158 int err;
1159
1160 /* Attach to PID. We will check for other threads
1161 soon. */
1162 err = linux_attach_lwp (ptid);
1163 if (err != 0)
1164 error ("Cannot attach to process %ld: %s",
1165 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1166
1167 proc = linux_add_process (pid, 1);
1168
1169 /* Don't ignore the initial SIGSTOP if we just attached to this
1170 process. It will be collected by wait shortly. */
1171 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1172 initial_thread->last_resume_kind = resume_stop;
1173
1174 /* We must attach to every LWP. If /proc is mounted, use that to
1175 find them now. On the one hand, the inferior may be using raw
1176 clone instead of using pthreads. On the other hand, even if it
1177 is using pthreads, GDB may not be connected yet (thread_db needs
1178 to do symbol lookups, through qSymbol). Also, thread_db walks
1179 structures in the inferior's address space to find the list of
1180 threads/LWPs, and those structures may well be corrupted. Note
1181 that once thread_db is loaded, we'll still use it to list threads
1182 and associate pthread info with each LWP. */
1183 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1184
1185 /* GDB will shortly read the xml target description for this
1186 process, to figure out the process' architecture. But the target
1187 description is only filled in when the first process/thread in
1188 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1189 that now, otherwise, if GDB is fast enough, it could read the
1190 target description _before_ that initial stop. */
1191 if (non_stop)
1192 {
1193 struct lwp_info *lwp;
1194 int wstat, lwpid;
1195 ptid_t pid_ptid = pid_to_ptid (pid);
1196
1197 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1198 &wstat, __WALL);
1199 gdb_assert (lwpid > 0);
1200
1201 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1202
1203 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1204 {
1205 lwp->status_pending_p = 1;
1206 lwp->status_pending = wstat;
1207 }
1208
1209 initial_thread->last_resume_kind = resume_continue;
1210
1211 async_file_mark ();
1212
1213 gdb_assert (proc->tdesc != NULL);
1214 }
1215
1216 return 0;
1217}
1218
1219struct counter
1220{
1221 int pid;
1222 int count;
1223};
1224
1225static int
1226second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1227{
1228 struct counter *counter = (struct counter *) args;
1229
1230 if (ptid_get_pid (entry->id) == counter->pid)
1231 {
1232 if (++counter->count > 1)
1233 return 1;
1234 }
1235
1236 return 0;
1237}
1238
1239static int
1240last_thread_of_process_p (int pid)
1241{
1242 struct counter counter = { pid , 0 };
1243
1244 return (find_inferior (&all_threads,
1245 second_thread_of_pid_p, &counter) == NULL);
1246}
1247
1248/* Kill LWP. */
1249
1250static void
1251linux_kill_one_lwp (struct lwp_info *lwp)
1252{
1253 struct thread_info *thr = get_lwp_thread (lwp);
1254 int pid = lwpid_of (thr);
1255
1256 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1257 there is no signal context, and ptrace(PTRACE_KILL) (or
1258 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1259 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1260 alternative is to kill with SIGKILL. We only need one SIGKILL
1261 per process, not one for each thread. But since we still support
 1262 debugging programs using raw clone without CLONE_THREAD,
1263 we send one for each thread. For years, we used PTRACE_KILL
1264 only, so we're being a bit paranoid about some old kernels where
1265 PTRACE_KILL might work better (dubious if there are any such, but
1266 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1267 second, and so we're fine everywhere. */
1268
1269 errno = 0;
1270 kill_lwp (pid, SIGKILL);
1271 if (debug_threads)
1272 {
1273 int save_errno = errno;
1274
1275 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1276 target_pid_to_str (ptid_of (thr)),
1277 save_errno ? strerror (save_errno) : "OK");
1278 }
1279
1280 errno = 0;
1281 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1282 if (debug_threads)
1283 {
1284 int save_errno = errno;
1285
1286 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1287 target_pid_to_str (ptid_of (thr)),
1288 save_errno ? strerror (save_errno) : "OK");
1289 }
1290}
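/* kill_lwp (declared above, defined later in this file) directs the
   signal at a single thread rather than the whole thread group.  On
   kernels that provide it, that boils down to the tgkill syscall; a
   minimal hypothetical version (the example_ name is not in the real
   file), under #if 0.  <sys/syscall.h> is already included above.  */
#if 0
static int
example_kill_lwp (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif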
1291
1292/* Kill LWP and wait for it to die. */
1293
1294static void
1295kill_wait_lwp (struct lwp_info *lwp)
1296{
1297 struct thread_info *thr = get_lwp_thread (lwp);
1298 int pid = ptid_get_pid (ptid_of (thr));
1299 int lwpid = ptid_get_lwp (ptid_of (thr));
1300 int wstat;
1301 int res;
1302
1303 if (debug_threads)
1304 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1305
1306 do
1307 {
1308 linux_kill_one_lwp (lwp);
1309
1310 /* Make sure it died. Notes:
1311
1312 - The loop is most likely unnecessary.
1313
1314 - We don't use linux_wait_for_event as that could delete lwps
1315 while we're iterating over them. We're not interested in
1316 any pending status at this point, only in making sure all
1317 wait status on the kernel side are collected until the
1318 process is reaped.
1319
1320 - We don't use __WALL here as the __WALL emulation relies on
1321 SIGCHLD, and killing a stopped process doesn't generate
1322 one, nor an exit status.
1323 */
1324 res = my_waitpid (lwpid, &wstat, 0);
1325 if (res == -1 && errno == ECHILD)
1326 res = my_waitpid (lwpid, &wstat, __WCLONE);
1327 } while (res > 0 && WIFSTOPPED (wstat));
1328
1329 /* Even if it was stopped, the child may have already disappeared.
1330 E.g., if it was killed by SIGKILL. */
1331 if (res < 0 && errno != ECHILD)
1332 perror_with_name ("kill_wait_lwp");
1333}
1334
1335/* Callback for `find_inferior'. Kills an lwp of a given process,
1336 except the leader. */
1337
1338static int
1339kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1340{
1341 struct thread_info *thread = (struct thread_info *) entry;
1342 struct lwp_info *lwp = get_thread_lwp (thread);
1343 int pid = * (int *) args;
1344
1345 if (ptid_get_pid (entry->id) != pid)
1346 return 0;
1347
1348 /* We avoid killing the first thread here, because of a Linux kernel (at
1349 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1350 the children get a chance to be reaped, it will remain a zombie
1351 forever. */
1352
1353 if (lwpid_of (thread) == pid)
1354 {
1355 if (debug_threads)
1356 debug_printf ("lkop: is last of process %s\n",
1357 target_pid_to_str (entry->id));
1358 return 0;
1359 }
1360
1361 kill_wait_lwp (lwp);
1362 return 0;
1363}
1364
1365static int
1366linux_kill (int pid)
1367{
1368 struct process_info *process;
1369 struct lwp_info *lwp;
1370
1371 process = find_process_pid (pid);
1372 if (process == NULL)
1373 return -1;
1374
1375 /* If we're killing a running inferior, make sure it is stopped
1376 first, as PTRACE_KILL will not work otherwise. */
1377 stop_all_lwps (0, NULL);
1378
1379 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1380
1381 /* See the comment in linux_kill_one_lwp. We did not kill the first
1382 thread in the list, so do so now. */
1383 lwp = find_lwp_pid (pid_to_ptid (pid));
1384
1385 if (lwp == NULL)
1386 {
1387 if (debug_threads)
1388 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1389 pid);
1390 }
1391 else
1392 kill_wait_lwp (lwp);
1393
1394 the_target->mourn (process);
1395
1396 /* Since we presently can only stop all lwps of all processes, we
1397 need to unstop lwps of other processes. */
1398 unstop_all_lwps (0, NULL);
1399 return 0;
1400}
1401
1402/* Get pending signal of THREAD, for detaching purposes. This is the
1403 signal the thread last stopped for, which we need to deliver to the
1404 thread when detaching, otherwise, it'd be suppressed/lost. */
1405
1406static int
1407get_detach_signal (struct thread_info *thread)
1408{
1409 enum gdb_signal signo = GDB_SIGNAL_0;
1410 int status;
1411 struct lwp_info *lp = get_thread_lwp (thread);
1412
1413 if (lp->status_pending_p)
1414 status = lp->status_pending;
1415 else
1416 {
1417 /* If the thread had been suspended by gdbserver, and it stopped
1418 cleanly, then it'll have stopped with SIGSTOP. But we don't
1419 want to deliver that SIGSTOP. */
1420 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1421 || thread->last_status.value.sig == GDB_SIGNAL_0)
1422 return 0;
1423
1424 /* Otherwise, we may need to deliver the signal we
1425 intercepted. */
1426 status = lp->last_status;
1427 }
1428
1429 if (!WIFSTOPPED (status))
1430 {
1431 if (debug_threads)
1432 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1433 target_pid_to_str (ptid_of (thread)));
1434 return 0;
1435 }
1436
1437 /* Extended wait statuses aren't real SIGTRAPs. */
1438 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1439 {
1440 if (debug_threads)
1441 debug_printf ("GPS: lwp %s had stopped with extended "
1442 "status: no pending signal\n",
1443 target_pid_to_str (ptid_of (thread)));
1444 return 0;
1445 }
1446
1447 signo = gdb_signal_from_host (WSTOPSIG (status));
1448
1449 if (program_signals_p && !program_signals[signo])
1450 {
1451 if (debug_threads)
1452 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1453 target_pid_to_str (ptid_of (thread)),
1454 gdb_signal_to_string (signo));
1455 return 0;
1456 }
1457 else if (!program_signals_p
1458 /* If we have no way to know which signals GDB does not
1459 want to have passed to the program, assume
1460 SIGTRAP/SIGINT, which is GDB's default. */
1461 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1462 {
1463 if (debug_threads)
1464 debug_printf ("GPS: lwp %s had signal %s, "
1465 "but we don't know if we should pass it. "
1466 "Default to not.\n",
1467 target_pid_to_str (ptid_of (thread)),
1468 gdb_signal_to_string (signo));
1469 return 0;
1470 }
1471 else
1472 {
1473 if (debug_threads)
1474 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1475 target_pid_to_str (ptid_of (thread)),
1476 gdb_signal_to_string (signo));
1477
1478 return WSTOPSIG (status);
1479 }
1480}
1481
1482/* Detach from LWP. */
1483
1484static void
1485linux_detach_one_lwp (struct lwp_info *lwp)
1486{
1487 struct thread_info *thread = get_lwp_thread (lwp);
1488 int sig;
1489 int lwpid;
1490
1491 /* If there is a pending SIGSTOP, get rid of it. */
1492 if (lwp->stop_expected)
1493 {
1494 if (debug_threads)
1495 debug_printf ("Sending SIGCONT to %s\n",
1496 target_pid_to_str (ptid_of (thread)));
1497
1498 kill_lwp (lwpid_of (thread), SIGCONT);
1499 lwp->stop_expected = 0;
1500 }
1501
1502 /* Pass on any pending signal for this thread. */
1503 sig = get_detach_signal (thread);
1504
1505 /* Preparing to resume may try to write registers, and fail if the
1506 lwp is zombie. If that happens, ignore the error. We'll handle
1507 it below, when detach fails with ESRCH. */
1508 TRY
1509 {
1510 /* Flush any pending changes to the process's registers. */
1511 regcache_invalidate_thread (thread);
1512
1513 /* Finally, let it resume. */
1514 if (the_low_target.prepare_to_resume != NULL)
1515 the_low_target.prepare_to_resume (lwp);
1516 }
1517 CATCH (ex, RETURN_MASK_ERROR)
1518 {
1519 if (!check_ptrace_stopped_lwp_gone (lwp))
1520 throw_exception (ex);
1521 }
1522 END_CATCH
1523
1524 lwpid = lwpid_of (thread);
1525 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1526 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1527 {
1528 int save_errno = errno;
1529
1530 /* We know the thread exists, so ESRCH must mean the lwp is
1531 zombie. This can happen if one of the already-detached
1532 threads exits the whole thread group. In that case we're
1533 still attached, and must reap the lwp. */
1534 if (save_errno == ESRCH)
1535 {
1536 int ret, status;
1537
1538 ret = my_waitpid (lwpid, &status, __WALL);
1539 if (ret == -1)
1540 {
1541 warning (_("Couldn't reap LWP %d while detaching: %s"),
1542 lwpid, strerror (errno));
1543 }
1544 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1545 {
1546 warning (_("Reaping LWP %d while detaching "
1547 "returned unexpected status 0x%x"),
1548 lwpid, status);
1549 }
1550 }
1551 else
1552 {
1553 error (_("Can't detach %s: %s"),
1554 target_pid_to_str (ptid_of (thread)),
1555 strerror (save_errno));
1556 }
1557 }
1558 else if (debug_threads)
1559 {
1560 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1561 target_pid_to_str (ptid_of (thread)),
1562 strsignal (sig));
1563 }
1564
1565 delete_lwp (lwp);
1566}
1567
1568/* Callback for find_inferior. Detaches from non-leader threads of a
1569 given process. */
1570
1571static int
1572linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
1573{
1574 struct thread_info *thread = (struct thread_info *) entry;
1575 struct lwp_info *lwp = get_thread_lwp (thread);
1576 int pid = *(int *) args;
1577 int lwpid = lwpid_of (thread);
1578
1579 /* Skip other processes. */
1580 if (ptid_get_pid (entry->id) != pid)
1581 return 0;
1582
1583 /* We don't actually detach from the thread group leader just yet.
1584 If the thread group exits, we must reap the zombie clone lwps
1585 before we're able to reap the leader. */
1586 if (ptid_get_pid (entry->id) == lwpid)
1587 return 0;
1588
1589 linux_detach_one_lwp (lwp);
1590 return 0;
1591}
1592
1593static int
1594linux_detach (int pid)
1595{
1596 struct process_info *process;
1597 struct lwp_info *main_lwp;
1598
1599 process = find_process_pid (pid);
1600 if (process == NULL)
1601 return -1;
1602
 1603 /* If a step-over is already in progress, let it finish first;
 1604 otherwise, nesting a stabilize_threads operation on top gets
 1605 really messy. */
1606 complete_ongoing_step_over ();
1607
1608 /* Stop all threads before detaching. First, ptrace requires that
 1609 the thread is stopped to successfully detach. Second, thread_db
1610 may need to uninstall thread event breakpoints from memory, which
1611 only works with a stopped process anyway. */
1612 stop_all_lwps (0, NULL);
1613
1614#ifdef USE_THREAD_DB
1615 thread_db_detach (process);
1616#endif
1617
1618 /* Stabilize threads (move out of jump pads). */
1619 stabilize_threads ();
1620
1621 /* Detach from the clone lwps first. If the thread group exits just
1622 while we're detaching, we must reap the clone lwps before we're
1623 able to reap the leader. */
1624 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1625
1626 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1627 linux_detach_one_lwp (main_lwp);
1628
1629 the_target->mourn (process);
1630
1631 /* Since we presently can only stop all lwps of all processes, we
1632 need to unstop lwps of other processes. */
1633 unstop_all_lwps (0, NULL);
1634 return 0;
1635}
1636
1637/* Remove all LWPs that belong to process PROC from the lwp list. */
1638
1639static int
1640delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1641{
1642 struct thread_info *thread = (struct thread_info *) entry;
1643 struct lwp_info *lwp = get_thread_lwp (thread);
1644 struct process_info *process = (struct process_info *) proc;
1645
1646 if (pid_of (thread) == pid_of (process))
1647 delete_lwp (lwp);
1648
1649 return 0;
1650}
1651
1652static void
1653linux_mourn (struct process_info *process)
1654{
1655 struct process_info_private *priv;
1656
1657#ifdef USE_THREAD_DB
1658 thread_db_mourn (process);
1659#endif
1660
1661 find_inferior (&all_threads, delete_lwp_callback, process);
1662
 1663 /* Free all private data. */
1664 priv = process->priv;
1665 free (priv->arch_private);
1666 free (priv);
1667 process->priv = NULL;
1668
1669 remove_process (process);
1670}
1671
1672static void
1673linux_join (int pid)
1674{
1675 int status, ret;
1676
1677 do {
1678 ret = my_waitpid (pid, &status, 0);
1679 if (WIFEXITED (status) || WIFSIGNALED (status))
1680 break;
1681 } while (ret != -1 || errno != ECHILD);
1682}
1683
1684/* Return nonzero if the given thread is still alive. */
1685static int
1686linux_thread_alive (ptid_t ptid)
1687{
1688 struct lwp_info *lwp = find_lwp_pid (ptid);
1689
1690 /* We assume we always know if a thread exits. If a whole process
1691 exited but we still haven't been able to report it to GDB, we'll
1692 hold on to the last lwp of the dead process. */
1693 if (lwp != NULL)
1694 return !lwp_is_marked_dead (lwp);
1695 else
1696 return 0;
1697}
1698
1699/* Return 1 if this lwp still has an interesting status pending. If
1700 not (e.g., it had stopped for a breakpoint that is gone), return
 1701 0. */
1702
1703static int
1704thread_still_has_status_pending_p (struct thread_info *thread)
1705{
1706 struct lwp_info *lp = get_thread_lwp (thread);
1707
1708 if (!lp->status_pending_p)
1709 return 0;
1710
1711 if (thread->last_resume_kind != resume_stop
1712 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1713 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1714 {
1715 struct thread_info *saved_thread;
1716 CORE_ADDR pc;
1717 int discard = 0;
1718
1719 gdb_assert (lp->last_status != 0);
1720
1721 pc = get_pc (lp);
1722
1723 saved_thread = current_thread;
1724 current_thread = thread;
1725
1726 if (pc != lp->stop_pc)
1727 {
1728 if (debug_threads)
1729 debug_printf ("PC of %ld changed\n",
1730 lwpid_of (thread));
1731 discard = 1;
1732 }
1733
1734#if !USE_SIGTRAP_SIGINFO
1735 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1736 && !(*the_low_target.breakpoint_at) (pc))
1737 {
1738 if (debug_threads)
1739 debug_printf ("previous SW breakpoint of %ld gone\n",
1740 lwpid_of (thread));
1741 discard = 1;
1742 }
1743 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1744 && !hardware_breakpoint_inserted_here (pc))
1745 {
1746 if (debug_threads)
1747 debug_printf ("previous HW breakpoint of %ld gone\n",
1748 lwpid_of (thread));
1749 discard = 1;
1750 }
1751#endif
1752
1753 current_thread = saved_thread;
1754
1755 if (discard)
1756 {
1757 if (debug_threads)
1758 debug_printf ("discarding pending breakpoint status\n");
1759 lp->status_pending_p = 0;
1760 return 0;
1761 }
1762 }
1763
1764 return 1;
1765}
1766
1767/* Returns true if LWP is resumed from the client's perspective. */
1768
1769static int
1770lwp_resumed (struct lwp_info *lwp)
1771{
1772 struct thread_info *thread = get_lwp_thread (lwp);
1773
1774 if (thread->last_resume_kind != resume_stop)
1775 return 1;
1776
1777 /* Did gdb send us a `vCont;t', but we haven't reported the
1778 corresponding stop to gdb yet? If so, the thread is still
1779 resumed/running from gdb's perspective. */
1780 if (thread->last_resume_kind == resume_stop
1781 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1782 return 1;
1783
1784 return 0;
1785}
1786
1787/* Return 1 if this lwp has an interesting status pending. */
1788static int
1789status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1790{
1791 struct thread_info *thread = (struct thread_info *) entry;
1792 struct lwp_info *lp = get_thread_lwp (thread);
1793 ptid_t ptid = * (ptid_t *) arg;
1794
1795 /* Check if we're only interested in events from a specific process
1796 or a specific LWP. */
1797 if (!ptid_match (ptid_of (thread), ptid))
1798 return 0;
1799
1800 if (!lwp_resumed (lp))
1801 return 0;
1802
1803 if (lp->status_pending_p
1804 && !thread_still_has_status_pending_p (thread))
1805 {
1806 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1807 return 0;
1808 }
1809
1810 return lp->status_pending_p;
1811}
1812
1813static int
1814same_lwp (struct inferior_list_entry *entry, void *data)
1815{
1816 ptid_t ptid = *(ptid_t *) data;
1817 int lwp;
1818
1819 if (ptid_get_lwp (ptid) != 0)
1820 lwp = ptid_get_lwp (ptid);
1821 else
1822 lwp = ptid_get_pid (ptid);
1823
1824 if (ptid_get_lwp (entry->id) == lwp)
1825 return 1;
1826
1827 return 0;
1828}
1829
1830struct lwp_info *
1831find_lwp_pid (ptid_t ptid)
1832{
1833 struct inferior_list_entry *thread
1834 = find_inferior (&all_threads, same_lwp, &ptid);
1835
1836 if (thread == NULL)
1837 return NULL;
1838
1839 return get_thread_lwp ((struct thread_info *) thread);
1840}
1841
1842/* Return the number of known LWPs in the tgid given by PID. */
1843
1844static int
1845num_lwps (int pid)
1846{
1847 struct inferior_list_entry *inf, *tmp;
1848 int count = 0;
1849
1850 ALL_INFERIORS (&all_threads, inf, tmp)
1851 {
1852 if (ptid_get_pid (inf->id) == pid)
1853 count++;
1854 }
1855
1856 return count;
1857}
1858
1859/* The arguments passed to iterate_over_lwps. */
1860
1861struct iterate_over_lwps_args
1862{
1863 /* The FILTER argument passed to iterate_over_lwps. */
1864 ptid_t filter;
1865
1866 /* The CALLBACK argument passed to iterate_over_lwps. */
1867 iterate_over_lwps_ftype *callback;
1868
1869 /* The DATA argument passed to iterate_over_lwps. */
1870 void *data;
1871};
1872
1873/* Callback for find_inferior used by iterate_over_lwps to filter
1874 calls to the callback supplied to that function. Returning a
 1875 nonzero value causes find_inferior to stop iterating and return
1876 the current inferior_list_entry. Returning zero indicates that
 1877 find_inferior should continue iterating. */
1878
1879static int
1880iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1881{
1882 struct iterate_over_lwps_args *args
1883 = (struct iterate_over_lwps_args *) args_p;
1884
1885 if (ptid_match (entry->id, args->filter))
1886 {
1887 struct thread_info *thr = (struct thread_info *) entry;
1888 struct lwp_info *lwp = get_thread_lwp (thr);
1889
1890 return (*args->callback) (lwp, args->data);
1891 }
1892
1893 return 0;
1894}
1895
1896/* See nat/linux-nat.h. */
1897
1898struct lwp_info *
1899iterate_over_lwps (ptid_t filter,
1900 iterate_over_lwps_ftype callback,
1901 void *data)
1902{
1903 struct iterate_over_lwps_args args = {filter, callback, data};
1904 struct inferior_list_entry *entry;
1905
1906 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1907 if (entry == NULL)
1908 return NULL;
1909
1910 return get_thread_lwp ((struct thread_info *) entry);
1911}
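
/* A minimal usage sketch (IS_SUSPENDED is hypothetical, not part of
   this file): find the first suspended LWP of process PID.

     static int
     is_suspended (struct lwp_info *lwp, void *data)
     {
       return lwp->suspended > 0;
     }

     struct lwp_info *lwp
       = iterate_over_lwps (pid_to_ptid (pid), is_suspended, NULL);
*/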
1912
1913/* Detect zombie thread group leaders, and "exit" them. We can't reap
1914 their exits until all other threads in the group have exited. */
1915
1916static void
1917check_zombie_leaders (void)
1918{
1919 struct process_info *proc, *tmp;
1920
1921 ALL_PROCESSES (proc, tmp)
1922 {
1923 pid_t leader_pid = pid_of (proc);
1924 struct lwp_info *leader_lp;
1925
1926 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1927
1928 if (debug_threads)
1929 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1930 "num_lwps=%d, zombie=%d\n",
1931 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1932 linux_proc_pid_is_zombie (leader_pid));
1933
1934 if (leader_lp != NULL && !leader_lp->stopped
1935 /* Check if there are other threads in the group, as we may
1936 have raced with the inferior simply exiting. */
1937 && !last_thread_of_process_p (leader_pid)
1938 && linux_proc_pid_is_zombie (leader_pid))
1939 {
1940 /* A leader zombie can mean one of two things:
1941
1942 - It exited, and there's an exit status pending, or
1943 only the leader exited (not the whole
1944 program). In the latter case, we can't waitpid the
1945 leader's exit status until all other threads are gone.
1946
1947 - There are 3 or more threads in the group, and a thread
1948 other than the leader exec'd. On an exec, the Linux
1949 kernel destroys all other threads (except the execing
1950 one) in the thread group, and resets the execing thread's
1951 tid to the tgid. No exit notification is sent for the
1952 execing thread -- from the ptracer's perspective, it
1953 appears as though the execing thread just vanishes.
1954 Until we reap all other threads except the leader and the
1955 execing thread, the leader will be zombie, and the
1956 execing thread will be in `D (disc sleep)'. As soon as
1957 all other threads are reaped, the execing thread changes
1958 its tid to the tgid, and the previous (zombie) leader
1959 vanishes, giving way to the "new" leader. We could try
1960 distinguishing the exit and exec cases, by waiting once
1961 more, and seeing if something comes out, but it doesn't
1962 sound useful. The previous leader _does_ go away, and
1963 we'll re-add the new one once we see the exec event
1964 (which is just the same as what would happen if the
1965 previous leader did exit voluntarily before some other
1966 thread execs). */
1967
1968 if (debug_threads)
1969 fprintf (stderr,
1970 "CZL: Thread group leader %d zombie "
1971 "(it exited, or another thread execd).\n",
1972 leader_pid);
1973
1974 delete_lwp (leader_lp);
1975 }
1976 }
1977}
1978
1979/* Callback for `find_inferior'. Returns the first LWP that is not
1980 stopped. ARG is a PTID filter. */
1981
1982static int
1983not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1984{
1985 struct thread_info *thr = (struct thread_info *) entry;
1986 struct lwp_info *lwp;
1987 ptid_t filter = *(ptid_t *) arg;
1988
1989 if (!ptid_match (ptid_of (thr), filter))
1990 return 0;
1991
1992 lwp = get_thread_lwp (thr);
1993 if (!lwp->stopped)
1994 return 1;
1995
1996 return 0;
1997}
1998
1999/* Increment LWP's suspend count. */
2000
2001static void
2002lwp_suspended_inc (struct lwp_info *lwp)
2003{
2004 lwp->suspended++;
2005
2006 if (debug_threads && lwp->suspended > 4)
2007 {
2008 struct thread_info *thread = get_lwp_thread (lwp);
2009
2010 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2011 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2012 }
2013}
2014
2015/* Decrement LWP's suspend count. */
2016
2017static void
2018lwp_suspended_decr (struct lwp_info *lwp)
2019{
2020 lwp->suspended--;
2021
2022 if (lwp->suspended < 0)
2023 {
2024 struct thread_info *thread = get_lwp_thread (lwp);
2025
2026 internal_error (__FILE__, __LINE__,
2027 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2028 lwp->suspended);
2029 }
2030}
2031
2032/* This function should only be called if the LWP got a SIGTRAP.
2033
2034 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2035 event was handled, 0 otherwise. */
2036
2037static int
2038handle_tracepoints (struct lwp_info *lwp)
2039{
2040 struct thread_info *tinfo = get_lwp_thread (lwp);
2041 int tpoint_related_event = 0;
2042
2043 gdb_assert (lwp->suspended == 0);
2044
2045 /* If this tracepoint hit causes a tracing stop, we'll immediately
2046 uninsert tracepoints. To do this, we temporarily pause all
2047 threads, unpatch away, and then unpause threads. We need to make
2048 sure the unpausing doesn't resume LWP too. */
2049 lwp_suspended_inc (lwp);
2050
2051 /* And we need to be sure that any all-threads-stopping doesn't try
2052 to move threads out of the jump pads, as it could deadlock the
2053 inferior (LWP could be in the jump pad, maybe even holding the
2054 lock). */
2055
2056 /* Do any necessary step collect actions. */
2057 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2058
2059 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2060
2061 /* See if we just hit a tracepoint and do its main collect
2062 actions. */
2063 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2064
2065 lwp_suspended_decr (lwp);
2066
2067 gdb_assert (lwp->suspended == 0);
2068 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2069
2070 if (tpoint_related_event)
2071 {
2072 if (debug_threads)
2073 debug_printf ("got a tracepoint event\n");
2074 return 1;
2075 }
2076
2077 return 0;
2078}
2079
2080/* Convenience wrapper. Returns nonzero if LWP is presently collecting
2081 a fast tracepoint (see fast_tracepoint_collecting), else 0. */
2082
2083static int
2084linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2085 struct fast_tpoint_collect_status *status)
2086{
2087 CORE_ADDR thread_area;
2088 struct thread_info *thread = get_lwp_thread (lwp);
2089
2090 if (the_low_target.get_thread_area == NULL)
2091 return 0;
2092
2093 /* Get the thread area address. This is used to recognize which
2094 thread is which when tracing with the in-process agent library.
2095 We don't read anything from the address, and treat it as opaque;
2096 it's the address itself that we assume is unique per-thread. */
2097 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2098 return 0;
2099
2100 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2101}
2102
2103/* The reason we resume in the caller is that we want to be able
2104 to pass lwp->status_pending as WSTAT, and we need to clear
2105 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2106 refuses to resume. */
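/* Returns 1 if LWP is collecting a fast tracepoint and must finish
   moving out of the jump pad before the pending signal can be
   reported (the caller should then defer the signal and resume the
   LWP), and 0 if the event can be reported right away. */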
2107
2108static int
2109maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2110{
2111 struct thread_info *saved_thread;
2112
2113 saved_thread = current_thread;
2114 current_thread = get_lwp_thread (lwp);
2115
2116 if ((wstat == NULL
2117 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2118 && supports_fast_tracepoints ()
2119 && agent_loaded_p ())
2120 {
2121 struct fast_tpoint_collect_status status;
2122 int r;
2123
2124 if (debug_threads)
2125 debug_printf ("Checking whether LWP %ld needs to move out of the "
2126 "jump pad.\n",
2127 lwpid_of (current_thread));
2128
2129 r = linux_fast_tracepoint_collecting (lwp, &status);
2130
2131 if (wstat == NULL
2132 || (WSTOPSIG (*wstat) != SIGILL
2133 && WSTOPSIG (*wstat) != SIGFPE
2134 && WSTOPSIG (*wstat) != SIGSEGV
2135 && WSTOPSIG (*wstat) != SIGBUS))
2136 {
2137 lwp->collecting_fast_tracepoint = r;
2138
2139 if (r != 0)
2140 {
2141 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2142 {
2143 /* Haven't executed the original instruction yet.
2144 Set breakpoint there, and wait till it's hit,
2145 then single-step until exiting the jump pad. */
2146 lwp->exit_jump_pad_bkpt
2147 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2148 }
2149
2150 if (debug_threads)
2151 debug_printf ("Checking whether LWP %ld needs to move out of "
2152 "the jump pad...it does\n",
2153 lwpid_of (current_thread));
2154 current_thread = saved_thread;
2155
2156 return 1;
2157 }
2158 }
2159 else
2160 {
2161 /* If we get a synchronous signal while collecting, *and*
2162 while executing the (relocated) original instruction,
2163 reset the PC to point at the tpoint address, before
2164 reporting to GDB. Otherwise, it's an IPA lib bug: just
2165 report the signal to GDB, and pray for the best. */
2166
2167 lwp->collecting_fast_tracepoint = 0;
2168
2169 if (r != 0
2170 && (status.adjusted_insn_addr <= lwp->stop_pc
2171 && lwp->stop_pc < status.adjusted_insn_addr_end))
2172 {
2173 siginfo_t info;
2174 struct regcache *regcache;
2175
2176 /* The si_addr on a few signals references the address
2177 of the faulting instruction. Adjust that as
2178 well. */
2179 if ((WSTOPSIG (*wstat) == SIGILL
2180 || WSTOPSIG (*wstat) == SIGFPE
2181 || WSTOPSIG (*wstat) == SIGBUS
2182 || WSTOPSIG (*wstat) == SIGSEGV)
2183 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2184 (PTRACE_TYPE_ARG3) 0, &info) == 0
2185 /* Final check just to make sure we don't clobber
2186 the siginfo of non-kernel-sent signals. */
2187 && (uintptr_t) info.si_addr == lwp->stop_pc)
2188 {
2189 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2190 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2191 (PTRACE_TYPE_ARG3) 0, &info);
2192 }
2193
2194 regcache = get_thread_regcache (current_thread, 1);
2195 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2196 lwp->stop_pc = status.tpoint_addr;
2197
2198 /* Cancel any fast tracepoint lock this thread was
2199 holding. */
2200 force_unlock_trace_buffer ();
2201 }
2202
2203 if (lwp->exit_jump_pad_bkpt != NULL)
2204 {
2205 if (debug_threads)
2206 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2207 "stopping all threads momentarily.\n");
2208
2209 stop_all_lwps (1, lwp);
2210
2211 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2212 lwp->exit_jump_pad_bkpt = NULL;
2213
2214 unstop_all_lwps (1, lwp);
2215
2216 gdb_assert (lwp->suspended >= 0);
2217 }
2218 }
2219 }
2220
2221 if (debug_threads)
2222 debug_printf ("Checking whether LWP %ld needs to move out of the "
2223 "jump pad...no\n",
2224 lwpid_of (current_thread));
2225
2226 current_thread = saved_thread;
2227 return 0;
2228}
2229
2230/* Enqueue one signal in the "signals to report later when out of the
2231 jump pad" list. */
2232
2233static void
2234enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2235{
2236 struct pending_signals *p_sig;
2237 struct thread_info *thread = get_lwp_thread (lwp);
2238
2239 if (debug_threads)
2240 debug_printf ("Deferring signal %d for LWP %ld.\n",
2241 WSTOPSIG (*wstat), lwpid_of (thread));
2242
2243 if (debug_threads)
2244 {
2245 struct pending_signals *sig;
2246
2247 for (sig = lwp->pending_signals_to_report;
2248 sig != NULL;
2249 sig = sig->prev)
2250 debug_printf (" Already queued %d\n",
2251 sig->signal);
2252
2253 debug_printf (" (no more currently queued signals)\n");
2254 }
2255
2256 /* Don't enqueue non-RT signals if they are already in the deferred
2257 queue. (SIGSTOP is the signal most likely to end up here
2258 twice.) */
2259 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2260 {
2261 struct pending_signals *sig;
2262
2263 for (sig = lwp->pending_signals_to_report;
2264 sig != NULL;
2265 sig = sig->prev)
2266 {
2267 if (sig->signal == WSTOPSIG (*wstat))
2268 {
2269 if (debug_threads)
2270 debug_printf ("Not requeuing already queued non-RT signal %d"
2271 " for LWP %ld\n",
2272 sig->signal,
2273 lwpid_of (thread));
2274 return;
2275 }
2276 }
2277 }
2278
2279 p_sig = XCNEW (struct pending_signals);
2280 p_sig->prev = lwp->pending_signals_to_report;
2281 p_sig->signal = WSTOPSIG (*wstat);
2282
2283 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2284 &p_sig->info);
2285
2286 lwp->pending_signals_to_report = p_sig;
2287}
2288
2289/* Dequeue one signal from the "signals to report later when out of
2290 the jump pad" list. */
2291
2292static int
2293dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2294{
2295 struct thread_info *thread = get_lwp_thread (lwp);
2296
2297 if (lwp->pending_signals_to_report != NULL)
2298 {
2299 struct pending_signals **p_sig;
2300
2301 p_sig = &lwp->pending_signals_to_report;
2302 while ((*p_sig)->prev != NULL)
2303 p_sig = &(*p_sig)->prev;
2304
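      /* Synthesize a wait status just as if the LWP had stopped with
         this signal; W_STOPCODE (sig) is typically
         ((sig) << 8 | 0x7f). */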
2305 *wstat = W_STOPCODE ((*p_sig)->signal);
2306 if ((*p_sig)->info.si_signo != 0)
2307 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2308 &(*p_sig)->info);
2309 free (*p_sig);
2310 *p_sig = NULL;
2311
2312 if (debug_threads)
2313 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2314 WSTOPSIG (*wstat), lwpid_of (thread));
2315
2316 if (debug_threads)
2317 {
2318 struct pending_signals *sig;
2319
2320 for (sig = lwp->pending_signals_to_report;
2321 sig != NULL;
2322 sig = sig->prev)
2323 debug_printf (" Still queued %d\n",
2324 sig->signal);
2325
2326 debug_printf (" (no more queued signals)\n");
2327 }
2328
2329 return 1;
2330 }
2331
2332 return 0;
2333}
2334
2335/* Fetch the possibly triggered data watchpoint info and store it in
2336 CHILD.
2337
2338 On some archs, like x86, that use debug registers to set
2339 watchpoints, the way to know which watched address trapped
2340 is to check the register that is used to select which
2341 address to watch. The problem is that between setting the watchpoint
2342 and reading back which data address trapped, the user may change
2343 the set of watchpoints, and, as a consequence, GDB changes the
2344 debug registers in the inferior. To avoid reading back a stale
2345 stopped-data-address when that happens, we cache in LP the fact
2346 that a watchpoint trapped, and the corresponding data address, as
2347 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2348 registers meanwhile, we have the cached data we can rely on. */
2349
2350static int
2351check_stopped_by_watchpoint (struct lwp_info *child)
2352{
2353 if (the_low_target.stopped_by_watchpoint != NULL)
2354 {
2355 struct thread_info *saved_thread;
2356
2357 saved_thread = current_thread;
2358 current_thread = get_lwp_thread (child);
2359
2360 if (the_low_target.stopped_by_watchpoint ())
2361 {
2362 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2363
2364 if (the_low_target.stopped_data_address != NULL)
2365 child->stopped_data_address
2366 = the_low_target.stopped_data_address ();
2367 else
2368 child->stopped_data_address = 0;
2369 }
2370
2371 current_thread = saved_thread;
2372 }
2373
2374 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2375}
2376
2377/* Return the ptrace options that we want to try to enable. */
2378
2379static int
2380linux_low_ptrace_options (int attached)
2381{
2382 int options = 0;
2383
2384 if (!attached)
2385 options |= PTRACE_O_EXITKILL;
2386
2387 if (report_fork_events)
2388 options |= PTRACE_O_TRACEFORK;
2389
2390 if (report_vfork_events)
2391 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2392
2393 if (report_exec_events)
2394 options |= PTRACE_O_TRACEEXEC;
2395
2396 options |= PTRACE_O_TRACESYSGOOD;
2397
2398 return options;
2399}
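
/* Note: with PTRACE_O_TRACESYSGOOD set, syscall stops report a
   WSTOPSIG of (SIGTRAP | 0x80) rather than plain SIGTRAP; the
   SYSCALL_SIGTRAP checks in linux_low_filter_event rely on this to
   tell syscall stops apart from breakpoint traps. */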
2400
2401/* Do low-level handling of the event, and check if we should go on
2402 and pass it to caller code. Return the affected lwp if we should, or
2403 NULL otherwise. */
2404
2405static struct lwp_info *
2406linux_low_filter_event (int lwpid, int wstat)
2407{
2408 struct lwp_info *child;
2409 struct thread_info *thread;
2410 int have_stop_pc = 0;
2411
2412 child = find_lwp_pid (pid_to_ptid (lwpid));
2413
2414 /* Check for stop events reported by a process we didn't already
2415 know about - anything not already in our LWP list.
2416
2417 If we're expecting to receive stopped processes after
2418 fork, vfork, and clone events, then we'll just add the
2419 new one to our list and go back to waiting for the event
2420 to be reported - the stopped process might be returned
2421 from waitpid before or after the event is.
2422
2423 But note the case of a non-leader thread exec'ing after the
2424 leader has exited, and gone from our lists (because
2425 check_zombie_leaders deleted it). The non-leader thread
2426 changes its tid to the tgid. */
2427
2428 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2429 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2430 {
2431 ptid_t child_ptid;
2432
2433 /* A multi-thread exec after we had seen the leader exiting. */
2434 if (debug_threads)
2435 {
2436 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2437 "after exec.\n", lwpid);
2438 }
2439
2440 child_ptid = ptid_build (lwpid, lwpid, 0);
2441 child = add_lwp (child_ptid);
2442 child->stopped = 1;
2443 current_thread = child->thread;
2444 }
2445
2446 /* If we didn't find a process, one of two things presumably happened:
2447 - A process we started and then detached from has exited. Ignore it.
2448 - A process we are controlling has forked and the new child's stop
2449 was reported to us by the kernel. Save its PID. */
2450 if (child == NULL && WIFSTOPPED (wstat))
2451 {
2452 add_to_pid_list (&stopped_pids, lwpid, wstat);
2453 return NULL;
2454 }
2455 else if (child == NULL)
2456 return NULL;
2457
2458 thread = get_lwp_thread (child);
2459
2460 child->stopped = 1;
2461
2462 child->last_status = wstat;
2463
2464 /* Check if the thread has exited. */
2465 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2466 {
2467 if (debug_threads)
2468 debug_printf ("LLFE: %d exited.\n", lwpid);
2469
2470 if (finish_step_over (child))
2471 {
2472 /* Unsuspend all other LWPs, and set them back running again. */
2473 unsuspend_all_lwps (child);
2474 }
2475
2476 /* If there is at least one more LWP, then the exit signal was
2477 not the end of the debugged application and should be
2478 ignored, unless GDB wants to hear about thread exits. */
2479 if (report_thread_events
2480 || last_thread_of_process_p (pid_of (thread)))
2481 {
2482 /* Events are serialized to the GDB core, and we can't
2483 report this one right now, so leave the status pending for
2484 the next time we're able to report it. */
2485 mark_lwp_dead (child, wstat);
2486 return child;
2487 }
2488 else
2489 {
2490 delete_lwp (child);
2491 return NULL;
2492 }
2493 }
2494
2495 gdb_assert (WIFSTOPPED (wstat));
2496
2497 if (WIFSTOPPED (wstat))
2498 {
2499 struct process_info *proc;
2500
2501 /* Architecture-specific setup after inferior is running. */
2502 proc = find_process_pid (pid_of (thread));
2503 if (proc->tdesc == NULL)
2504 {
2505 if (proc->attached)
2506 {
2507 /* This needs to happen after we have attached to the
2508 inferior and it is stopped for the first time, but
2509 before we access any inferior registers. */
2510 linux_arch_setup_thread (thread);
2511 }
2512 else
2513 {
2514 /* The process is started, but GDBserver will do
2515 architecture-specific setup after the program stops at
2516 the first instruction. */
2517 child->status_pending_p = 1;
2518 child->status_pending = wstat;
2519 return child;
2520 }
2521 }
2522 }
2523
2524 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2525 {
2526 struct process_info *proc = find_process_pid (pid_of (thread));
2527 int options = linux_low_ptrace_options (proc->attached);
2528
2529 linux_enable_event_reporting (lwpid, options);
2530 child->must_set_ptrace_flags = 0;
2531 }
2532
2533 /* Always update syscall_state, even if it will be filtered later. */
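  /* (A ptrace syscall stop does not itself indicate whether it is an
     entry or an exit; we simply alternate between the two states.) */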
2534 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2535 {
2536 child->syscall_state
2537 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2538 ? TARGET_WAITKIND_SYSCALL_RETURN
2539 : TARGET_WAITKIND_SYSCALL_ENTRY);
2540 }
2541 else
2542 {
2543 /* Almost all other ptrace-stops are known to be outside of system
2544 calls, with further exceptions in handle_extended_wait. */
2545 child->syscall_state = TARGET_WAITKIND_IGNORE;
2546 }
2547
2548 /* Be careful not to overwrite stop_pc until save_stop_reason is
2549 called. */
2550 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2551 && linux_is_extended_waitstatus (wstat))
2552 {
2553 child->stop_pc = get_pc (child);
2554 if (handle_extended_wait (&child, wstat))
2555 {
2556 /* The event has been handled, so just return without
2557 reporting it. */
2558 return NULL;
2559 }
2560 }
2561
2562 if (linux_wstatus_maybe_breakpoint (wstat))
2563 {
2564 if (save_stop_reason (child))
2565 have_stop_pc = 1;
2566 }
2567
2568 if (!have_stop_pc)
2569 child->stop_pc = get_pc (child);
2570
2571 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2572 && child->stop_expected)
2573 {
2574 if (debug_threads)
2575 debug_printf ("Expected stop.\n");
2576 child->stop_expected = 0;
2577
2578 if (thread->last_resume_kind == resume_stop)
2579 {
2580 /* We want to report the stop to the core. Treat the
2581 SIGSTOP as a normal event. */
2582 if (debug_threads)
2583 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2584 target_pid_to_str (ptid_of (thread)));
2585 }
2586 else if (stopping_threads != NOT_STOPPING_THREADS)
2587 {
2588 /* Stopping threads. We don't want this SIGSTOP to end up
2589 pending. */
2590 if (debug_threads)
2591 debug_printf ("LLW: SIGSTOP caught for %s "
2592 "while stopping threads.\n",
2593 target_pid_to_str (ptid_of (thread)));
2594 return NULL;
2595 }
2596 else
2597 {
2598 /* This is a delayed SIGSTOP. Filter out the event. */
2599 if (debug_threads)
2600 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2601 child->stepping ? "step" : "continue",
2602 target_pid_to_str (ptid_of (thread)));
2603
2604 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2605 return NULL;
2606 }
2607 }
2608
2609 child->status_pending_p = 1;
2610 child->status_pending = wstat;
2611 return child;
2612}
2613
2614/* Return true if THREAD is doing hardware single step. */
2615
2616static int
2617maybe_hw_step (struct thread_info *thread)
2618{
2619 if (can_hardware_single_step ())
2620 return 1;
2621 else
2622 {
2623 /* GDBserver must insert a reinsert breakpoint for software
2624 single step. */
2625 gdb_assert (has_reinsert_breakpoints (thread));
2626 return 0;
2627 }
2628}
2629
2630/* Resume LWPs that are currently stopped without any pending status
2631 to report, but are resumed from the core's perspective. */
2632
2633static void
2634resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2635{
2636 struct thread_info *thread = (struct thread_info *) entry;
2637 struct lwp_info *lp = get_thread_lwp (thread);
2638
2639 if (lp->stopped
2640 && !lp->suspended
2641 && !lp->status_pending_p
2642 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2643 {
2644 int step = thread->last_resume_kind == resume_step;
2645
2646 if (debug_threads)
2647 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2648 target_pid_to_str (ptid_of (thread)),
2649 paddress (lp->stop_pc),
2650 step);
2651
2652 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2653 }
2654}
2655
2656/* Wait for an event from child(ren) WAIT_PTID, and return any that
2657 match FILTER_PTID (leaving others pending). The PTIDs can be:
2658 minus_one_ptid, to specify any child; a pid PTID, specifying all
2659 lwps of a thread group; or a PTID representing a single lwp. Store
2660 the stop status through the status pointer WSTAT. OPTIONS is
2661 passed to the waitpid call. Return 0 if no event was found and
2662 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2663 were found. Return the PID of the stopped child otherwise. */
2664
2665static int
2666linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2667 int *wstatp, int options)
2668{
2669 struct thread_info *event_thread;
2670 struct lwp_info *event_child, *requested_child;
2671 sigset_t block_mask, prev_mask;
2672
2673 retry:
2674 /* N.B. event_thread points to the thread_info struct that contains
2675 event_child. Keep them in sync. */
2676 event_thread = NULL;
2677 event_child = NULL;
2678 requested_child = NULL;
2679
2680 /* Check for a lwp with a pending status. */
2681
2682 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2683 {
2684 event_thread = (struct thread_info *)
2685 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2686 if (event_thread != NULL)
2687 event_child = get_thread_lwp (event_thread);
2688 if (debug_threads && event_thread)
2689 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2690 }
2691 else if (!ptid_equal (filter_ptid, null_ptid))
2692 {
2693 requested_child = find_lwp_pid (filter_ptid);
2694
2695 if (stopping_threads == NOT_STOPPING_THREADS
2696 && requested_child->status_pending_p
2697 && requested_child->collecting_fast_tracepoint)
2698 {
2699 enqueue_one_deferred_signal (requested_child,
2700 &requested_child->status_pending);
2701 requested_child->status_pending_p = 0;
2702 requested_child->status_pending = 0;
2703 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2704 }
2705
2706 if (requested_child->suspended
2707 && requested_child->status_pending_p)
2708 {
2709 internal_error (__FILE__, __LINE__,
2710 "requesting an event out of a"
2711 " suspended child?");
2712 }
2713
2714 if (requested_child->status_pending_p)
2715 {
2716 event_child = requested_child;
2717 event_thread = get_lwp_thread (event_child);
2718 }
2719 }
2720
2721 if (event_child != NULL)
2722 {
2723 if (debug_threads)
2724 debug_printf ("Got an event from pending child %ld (%04x)\n",
2725 lwpid_of (event_thread), event_child->status_pending);
2726 *wstatp = event_child->status_pending;
2727 event_child->status_pending_p = 0;
2728 event_child->status_pending = 0;
2729 current_thread = event_thread;
2730 return lwpid_of (event_thread);
2731 }
2732
2733 /* But if we don't find a pending event, we'll have to wait.
2734
2735 We only enter this loop if no process has a pending wait status.
2736 Thus any action taken in response to a wait status inside this
2737 loop is responding as soon as we detect the status, not after any
2738 pending events. */
2739
2740 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2741 all signals while here. */
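  /* (Blocking the signals first and atomically unblocking them only
     inside sigsuspend closes the window in which a SIGCHLD delivered
     between the waitpid check and the suspend would be missed.) */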
2742 sigfillset (&block_mask);
2743 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2744
2745 /* Always pull all events out of the kernel. We'll randomly select
2746 an event LWP out of all that have events, to prevent
2747 starvation. */
2748 while (event_child == NULL)
2749 {
2750 pid_t ret = 0;
2751
2752 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2753 quirks:
2754
2755 - If the thread group leader exits while other threads in the
2756 thread group still exist, waitpid(TGID, ...) hangs. That
2757 waitpid won't return an exit status until the other threads
2758 in the group are reaped.
2759
2760 - When a non-leader thread execs, that thread just vanishes
2761 without reporting an exit (so we'd hang if we waited for it
2762 explicitly in that case). The exec event is reported to
2763 the TGID pid. */
2764 errno = 0;
2765 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2766
2767 if (debug_threads)
2768 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2769 ret, errno ? strerror (errno) : "ERRNO-OK");
2770
2771 if (ret > 0)
2772 {
2773 if (debug_threads)
2774 {
2775 debug_printf ("LLW: waitpid %ld received %s\n",
2776 (long) ret, status_to_str (*wstatp));
2777 }
2778
2779 /* Filter all events. IOW, leave all events pending. We'll
2780 randomly select an event LWP out of all that have events
2781 below. */
2782 linux_low_filter_event (ret, *wstatp);
2783 /* Retry until nothing comes out of waitpid. A single
2784 SIGCHLD can indicate more than one child stopped. */
2785 continue;
2786 }
2787
2788 /* Now that we've pulled all events out of the kernel, resume
2789 LWPs that don't have an interesting event to report. */
2790 if (stopping_threads == NOT_STOPPING_THREADS)
2791 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2792
2793 /* ... and find an LWP with a status to report to the core, if
2794 any. */
2795 event_thread = (struct thread_info *)
2796 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2797 if (event_thread != NULL)
2798 {
2799 event_child = get_thread_lwp (event_thread);
2800 *wstatp = event_child->status_pending;
2801 event_child->status_pending_p = 0;
2802 event_child->status_pending = 0;
2803 break;
2804 }
2805
2806 /* Check for zombie thread group leaders. Those can't be reaped
2807 until all other threads in the thread group are. */
2808 check_zombie_leaders ();
2809
2810 /* If there are no resumed children left in the set of LWPs we
2811 want to wait for, bail. We can't just block in
2812 waitpid/sigsuspend, because lwps might have been left stopped
2813 in trace-stop state, and we'd be stuck forever waiting for
2814 their status to change (which would only happen if we resumed
2815 them). Even if WNOHANG is set, this return code is preferred
2816 over 0 (below), as it is more detailed. */
2817 if ((find_inferior (&all_threads,
2818 not_stopped_callback,
2819 &wait_ptid) == NULL))
2820 {
2821 if (debug_threads)
2822 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2823 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2824 return -1;
2825 }
2826
2827 /* No interesting event to report to the caller. */
2828 if ((options & WNOHANG))
2829 {
2830 if (debug_threads)
2831 debug_printf ("WNOHANG set, no event found\n");
2832
2833 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2834 return 0;
2835 }
2836
2837 /* Block until we get an event reported with SIGCHLD. */
2838 if (debug_threads)
2839 debug_printf ("sigsuspend'ing\n");
2840
2841 sigsuspend (&prev_mask);
2842 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2843 goto retry;
2844 }
2845
2846 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2847
2848 current_thread = event_thread;
2849
2850 return lwpid_of (event_thread);
2851}
2852
2853/* Wait for an event from child(ren) PTID. PTIDs can be:
2854 minus_one_ptid, to specify any child; a pid PTID, specifying all
2855 lwps of a thread group; or a PTID representing a single lwp. Store
2856 the stop status through the status pointer WSTAT. OPTIONS is
2857 passed to the waitpid call. Return 0 if no event was found and
2858 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2859 were found. Return the PID of the stopped child otherwise. */
2860
2861static int
2862linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2863{
2864 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2865}
2866
2867/* Count the LWPs that have had events. */
2868
2869static int
2870count_events_callback (struct inferior_list_entry *entry, void *data)
2871{
2872 struct thread_info *thread = (struct thread_info *) entry;
2873 struct lwp_info *lp = get_thread_lwp (thread);
2874 int *count = (int *) data;
2875
2876 gdb_assert (count != NULL);
2877
2878 /* Count only resumed LWPs that have an event pending. */
2879 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2880 && lp->status_pending_p)
2881 (*count)++;
2882
2883 return 0;
2884}
2885
2886/* Select the LWP (if any) that is currently being single-stepped. */
2887
2888static int
2889select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2890{
2891 struct thread_info *thread = (struct thread_info *) entry;
2892 struct lwp_info *lp = get_thread_lwp (thread);
2893
2894 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2895 && thread->last_resume_kind == resume_step
2896 && lp->status_pending_p)
2897 return 1;
2898 else
2899 return 0;
2900}
2901
2902/* Select the Nth LWP that has had an event. */
2903
2904static int
2905select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2906{
2907 struct thread_info *thread = (struct thread_info *) entry;
2908 struct lwp_info *lp = get_thread_lwp (thread);
2909 int *selector = (int *) data;
2910
2911 gdb_assert (selector != NULL);
2912
2913 /* Select only resumed LWPs that have an event pending. */
2914 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2915 && lp->status_pending_p)
2916 if ((*selector)-- == 0)
2917 return 1;
2918
2919 return 0;
2920}
2921
2922/* Select one LWP out of those that have events pending. */
2923
2924static void
2925select_event_lwp (struct lwp_info **orig_lp)
2926{
2927 int num_events = 0;
2928 int random_selector;
2929 struct thread_info *event_thread = NULL;
2930
2931 /* In all-stop, give preference to the LWP that is being
2932 single-stepped. There will be at most one, and it's the LWP that
2933 the core is most interested in. If we didn't do this, then we'd
2934 have to handle pending step SIGTRAPs somehow in case the core
2935 later continues the previously-stepped thread, otherwise we'd
2936 report the pending SIGTRAP, and the core, not having stepped the
2937 thread, wouldn't understand what the trap was for, and therefore
2938 would report it to the user as a random signal. */
2939 if (!non_stop)
2940 {
2941 event_thread
2942 = (struct thread_info *) find_inferior (&all_threads,
2943 select_singlestep_lwp_callback,
2944 NULL);
2945 if (event_thread != NULL)
2946 {
2947 if (debug_threads)
2948 debug_printf ("SEL: Select single-step %s\n",
2949 target_pid_to_str (ptid_of (event_thread)));
2950 }
2951 }
2952 if (event_thread == NULL)
2953 {
2954 /* No single-stepping LWP. Select one at random, out of those
2955 which have had events. */
2956
2957 /* First see how many events we have. */
2958 find_inferior (&all_threads, count_events_callback, &num_events);
2959 gdb_assert (num_events > 0);
2960
2961 /* Now randomly pick a LWP out of those that have had
2962 events. */
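      /* rand () / (RAND_MAX + 1.0) is uniformly distributed over
         [0, 1), so scaling by num_events yields an integer uniformly
         distributed over 0 .. num_events - 1. */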
2963 random_selector = (int)
2964 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2965
2966 if (debug_threads && num_events > 1)
2967 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2968 num_events, random_selector);
2969
2970 event_thread
2971 = (struct thread_info *) find_inferior (&all_threads,
2972 select_event_lwp_callback,
2973 &random_selector);
2974 }
2975
2976 if (event_thread != NULL)
2977 {
2978 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2979
2980 /* Switch the event LWP. */
2981 *orig_lp = event_lp;
2982 }
2983}
2984
2985/* Decrement the suspend count of an LWP. */
2986
2987static int
2988unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2989{
2990 struct thread_info *thread = (struct thread_info *) entry;
2991 struct lwp_info *lwp = get_thread_lwp (thread);
2992
2993 /* Ignore EXCEPT. */
2994 if (lwp == except)
2995 return 0;
2996
2997 lwp_suspended_decr (lwp);
2998 return 0;
2999}
3000
3001/* Decrement the suspend count of all LWPs, except EXCEPT, if
3002 non-NULL. */
3003
3004static void
3005unsuspend_all_lwps (struct lwp_info *except)
3006{
3007 find_inferior (&all_threads, unsuspend_one_lwp, except);
3008}
3009
3010static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3011static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3012 void *data);
3013static int lwp_running (struct inferior_list_entry *entry, void *data);
3014static ptid_t linux_wait_1 (ptid_t ptid,
3015 struct target_waitstatus *ourstatus,
3016 int target_options);
3017
3018/* Stabilize threads (move out of jump pads).
3019
3020 If a thread is midway collecting a fast tracepoint, we need to
3021 finish the collection and move it out of the jump pad before
3022 reporting the signal.
3023
3024 This avoids recursion while collecting (when a signal arrives
3025 midway, and the signal handler itself collects), which would trash
3026 the trace buffer. In case the user set a breakpoint in a signal
3027 handler, this avoids the backtrace showing the jump pad, etc..
3028 Most importantly, there are certain things we can't do safely if
3029 threads are stopped in a jump pad (or in its callees). For
3030 example:
3031
3032 - starting a new trace run. A thread still collecting the
3033 previous run could trash the trace buffer when resumed. The trace
3034 buffer control structures would have been reset but the thread had
3035 no way to tell. The thread could even be midway through memcpy'ing to the
3036 buffer, which would mean that when resumed, it would clobber the
3037 trace buffer that had been set for a new run.
3038
3039 - we can't rewrite/reuse the jump pads for new tracepoints
3040 safely. Say you do tstart while a thread is stopped midway through
3041 collecting. When the thread is later resumed, it finishes the
3042 collection, and returns to the jump pad, to execute the original
3043 instruction that was under the tracepoint jump at the time the
3044 older run had been started. If the jump pad had been rewritten
3045 since for something else in the new run, the thread would now
3046 execute the wrong / random instructions. */
3047
3048static void
3049linux_stabilize_threads (void)
3050{
3051 struct thread_info *saved_thread;
3052 struct thread_info *thread_stuck;
3053
3054 thread_stuck
3055 = (struct thread_info *) find_inferior (&all_threads,
3056 stuck_in_jump_pad_callback,
3057 NULL);
3058 if (thread_stuck != NULL)
3059 {
3060 if (debug_threads)
3061 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3062 lwpid_of (thread_stuck));
3063 return;
3064 }
3065
3066 saved_thread = current_thread;
3067
3068 stabilizing_threads = 1;
3069
3070 /* Kick 'em all. */
3071 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3072
3073 /* Loop until all are stopped out of the jump pads. */
3074 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3075 {
3076 struct target_waitstatus ourstatus;
3077 struct lwp_info *lwp;
3078 int wstat;
3079
3080 /* Note that we go through the full wait event loop. While
3081 moving threads out of the jump pad, we need to be able to step
3082 over internal breakpoints and such. */
3083 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3084
3085 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3086 {
3087 lwp = get_thread_lwp (current_thread);
3088
3089 /* Lock it. */
3090 lwp_suspended_inc (lwp);
3091
3092 if (ourstatus.value.sig != GDB_SIGNAL_0
3093 || current_thread->last_resume_kind == resume_stop)
3094 {
3095 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3096 enqueue_one_deferred_signal (lwp, &wstat);
3097 }
3098 }
3099 }
3100
3101 unsuspend_all_lwps (NULL);
3102
3103 stabilizing_threads = 0;
3104
3105 current_thread = saved_thread;
3106
3107 if (debug_threads)
3108 {
3109 thread_stuck
3110 = (struct thread_info *) find_inferior (&all_threads,
3111 stuck_in_jump_pad_callback,
3112 NULL);
3113 if (thread_stuck != NULL)
3114 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3115 lwpid_of (thread_stuck));
3116 }
3117}
3118
3119/* Convenience function that is called when the kernel reports an
3120 event that is not passed out to GDB. */
3121
3122static ptid_t
3123ignore_event (struct target_waitstatus *ourstatus)
3124{
3125 /* If we got an event, there may still be others, as a single
3126 SIGCHLD can indicate more than one child stopped. This forces
3127 another target_wait call. */
3128 async_file_mark ();
3129
3130 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3131 return null_ptid;
3132}
3133
3134/* Convenience function that is called when the kernel reports an exit
3135 event. This decides whether to report the event to GDB as a
3136 process exit event, a thread exit event, or to suppress the
3137 event. */
3138
3139static ptid_t
3140filter_exit_event (struct lwp_info *event_child,
3141 struct target_waitstatus *ourstatus)
3142{
3143 struct thread_info *thread = get_lwp_thread (event_child);
3144 ptid_t ptid = ptid_of (thread);
3145
3146 if (!last_thread_of_process_p (pid_of (thread)))
3147 {
3148 if (report_thread_events)
3149 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3150 else
3151 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3152
3153 delete_lwp (event_child);
3154 }
3155 return ptid;
3156}
3157
3158/* Returns 1 if GDB is interested in any of EVENT_CHILD's syscalls. */
3159
3160static int
3161gdb_catching_syscalls_p (struct lwp_info *event_child)
3162{
3163 struct thread_info *thread = get_lwp_thread (event_child);
3164 struct process_info *proc = get_thread_process (thread);
3165
3166 return !VEC_empty (int, proc->syscalls_to_catch);
3167}
3168
3169/* Returns 1 if GDB is interested in EVENT_CHILD's current syscall.
3170 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3171
3172static int
3173gdb_catch_this_syscall_p (struct lwp_info *event_child)
3174{
3175 int i, iter;
3176 int sysno;
3177 struct thread_info *thread = get_lwp_thread (event_child);
3178 struct process_info *proc = get_thread_process (thread);
3179
3180 if (VEC_empty (int, proc->syscalls_to_catch))
3181 return 0;
3182
3183 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3184 return 1;
3185
3186 get_syscall_trapinfo (event_child, &sysno);
3187 for (i = 0;
3188 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3189 i++)
3190 if (iter == sysno)
3191 return 1;
3192
3193 return 0;
3194}
3195
3196/* Wait for an event from child(ren) PTID; store the status through OURSTATUS and return the ptid of the event's LWP. */
3197
3198static ptid_t
3199linux_wait_1 (ptid_t ptid,
3200 struct target_waitstatus *ourstatus, int target_options)
3201{
3202 int w;
3203 struct lwp_info *event_child;
3204 int options;
3205 int pid;
3206 int step_over_finished;
3207 int bp_explains_trap;
3208 int maybe_internal_trap;
3209 int report_to_gdb;
3210 int trace_event;
3211 int in_step_range;
3212 int any_resumed;
3213
3214 if (debug_threads)
3215 {
3216 debug_enter ();
3217 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3218 }
3219
3220 /* Translate generic target options into linux options. */
3221 options = __WALL;
3222 if (target_options & TARGET_WNOHANG)
3223 options |= WNOHANG;
3224
3225 bp_explains_trap = 0;
3226 trace_event = 0;
3227 in_step_range = 0;
3228 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3229
3230 /* Find a resumed LWP, if any. */
3231 if (find_inferior (&all_threads,
3232 status_pending_p_callback,
3233 &minus_one_ptid) != NULL)
3234 any_resumed = 1;
3235 else if ((find_inferior (&all_threads,
3236 not_stopped_callback,
3237 &minus_one_ptid) != NULL))
3238 any_resumed = 1;
3239 else
3240 any_resumed = 0;
3241
3242 if (ptid_equal (step_over_bkpt, null_ptid))
3243 pid = linux_wait_for_event (ptid, &w, options);
3244 else
3245 {
3246 if (debug_threads)
3247 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3248 target_pid_to_str (step_over_bkpt));
3249 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3250 }
3251
3252 if (pid == 0 || (pid == -1 && !any_resumed))
3253 {
3254 gdb_assert (target_options & TARGET_WNOHANG);
3255
3256 if (debug_threads)
3257 {
3258 debug_printf ("linux_wait_1 ret = null_ptid, "
3259 "TARGET_WAITKIND_IGNORE\n");
3260 debug_exit ();
3261 }
3262
3263 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3264 return null_ptid;
3265 }
3266 else if (pid == -1)
3267 {
3268 if (debug_threads)
3269 {
3270 debug_printf ("linux_wait_1 ret = null_ptid, "
3271 "TARGET_WAITKIND_NO_RESUMED\n");
3272 debug_exit ();
3273 }
3274
3275 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3276 return null_ptid;
3277 }
3278
3279 event_child = get_thread_lwp (current_thread);
3280
3281 /* linux_wait_for_event only returns an exit status for the last
3282 child of a process. Report it. */
3283 if (WIFEXITED (w) || WIFSIGNALED (w))
3284 {
3285 if (WIFEXITED (w))
3286 {
3287 ourstatus->kind = TARGET_WAITKIND_EXITED;
3288 ourstatus->value.integer = WEXITSTATUS (w);
3289
3290 if (debug_threads)
3291 {
3292 debug_printf ("linux_wait_1 ret = %s, exited with "
3293 "retcode %d\n",
3294 target_pid_to_str (ptid_of (current_thread)),
3295 WEXITSTATUS (w));
3296 debug_exit ();
3297 }
3298 }
3299 else
3300 {
3301 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3302 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3303
3304 if (debug_threads)
3305 {
3306 debug_printf ("linux_wait_1 ret = %s, terminated with "
3307 "signal %d\n",
3308 target_pid_to_str (ptid_of (current_thread)),
3309 WTERMSIG (w));
3310 debug_exit ();
3311 }
3312 }
3313
3314 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3315 return filter_exit_event (event_child, ourstatus);
3316
3317 return ptid_of (current_thread);
3318 }
3319
3320 /* If a step-over executed a breakpoint instruction: in the case of
3321 hardware single step, it means a gdb/gdbserver breakpoint had been
3322 planted on top of a permanent breakpoint; in the case of software
3323 single step, it may just mean that gdbserver hit the reinsert breakpoint.
3324 The PC has been adjusted by save_stop_reason to point at
3325 the breakpoint address.
3326 So, in the case of hardware single step, advance the PC manually
3327 past the breakpoint, and in the case of software single step, advance only
3328 if it's not the reinsert breakpoint we are hitting.
3329 This prevents the program from trapping on a permanent breakpoint
3330 forever. */
3331 if (!ptid_equal (step_over_bkpt, null_ptid)
3332 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3333 && (event_child->stepping
3334 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3335 {
3336 int increment_pc = 0;
3337 int breakpoint_kind = 0;
3338 CORE_ADDR stop_pc = event_child->stop_pc;
3339
3340 breakpoint_kind =
3341 the_target->breakpoint_kind_from_current_state (&stop_pc);
3342 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3343
3344 if (debug_threads)
3345 {
3346 debug_printf ("step-over for %s executed software breakpoint\n",
3347 target_pid_to_str (ptid_of (current_thread)));
3348 }
3349
3350 if (increment_pc != 0)
3351 {
3352 struct regcache *regcache
3353 = get_thread_regcache (current_thread, 1);
3354
3355 event_child->stop_pc += increment_pc;
3356 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3357
3358 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3359 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3360 }
3361 }
3362
3363 /* If this event was not handled before, and is not a SIGTRAP, we
3364 report it. SIGILL and SIGSEGV are also treated as traps in case
3365 a breakpoint is inserted at the current PC. If this target does
3366 not support internal breakpoints at all, we also report the
3367 SIGTRAP without further processing; it's of no concern to us. */
3368 maybe_internal_trap
3369 = (supports_breakpoints ()
3370 && (WSTOPSIG (w) == SIGTRAP
3371 || ((WSTOPSIG (w) == SIGILL
3372 || WSTOPSIG (w) == SIGSEGV)
3373 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3374
3375 if (maybe_internal_trap)
3376 {
3377 /* Handle anything that requires bookkeeping before deciding to
3378 report the event or continue waiting. */
3379
3380 /* First check if we can explain the SIGTRAP with an internal
3381 breakpoint, or if we should possibly report the event to GDB.
3382 Do this before anything that may remove or insert a
3383 breakpoint. */
3384 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3385
3386 /* We have a SIGTRAP, possibly a step-over dance has just
3387 finished. If so, tweak the state machine accordingly,
3388 reinsert breakpoints and delete any reinsert (software
3389 single-step) breakpoints. */
3390 step_over_finished = finish_step_over (event_child);
3391
3392 /* Now invoke the callbacks of any internal breakpoints there. */
3393 check_breakpoints (event_child->stop_pc);
3394
3395 /* Handle tracepoint data collecting. This may overflow the
3396 trace buffer, and cause a tracing stop, removing
3397 breakpoints. */
3398 trace_event = handle_tracepoints (event_child);
3399
3400 if (bp_explains_trap)
3401 {
3402 if (debug_threads)
3403 debug_printf ("Hit a gdbserver breakpoint.\n");
3404 }
3405 }
3406 else
3407 {
3408 /* We have some other signal, possibly a step-over dance was in
3409 progress, and it should be cancelled too. */
3410 step_over_finished = finish_step_over (event_child);
3411 }
3412
3413 /* We have all the data we need. Either report the event to GDB, or
3414 resume threads and keep waiting for more. */
3415
3416 /* If we're collecting a fast tracepoint, finish the collection and
3417 move out of the jump pad before delivering a signal. See
3418 linux_stabilize_threads. */
3419
3420 if (WIFSTOPPED (w)
3421 && WSTOPSIG (w) != SIGTRAP
3422 && supports_fast_tracepoints ()
3423 && agent_loaded_p ())
3424 {
3425 if (debug_threads)
3426 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3427 "to defer or adjust it.\n",
3428 WSTOPSIG (w), lwpid_of (current_thread));
3429
3430 /* Allow debugging the jump pad itself. */
3431 if (current_thread->last_resume_kind != resume_step
3432 && maybe_move_out_of_jump_pad (event_child, &w))
3433 {
3434 enqueue_one_deferred_signal (event_child, &w);
3435
3436 if (debug_threads)
3437 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3438 WSTOPSIG (w), lwpid_of (current_thread));
3439
3440 linux_resume_one_lwp (event_child, 0, 0, NULL);
3441
3442 return ignore_event (ourstatus);
3443 }
3444 }
3445
3446 if (event_child->collecting_fast_tracepoint)
3447 {
3448 if (debug_threads)
3449 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3450 "Check if we're already there.\n",
3451 lwpid_of (current_thread),
3452 event_child->collecting_fast_tracepoint);
3453
3454 trace_event = 1;
3455
3456 event_child->collecting_fast_tracepoint
3457 = linux_fast_tracepoint_collecting (event_child, NULL);
3458
3459 if (event_child->collecting_fast_tracepoint != 1)
3460 {
3461 /* No longer need this breakpoint. */
3462 if (event_child->exit_jump_pad_bkpt != NULL)
3463 {
3464 if (debug_threads)
3465 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3466 "stopping all threads momentarily.\n");
3467
3468 /* Other running threads could hit this breakpoint.
3469 We don't handle moribund locations like GDB does;
3470 instead we always pause all threads when removing
3471 breakpoints, so that any step-over or
3472 decr_pc_after_break adjustment is always taken
3473 care of while the breakpoint is still
3474 inserted. */
3475 stop_all_lwps (1, event_child);
3476
3477 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3478 event_child->exit_jump_pad_bkpt = NULL;
3479
3480 unstop_all_lwps (1, event_child);
3481
3482 gdb_assert (event_child->suspended >= 0);
3483 }
3484 }
3485
3486 if (event_child->collecting_fast_tracepoint == 0)
3487 {
3488 if (debug_threads)
3489 debug_printf ("fast tracepoint finished "
3490 "collecting successfully.\n");
3491
3492 /* We may have a deferred signal to report. */
3493 if (dequeue_one_deferred_signal (event_child, &w))
3494 {
3495 if (debug_threads)
3496 debug_printf ("dequeued one signal.\n");
3497 }
3498 else
3499 {
3500 if (debug_threads)
3501 debug_printf ("no deferred signals.\n");
3502
3503 if (stabilizing_threads)
3504 {
3505 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3506 ourstatus->value.sig = GDB_SIGNAL_0;
3507
3508 if (debug_threads)
3509 {
3510 debug_printf ("linux_wait_1 ret = %s, stopped "
3511 "while stabilizing threads\n",
3512 target_pid_to_str (ptid_of (current_thread)));
3513 debug_exit ();
3514 }
3515
3516 return ptid_of (current_thread);
3517 }
3518 }
3519 }
3520 }
3521
3522 /* Check whether GDB would be interested in this event. */
3523
3524 /* Check if GDB is interested in this syscall. */
3525 if (WIFSTOPPED (w)
3526 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3527 && !gdb_catch_this_syscall_p (event_child))
3528 {
3529 if (debug_threads)
3530 {
3531 debug_printf ("Ignored syscall for LWP %ld.\n",
3532 lwpid_of (current_thread));
3533 }
3534
3535 linux_resume_one_lwp (event_child, event_child->stepping,
3536 0, NULL);
3537 return ignore_event (ourstatus);
3538 }
3539
3540 /* If GDB is not interested in this signal, don't stop other
3541 threads, and don't report it to GDB. Just resume the inferior
3542 right away. We do this for threading-related signals as well as
3543 any that GDB specifically requested we ignore. But never ignore
3544 SIGSTOP if we sent it ourselves, and do not ignore signals when
3545 stepping - they may require special handling to skip the signal
3546 handler. Also never ignore signals that could be caused by a
3547 breakpoint. */
3548 if (WIFSTOPPED (w)
3549 && current_thread->last_resume_kind != resume_step
3550 && (
3551#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3552 (current_process ()->priv->thread_db != NULL
3553 && (WSTOPSIG (w) == __SIGRTMIN
3554 || WSTOPSIG (w) == __SIGRTMIN + 1))
3555 ||
3556#endif
3557 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3558 && !(WSTOPSIG (w) == SIGSTOP
3559 && current_thread->last_resume_kind == resume_stop)
3560 && !linux_wstatus_maybe_breakpoint (w))))
3561 {
3562 siginfo_t info, *info_p;
3563
3564 if (debug_threads)
3565 debug_printf ("Ignored signal %d for LWP %ld.\n",
3566 WSTOPSIG (w), lwpid_of (current_thread));
3567
3568 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3569 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3570 info_p = &info;
3571 else
3572 info_p = NULL;
3573
3574 if (step_over_finished)
3575 {
3576 /* We cancelled this thread's step-over above. We still
3577 need to unsuspend all other LWPs, and set them back
3578 running again while the signal handler runs. */
3579 unsuspend_all_lwps (event_child);
3580
3581 /* Enqueue the pending signal info so that proceed_all_lwps
3582 doesn't lose it. */
3583 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3584
3585 proceed_all_lwps ();
3586 }
3587 else
3588 {
3589 linux_resume_one_lwp (event_child, event_child->stepping,
3590 WSTOPSIG (w), info_p);
3591 }
3592 return ignore_event (ourstatus);
3593 }
3594
3595 /* Note that all addresses are always "out of the step range" when
3596 there's no range to begin with. */
3597 in_step_range = lwp_in_step_range (event_child);
3598
3599 /* If GDB wanted this thread to single step, and the thread is out
3600 of the step range, we always want to report the SIGTRAP, and let
3601 GDB handle it. Watchpoints should always be reported. So should
3602 signals we can't explain. A SIGTRAP we can't explain could be a
3603 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3604 do, we'd be able to handle GDB breakpoints on top of internal
3605 breakpoints, by handling the internal breakpoint and still
3606 reporting the event to GDB. If we don't, we're out of luck; GDB
3607 won't see the breakpoint hit. If we see a single-step event but
3608 the thread should be continuing, don't pass the trap to gdb.
3609 That indicates that we had previously finished a single-step but
3610 left the single-step pending -- see
3611 complete_ongoing_step_over. */
3612 report_to_gdb = (!maybe_internal_trap
3613 || (current_thread->last_resume_kind == resume_step
3614 && !in_step_range)
3615 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3616 || (!in_step_range
3617 && !bp_explains_trap
3618 && !trace_event
3619 && !step_over_finished
3620 && !(current_thread->last_resume_kind == resume_continue
3621 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3622 || (gdb_breakpoint_here (event_child->stop_pc)
3623 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3624 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3625 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3626
3627 run_breakpoint_commands (event_child->stop_pc);
3628
3629 /* We found no reason GDB would want us to stop. We either hit one
3630 of our own breakpoints, or finished an internal step GDB
3631 shouldn't know about. */
3632 if (!report_to_gdb)
3633 {
3634 if (debug_threads)
3635 {
3636 if (bp_explains_trap)
3637 debug_printf ("Hit a gdbserver breakpoint.\n");
3638 if (step_over_finished)
3639 debug_printf ("Step-over finished.\n");
3640 if (trace_event)
3641 debug_printf ("Tracepoint event.\n");
3642 if (lwp_in_step_range (event_child))
3643 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3644 paddress (event_child->stop_pc),
3645 paddress (event_child->step_range_start),
3646 paddress (event_child->step_range_end));
3647 }
3648
3649 /* We're not reporting this breakpoint to GDB, so apply the
3650 decr_pc_after_break adjustment to the inferior's regcache
3651 ourselves. */
3652
3653 if (the_low_target.set_pc != NULL)
3654 {
3655 struct regcache *regcache
3656 = get_thread_regcache (current_thread, 1);
3657 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3658 }
3659
3660 /* We may have finished stepping over a breakpoint. If so,
3661 we've stopped and suspended all LWPs momentarily except the
3662 stepping one. This is where we resume them all again. We're
3663 going to keep waiting, so use proceed, which handles stepping
3664 over the next breakpoint. */
3665 if (debug_threads)
3666 debug_printf ("proceeding all threads.\n");
3667
3668 if (step_over_finished)
3669 unsuspend_all_lwps (event_child);
3670
3671 proceed_all_lwps ();
3672 return ignore_event (ourstatus);
3673 }
3674
3675 if (debug_threads)
3676 {
3677 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3678 {
3679 char *str;
3680
3681 str = target_waitstatus_to_string (&event_child->waitstatus);
3682 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3683 lwpid_of (get_lwp_thread (event_child)), str);
3684 xfree (str);
3685 }
3686 if (current_thread->last_resume_kind == resume_step)
3687 {
3688 if (event_child->step_range_start == event_child->step_range_end)
3689 debug_printf ("GDB wanted to single-step, reporting event.\n");
3690 else if (!lwp_in_step_range (event_child))
3691 debug_printf ("Out of step range, reporting event.\n");
3692 }
3693 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3694 debug_printf ("Stopped by watchpoint.\n");
3695 else if (gdb_breakpoint_here (event_child->stop_pc))
3696 debug_printf ("Stopped by GDB breakpoint.\n");
3698 debug_printf ("Hit a non-gdbserver trap event.\n");
3699 }
3700
3701 /* Alright, we're going to report a stop. */
3702
3703 if (!stabilizing_threads)
3704 {
3705 /* In all-stop, stop all threads. */
3706 if (!non_stop)
3707 stop_all_lwps (0, NULL);
3708
3709 /* If we're not waiting for a specific LWP, choose an event LWP
3710 from among those that have had events. Giving equal priority
3711 to all LWPs that have had events helps prevent
3712 starvation. */
3713 if (ptid_equal (ptid, minus_one_ptid))
3714 {
3715 event_child->status_pending_p = 1;
3716 event_child->status_pending = w;
3717
3718 select_event_lwp (&event_child);
3719
3720 /* current_thread and event_child must stay in sync. */
3721 current_thread = get_lwp_thread (event_child);
3722
3723 event_child->status_pending_p = 0;
3724 w = event_child->status_pending;
3725 }
3726
3727 if (step_over_finished)
3728 {
3729 if (!non_stop)
3730 {
3731 /* If we were doing a step-over, all other threads but
3732 the stepping one had been paused in start_step_over,
3733 with their suspend counts incremented. We don't want
3734 to do a full unstop/unpause, because we're in
3735 all-stop mode (so we want threads stopped), but we
3736 still need to unsuspend the other threads, to
3737 decrement their `suspended' count back. */
3738 unsuspend_all_lwps (event_child);
3739 }
3740 else
3741 {
3742 /* If we just finished a step-over, then all threads had
3743 been momentarily paused. In all-stop, that's fine,
3744 we want threads stopped by now anyway. In non-stop,
3745 we need to re-resume threads that GDB wanted to be
3746 running. */
3747 unstop_all_lwps (1, event_child);
3748 }
3749 }
3750
3751 /* Stabilize threads (move out of jump pads). */
3752 if (!non_stop)
3753 stabilize_threads ();
3754 }
3755 else
3756 {
3757 /* If we just finished a step-over, then all threads had been
3758 momentarily paused. In all-stop, that's fine, we want
3759 threads stopped by now anyway. In non-stop, we need to
3760 re-resume threads that GDB wanted to be running. */
3761 if (step_over_finished)
3762 unstop_all_lwps (1, event_child);
3763 }
3764
3765 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3766 {
3767 /* If the reported event is an exit, fork, vfork or exec, let
3768 GDB know. */
3769 *ourstatus = event_child->waitstatus;
3770 /* Clear the event lwp's waitstatus since we handled it already. */
3771 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3772 }
3773 else
3774 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3775
3776 /* Now that we've selected our final event LWP, un-adjust its PC if
3777 it was a software breakpoint, and the client doesn't know we can
3778 adjust the breakpoint ourselves. */
3779 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3780 && !swbreak_feature)
3781 {
3782 int decr_pc = the_low_target.decr_pc_after_break;
3783
3784 if (decr_pc != 0)
3785 {
3786 struct regcache *regcache
3787 = get_thread_regcache (current_thread, 1);
3788 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3789 }
3790 }
3791
3792 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3793 {
3794 get_syscall_trapinfo (event_child,
3795 &ourstatus->value.syscall_number);
3796 ourstatus->kind = event_child->syscall_state;
3797 }
3798 else if (current_thread->last_resume_kind == resume_stop
3799 && WSTOPSIG (w) == SIGSTOP)
3800 {
3801 /* This thread was requested to stop by GDB with vCont;t, and
3802 it stopped cleanly, so report it as stopped with SIG0. The
3803 use of SIGSTOP is an implementation detail. */
3804 ourstatus->value.sig = GDB_SIGNAL_0;
3805 }
3806 else if (current_thread->last_resume_kind == resume_stop
3807 && WSTOPSIG (w) != SIGSTOP)
3808 {
3809 /* This thread was requested to stop by GDB with vCont;t, but
3810 it stopped for some other reason. */
3811 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3812 }
3813 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3814 {
3815 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3816 }
3817
3818 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3819
3820 if (debug_threads)
3821 {
3822 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3823 target_pid_to_str (ptid_of (current_thread)),
3824 ourstatus->kind, ourstatus->value.sig);
3825 debug_exit ();
3826 }
3827
3828 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3829 return filter_exit_event (event_child, ourstatus);
3830
3831 return ptid_of (current_thread);
3832}
3833
3834/* Get rid of any pending event in the pipe. */
3835static void
3836async_file_flush (void)
3837{
3838 int ret;
3839 char buf;
3840
3841 do
3842 ret = read (linux_event_pipe[0], &buf, 1);
3843 while (ret >= 0 || (ret == -1 && errno == EINTR));
3844}
3845
3846/* Put something in the pipe, so the event loop wakes up. */
3847static void
3848async_file_mark (void)
3849{
3850 int ret;
3851
3852 async_file_flush ();
3853
3854 do
3855 ret = write (linux_event_pipe[1], "+", 1);
3856 while (ret == 0 || (ret == -1 && errno == EINTR));
3857
3858 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3859 be awakened anyway. */
3860}
3861
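/* Wait for an event to report to GDB. Wraps linux_wait_1: in
   synchronous mode (no TARGET_WNOHANG), loop until there is a
   reportable event; in async mode, re-mark the event pipe afterwards,
   since a single SIGCHLD can cover several child stops. */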
3862static ptid_t
3863linux_wait (ptid_t ptid,
3864 struct target_waitstatus *ourstatus, int target_options)
3865{
3866 ptid_t event_ptid;
3867
3868 /* Flush the async file first. */
3869 if (target_is_async_p ())
3870 async_file_flush ();
3871
3872 do
3873 {
3874 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3875 }
3876 while ((target_options & TARGET_WNOHANG) == 0
3877 && ptid_equal (event_ptid, null_ptid)
3878 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3879
3880 /* If at least one stop was reported, there may be more. A single
3881 SIGCHLD can signal more than one child stop. */
3882 if (target_is_async_p ()
3883 && (target_options & TARGET_WNOHANG) != 0
3884 && !ptid_equal (event_ptid, null_ptid))
3885 async_file_mark ();
3886
3887 return event_ptid;
3888}
3889
3890/* Send a signal to an LWP, using the tkill syscall so the signal is
3891 delivered to that specific thread rather than to the thread group. */
3892static int
3893kill_lwp (unsigned long lwpid, int signo)
3894{
3895 int ret;
3896
3897 errno = 0;
3898 ret = syscall (__NR_tkill, lwpid, signo);
3899 if (errno == ENOSYS)
3900 {
3901 /* If tkill fails, then we are not using nptl threads, a
3902 configuration we no longer support. */
3903 perror_with_name (("tkill"));
3904 }
3905 return ret;
3906}
3907
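/* Stop LWP by queueing a SIGSTOP for it (a no-op if one is already
   pending). */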
3908void
3909linux_stop_lwp (struct lwp_info *lwp)
3910{
3911 send_sigstop (lwp);
3912}
3913
3914static void
3915send_sigstop (struct lwp_info *lwp)
3916{
3917 int pid;
3918
3919 pid = lwpid_of (get_lwp_thread (lwp));
3920
3921 /* If we already have a pending stop signal for this process, don't
3922 send another. */
3923 if (lwp->stop_expected)
3924 {
3925 if (debug_threads)
3926 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3927
3928 return;
3929 }
3930
3931 if (debug_threads)
3932 debug_printf ("Sending sigstop to lwp %d\n", pid);
3933
3934 lwp->stop_expected = 1;
3935 kill_lwp (pid, SIGSTOP);
3936}
3937
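/* find_inferior callback: send a SIGSTOP to the LWP of ENTRY, unless
   it is EXCEPT or is already stopped. */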
3938static int
3939send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3940{
3941 struct thread_info *thread = (struct thread_info *) entry;
3942 struct lwp_info *lwp = get_thread_lwp (thread);
3943
3944 /* Ignore EXCEPT. */
3945 if (lwp == except)
3946 return 0;
3947
3948 if (lwp->stopped)
3949 return 0;
3950
3951 send_sigstop (lwp);
3952 return 0;
3953}
3954
3955/* Increment the suspend count of an LWP, and stop it if not already
3956 stopped. */
3957static int
3958suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3959 void *except)
3960{
3961 struct thread_info *thread = (struct thread_info *) entry;
3962 struct lwp_info *lwp = get_thread_lwp (thread);
3963
3964 /* Ignore EXCEPT. */
3965 if (lwp == except)
3966 return 0;
3967
3968 lwp_suspended_inc (lwp);
3969
3970 return send_sigstop_callback (entry, except);
3971}
3972
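/* Record that LWP died with wait status WSTAT: keep the status as
   pending so the exit is reported later, and mark the LWP stopped so
   no further stop attempts are made on it. */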
3973static void
3974mark_lwp_dead (struct lwp_info *lwp, int wstat)
3975{
3976 /* Store the exit status for later. */
3977 lwp->status_pending_p = 1;
3978 lwp->status_pending = wstat;
3979
3980 /* Store in waitstatus as well, as there's nothing else to process
3981 for this event. */
3982 if (WIFEXITED (wstat))
3983 {
3984 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3985 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3986 }
3987 else if (WIFSIGNALED (wstat))
3988 {
3989 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3990 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3991 }
3992
3993 /* Prevent trying to stop it. */
3994 lwp->stopped = 1;
3995
3996 /* No further stops are expected from a dead lwp. */
3997 lwp->stop_expected = 0;
3998}
3999
4000/* Return true if LWP has exited already, and has a pending exit event
4001 to report to GDB. */
4002
4003static int
4004lwp_is_marked_dead (struct lwp_info *lwp)
4005{
4006 return (lwp->status_pending_p
4007 && (WIFEXITED (lwp->status_pending)
4008 || WIFSIGNALED (lwp->status_pending)));
4009}
4010
4011/* Wait for all children to stop for the SIGSTOPs we just queued. */
4012
4013static void
4014wait_for_sigstop (void)
4015{
4016 struct thread_info *saved_thread;
4017 ptid_t saved_tid;
4018 int wstat;
4019 int ret;
4020
4021 saved_thread = current_thread;
4022 if (saved_thread != NULL)
4023 saved_tid = saved_thread->entry.id;
4024 else
4025 saved_tid = null_ptid; /* avoid bogus unused warning */
4026
4027 if (debug_threads)
4028 debug_printf ("wait_for_sigstop: pulling events\n");
4029
4030 /* Passing NULL_PTID as filter indicates we want all events to be
4031 left pending. Eventually this returns when there are no
4032 unwaited-for children left. */
4033 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4034 &wstat, __WALL);
4035 gdb_assert (ret == -1);
4036
4037 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4038 current_thread = saved_thread;
4039 else
4040 {
4041 if (debug_threads)
4042 debug_printf ("Previously current thread died.\n");
4043
4044 /* We can't change the current inferior behind GDB's back;
4045 otherwise, a subsequent command may apply to the wrong
4046 process. */
4047 current_thread = NULL;
4048 }
4049}
4050
4051/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4052 move it out, because we need to report the stop event to GDB. For
4053 example, if the user puts a breakpoint in the jump pad, it's
4054 because she wants to debug it. */
4055
4056static int
4057stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4058{
4059 struct thread_info *thread = (struct thread_info *) entry;
4060 struct lwp_info *lwp = get_thread_lwp (thread);
4061
4062 if (lwp->suspended != 0)
4063 {
4064 internal_error (__FILE__, __LINE__,
4065 "LWP %ld is suspended, suspended=%d\n",
4066 lwpid_of (thread), lwp->suspended);
4067 }
4068 gdb_assert (lwp->stopped);
4069
4070 /* Allow debugging the jump pad, gdb_collect, etc. */
4071 return (supports_fast_tracepoints ()
4072 && agent_loaded_p ()
4073 && (gdb_breakpoint_here (lwp->stop_pc)
4074 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4075 || thread->last_resume_kind == resume_step)
4076 && linux_fast_tracepoint_collecting (lwp, NULL));
4077}
4078
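/* Callback for each stopped LWP: if it is stopped in a jump pad and
   no stop event must be reported, defer any pending signal and resume
   the LWP so it can finish collecting and leave the pad; otherwise
   bump its suspend count so it is left alone while the other threads
   are stabilized. */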
4079static void
4080move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4081{
4082 struct thread_info *thread = (struct thread_info *) entry;
4083 struct thread_info *saved_thread;
4084 struct lwp_info *lwp = get_thread_lwp (thread);
4085 int *wstat;
4086
4087 if (lwp->suspended != 0)
4088 {
4089 internal_error (__FILE__, __LINE__,
4090 "LWP %ld is suspended, suspended=%d\n",
4091 lwpid_of (thread), lwp->suspended);
4092 }
4093 gdb_assert (lwp->stopped);
4094
4095 /* For gdb_breakpoint_here. */
4096 saved_thread = current_thread;
4097 current_thread = thread;
4098
4099 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4100
4101 /* Allow debugging the jump pad, gdb_collect, etc. */
4102 if (!gdb_breakpoint_here (lwp->stop_pc)
4103 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4104 && thread->last_resume_kind != resume_step
4105 && maybe_move_out_of_jump_pad (lwp, wstat))
4106 {
4107 if (debug_threads)
4108 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4109 lwpid_of (thread));
4110
4111 if (wstat)
4112 {
4113 lwp->status_pending_p = 0;
4114 enqueue_one_deferred_signal (lwp, wstat);
4115
4116 if (debug_threads)
4117 debug_printf ("Signal %d for LWP %ld deferred "
4118 "(in jump pad)\n",
4119 WSTOPSIG (*wstat), lwpid_of (thread));
4120 }
4121
4122 linux_resume_one_lwp (lwp, 0, 0, NULL);
4123 }
4124 else
4125 lwp_suspended_inc (lwp);
4126
4127 current_thread = saved_thread;
4128}
4129
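/* Callback: return nonzero if the LWP of ENTRY is alive and running,
   i.e., neither stopped nor marked dead. */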
4130static int
4131lwp_running (struct inferior_list_entry *entry, void *data)
4132{
4133 struct thread_info *thread = (struct thread_info *) entry;
4134 struct lwp_info *lwp = get_thread_lwp (thread);
4135
4136 if (lwp_is_marked_dead (lwp))
4137 return 0;
4138 if (lwp->stopped)
4139 return 0;
4140 return 1;
4141}
4142
4143/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4144 If SUSPEND, then also increase the suspend count of every LWP,
4145 except EXCEPT. */
4146
4147static void
4148stop_all_lwps (int suspend, struct lwp_info *except)
4149{
4150 /* Should not be called recursively. */
4151 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4152
4153 if (debug_threads)
4154 {
4155 debug_enter ();
4156 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4157 suspend ? "stop-and-suspend" : "stop",
4158 except != NULL
4159 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4160 : "none");
4161 }
4162
4163 stopping_threads = (suspend
4164 ? STOPPING_AND_SUSPENDING_THREADS
4165 : STOPPING_THREADS);
4166
4167 if (suspend)
4168 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4169 else
4170 find_inferior (&all_threads, send_sigstop_callback, except);
4171 wait_for_sigstop ();
4172 stopping_threads = NOT_STOPPING_THREADS;
4173
4174 if (debug_threads)
4175 {
4176 debug_printf ("stop_all_lwps done, setting stopping_threads "
4177 "back to !stopping\n");
4178 debug_exit ();
4179 }
4180}
4181
4182/* Enqueue one signal in the chain of signals which need to be
4183 delivered to this process on next resume. */
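/* Signals are pushed at the head of the list but consumed from the
   tail (see linux_resume_one_lwp_throw), so they are delivered in the
   order they were enqueued. */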
4184
4185static void
4186enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4187{
4188 struct pending_signals *p_sig = XNEW (struct pending_signals);
4189
4190 p_sig->prev = lwp->pending_signals;
4191 p_sig->signal = signal;
4192 if (info == NULL)
4193 memset (&p_sig->info, 0, sizeof (siginfo_t));
4194 else
4195 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4196 lwp->pending_signals = p_sig;
4197}
4198
4199/* Install breakpoints for software single stepping. */
4200
4201static void
4202install_software_single_step_breakpoints (struct lwp_info *lwp)
4203{
4204 int i;
4205 CORE_ADDR pc;
4206 struct thread_info *thread = get_lwp_thread (lwp);
4207 struct regcache *regcache = get_thread_regcache (thread, 1);
4208 VEC (CORE_ADDR) *next_pcs = NULL;
4209 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4210
4211 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4212
4213 current_thread = thread;
4214 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4215
4216 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4217 set_reinsert_breakpoint (pc, current_ptid);
4218
4219 do_cleanups (old_chain);
4220}
4221
4222/* Single step via hardware or software single step.
4223 Return 1 if hardware single-stepping, 0 if software single-stepping
4224 or if the target can't single-step. */
4225
4226static int
4227single_step (struct lwp_info* lwp)
4228{
4229 int step = 0;
4230
4231 if (can_hardware_single_step ())
4232 {
4233 step = 1;
4234 }
4235 else if (can_software_single_step ())
4236 {
4237 install_software_single_step_breakpoints (lwp);
4238 step = 0;
4239 }
4240 else
4241 {
4242 if (debug_threads)
4243 debug_printf ("stepping is not implemented on this target\n");
4244 }
4245
4246 return step;
4247}
4248
4249/* The signal can be delivered to the inferior if we are not trying to
4250 finish a fast tracepoint collect. Since the signal can be delivered
4251 during a step-over, the program may go to the signal handler and
4252 trap again after returning from it. We can live with the spurious
4253 double traps. */
4254
4255static int
4256lwp_signal_can_be_delivered (struct lwp_info *lwp)
4257{
4258 return !lwp->collecting_fast_tracepoint;
4259}
4260
4261/* Resume execution of LWP. If STEP is nonzero, single-step it. If
4262 SIGNAL is nonzero, give it that signal. */
4263
4264static void
4265linux_resume_one_lwp_throw (struct lwp_info *lwp,
4266 int step, int signal, siginfo_t *info)
4267{
4268 struct thread_info *thread = get_lwp_thread (lwp);
4269 struct thread_info *saved_thread;
4270 int fast_tp_collecting;
4271 int ptrace_request;
4272 struct process_info *proc = get_thread_process (thread);
4273
4274 /* Note that the target description may not be initialised
4275 (proc->tdesc == NULL) at this point, because the program hasn't
4276 stopped at the first instruction yet: GDBserver is still skipping
4277 the extra traps from the wrapper program (see option --wrapper).
4278 Code in this function that requires register access should be
4279 guarded by a proc->tdesc != NULL check, or similar. */
4280
4281 if (lwp->stopped == 0)
4282 return;
4283
4284 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4285
4286 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4287
4288 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4289
4290 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4291 user used the "jump" command, or "set $pc = foo"). */
4292 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4293 {
4294 /* Collecting 'while-stepping' actions doesn't make sense
4295 anymore. */
4296 release_while_stepping_state_list (thread);
4297 }
4298
4299 /* If we have pending signals or status, and a new signal, enqueue the
4300 signal. Also enqueue the signal if it can't be delivered to the
4301 inferior right now. */
4302 if (signal != 0
4303 && (lwp->status_pending_p
4304 || lwp->pending_signals != NULL
4305 || !lwp_signal_can_be_delivered (lwp)))
4306 {
4307 enqueue_pending_signal (lwp, signal, info);
4308
4309 /* Postpone any pending signal. It was enqueued above. */
4310 signal = 0;
4311 }
4312
4313 if (lwp->status_pending_p)
4314 {
4315 if (debug_threads)
4316 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4317 " has pending status\n",
4318 lwpid_of (thread), step ? "step" : "continue",
4319 lwp->stop_expected ? "expected" : "not expected");
4320 return;
4321 }
4322
4323 saved_thread = current_thread;
4324 current_thread = thread;
4325
4326 /* This bit needs some thinking about. If we get a signal that
4327 we must report while a single-step reinsert is still pending,
4328 we often end up resuming the thread. It might be better to
4329 (ew) allow a stack of pending events; then we could be sure that
4330 the reinsert happened right away and not lose any signals.
4331
4332 Making this stack would also shrink the window in which breakpoints are
4333 uninserted (see comment in linux_wait_for_lwp) but not enough for
4334 complete correctness, so it won't solve that problem. It may be
4335 worthwhile just to solve this one, however. */
4336 if (lwp->bp_reinsert != 0)
4337 {
4338 if (debug_threads)
4339 debug_printf (" pending reinsert at 0x%s\n",
4340 paddress (lwp->bp_reinsert));
4341
4342 if (can_hardware_single_step ())
4343 {
4344 if (fast_tp_collecting == 0)
4345 {
4346 if (step == 0)
4347 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4348 if (lwp->suspended)
4349 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4350 lwp->suspended);
4351 }
4352 }
4353
4354 step = maybe_hw_step (thread);
4355 }
4356 else
4357 {
4358 /* If the thread isn't doing step-over, there shouldn't be any
4359 reinsert breakpoints. */
4360 gdb_assert (!has_reinsert_breakpoints (thread));
4361 }
4362
4363 if (fast_tp_collecting == 1)
4364 {
4365 if (debug_threads)
4366 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4367 " (exit-jump-pad-bkpt)\n",
4368 lwpid_of (thread));
4369 }
4370 else if (fast_tp_collecting == 2)
4371 {
4372 if (debug_threads)
4373 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4374 " single-stepping\n",
4375 lwpid_of (thread));
4376
4377 if (can_hardware_single_step ())
4378 step = 1;
4379 else
4380 {
4381 internal_error (__FILE__, __LINE__,
4382 "moving out of jump pad single-stepping"
4383 " not implemented on this target");
4384 }
4385 }
4386
4387 /* If we have while-stepping actions in this thread, set it
4388 stepping. If we have a signal to deliver, it may or may not be
4389 ignored (SIG_IGN); we don't know. Assume it isn't, and allow
4390 collecting while-stepping into a signal handler. A possible
4391 smart thing to do would be to set an internal breakpoint at the
4392 signal return address, continue, and carry on catching this
4393 while-stepping action only when that breakpoint is hit. A
4394 future enhancement. */
4395 if (thread->while_stepping != NULL)
4396 {
4397 if (debug_threads)
4398 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4399 lwpid_of (thread));
4400
4401 step = single_step (lwp);
4402 }
4403
4404 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4405 {
4406 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4407
4408 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4409
4410 if (debug_threads)
4411 {
4412 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4413 (long) lwp->stop_pc);
4414 }
4415 }
4416
4417 /* If we have pending signals, consume one if it can be delivered to
4418 the inferior. */
4419 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4420 {
4421 struct pending_signals **p_sig;
4422
4423 p_sig = &lwp->pending_signals;
4424 while ((*p_sig)->prev != NULL)
4425 p_sig = &(*p_sig)->prev;
4426
4427 signal = (*p_sig)->signal;
4428 if ((*p_sig)->info.si_signo != 0)
4429 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4430 &(*p_sig)->info);
4431
4432 free (*p_sig);
4433 *p_sig = NULL;
4434 }
4435
4436 if (debug_threads)
4437 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4438 lwpid_of (thread), step ? "step" : "continue", signal,
4439 lwp->stop_expected ? "expected" : "not expected");
4440
4441 if (the_low_target.prepare_to_resume != NULL)
4442 the_low_target.prepare_to_resume (lwp);
4443
4444 regcache_invalidate_thread (thread);
4445 errno = 0;
4446 lwp->stepping = step;
4447 if (step)
4448 ptrace_request = PTRACE_SINGLESTEP;
4449 else if (gdb_catching_syscalls_p (lwp))
4450 ptrace_request = PTRACE_SYSCALL;
4451 else
4452 ptrace_request = PTRACE_CONT;
4453 ptrace (ptrace_request,
4454 lwpid_of (thread),
4455 (PTRACE_TYPE_ARG3) 0,
4456 /* Coerce to a uintptr_t first to avoid potential gcc warning
4457 of coercing an 8 byte integer to a 4 byte pointer. */
4458 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4459
4460 current_thread = saved_thread;
4461 if (errno)
4462 perror_with_name ("resuming thread");
4463
4464 /* Successfully resumed. Clear state that no longer makes sense,
4465 and mark the LWP as running. Must not do this before resuming
4466 otherwise if that fails other code will be confused. E.g., we'd
4467 later try to stop the LWP and hang forever waiting for a stop
4468 status. Note that we must not throw after this is cleared,
4469 otherwise handle_zombie_lwp_error would get confused. */
4470 lwp->stopped = 0;
4471 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4472}
4473
4474/* Called when we try to resume a stopped LWP and that errors out. If
4475 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4476 or about to become), discard the error, clear any pending status
4477 the LWP may have, and return true (we'll collect the exit status
4478 soon enough). Otherwise, return false. */
4479
4480static int
4481check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4482{
4483 struct thread_info *thread = get_lwp_thread (lp);
4484
4485 /* If we get an error after resuming the LWP successfully, we'd
4486 confuse !T state for the LWP being gone. */
4487 gdb_assert (lp->stopped);
4488
4489 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4490 because even if ptrace failed with ESRCH, the tracee may be "not
4491 yet fully dead", but already refusing ptrace requests. In that
4492 case the tracee has 'R (Running)' state for a little bit
4493 (observed in Linux 3.18). See also the note on ESRCH in the
4494 ptrace(2) man page. Instead, check whether the LWP has any state
4495 other than ptrace-stopped. */
4496
4497 /* Don't assume anything if /proc/PID/status can't be read. */
4498 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4499 {
4500 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4501 lp->status_pending_p = 0;
4502 return 1;
4503 }
4504 return 0;
4505}
4506
4507/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4508 disappears while we try to resume it. */
4509
4510static void
4511linux_resume_one_lwp (struct lwp_info *lwp,
4512 int step, int signal, siginfo_t *info)
4513{
4514 TRY
4515 {
4516 linux_resume_one_lwp_throw (lwp, step, signal, info);
4517 }
4518 CATCH (ex, RETURN_MASK_ERROR)
4519 {
4520 if (!check_ptrace_stopped_lwp_gone (lwp))
4521 throw_exception (ex);
4522 }
4523 END_CATCH
4524}
4525
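/* A thread_resume array and its element count, bundled so both can be
   passed through find_inferior's single VOID* argument (see
   linux_set_resume_request). */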
4526struct thread_resume_array
4527{
4528 struct thread_resume *resume;
4529 size_t n;
4530};
4531
4532/* This function is called once per thread via find_inferior.
4533 ARG is a pointer to a thread_resume_array struct.
4534 We look up the thread specified by ENTRY in ARG, and mark the thread
4535 with a pointer to the appropriate resume request.
4536
4537 This algorithm is O(threads * resume elements), but the number of
4538 resume elements is small (and will remain small at least until
4539 GDB supports thread suspension). */
4540
4541static int
4542linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4543{
4544 struct thread_info *thread = (struct thread_info *) entry;
4545 struct lwp_info *lwp = get_thread_lwp (thread);
4546 int ndx;
4547 struct thread_resume_array *r;
4548
4549 r = (struct thread_resume_array *) arg;
4550
4551 for (ndx = 0; ndx < r->n; ndx++)
4552 {
4553 ptid_t ptid = r->resume[ndx].thread;
4554 if (ptid_equal (ptid, minus_one_ptid)
4555 || ptid_equal (ptid, entry->id)
4556 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4557 of PID'. */
4558 || (ptid_get_pid (ptid) == pid_of (thread)
4559 && (ptid_is_pid (ptid)
4560 || ptid_get_lwp (ptid) == -1)))
4561 {
4562 if (r->resume[ndx].kind == resume_stop
4563 && thread->last_resume_kind == resume_stop)
4564 {
4565 if (debug_threads)
4566 debug_printf ("already %s LWP %ld at GDB's request\n",
4567 (thread->last_status.kind
4568 == TARGET_WAITKIND_STOPPED)
4569 ? "stopped"
4570 : "stopping",
4571 lwpid_of (thread));
4572
4573 continue;
4574 }
4575
4576 lwp->resume = &r->resume[ndx];
4577 thread->last_resume_kind = lwp->resume->kind;
4578
4579 lwp->step_range_start = lwp->resume->step_range_start;
4580 lwp->step_range_end = lwp->resume->step_range_end;
4581
4582 /* If we had a deferred signal to report, dequeue one now.
4583 This can happen if LWP gets more than one signal while
4584 trying to get out of a jump pad. */
4585 if (lwp->stopped
4586 && !lwp->status_pending_p
4587 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4588 {
4589 lwp->status_pending_p = 1;
4590
4591 if (debug_threads)
4592 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4593 "leaving status pending.\n",
4594 WSTOPSIG (lwp->status_pending),
4595 lwpid_of (thread));
4596 }
4597
4598 return 0;
4599 }
4600 }
4601
4602 /* No resume action for this thread. */
4603 lwp->resume = NULL;
4604
4605 return 0;
4606}
4607
4608/* find_inferior callback for linux_resume.
4609 Set *FLAG_P if this lwp has an interesting status pending. */
4610
4611static int
4612resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4613{
4614 struct thread_info *thread = (struct thread_info *) entry;
4615 struct lwp_info *lwp = get_thread_lwp (thread);
4616
4617 /* LWPs which will not be resumed are not interesting, because
4618 we might not wait for them next time through linux_wait. */
4619 if (lwp->resume == NULL)
4620 return 0;
4621
4622 if (thread_still_has_status_pending_p (thread))
4623 * (int *) flag_p = 1;
4624
4625 return 0;
4626}
4627
4628/* Return 1 if this lwp that GDB wants running is stopped at an
4629 internal breakpoint that we need to step over. It assumes that any
4630 required STOP_PC adjustment has already been propagated to the
4631 inferior's regcache. */
4632
4633static int
4634need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4635{
4636 struct thread_info *thread = (struct thread_info *) entry;
4637 struct lwp_info *lwp = get_thread_lwp (thread);
4638 struct thread_info *saved_thread;
4639 CORE_ADDR pc;
4640 struct process_info *proc = get_thread_process (thread);
4641
4642 /* GDBserver is skipping the extra traps from the wrapper program,
4643 so no step-over is needed. */
4644 if (proc->tdesc == NULL)
4645 return 0;
4646
4647 /* LWPs which will not be resumed are not interesting, because we
4648 might not wait for them next time through linux_wait. */
4649
4650 if (!lwp->stopped)
4651 {
4652 if (debug_threads)
4653 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4654 lwpid_of (thread));
4655 return 0;
4656 }
4657
4658 if (thread->last_resume_kind == resume_stop)
4659 {
4660 if (debug_threads)
4661 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4662 " stopped\n",
4663 lwpid_of (thread));
4664 return 0;
4665 }
4666
4667 gdb_assert (lwp->suspended >= 0);
4668
4669 if (lwp->suspended)
4670 {
4671 if (debug_threads)
4672 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4673 lwpid_of (thread));
4674 return 0;
4675 }
4676
4677 if (lwp->status_pending_p)
4678 {
4679 if (debug_threads)
4680 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4681 " status.\n",
4682 lwpid_of (thread));
4683 return 0;
4684 }
4685
4686 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4687 or we have. */
4688 pc = get_pc (lwp);
4689
4690 /* If the PC has changed since we stopped, then don't do anything,
4691 and let the breakpoint/tracepoint be hit. This happens if, for
4692 instance, GDB handled the decr_pc_after_break subtraction itself,
4693 GDB is OOL stepping this thread, or the user has issued a "jump"
4694 command, or poked thread's registers herself. */
4695 if (pc != lwp->stop_pc)
4696 {
4697 if (debug_threads)
4698 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4699 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4700 lwpid_of (thread),
4701 paddress (lwp->stop_pc), paddress (pc));
4702 return 0;
4703 }
4704
4705 /* On software single-step targets, resume the inferior with the
4706 signal rather than stepping over the breakpoint. */
4707 if (can_software_single_step ()
4708 && lwp->pending_signals != NULL
4709 && lwp_signal_can_be_delivered (lwp))
4710 {
4711 if (debug_threads)
4712 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4713 " signals.\n",
4714 lwpid_of (thread));
4715
4716 return 0;
4717 }
4718
4719 saved_thread = current_thread;
4720 current_thread = thread;
4721
4722 /* We can only step over breakpoints we know about. */
4723 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4724 {
4725 /* Don't step over a breakpoint that GDB expects to hit
4726 though. If the condition is being evaluated on the target's side
4727 and it evaluates to false, step over this breakpoint as well. */
4728 if (gdb_breakpoint_here (pc)
4729 && gdb_condition_true_at_breakpoint (pc)
4730 && gdb_no_commands_at_breakpoint (pc))
4731 {
4732 if (debug_threads)
4733 debug_printf ("Need step over [LWP %ld]? yes, but found"
4734 " GDB breakpoint at 0x%s; skipping step over\n",
4735 lwpid_of (thread), paddress (pc));
4736
4737 current_thread = saved_thread;
4738 return 0;
4739 }
4740 else
4741 {
4742 if (debug_threads)
4743 debug_printf ("Need step over [LWP %ld]? yes, "
4744 "found breakpoint at 0x%s\n",
4745 lwpid_of (thread), paddress (pc));
4746
4747 /* We've found an lwp that needs stepping over --- return 1 so
4748 that find_inferior stops looking. */
4749 current_thread = saved_thread;
4750
4751 return 1;
4752 }
4753 }
4754
4755 current_thread = saved_thread;
4756
4757 if (debug_threads)
4758 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4759 " at 0x%s\n",
4760 lwpid_of (thread), paddress (pc));
4761
4762 return 0;
4763}
4764
4765/* Start a step-over operation on LWP. When LWP is stopped at a
4766 breakpoint, to make progress, we need to move the breakpoint out
4767 of the way. If we let other threads run while we do that, they may
4768 pass by the breakpoint location and miss hitting it. To avoid
4769 that, a step-over momentarily stops all threads while LWP is
4770 single-stepped by either hardware or software while the breakpoint
4771 is temporarily uninserted from the inferior. When the single-step
4772 finishes, we reinsert the breakpoint, and let all threads that are
4773 supposed to be running, run again. */
4774
4775static int
4776start_step_over (struct lwp_info *lwp)
4777{
4778 struct thread_info *thread = get_lwp_thread (lwp);
4779 struct thread_info *saved_thread;
4780 CORE_ADDR pc;
4781 int step;
4782
4783 if (debug_threads)
4784 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4785 lwpid_of (thread));
4786
4787 stop_all_lwps (1, lwp);
4788
4789 if (lwp->suspended != 0)
4790 {
4791 internal_error (__FILE__, __LINE__,
4792 "LWP %ld suspended=%d\n", lwpid_of (thread),
4793 lwp->suspended);
4794 }
4795
4796 if (debug_threads)
4797 debug_printf ("Done stopping all threads for step-over.\n");
4798
4799 /* Note, we should always reach here with an already adjusted PC,
4800 either by GDB (if we're resuming due to GDB's request), or by our
4801 caller, if we just finished handling an internal breakpoint GDB
4802 shouldn't care about. */
4803 pc = get_pc (lwp);
4804
4805 saved_thread = current_thread;
4806 current_thread = thread;
4807
4808 lwp->bp_reinsert = pc;
4809 uninsert_breakpoints_at (pc);
4810 uninsert_fast_tracepoint_jumps_at (pc);
4811
4812 step = single_step (lwp);
4813
4814 current_thread = saved_thread;
4815
4816 linux_resume_one_lwp (lwp, step, 0, NULL);
4817
4818 /* Require next event from this LWP. */
4819 step_over_bkpt = thread->entry.id;
4820 return 1;
4821}
4822
4823/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4824 start_step_over, if still there, and delete any reinsert
4825 breakpoints we've set, on non-hardware single-step targets. */
4826
4827static int
4828finish_step_over (struct lwp_info *lwp)
4829{
4830 if (lwp->bp_reinsert != 0)
4831 {
4832 struct thread_info *saved_thread = current_thread;
4833
4834 if (debug_threads)
4835 debug_printf ("Finished step over.\n");
4836
4837 current_thread = get_lwp_thread (lwp);
4838
4839 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4840 may be no breakpoint to reinsert there by now. */
4841 reinsert_breakpoints_at (lwp->bp_reinsert);
4842 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4843
4844 lwp->bp_reinsert = 0;
4845
4846 /* Delete any software-single-step reinsert breakpoints. No
4847 longer needed. We don't have to worry about other threads
4848 hitting this trap, and later not being able to explain it,
4849 because we were stepping over a breakpoint, and we hold all
4850 threads but LWP stopped while doing that. */
4851 if (!can_hardware_single_step ())
4852 {
4853 gdb_assert (has_reinsert_breakpoints (current_thread));
4854 delete_reinsert_breakpoints (current_thread);
4855 }
4856
4857 step_over_bkpt = null_ptid;
4858 current_thread = saved_thread;
4859 return 1;
4860 }
4861 else
4862 return 0;
4863}
4864
4865/* If there's a step over in progress, wait until all threads stop
4866 (that is, until the stepping thread finishes its step), and
4867 unsuspend all lwps. The stepping thread ends with its status
4868 pending, which is processed later when we get back to processing
4869 events. */
4870
4871static void
4872complete_ongoing_step_over (void)
4873{
4874 if (!ptid_equal (step_over_bkpt, null_ptid))
4875 {
4876 struct lwp_info *lwp;
4877 int wstat;
4878 int ret;
4879
4880 if (debug_threads)
4881 debug_printf ("detach: step over in progress, finish it first\n");
4882
4883 /* Passing NULL_PTID as filter indicates we want all events to
4884 be left pending. Eventually this returns when there are no
4885 unwaited-for children left. */
4886 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4887 &wstat, __WALL);
4888 gdb_assert (ret == -1);
4889
4890 lwp = find_lwp_pid (step_over_bkpt);
4891 if (lwp != NULL)
4892 finish_step_over (lwp);
4893 step_over_bkpt = null_ptid;
4894 unsuspend_all_lwps (lwp);
4895 }
4896}
4897
4898/* This function is called once per thread. We check the thread's resume
4899 request, which will tell us whether to resume, step, or leave the thread
4900 stopped; and what signal, if any, it should be sent.
4901
4902 For threads which we aren't explicitly told otherwise, we preserve
4903 the stepping flag; this is used for stepping over gdbserver-placed
4904 breakpoints.
4905
4906 If any thread has a pending status, we queue any needed
4907 signals, since we won't actually resume. We already have a pending
4908 event to report, so we don't need to preserve any step requests;
4909 they should be re-issued if necessary. */
4910
4911static int
4912linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4913{
4914 struct thread_info *thread = (struct thread_info *) entry;
4915 struct lwp_info *lwp = get_thread_lwp (thread);
4916 int leave_all_stopped = * (int *) arg;
4917 int leave_pending;
4918
4919 if (lwp->resume == NULL)
4920 return 0;
4921
4922 if (lwp->resume->kind == resume_stop)
4923 {
4924 if (debug_threads)
4925 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4926
4927 if (!lwp->stopped)
4928 {
4929 if (debug_threads)
4930 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4931
4932 /* Stop the thread, and wait for the event asynchronously,
4933 through the event loop. */
4934 send_sigstop (lwp);
4935 }
4936 else
4937 {
4938 if (debug_threads)
4939 debug_printf ("already stopped LWP %ld\n",
4940 lwpid_of (thread));
4941
4942 /* The LWP may have been stopped in an internal event that
4943 was not meant to be notified back to GDB (e.g., gdbserver
4944 breakpoint), so we should be reporting a stop event in
4945 this case too. */
4946
4947 /* If the thread already has a pending SIGSTOP, this is a
4948 no-op. Otherwise, something later will presumably resume
4949 the thread and this will cause it to cancel any pending
4950 operation, due to last_resume_kind == resume_stop. If
4951 the thread already has a pending status to report, we
4952 will still report it the next time we wait - see
4953 status_pending_p_callback. */
4954
4955 /* If we already have a pending signal to report, then
4956 there's no need to queue a SIGSTOP, as this means we're
4957 midway through moving the LWP out of the jumppad, and we
4958 will report the pending signal as soon as that is
4959 finished. */
4960 if (lwp->pending_signals_to_report == NULL)
4961 send_sigstop (lwp);
4962 }
4963
4964 /* For stop requests, we're done. */
4965 lwp->resume = NULL;
4966 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4967 return 0;
4968 }
4969
4970 /* If this thread which is about to be resumed has a pending status,
4971 then don't resume it - we can just report the pending status.
4972 Likewise if it is suspended, because e.g., another thread is
4973 stepping past a breakpoint. Make sure to queue any signals that
4974 would otherwise be sent. In all-stop mode, we make this decision
4975 based on whether *any* thread has a pending status. If there's a
4976 thread that needs the step-over-breakpoint dance, then don't
4977 resume any other thread but that particular one. */
4978 leave_pending = (lwp->suspended
4979 || lwp->status_pending_p
4980 || leave_all_stopped);
4981
4982 /* If we have a new signal, enqueue the signal. */
4983 if (lwp->resume->sig != 0)
4984 {
4985 siginfo_t info, *info_p;
4986
4987 /* If this is the same signal we were previously stopped by,
4988 make sure to queue its siginfo. */
4989 if (WIFSTOPPED (lwp->last_status)
4990 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4991 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4992 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4993 info_p = &info;
4994 else
4995 info_p = NULL;
4996
4997 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4998 }
4999
5000 if (!leave_pending)
5001 {
5002 if (debug_threads)
5003 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5004
5005 proceed_one_lwp (entry, NULL);
5006 }
5007 else
5008 {
5009 if (debug_threads)
5010 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5011 }
5012
5013 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5014 lwp->resume = NULL;
5015 return 0;
5016}
5017
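/* Resume, step, or stop the threads named by RESUME_INFO, N entries,
   as requested by GDB's vCont. If any resumable thread has a pending
   status or needs a step-over, leave all threads stopped and only
   queue the requested signals. */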
5018static void
5019linux_resume (struct thread_resume *resume_info, size_t n)
5020{
5021 struct thread_resume_array array = { resume_info, n };
5022 struct thread_info *need_step_over = NULL;
5023 int any_pending;
5024 int leave_all_stopped;
5025
5026 if (debug_threads)
5027 {
5028 debug_enter ();
5029 debug_printf ("linux_resume:\n");
5030 }
5031
5032 find_inferior (&all_threads, linux_set_resume_request, &array);
5033
5034 /* If there is a thread which would otherwise be resumed, which has
5035 a pending status, then don't resume any threads - we can just
5036 report the pending status. Make sure to queue any signals that
5037 would otherwise be sent. In non-stop mode, we'll apply this
5038 logic to each thread individually. We consume all pending events
5039 before considering starting a step-over (in all-stop). */
5040 any_pending = 0;
5041 if (!non_stop)
5042 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5043
5044 /* If there is a thread which would otherwise be resumed, which is
5045 stopped at a breakpoint that needs stepping over, then don't
5046 resume any threads - have it step over the breakpoint with all
5047 other threads stopped, then resume all threads again. Make sure
5048 to queue any signals that would otherwise be delivered or
5049 queued. */
5050 if (!any_pending && supports_breakpoints ())
5051 need_step_over
5052 = (struct thread_info *) find_inferior (&all_threads,
5053 need_step_over_p, NULL);
5054
5055 leave_all_stopped = (need_step_over != NULL || any_pending);
5056
5057 if (debug_threads)
5058 {
5059 if (need_step_over != NULL)
5060 debug_printf ("Not resuming all, need step over\n");
5061 else if (any_pending)
5062 debug_printf ("Not resuming, all-stop and found "
5063 "an LWP with pending status\n");
5064 else
5065 debug_printf ("Resuming, no pending status or step over needed\n");
5066 }
5067
5068 /* Even if we're leaving threads stopped, queue all signals we'd
5069 otherwise deliver. */
5070 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5071
5072 if (need_step_over)
5073 start_step_over (get_thread_lwp (need_step_over));
5074
5075 if (debug_threads)
5076 {
5077 debug_printf ("linux_resume done\n");
5078 debug_exit ();
5079 }
5080
5081 /* We may have events that were pending that can/should be sent to
5082 the client now. Trigger a linux_wait call. */
5083 if (target_is_async_p ())
5084 async_file_mark ();
5085}
5086
5087/* This function is called once per thread. We check the thread's
5088 last resume request, which will tell us whether to resume, step, or
5089 leave the thread stopped. Any signal the client requested to be
5090 delivered has already been enqueued at this point.
5091
5092 If any thread that GDB wants running is stopped at an internal
5093 breakpoint that needs stepping over, we start a step-over operation
5094 on that particular thread, and leave all others stopped. */
5095
5096static int
5097proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5098{
5099 struct thread_info *thread = (struct thread_info *) entry;
5100 struct lwp_info *lwp = get_thread_lwp (thread);
5101 int step;
5102
5103 if (lwp == except)
5104 return 0;
5105
5106 if (debug_threads)
5107 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5108
5109 if (!lwp->stopped)
5110 {
5111 if (debug_threads)
5112 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5113 return 0;
5114 }
5115
5116 if (thread->last_resume_kind == resume_stop
5117 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5118 {
5119 if (debug_threads)
5120 debug_printf (" client wants LWP %ld to remain stopped\n",
5121 lwpid_of (thread));
5122 return 0;
5123 }
5124
5125 if (lwp->status_pending_p)
5126 {
5127 if (debug_threads)
5128 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5129 lwpid_of (thread));
5130 return 0;
5131 }
5132
5133 gdb_assert (lwp->suspended >= 0);
5134
5135 if (lwp->suspended)
5136 {
5137 if (debug_threads)
5138 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5139 return 0;
5140 }
5141
5142 if (thread->last_resume_kind == resume_stop
5143 && lwp->pending_signals_to_report == NULL
5144 && lwp->collecting_fast_tracepoint == 0)
5145 {
5146 /* We haven't reported this LWP as stopped yet (otherwise, the
5147 last_status.kind check above would catch it, and we wouldn't
5148 reach here). This LWP may have been momentarily paused by a
5149 stop_all_lwps call while handling for example, another LWP's
5150 step-over. In that case, the pending expected SIGSTOP signal
5151 that was queued at vCont;t handling time will have already
5152 been consumed by wait_for_sigstop, and so we need to requeue
5153 another one here. Note that if the LWP already has a SIGSTOP
5154 pending, this is a no-op. */
5155
5156 if (debug_threads)
5157 debug_printf ("Client wants LWP %ld to stop. "
5158 "Making sure it has a SIGSTOP pending\n",
5159 lwpid_of (thread));
5160
5161 send_sigstop (lwp);
5162 }
5163
5164 if (thread->last_resume_kind == resume_step)
5165 {
5166 if (debug_threads)
5167 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5168 lwpid_of (thread));
5169 step = 1;
5170 }
5171 else if (lwp->bp_reinsert != 0)
5172 {
5173 if (debug_threads)
5174 debug_printf (" stepping LWP %ld, reinsert set\n",
5175 lwpid_of (thread));
5176
5177 step = maybe_hw_step (thread);
5178 }
5179 else
5180 step = 0;
5181
5182 linux_resume_one_lwp (lwp, step, 0, NULL);
5183 return 0;
5184}
5185
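/* Callback: unless the LWP of ENTRY is EXCEPT, decrement its suspend
   count and then let proceed_one_lwp decide whether to resume it. */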
5186static int
5187unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5188{
5189 struct thread_info *thread = (struct thread_info *) entry;
5190 struct lwp_info *lwp = get_thread_lwp (thread);
5191
5192 if (lwp == except)
5193 return 0;
5194
5195 lwp_suspended_decr (lwp);
5196
5197 return proceed_one_lwp (entry, except);
5198}
5199
5200/* When we finish a step-over, set threads running again. If there's
5201 another thread that may need a step-over, now's the time to start
5202 it. Eventually, we'll move all threads past their breakpoints. */
5203
5204static void
5205proceed_all_lwps (void)
5206{
5207 struct thread_info *need_step_over;
5208
5209 /* If there is a thread which would otherwise be resumed, which is
5210 stopped at a breakpoint that needs stepping over, then don't
5211 resume any threads - have it step over the breakpoint with all
5212 other threads stopped, then resume all threads again. */
5213
5214 if (supports_breakpoints ())
5215 {
5216 need_step_over
5217 = (struct thread_info *) find_inferior (&all_threads,
5218 need_step_over_p, NULL);
5219
5220 if (need_step_over != NULL)
5221 {
5222 if (debug_threads)
5223 debug_printf ("proceed_all_lwps: found "
5224 "thread %ld needing a step-over\n",
5225 lwpid_of (need_step_over));
5226
5227 start_step_over (get_thread_lwp (need_step_over));
5228 return;
5229 }
5230 }
5231
5232 if (debug_threads)
5233 debug_printf ("Proceeding, no step-over needed\n");
5234
5235 find_inferior (&all_threads, proceed_one_lwp, NULL);
5236}
5237
5238/* Stopped LWPs that the client wanted to be running, that don't have
5239 pending statuses, are set to run again, except for EXCEPT, if not
5240 NULL. This undoes a stop_all_lwps call. */
5241
5242static void
5243unstop_all_lwps (int unsuspend, struct lwp_info *except)
5244{
5245 if (debug_threads)
5246 {
5247 debug_enter ();
5248 if (except)
5249 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5250 lwpid_of (get_lwp_thread (except)));
5251 else
5252 debug_printf ("unstopping all lwps\n");
5253 }
5254
5255 if (unsuspend)
5256 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5257 else
5258 find_inferior (&all_threads, proceed_one_lwp, except);
5259
5260 if (debug_threads)
5261 {
5262 debug_printf ("unstop_all_lwps done\n");
5263 debug_exit ();
5264 }
5265}
5266
5267
5268#ifdef HAVE_LINUX_REGSETS
5269
5270#define use_linux_regsets 1
5271
5272/* Returns true if REGSET has been disabled. */
5273
5274static int
5275regset_disabled (struct regsets_info *info, struct regset_info *regset)
5276{
5277 return (info->disabled_regsets != NULL
5278 && info->disabled_regsets[regset - info->regsets]);
5279}
5280
5281/* Disable REGSET. */
5282
5283static void
5284disable_regset (struct regsets_info *info, struct regset_info *regset)
5285{
5286 int dr_offset;
5287
5288 dr_offset = regset - info->regsets;
5289 if (info->disabled_regsets == NULL)
5290 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5291 info->disabled_regsets[dr_offset] = 1;
5292}
5293
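/* Fetch registers using regset ptrace requests, skipping disabled
   regsets. Return 0 if the general registers were fetched this way,
   1 otherwise. */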
5294static int
5295regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5296 struct regcache *regcache)
5297{
5298 struct regset_info *regset;
5299 int saw_general_regs = 0;
5300 int pid;
5301 struct iovec iov;
5302
5303 pid = lwpid_of (current_thread);
5304 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5305 {
5306 void *buf, *data;
5307 int nt_type, res;
5308
5309 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5310 continue;
5311
5312 buf = xmalloc (regset->size);
5313
5314 nt_type = regset->nt_type;
5315 if (nt_type)
5316 {
5317 iov.iov_base = buf;
5318 iov.iov_len = regset->size;
5319 data = (void *) &iov;
5320 }
5321 else
5322 data = buf;
5323
5324#ifndef __sparc__
5325 res = ptrace (regset->get_request, pid,
5326 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5327#else
5328 res = ptrace (regset->get_request, pid, data, nt_type);
5329#endif
5330 if (res < 0)
5331 {
5332 if (errno == EIO)
5333 {
5334 /* If we get EIO on a regset, do not try it again for
5335 this process mode. */
5336 disable_regset (regsets_info, regset);
5337 }
5338 else if (errno == ENODATA)
5339 {
5340 /* ENODATA may be returned if the regset is currently
5341 not "active". This can happen in normal operation,
5342 so suppress the warning in this case. */
5343 }
5344 else
5345 {
5346 char s[256];
5347 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5348 pid);
5349 perror (s);
5350 }
5351 }
5352 else
5353 {
5354 if (regset->type == GENERAL_REGS)
5355 saw_general_regs = 1;
5356 regset->store_function (regcache, buf);
5357 }
5358 free (buf);
5359 }
5360 if (saw_general_regs)
5361 return 0;
5362 else
5363 return 1;
5364}
5365
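/* Store registers using regset ptrace requests, reading each regset
   back first so that kernel fields not present in the regcache are
   preserved. Return 0 if the general registers were stored this way,
   1 otherwise. */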
5366static int
5367regsets_store_inferior_registers (struct regsets_info *regsets_info,
5368 struct regcache *regcache)
5369{
5370 struct regset_info *regset;
5371 int saw_general_regs = 0;
5372 int pid;
5373 struct iovec iov;
5374
5375 pid = lwpid_of (current_thread);
5376 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5377 {
5378 void *buf, *data;
5379 int nt_type, res;
5380
5381 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5382 || regset->fill_function == NULL)
5383 continue;
5384
5385 buf = xmalloc (regset->size);
5386
5387 /* First fill the buffer with the current register set contents,
5388 in case there are any items in the kernel's regset that are
5389 not in gdbserver's regcache. */
5390
5391 nt_type = regset->nt_type;
5392 if (nt_type)
5393 {
5394 iov.iov_base = buf;
5395 iov.iov_len = regset->size;
5396 data = (void *) &iov;
5397 }
5398 else
5399 data = buf;
5400
5401#ifndef __sparc__
5402 res = ptrace (regset->get_request, pid,
5403 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5404#else
5405 res = ptrace (regset->get_request, pid, data, nt_type);
5406#endif
5407
5408 if (res == 0)
5409 {
5410 /* Then overlay our cached registers on that. */
5411 regset->fill_function (regcache, buf);
5412
5413 /* Only now do we write the register set. */
5414#ifndef __sparc__
5415 res = ptrace (regset->set_request, pid,
5416 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5417#else
5418 res = ptrace (regset->set_request, pid, data, nt_type);
5419#endif
5420 }
5421
5422 if (res < 0)
5423 {
5424 if (errno == EIO)
5425 {
5426 /* If we get EIO on a regset, do not try it again for
5427 this process mode. */
5428 disable_regset (regsets_info, regset);
5429 }
5430 else if (errno == ESRCH)
5431 {
5432 /* At this point, ESRCH should mean the process is
5433 already gone, in which case we simply ignore attempts
5434 to change its registers. See also the related
5435 comment in linux_resume_one_lwp. */
5436 free (buf);
5437 return 0;
5438 }
5439 else
5440 {
5441 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5442 }
5443 }
5444 else if (regset->type == GENERAL_REGS)
5445 saw_general_regs = 1;
5446 free (buf);
5447 }
5448 if (saw_general_regs)
5449 return 0;
5450 else
5451 return 1;
5452}
5453
5454#else /* !HAVE_LINUX_REGSETS */
5455
5456#define use_linux_regsets 0
5457#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5458#define regsets_store_inferior_registers(regsets_info, regcache) 1
5459
5460#endif
5461
5462/* Return 1 if register REGNO is supported by one of the regset ptrace
5463 calls or 0 if it has to be transferred individually. */
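/* For example, REGNO 10 tests bit 10 % 8 == 2 of byte 10 / 8 == 1 of
   REGSET_BITMAP; a NULL bitmap means every register is handled by
   some regset. */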
5464
5465static int
5466linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5467{
5468 unsigned char mask = 1 << (regno % 8);
5469 size_t index = regno / 8;
5470
5471 return (use_linux_regsets
5472 && (regs_info->regset_bitmap == NULL
5473 || (regs_info->regset_bitmap[index] & mask) != 0));
5474}
5475
5476#ifdef HAVE_LINUX_USRREGS
5477
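/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM from
   the USRREGS map, or -1 if the register is not accessible this way.
   Error out if REGNUM is invalid. */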
5478static int
5479register_addr (const struct usrregs_info *usrregs, int regnum)
5480{
5481 int addr;
5482
5483 if (regnum < 0 || regnum >= usrregs->num_regs)
5484 error ("Invalid register number %d.", regnum);
5485
5486 addr = usrregs->regmap[regnum];
5487
5488 return addr;
5489}
5490
5491/* Fetch one register, REGNO, from the inferior via PTRACE_PEEKUSER. */
5492static void
5493fetch_register (const struct usrregs_info *usrregs,
5494 struct regcache *regcache, int regno)
5495{
5496 CORE_ADDR regaddr;
5497 int i, size;
5498 char *buf;
5499 int pid;
5500
5501 if (regno >= usrregs->num_regs)
5502 return;
5503 if ((*the_low_target.cannot_fetch_register) (regno))
5504 return;
5505
5506 regaddr = register_addr (usrregs, regno);
5507 if (regaddr == -1)
5508 return;
5509
5510 size = ((register_size (regcache->tdesc, regno)
5511 + sizeof (PTRACE_XFER_TYPE) - 1)
5512 & -sizeof (PTRACE_XFER_TYPE));
5513 buf = (char *) alloca (size);
5514
5515 pid = lwpid_of (current_thread);
5516 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5517 {
5518 errno = 0;
5519 *(PTRACE_XFER_TYPE *) (buf + i) =
5520 ptrace (PTRACE_PEEKUSER, pid,
5521 /* Coerce to a uintptr_t first to avoid potential gcc warning
5522 of coercing an 8 byte integer to a 4 byte pointer. */
5523 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5524 regaddr += sizeof (PTRACE_XFER_TYPE);
5525 if (errno != 0)
5526 error ("reading register %d: %s", regno, strerror (errno));
5527 }
5528
5529 if (the_low_target.supply_ptrace_register)
5530 the_low_target.supply_ptrace_register (regcache, regno, buf);
5531 else
5532 supply_register (regcache, regno, buf);
5533}
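
/* Editor's note (not in the original source): the size computation in
   fetch_register above (also used by store_register below) rounds the
   register size up to a whole number of ptrace transfer words.  With
   an 8-byte PTRACE_XFER_TYPE:
     (4  + 8 - 1) & -8  =  11 & ~7  =  8    (one word)
     (10 + 8 - 1) & -8  =  17 & ~7  = 16    (two words)
   since -(size_t) 8 is the all-ones mask with the low three bits
   cleared. */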
5534
5535/* Store one register. */
5536static void
5537store_register (const struct usrregs_info *usrregs,
5538 struct regcache *regcache, int regno)
5539{
5540 CORE_ADDR regaddr;
5541 int i, size;
5542 char *buf;
5543 int pid;
5544
5545 if (regno >= usrregs->num_regs)
5546 return;
5547 if ((*the_low_target.cannot_store_register) (regno))
5548 return;
5549
5550 regaddr = register_addr (usrregs, regno);
5551 if (regaddr == -1)
5552 return;
5553
5554 size = ((register_size (regcache->tdesc, regno)
5555 + sizeof (PTRACE_XFER_TYPE) - 1)
5556 & -sizeof (PTRACE_XFER_TYPE));
5557 buf = (char *) alloca (size);
5558 memset (buf, 0, size);
5559
5560 if (the_low_target.collect_ptrace_register)
5561 the_low_target.collect_ptrace_register (regcache, regno, buf);
5562 else
5563 collect_register (regcache, regno, buf);
5564
5565 pid = lwpid_of (current_thread);
5566 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5567 {
5568 errno = 0;
5569 ptrace (PTRACE_POKEUSER, pid,
5570 /* Coerce to a uintptr_t first to avoid potential gcc warning
5571 about coercing an 8 byte integer to a 4 byte pointer. */
5572 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5573 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5574 if (errno != 0)
5575 {
5576 /* At this point, ESRCH should mean the process is
5577 already gone, in which case we simply ignore attempts
5578 to change its registers. See also the related
5579 comment in linux_resume_one_lwp. */
5580 if (errno == ESRCH)
5581 return;
5582
5583 if ((*the_low_target.cannot_store_register) (regno) == 0)
5584 error ("writing register %d: %s", regno, strerror (errno));
5585 }
5586 regaddr += sizeof (PTRACE_XFER_TYPE);
5587 }
5588}
5589
5590/* Fetch all registers, or just one, from the child process.
5591 If REGNO is -1, do this for all registers, skipping any that are
5592 assumed to have been retrieved by regsets_fetch_inferior_registers,
5593 unless ALL is non-zero.
5594 Otherwise, REGNO specifies which register (so we can save time). */
5595static void
5596usr_fetch_inferior_registers (const struct regs_info *regs_info,
5597 struct regcache *regcache, int regno, int all)
5598{
5599 struct usrregs_info *usr = regs_info->usrregs;
5600
5601 if (regno == -1)
5602 {
5603 for (regno = 0; regno < usr->num_regs; regno++)
5604 if (all || !linux_register_in_regsets (regs_info, regno))
5605 fetch_register (usr, regcache, regno);
5606 }
5607 else
5608 fetch_register (usr, regcache, regno);
5609}
5610
5611/* Store our register values back into the inferior.
5612 If REGNO is -1, do this for all registers, skipping any that are
5613 assumed to have been saved by regsets_store_inferior_registers,
5614 unless ALL is non-zero.
5615 Otherwise, REGNO specifies which register (so we can save time). */
5616static void
5617usr_store_inferior_registers (const struct regs_info *regs_info,
5618 struct regcache *regcache, int regno, int all)
5619{
5620 struct usrregs_info *usr = regs_info->usrregs;
5621
5622 if (regno == -1)
5623 {
5624 for (regno = 0; regno < usr->num_regs; regno++)
5625 if (all || !linux_register_in_regsets (regs_info, regno))
5626 store_register (usr, regcache, regno);
5627 }
5628 else
5629 store_register (usr, regcache, regno);
5630}
5631
5632#else /* !HAVE_LINUX_USRREGS */
5633
5634#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5635#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5636
5637#endif
5638
5639
5640static void
5641linux_fetch_registers (struct regcache *regcache, int regno)
5642{
5643 int use_regsets;
5644 int all = 0;
5645 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5646
5647 if (regno == -1)
5648 {
5649 if (the_low_target.fetch_register != NULL
5650 && regs_info->usrregs != NULL)
5651 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5652 (*the_low_target.fetch_register) (regcache, regno);
5653
5654 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5655 if (regs_info->usrregs != NULL)
5656 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5657 }
5658 else
5659 {
5660 if (the_low_target.fetch_register != NULL
5661 && (*the_low_target.fetch_register) (regcache, regno))
5662 return;
5663
5664 use_regsets = linux_register_in_regsets (regs_info, regno);
5665 if (use_regsets)
5666 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5667 regcache);
5668 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5669 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5670 }
5671}
5672
5673static void
5674linux_store_registers (struct regcache *regcache, int regno)
5675{
5676 int use_regsets;
5677 int all = 0;
5678 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5679
5680 if (regno == -1)
5681 {
5682 all = regsets_store_inferior_registers (regs_info->regsets_info,
5683 regcache);
5684 if (regs_info->usrregs != NULL)
5685 usr_store_inferior_registers (regs_info, regcache, regno, all);
5686 }
5687 else
5688 {
5689 use_regsets = linux_register_in_regsets (regs_info, regno);
5690 if (use_regsets)
5691 all = regsets_store_inferior_registers (regs_info->regsets_info,
5692 regcache);
5693 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5694 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5695 }
5696}
5697
5698
5699/* Copy LEN bytes from inferior's memory starting at MEMADDR
5700 to debugger memory starting at MYADDR. */
5701
5702static int
5703linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5704{
5705 int pid = lwpid_of (current_thread);
5706 PTRACE_XFER_TYPE *buffer;
5707 CORE_ADDR addr;
5708 int count;
5709 char filename[64];
5710 int i;
5711 int ret;
5712 int fd;
5713
5714 /* Try using /proc. Don't bother for one word. */
5715 if (len >= 3 * sizeof (long))
5716 {
5717 int bytes;
5718
5719 /* We could keep this file open and cache it - possibly one per
5720 thread. That requires some juggling, but is even faster. */
5721 sprintf (filename, "/proc/%d/mem", pid);
5722 fd = open (filename, O_RDONLY | O_LARGEFILE);
5723 if (fd == -1)
5724 goto no_proc;
5725
5726 /* If pread64 is available, use it. It's faster if the kernel
5727 supports it (only one syscall), and it's 64-bit safe even on
5728 32-bit platforms (for instance, SPARC debugging a SPARC64
5729 application). */
5730#ifdef HAVE_PREAD64
5731 bytes = pread64 (fd, myaddr, len, memaddr);
5732#else
5733 bytes = -1;
5734 if (lseek (fd, memaddr, SEEK_SET) != -1)
5735 bytes = read (fd, myaddr, len);
5736#endif
5737
5738 close (fd);
5739 if (bytes == len)
5740 return 0;
5741
5742 /* Some data was read; we'll try to get the rest with ptrace. */
5743 if (bytes > 0)
5744 {
5745 memaddr += bytes;
5746 myaddr += bytes;
5747 len -= bytes;
5748 }
5749 }
5750
5751 no_proc:
5752 /* Round starting address down to longword boundary. */
5753 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5754 /* Round ending address up; get number of longwords that makes. */
5755 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5756 / sizeof (PTRACE_XFER_TYPE));
5757 /* Allocate buffer of that many longwords. */
5758 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5759
5760 /* Read all the longwords */
5761 errno = 0;
5762 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5763 {
5764 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5765 about coercing an 8 byte integer to a 4 byte pointer. */
5766 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5767 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5768 (PTRACE_TYPE_ARG4) 0);
5769 if (errno)
5770 break;
5771 }
5772 ret = errno;
5773
5774 /* Copy appropriate bytes out of the buffer. */
5775 if (i > 0)
5776 {
5777 i *= sizeof (PTRACE_XFER_TYPE);
5778 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5779 memcpy (myaddr,
5780 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5781 i < len ? i : len);
5782 }
5783
5784 return ret;
5785}
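
/* Editor's note (not in the original source): a worked example of the
   rounding in linux_read_memory, assuming an 8-byte PTRACE_XFER_TYPE.
   For memaddr = 0x1003 and len = 6:
     addr  = 0x1003 & ~7 = 0x1000
     count = ((0x1009 - 0x1000) + 7) / 8 = 2
   Two words (at 0x1000 and 0x1008) are peeked, and the six requested
   bytes are copied out starting at buffer offset 0x1003 & 7 = 3. */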
5786
5787/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5788 memory at MEMADDR. On failure (cannot write to the inferior)
5789 returns the value of errno. Always succeeds if LEN is zero. */
5790
5791static int
5792linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5793{
5794 int i;
5795 /* Round starting address down to longword boundary. */
5796 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5797 /* Round ending address up; get number of longwords that makes. */
5798 int count
5799 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5800 / sizeof (PTRACE_XFER_TYPE);
5801
5802 /* Allocate buffer of that many longwords. */
5803 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5804
5805 int pid = lwpid_of (current_thread);
5806
5807 if (len == 0)
5808 {
5809 /* Zero length write always succeeds. */
5810 return 0;
5811 }
5812
5813 if (debug_threads)
5814 {
5815 /* Dump up to four bytes. */
5816 char str[4 * 2 + 1];
5817 char *p = str;
5818 int dump = len < 4 ? len : 4;
5819
5820 for (i = 0; i < dump; i++)
5821 {
5822 sprintf (p, "%02x", myaddr[i]);
5823 p += 2;
5824 }
5825 *p = '\0';
5826
5827 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5828 str, (long) memaddr, pid);
5829 }
5830
5831 /* Fill start and end extra bytes of buffer with existing memory data. */
5832
5833 errno = 0;
5834 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5835 about coercing an 8 byte integer to a 4 byte pointer. */
5836 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5837 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5838 (PTRACE_TYPE_ARG4) 0);
5839 if (errno)
5840 return errno;
5841
5842 if (count > 1)
5843 {
5844 errno = 0;
5845 buffer[count - 1]
5846 = ptrace (PTRACE_PEEKTEXT, pid,
5847 /* Coerce to a uintptr_t first to avoid potential gcc warning
5848 about coercing an 8 byte integer to a 4 byte pointer. */
5849 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5850 * sizeof (PTRACE_XFER_TYPE)),
5851 (PTRACE_TYPE_ARG4) 0);
5852 if (errno)
5853 return errno;
5854 }
5855
5856 /* Copy data to be written over corresponding part of buffer. */
5857
5858 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5859 myaddr, len);
5860
5861 /* Write the entire buffer. */
5862
5863 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5864 {
5865 errno = 0;
5866 ptrace (PTRACE_POKETEXT, pid,
5867 /* Coerce to a uintptr_t first to avoid potential gcc warning
5868 about coercing an 8 byte integer to a 4 byte pointer. */
5869 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5870 (PTRACE_TYPE_ARG4) buffer[i]);
5871 if (errno)
5872 return errno;
5873 }
5874
5875 return 0;
5876}
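
/* Editor's note (not in the original source): the peek-then-poke
   sequence in linux_write_memory preserves bytes that merely share a
   word with the requested range.  E.g. writing len = 3 bytes at
   memaddr = 0x2006 with an 8-byte PTRACE_XFER_TYPE gives addr = 0x2000
   and count = 2: both words are read first, the three new bytes are
   spliced in at buffer offset 6, and both whole words are written
   back, leaving the other 13 bytes unchanged. */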
5877
5878static void
5879linux_look_up_symbols (void)
5880{
5881#ifdef USE_THREAD_DB
5882 struct process_info *proc = current_process ();
5883
5884 if (proc->priv->thread_db != NULL)
5885 return;
5886
5887 thread_db_init ();
5888#endif
5889}
5890
5891static void
5892linux_request_interrupt (void)
5893{
5894 extern unsigned long signal_pid;
5895
5896 /* Send a SIGINT to the process group. This acts just as if the user
5897 had typed a ^C on the controlling terminal. */
5898 kill (-signal_pid, SIGINT);
5899}
5900
5901/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5902 to debugger memory starting at MYADDR. */
5903
5904static int
5905linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5906{
5907 char filename[PATH_MAX];
5908 int fd, n;
5909 int pid = lwpid_of (current_thread);
5910
5911 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5912
5913 fd = open (filename, O_RDONLY);
5914 if (fd < 0)
5915 return -1;
5916
5917 if (offset != (CORE_ADDR) 0
5918 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5919 n = -1;
5920 else
5921 n = read (fd, myaddr, len);
5922
5923 close (fd);
5924
5925 return n;
5926}
5927
5928/* These breakpoint- and watchpoint-related wrapper functions simply
5929 pass the call on to the target if it has registered a
5930 corresponding function. */
5931
5932static int
5933linux_supports_z_point_type (char z_type)
5934{
5935 return (the_low_target.supports_z_point_type != NULL
5936 && the_low_target.supports_z_point_type (z_type));
5937}
5938
5939static int
5940linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5941 int size, struct raw_breakpoint *bp)
5942{
5943 if (type == raw_bkpt_type_sw)
5944 return insert_memory_breakpoint (bp);
5945 else if (the_low_target.insert_point != NULL)
5946 return the_low_target.insert_point (type, addr, size, bp);
5947 else
5948 /* Unsupported (see target.h). */
5949 return 1;
5950}
5951
5952static int
5953linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5954 int size, struct raw_breakpoint *bp)
5955{
5956 if (type == raw_bkpt_type_sw)
5957 return remove_memory_breakpoint (bp);
5958 else if (the_low_target.remove_point != NULL)
5959 return the_low_target.remove_point (type, addr, size, bp);
5960 else
5961 /* Unsupported (see target.h). */
5962 return 1;
5963}
5964
5965/* Implement the to_stopped_by_sw_breakpoint target_ops
5966 method. */
5967
5968static int
5969linux_stopped_by_sw_breakpoint (void)
5970{
5971 struct lwp_info *lwp = get_thread_lwp (current_thread);
5972
5973 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5974}
5975
5976/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5977 method. */
5978
5979static int
5980linux_supports_stopped_by_sw_breakpoint (void)
5981{
5982 return USE_SIGTRAP_SIGINFO;
5983}
5984
5985/* Implement the to_stopped_by_hw_breakpoint target_ops
5986 method. */
5987
5988static int
5989linux_stopped_by_hw_breakpoint (void)
5990{
5991 struct lwp_info *lwp = get_thread_lwp (current_thread);
5992
5993 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5994}
5995
5996/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5997 method. */
5998
5999static int
6000linux_supports_stopped_by_hw_breakpoint (void)
6001{
6002 return USE_SIGTRAP_SIGINFO;
6003}
6004
6005/* Implement the supports_hardware_single_step target_ops method. */
6006
6007static int
6008linux_supports_hardware_single_step (void)
6009{
6010 return can_hardware_single_step ();
6011}
6012
6013static int
6014linux_supports_software_single_step (void)
6015{
6016 return can_software_single_step ();
6017}
6018
6019static int
6020linux_stopped_by_watchpoint (void)
6021{
6022 struct lwp_info *lwp = get_thread_lwp (current_thread);
6023
6024 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6025}
6026
6027static CORE_ADDR
6028linux_stopped_data_address (void)
6029{
6030 struct lwp_info *lwp = get_thread_lwp (current_thread);
6031
6032 return lwp->stopped_data_address;
6033}
6034
6035#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6036 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6037 && defined(PT_TEXT_END_ADDR)
6038
6039/* This is only used for targets that define PT_TEXT_ADDR,
6040 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6041 target presumably has some other way of acquiring this information,
6042 such as loadmaps. */
6043
6044/* Under uClinux, programs are loaded at non-zero offsets, which we need
6045 to tell gdb about. */
6046
6047static int
6048linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6049{
6050 unsigned long text, text_end, data;
6051 int pid = lwpid_of (current_thread);
6052
6053 errno = 0;
6054
6055 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6056 (PTRACE_TYPE_ARG4) 0);
6057 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6058 (PTRACE_TYPE_ARG4) 0);
6059 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6060 (PTRACE_TYPE_ARG4) 0);
6061
6062 if (errno == 0)
6063 {
6064 /* Both text and data offsets produced at compile-time (and so
6065 used by gdb) are relative to the beginning of the program,
6066 with the data segment immediately following the text segment.
6067 However, the actual runtime layout in memory may put the data
6068 somewhere else, so when we send gdb a data base-address, we
6069 use the real data base address and subtract the compile-time
6070 data base-address from it (which is just the length of the
6071 text segment). BSS immediately follows data in both
6072 cases. */
6073 *text_p = text;
6074 *data_p = data - (text_end - text);
6075
6076 return 1;
6077 }
6078 return 0;
6079}
6080#endif
6081
6082static int
6083linux_qxfer_osdata (const char *annex,
6084 unsigned char *readbuf, unsigned const char *writebuf,
6085 CORE_ADDR offset, int len)
6086{
6087 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6088}
6089
6090/* Convert a native/host siginfo object, into/from the siginfo in the
6091 layout of the inferiors' architecture. */
6092
6093static void
6094siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6095{
6096 int done = 0;
6097
6098 if (the_low_target.siginfo_fixup != NULL)
6099 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6100
6101 /* If there was no callback, or the callback didn't do anything,
6102 then just do a straight memcpy. */
6103 if (!done)
6104 {
6105 if (direction == 1)
6106 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6107 else
6108 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6109 }
6110}
6111
6112static int
6113linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6114 unsigned const char *writebuf, CORE_ADDR offset, int len)
6115{
6116 int pid;
6117 siginfo_t siginfo;
6118 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6119
6120 if (current_thread == NULL)
6121 return -1;
6122
6123 pid = lwpid_of (current_thread);
6124
6125 if (debug_threads)
6126 debug_printf ("%s siginfo for lwp %d.\n",
6127 readbuf != NULL ? "Reading" : "Writing",
6128 pid);
6129
6130 if (offset >= sizeof (siginfo))
6131 return -1;
6132
6133 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6134 return -1;
6135
6136 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6137 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6138 inferior with a 64-bit GDBSERVER should look the same as debugging it
6139 with a 32-bit GDBSERVER, we need to convert it. */
6140 siginfo_fixup (&siginfo, inf_siginfo, 0);
6141
6142 if (offset + len > sizeof (siginfo))
6143 len = sizeof (siginfo) - offset;
6144
6145 if (readbuf != NULL)
6146 memcpy (readbuf, inf_siginfo + offset, len);
6147 else
6148 {
6149 memcpy (inf_siginfo + offset, writebuf, len);
6150
6151 /* Convert back to ptrace layout before flushing it out. */
6152 siginfo_fixup (&siginfo, inf_siginfo, 1);
6153
6154 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6155 return -1;
6156 }
6157
6158 return len;
6159}
6160
6161/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6162 it lets us notice when children change state; and it acts as the
6163 handler for the sigsuspend in my_waitpid. */
6164
6165static void
6166sigchld_handler (int signo)
6167{
6168 int old_errno = errno;
6169
6170 if (debug_threads)
6171 {
6172 do
6173 {
6174 /* fprintf is not async-signal-safe, so call write
6175 directly. */
6176 if (write (2, "sigchld_handler\n",
6177 sizeof ("sigchld_handler\n") - 1) < 0)
6178 break; /* just ignore */
6179 } while (0);
6180 }
6181
6182 if (target_is_async_p ())
6183 async_file_mark (); /* trigger a linux_wait */
6184
6185 errno = old_errno;
6186}
6187
6188static int
6189linux_supports_non_stop (void)
6190{
6191 return 1;
6192}
6193
6194static int
6195linux_async (int enable)
6196{
6197 int previous = target_is_async_p ();
6198
6199 if (debug_threads)
6200 debug_printf ("linux_async (%d), previous=%d\n",
6201 enable, previous);
6202
6203 if (previous != enable)
6204 {
6205 sigset_t mask;
6206 sigemptyset (&mask);
6207 sigaddset (&mask, SIGCHLD);
6208
6209 sigprocmask (SIG_BLOCK, &mask, NULL);
6210
6211 if (enable)
6212 {
6213 if (pipe (linux_event_pipe) == -1)
6214 {
6215 linux_event_pipe[0] = -1;
6216 linux_event_pipe[1] = -1;
6217 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6218
6219 warning ("creating event pipe failed.");
6220 return previous;
6221 }
6222
6223 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6224 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6225
6226 /* Register the event loop handler. */
6227 add_file_handler (linux_event_pipe[0],
6228 handle_target_event, NULL);
6229
6230 /* Always trigger a linux_wait. */
6231 async_file_mark ();
6232 }
6233 else
6234 {
6235 delete_file_handler (linux_event_pipe[0]);
6236
6237 close (linux_event_pipe[0]);
6238 close (linux_event_pipe[1]);
6239 linux_event_pipe[0] = -1;
6240 linux_event_pipe[1] = -1;
6241 }
6242
6243 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6244 }
6245
6246 return previous;
6247}
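
/* Editor's sketch (not in the original source): linux_async above is
   an instance of the classic self-pipe trick.  A minimal standalone
   version of the pattern, with hypothetical names, looks like this: */
#if 0
static int event_pipe[2]; /* Created with pipe (); both ends O_NONBLOCK.  */

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; one byte in the pipe wakes up any
     select/poll watching event_pipe[0] in the event loop.  */
  if (write (event_pipe[1], "+", 1) < 0)
    ; /* Nothing useful can be done on failure inside a handler.  */
  errno = old_errno;
}
#endif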
6248
6249static int
6250linux_start_non_stop (int nonstop)
6251{
6252 /* Register or unregister from event-loop accordingly. */
6253 linux_async (nonstop);
6254
6255 if (target_is_async_p () != (nonstop != 0))
6256 return -1;
6257
6258 return 0;
6259}
6260
6261static int
6262linux_supports_multi_process (void)
6263{
6264 return 1;
6265}
6266
6267/* Check if fork events are supported. */
6268
6269static int
6270linux_supports_fork_events (void)
6271{
6272 return linux_supports_tracefork ();
6273}
6274
6275/* Check if vfork events are supported. */
6276
6277static int
6278linux_supports_vfork_events (void)
6279{
6280 return linux_supports_tracefork ();
6281}
6282
6283/* Check if exec events are supported. */
6284
6285static int
6286linux_supports_exec_events (void)
6287{
6288 return linux_supports_traceexec ();
6289}
6290
6291/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6292 options for the specified lwp. */
6293
6294static int
6295reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6296 void *args)
6297{
6298 struct thread_info *thread = (struct thread_info *) entry;
6299 struct lwp_info *lwp = get_thread_lwp (thread);
6300
6301 if (!lwp->stopped)
6302 {
6303 /* Stop the lwp so we can modify its ptrace options. */
6304 lwp->must_set_ptrace_flags = 1;
6305 linux_stop_lwp (lwp);
6306 }
6307 else
6308 {
6309 /* Already stopped; go ahead and set the ptrace options. */
6310 struct process_info *proc = find_process_pid (pid_of (thread));
6311 int options = linux_low_ptrace_options (proc->attached);
6312
6313 linux_enable_event_reporting (lwpid_of (thread), options);
6314 lwp->must_set_ptrace_flags = 0;
6315 }
6316
6317 return 0;
6318}
6319
6320/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6321 ptrace flags for all inferiors. This is in case the new GDB connection
6322 doesn't support the same set of events that the previous one did. */
6323
6324static void
6325linux_handle_new_gdb_connection (void)
6326{
6327 pid_t pid;
6328
6329 /* Request that all the lwps reset their ptrace options. */
6330 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6331}
6332
6333static int
6334linux_supports_disable_randomization (void)
6335{
6336#ifdef HAVE_PERSONALITY
6337 return 1;
6338#else
6339 return 0;
6340#endif
6341}
6342
6343static int
6344linux_supports_agent (void)
6345{
6346 return 1;
6347}
6348
6349static int
6350linux_supports_range_stepping (void)
6351{
6352 if (the_low_target.supports_range_stepping == NULL)
6353 return 0;
6354
6355 return (*the_low_target.supports_range_stepping) ();
6356}
6357
6358/* Enumerate spufs IDs for process PID. */
6359static int
6360spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6361{
6362 int pos = 0;
6363 int written = 0;
6364 char path[128];
6365 DIR *dir;
6366 struct dirent *entry;
6367
6368 sprintf (path, "/proc/%ld/fd", pid);
6369 dir = opendir (path);
6370 if (!dir)
6371 return -1;
6372
6373 rewinddir (dir);
6374 while ((entry = readdir (dir)) != NULL)
6375 {
6376 struct stat st;
6377 struct statfs stfs;
6378 int fd;
6379
6380 fd = atoi (entry->d_name);
6381 if (!fd)
6382 continue;
6383
6384 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6385 if (stat (path, &st) != 0)
6386 continue;
6387 if (!S_ISDIR (st.st_mode))
6388 continue;
6389
6390 if (statfs (path, &stfs) != 0)
6391 continue;
6392 if (stfs.f_type != SPUFS_MAGIC)
6393 continue;
6394
6395 if (pos >= offset && pos + 4 <= offset + len)
6396 {
6397 *(unsigned int *)(buf + pos - offset) = fd;
6398 written += 4;
6399 }
6400 pos += 4;
6401 }
6402
6403 closedir (dir);
6404 return written;
6405}
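
/* Editor's note (not in the original source): the OFFSET/LEN test in
   the loop above selects a window from the conceptual array of 4-byte
   IDs.  E.g. with three matching fds and offset = 4, len = 8, only the
   IDs at pos 4 and pos 8 satisfy pos >= offset && pos + 4 <= offset + len,
   so 8 bytes are copied and WRITTEN ends up as 8. */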
6406
6407/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6408 object type, using the /proc file system. */
6409static int
6410linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6411 unsigned const char *writebuf,
6412 CORE_ADDR offset, int len)
6413{
6414 long pid = lwpid_of (current_thread);
6415 char buf[128];
6416 int fd = 0;
6417 int ret = 0;
6418
6419 if (!writebuf && !readbuf)
6420 return -1;
6421
6422 if (!*annex)
6423 {
6424 if (!readbuf)
6425 return -1;
6426 else
6427 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6428 }
6429
6430 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6431 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6432 if (fd <= 0)
6433 return -1;
6434
6435 if (offset != 0
6436 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6437 {
6438 close (fd);
6439 return 0;
6440 }
6441
6442 if (writebuf)
6443 ret = write (fd, writebuf, (size_t) len);
6444 else
6445 ret = read (fd, readbuf, (size_t) len);
6446
6447 close (fd);
6448 return ret;
6449}
6450
6451#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6452struct target_loadseg
6453{
6454 /* Core address to which the segment is mapped. */
6455 Elf32_Addr addr;
6456 /* VMA recorded in the program header. */
6457 Elf32_Addr p_vaddr;
6458 /* Size of this segment in memory. */
6459 Elf32_Word p_memsz;
6460};
6461
6462# if defined PT_GETDSBT
6463struct target_loadmap
6464{
6465 /* Protocol version number, must be zero. */
6466 Elf32_Word version;
6467 /* Pointer to the DSBT table, its size, and the DSBT index. */
6468 unsigned *dsbt_table;
6469 unsigned dsbt_size, dsbt_index;
6470 /* Number of segments in this map. */
6471 Elf32_Word nsegs;
6472 /* The actual memory map. */
6473 struct target_loadseg segs[/*nsegs*/];
6474};
6475# define LINUX_LOADMAP PT_GETDSBT
6476# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6477# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6478# else
6479struct target_loadmap
6480{
6481 /* Protocol version number, must be zero. */
6482 Elf32_Half version;
6483 /* Number of segments in this map. */
6484 Elf32_Half nsegs;
6485 /* The actual memory map. */
6486 struct target_loadseg segs[/*nsegs*/];
6487};
6488# define LINUX_LOADMAP PTRACE_GETFDPIC
6489# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6490# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6491# endif
6492
6493static int
6494linux_read_loadmap (const char *annex, CORE_ADDR offset,
6495 unsigned char *myaddr, unsigned int len)
6496{
6497 int pid = lwpid_of (current_thread);
6498 int addr = -1;
6499 struct target_loadmap *data = NULL;
6500 unsigned int actual_length, copy_length;
6501
6502 if (strcmp (annex, "exec") == 0)
6503 addr = (int) LINUX_LOADMAP_EXEC;
6504 else if (strcmp (annex, "interp") == 0)
6505 addr = (int) LINUX_LOADMAP_INTERP;
6506 else
6507 return -1;
6508
6509 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6510 return -1;
6511
6512 if (data == NULL)
6513 return -1;
6514
6515 actual_length = sizeof (struct target_loadmap)
6516 + sizeof (struct target_loadseg) * data->nsegs;
6517
6518 if (offset < 0 || offset > actual_length)
6519 return -1;
6520
6521 copy_length = actual_length - offset < len ? actual_length - offset : len;
6522 memcpy (myaddr, (char *) data + offset, copy_length);
6523 return copy_length;
6524}
6525#else
6526# define linux_read_loadmap NULL
6527#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6528
6529static void
6530linux_process_qsupported (char **features, int count)
6531{
6532 if (the_low_target.process_qsupported != NULL)
6533 the_low_target.process_qsupported (features, count);
6534}
6535
6536static int
6537linux_supports_catch_syscall (void)
6538{
6539 return (the_low_target.get_syscall_trapinfo != NULL
6540 && linux_supports_tracesysgood ());
6541}
6542
6543static int
6544linux_get_ipa_tdesc_idx (void)
6545{
6546 if (the_low_target.get_ipa_tdesc_idx == NULL)
6547 return 0;
6548
6549 return (*the_low_target.get_ipa_tdesc_idx) ();
6550}
6551
6552static int
6553linux_supports_tracepoints (void)
6554{
6555 if (the_low_target.supports_tracepoints == NULL)
6556 return 0;
6557
6558 return (*the_low_target.supports_tracepoints) ();
6559}
6560
6561static CORE_ADDR
6562linux_read_pc (struct regcache *regcache)
6563{
6564 if (the_low_target.get_pc == NULL)
6565 return 0;
6566
6567 return (*the_low_target.get_pc) (regcache);
6568}
6569
6570static void
6571linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6572{
6573 gdb_assert (the_low_target.set_pc != NULL);
6574
6575 (*the_low_target.set_pc) (regcache, pc);
6576}
6577
6578static int
6579linux_thread_stopped (struct thread_info *thread)
6580{
6581 return get_thread_lwp (thread)->stopped;
6582}
6583
6584/* This exposes stop-all-threads functionality to other modules. */
6585
6586static void
6587linux_pause_all (int freeze)
6588{
6589 stop_all_lwps (freeze, NULL);
6590}
6591
6592/* This exposes unstop-all-threads functionality to other gdbserver
6593 modules. */
6594
6595static void
6596linux_unpause_all (int unfreeze)
6597{
6598 unstop_all_lwps (unfreeze, NULL);
6599}
6600
6601static int
6602linux_prepare_to_access_memory (void)
6603{
6604 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6605 running LWP. */
6606 if (non_stop)
6607 linux_pause_all (1);
6608 return 0;
6609}
6610
6611static void
6612linux_done_accessing_memory (void)
6613{
6614 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6615 running LWP. */
6616 if (non_stop)
6617 linux_unpause_all (1);
6618}
6619
6620static int
6621linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6622 CORE_ADDR collector,
6623 CORE_ADDR lockaddr,
6624 ULONGEST orig_size,
6625 CORE_ADDR *jump_entry,
6626 CORE_ADDR *trampoline,
6627 ULONGEST *trampoline_size,
6628 unsigned char *jjump_pad_insn,
6629 ULONGEST *jjump_pad_insn_size,
6630 CORE_ADDR *adjusted_insn_addr,
6631 CORE_ADDR *adjusted_insn_addr_end,
6632 char *err)
6633{
6634 return (*the_low_target.install_fast_tracepoint_jump_pad)
6635 (tpoint, tpaddr, collector, lockaddr, orig_size,
6636 jump_entry, trampoline, trampoline_size,
6637 jjump_pad_insn, jjump_pad_insn_size,
6638 adjusted_insn_addr, adjusted_insn_addr_end,
6639 err);
6640}
6641
6642static struct emit_ops *
6643linux_emit_ops (void)
6644{
6645 if (the_low_target.emit_ops != NULL)
6646 return (*the_low_target.emit_ops) ();
6647 else
6648 return NULL;
6649}
6650
6651static int
6652linux_get_min_fast_tracepoint_insn_len (void)
6653{
6654 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6655}
6656
6657/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6658
6659static int
6660get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6661 CORE_ADDR *phdr_memaddr, int *num_phdr)
6662{
6663 char filename[PATH_MAX];
6664 int fd;
6665 const int auxv_size = is_elf64
6666 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6667 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6668
6669 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6670
6671 fd = open (filename, O_RDONLY);
6672 if (fd < 0)
6673 return 1;
6674
6675 *phdr_memaddr = 0;
6676 *num_phdr = 0;
6677 while (read (fd, buf, auxv_size) == auxv_size
6678 && (*phdr_memaddr == 0 || *num_phdr == 0))
6679 {
6680 if (is_elf64)
6681 {
6682 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6683
6684 switch (aux->a_type)
6685 {
6686 case AT_PHDR:
6687 *phdr_memaddr = aux->a_un.a_val;
6688 break;
6689 case AT_PHNUM:
6690 *num_phdr = aux->a_un.a_val;
6691 break;
6692 }
6693 }
6694 else
6695 {
6696 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6697
6698 switch (aux->a_type)
6699 {
6700 case AT_PHDR:
6701 *phdr_memaddr = aux->a_un.a_val;
6702 break;
6703 case AT_PHNUM:
6704 *num_phdr = aux->a_un.a_val;
6705 break;
6706 }
6707 }
6708 }
6709
6710 close (fd);
6711
6712 if (*phdr_memaddr == 0 || *num_phdr == 0)
6713 {
6714 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6715 "phdr_memaddr = %ld, phdr_num = %d",
6716 (long) *phdr_memaddr, *num_phdr);
6717 return 2;
6718 }
6719
6720 return 0;
6721}
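
/* Editor's note (not in the original source): /proc/PID/auxv is a flat
   array of { a_type; a_un.a_val } records (16 bytes each for ELF64,
   8 bytes each for ELF32), terminated by an AT_NULL (0) entry.  A
   64-bit vector might contain, among others:
     a_type = AT_PHDR  (3), a_val = 0x555555554040
     a_type = AT_PHNUM (5), a_val = 13
   which is exactly what the loop above extracts.  (Values here are
   illustrative only.) */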
6722
6723/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6724
6725static CORE_ADDR
6726get_dynamic (const int pid, const int is_elf64)
6727{
6728 CORE_ADDR phdr_memaddr, relocation;
6729 int num_phdr, i;
6730 unsigned char *phdr_buf;
6731 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6732
6733 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6734 return 0;
6735
6736 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6737 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6738
6739 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6740 return 0;
6741
6742 /* Compute relocation: it is expected to be 0 for "regular" executables,
6743 non-zero for PIE ones. */
6744 relocation = -1;
6745 for (i = 0; relocation == -1 && i < num_phdr; i++)
6746 if (is_elf64)
6747 {
6748 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6749
6750 if (p->p_type == PT_PHDR)
6751 relocation = phdr_memaddr - p->p_vaddr;
6752 }
6753 else
6754 {
6755 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6756
6757 if (p->p_type == PT_PHDR)
6758 relocation = phdr_memaddr - p->p_vaddr;
6759 }
6760
6761 if (relocation == -1)
6762 {
6763 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6764 all real-world executables, including PIE executables, always have
6765 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6766 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6767 provides DT_DEBUG anyway (fpc binaries are statically linked).
6768
6769 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6770
6771 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6772
6773 return 0;
6774 }
6775
6776 for (i = 0; i < num_phdr; i++)
6777 {
6778 if (is_elf64)
6779 {
6780 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6781
6782 if (p->p_type == PT_DYNAMIC)
6783 return p->p_vaddr + relocation;
6784 }
6785 else
6786 {
6787 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6788
6789 if (p->p_type == PT_DYNAMIC)
6790 return p->p_vaddr + relocation;
6791 }
6792 }
6793
6794 return 0;
6795}
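
/* Editor's note (not in the original source): a worked example of the
   relocation computed in get_dynamic.  Suppose a PIE records PT_PHDR
   with p_vaddr = 0x40 while the auxv reports AT_PHDR = 0x555555554040.
   Then relocation = 0x555555554040 - 0x40 = 0x555555554000, and a
   PT_DYNAMIC with p_vaddr = 0x2e00 yields &_DYNAMIC = 0x555555556e00.
   For a non-PIE executable the two values coincide and relocation is
   0.  (Addresses here are illustrative only.) */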
6796
6797/* Return &_r_debug in the inferior, or -1 if not present. Return value
6798 can be 0 if the inferior does not yet have the library list initialized.
6799 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6800 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6801
6802static CORE_ADDR
6803get_r_debug (const int pid, const int is_elf64)
6804{
6805 CORE_ADDR dynamic_memaddr;
6806 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6807 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6808 CORE_ADDR map = -1;
6809
6810 dynamic_memaddr = get_dynamic (pid, is_elf64);
6811 if (dynamic_memaddr == 0)
6812 return map;
6813
6814 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6815 {
6816 if (is_elf64)
6817 {
6818 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6819#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6820 union
6821 {
6822 Elf64_Xword map;
6823 unsigned char buf[sizeof (Elf64_Xword)];
6824 }
6825 rld_map;
6826#endif
6827#ifdef DT_MIPS_RLD_MAP
6828 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6829 {
6830 if (linux_read_memory (dyn->d_un.d_val,
6831 rld_map.buf, sizeof (rld_map.buf)) == 0)
6832 return rld_map.map;
6833 else
6834 break;
6835 }
6836#endif /* DT_MIPS_RLD_MAP */
6837#ifdef DT_MIPS_RLD_MAP_REL
6838 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6839 {
6840 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6841 rld_map.buf, sizeof (rld_map.buf)) == 0)
6842 return rld_map.map;
6843 else
6844 break;
6845 }
6846#endif /* DT_MIPS_RLD_MAP_REL */
6847
6848 if (dyn->d_tag == DT_DEBUG && map == -1)
6849 map = dyn->d_un.d_val;
6850
6851 if (dyn->d_tag == DT_NULL)
6852 break;
6853 }
6854 else
6855 {
6856 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6857#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6858 union
6859 {
6860 Elf32_Word map;
6861 unsigned char buf[sizeof (Elf32_Word)];
6862 }
6863 rld_map;
6864#endif
6865#ifdef DT_MIPS_RLD_MAP
6866 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6867 {
6868 if (linux_read_memory (dyn->d_un.d_val,
6869 rld_map.buf, sizeof (rld_map.buf)) == 0)
6870 return rld_map.map;
6871 else
6872 break;
6873 }
6874#endif /* DT_MIPS_RLD_MAP */
6875#ifdef DT_MIPS_RLD_MAP_REL
6876 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6877 {
6878 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6879 rld_map.buf, sizeof (rld_map.buf)) == 0)
6880 return rld_map.map;
6881 else
6882 break;
6883 }
6884#endif /* DT_MIPS_RLD_MAP_REL */
6885
6886 if (dyn->d_tag == DT_DEBUG && map == -1)
6887 map = dyn->d_un.d_val;
6888
6889 if (dyn->d_tag == DT_NULL)
6890 break;
6891 }
6892
6893 dynamic_memaddr += dyn_size;
6894 }
6895
6896 return map;
6897}
6898
6899/* Read one pointer from MEMADDR in the inferior. */
6900
6901static int
6902read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6903{
6904 int ret;
6905
6906 /* Go through a union so this works on either big or little endian
6907 hosts, when the inferior's pointer size is smaller than the size
6908 of CORE_ADDR. It is assumed that the inferior's endianness is the
6909 same as the superior's. */
6910 union
6911 {
6912 CORE_ADDR core_addr;
6913 unsigned int ui;
6914 unsigned char uc;
6915 } addr;
6916
6917 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6918 if (ret == 0)
6919 {
6920 if (ptr_size == sizeof (CORE_ADDR))
6921 *ptr = addr.core_addr;
6922 else if (ptr_size == sizeof (unsigned int))
6923 *ptr = addr.ui;
6924 else
6925 gdb_assert_not_reached ("unhandled pointer size");
6926 }
6927 return ret;
6928}
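
/* Editor's note (not in the original source): why the union in
   read_one_ptr is needed.  On a 64-bit little-endian host reading a
   4-byte inferior pointer 0x08048000, linux_read_memory fills only the
   first four bytes of the union (00 80 04 08); reading them back
   through addr.ui yields 0x08048000 as an unsigned int, which then
   widens cleanly to CORE_ADDR.  Reading through addr.core_addr
   directly would mix in four uninitialized bytes. */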
6929
6930struct link_map_offsets
6931 {
6932 /* Offset and size of r_debug.r_version. */
6933 int r_version_offset;
6934
6935 /* Offset and size of r_debug.r_map. */
6936 int r_map_offset;
6937
6938 /* Offset to l_addr field in struct link_map. */
6939 int l_addr_offset;
6940
6941 /* Offset to l_name field in struct link_map. */
6942 int l_name_offset;
6943
6944 /* Offset to l_ld field in struct link_map. */
6945 int l_ld_offset;
6946
6947 /* Offset to l_next field in struct link_map. */
6948 int l_next_offset;
6949
6950 /* Offset to l_prev field in struct link_map. */
6951 int l_prev_offset;
6952 };
6953
6954/* Construct qXfer:libraries-svr4:read reply. */
6955
6956static int
6957linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6958 unsigned const char *writebuf,
6959 CORE_ADDR offset, int len)
6960{
6961 char *document;
6962 unsigned document_len;
6963 struct process_info_private *const priv = current_process ()->priv;
6964 char filename[PATH_MAX];
6965 int pid, is_elf64;
6966
6967 static const struct link_map_offsets lmo_32bit_offsets =
6968 {
6969 0, /* r_version offset. */
6970 4, /* r_debug.r_map offset. */
6971 0, /* l_addr offset in link_map. */
6972 4, /* l_name offset in link_map. */
6973 8, /* l_ld offset in link_map. */
6974 12, /* l_next offset in link_map. */
6975 16 /* l_prev offset in link_map. */
6976 };
6977
6978 static const struct link_map_offsets lmo_64bit_offsets =
6979 {
6980 0, /* r_version offset. */
6981 8, /* r_debug.r_map offset. */
6982 0, /* l_addr offset in link_map. */
6983 8, /* l_name offset in link_map. */
6984 16, /* l_ld offset in link_map. */
6985 24, /* l_next offset in link_map. */
6986 32 /* l_prev offset in link_map. */
6987 };
6988 const struct link_map_offsets *lmo;
6989 unsigned int machine;
6990 int ptr_size;
6991 CORE_ADDR lm_addr = 0, lm_prev = 0;
6992 int allocated = 1024;
6993 char *p;
6994 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6995 int header_done = 0;
6996
6997 if (writebuf != NULL)
6998 return -2;
6999 if (readbuf == NULL)
7000 return -1;
7001
7002 pid = lwpid_of (current_thread);
7003 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7004 is_elf64 = elf_64_file_p (filename, &machine);
7005 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7006 ptr_size = is_elf64 ? 8 : 4;
7007
7008 while (annex[0] != '\0')
7009 {
7010 const char *sep;
7011 CORE_ADDR *addrp;
7012 int len;
7013
7014 sep = strchr (annex, '=');
7015 if (sep == NULL)
7016 break;
7017
7018 len = sep - annex;
7019 if (len == 5 && startswith (annex, "start"))
7020 addrp = &lm_addr;
7021 else if (len == 4 && startswith (annex, "prev"))
7022 addrp = &lm_prev;
7023 else
7024 {
7025 annex = strchr (sep, ';');
7026 if (annex == NULL)
7027 break;
7028 annex++;
7029 continue;
7030 }
7031
7032 annex = decode_address_to_semicolon (addrp, sep + 1);
7033 }
7034
7035 if (lm_addr == 0)
7036 {
7037 int r_version = 0;
7038
7039 if (priv->r_debug == 0)
7040 priv->r_debug = get_r_debug (pid, is_elf64);
7041
7042 /* We failed to find DT_DEBUG. This situation will not change
7043 for this inferior, so do not retry. Report it to GDB as
7044 E01; see GDB's solib-svr4.c for the reasons. */
7045 if (priv->r_debug == (CORE_ADDR) -1)
7046 return -1;
7047
7048 if (priv->r_debug != 0)
7049 {
7050 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7051 (unsigned char *) &r_version,
7052 sizeof (r_version)) != 0
7053 || r_version != 1)
7054 {
7055 warning ("unexpected r_debug version %d", r_version);
7056 }
7057 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7058 &lm_addr, ptr_size) != 0)
7059 {
7060 warning ("unable to read r_map from 0x%lx",
7061 (long) priv->r_debug + lmo->r_map_offset);
7062 }
7063 }
7064 }
7065
7066 document = (char *) xmalloc (allocated);
7067 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7068 p = document + strlen (document);
7069
7070 while (lm_addr
7071 && read_one_ptr (lm_addr + lmo->l_name_offset,
7072 &l_name, ptr_size) == 0
7073 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7074 &l_addr, ptr_size) == 0
7075 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7076 &l_ld, ptr_size) == 0
7077 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7078 &l_prev, ptr_size) == 0
7079 && read_one_ptr (lm_addr + lmo->l_next_offset,
7080 &l_next, ptr_size) == 0)
7081 {
7082 unsigned char libname[PATH_MAX];
7083
7084 if (lm_prev != l_prev)
7085 {
7086 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7087 (long) lm_prev, (long) l_prev);
7088 break;
7089 }
7090
7091 /* Ignore the first entry even if it has a valid name, as the first
7092 entry corresponds to the main executable. The first entry should not
7093 be skipped if the dynamic loader was loaded late by a static executable
7094 (see the solib-svr4.c parameter ignore_first). But in that case the
7095 main executable has no PT_DYNAMIC present, and this function has
7096 already exited above due to a failed get_r_debug. */
7097 if (lm_prev == 0)
7098 {
7099 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7100 p = p + strlen (p);
7101 }
7102 else
7103 {
7104 /* Not checking for error because reading may stop before
7105 we've got PATH_MAX worth of characters. */
7106 libname[0] = '\0';
7107 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7108 libname[sizeof (libname) - 1] = '\0';
7109 if (libname[0] != '\0')
7110 {
7111 /* 6x the size for xml_escape_text below. */
7112 size_t len = 6 * strlen ((char *) libname);
7113 char *name;
7114
7115 if (!header_done)
7116 {
7117 /* Terminate `<library-list-svr4'. */
7118 *p++ = '>';
7119 header_done = 1;
7120 }
7121
7122 while (allocated < p - document + len + 200)
7123 {
7124 /* Expand to guarantee sufficient storage. */
7125 uintptr_t document_len = p - document;
7126
7127 document = (char *) xrealloc (document, 2 * allocated);
7128 allocated *= 2;
7129 p = document + document_len;
7130 }
7131
7132 name = xml_escape_text ((char *) libname);
7133 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7134 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7135 name, (unsigned long) lm_addr,
7136 (unsigned long) l_addr, (unsigned long) l_ld);
7137 free (name);
7138 }
7139 }
7140
7141 lm_prev = lm_addr;
7142 lm_addr = l_next;
7143 }
7144
7145 if (!header_done)
7146 {
7147 /* Empty list; terminate `<library-list-svr4'. */
7148 strcpy (p, "/>");
7149 }
7150 else
7151 strcpy (p, "</library-list-svr4>");
7152
7153 document_len = strlen (document);
7154 if (offset < document_len)
7155 document_len -= offset;
7156 else
7157 document_len = 0;
7158 if (len > document_len)
7159 len = document_len;
7160
7161 memcpy (readbuf, document + offset, len);
7162 xfree (document);
7163
7164 return len;
7165}
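
/* Editor's note (not in the original source): a hypothetical reply
   built by linux_qxfer_libraries_svr4 for an inferior with a single
   shared library (all addresses illustrative only):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
       l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdba0"/>
     </library-list-svr4>  */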
7166
7167#ifdef HAVE_LINUX_BTRACE
7168
7169/* See to_disable_btrace target method. */
7170
7171static int
7172linux_low_disable_btrace (struct btrace_target_info *tinfo)
7173{
7174 enum btrace_error err;
7175
7176 err = linux_disable_btrace (tinfo);
7177 return (err == BTRACE_ERR_NONE ? 0 : -1);
7178}
7179
7180/* Encode an Intel Processor Trace configuration. */
7181
7182static void
7183linux_low_encode_pt_config (struct buffer *buffer,
7184 const struct btrace_data_pt_config *config)
7185{
7186 buffer_grow_str (buffer, "<pt-config>\n");
7187
7188 switch (config->cpu.vendor)
7189 {
7190 case CV_INTEL:
7191 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7192 "model=\"%u\" stepping=\"%u\"/>\n",
7193 config->cpu.family, config->cpu.model,
7194 config->cpu.stepping);
7195 break;
7196
7197 default:
7198 break;
7199 }
7200
7201 buffer_grow_str (buffer, "</pt-config>\n");
7202}
7203
7204/* Encode a raw buffer. */
7205
7206static void
7207linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7208 unsigned int size)
7209{
7210 if (size == 0)
7211 return;
7212
7213 /* We use hex encoding - see common/rsp-low.h. */
7214 buffer_grow_str (buffer, "<raw>\n");
7215
7216 while (size-- > 0)
7217 {
7218 char elem[2];
7219
7220 elem[0] = tohex ((*data >> 4) & 0xf);
7221 elem[1] = tohex (*data++ & 0xf);
7222
7223 buffer_grow (buffer, elem, 2);
7224 }
7225
7226 buffer_grow_str (buffer, "</raw>\n");
7227}
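
/* Editor's note (not in the original source): linux_low_encode_raw
   emits two hex digits per byte, high nibble first, with no
   separators.  The bytes { 0xde, 0xad, 0xbe, 0xef } thus become the
   eight characters "deadbeef" inside the <raw> element. */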
7228
7229/* See to_read_btrace target method. */
7230
7231static int
7232linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7233 enum btrace_read_type type)
7234{
7235 struct btrace_data btrace;
7236 struct btrace_block *block;
7237 enum btrace_error err;
7238 int i;
7239
7240 btrace_data_init (&btrace);
7241
7242 err = linux_read_btrace (&btrace, tinfo, type);
7243 if (err != BTRACE_ERR_NONE)
7244 {
7245 if (err == BTRACE_ERR_OVERFLOW)
7246 buffer_grow_str0 (buffer, "E.Overflow.");
7247 else
7248 buffer_grow_str0 (buffer, "E.Generic Error.");
7249
7250 goto err;
7251 }
7252
7253 switch (btrace.format)
7254 {
7255 case BTRACE_FORMAT_NONE:
7256 buffer_grow_str0 (buffer, "E.No Trace.");
7257 goto err;
7258
7259 case BTRACE_FORMAT_BTS:
7260 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7261 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7262
7263 for (i = 0;
7264 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7265 i++)
7266 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7267 paddress (block->begin), paddress (block->end));
7268
7269 buffer_grow_str0 (buffer, "</btrace>\n");
7270 break;
7271
7272 case BTRACE_FORMAT_PT:
7273 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7274 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7275 buffer_grow_str (buffer, "<pt>\n");
7276
7277 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7278
7279 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7280 btrace.variant.pt.size);
7281
7282 buffer_grow_str (buffer, "</pt>\n");
7283 buffer_grow_str0 (buffer, "</btrace>\n");
7284 break;
7285
7286 default:
7287 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7288 goto err;
7289 }
7290
7291 btrace_data_fini (&btrace);
7292 return 0;
7293
7294err:
7295 btrace_data_fini (&btrace);
7296 return -1;
7297}
7298
7299/* See to_btrace_conf target method. */
7300
7301static int
7302linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7303 struct buffer *buffer)
7304{
7305 const struct btrace_config *conf;
7306
7307 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7308 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7309
7310 conf = linux_btrace_conf (tinfo);
7311 if (conf != NULL)
7312 {
7313 switch (conf->format)
7314 {
7315 case BTRACE_FORMAT_NONE:
7316 break;
7317
7318 case BTRACE_FORMAT_BTS:
7319 buffer_xml_printf (buffer, "<bts");
7320 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7321 buffer_xml_printf (buffer, " />\n");
7322 break;
7323
7324 case BTRACE_FORMAT_PT:
7325 buffer_xml_printf (buffer, "<pt");
7326 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7327 buffer_xml_printf (buffer, "/>\n");
7328 break;
7329 }
7330 }
7331
7332 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7333 return 0;
7334}
7335#endif /* HAVE_LINUX_BTRACE */
7336
7337/* See nat/linux-nat.h. */
7338
7339ptid_t
7340current_lwp_ptid (void)
7341{
7342 return ptid_of (current_thread);
7343}
7344
7345/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7346
7347static int
7348linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7349{
7350 if (the_low_target.breakpoint_kind_from_pc != NULL)
7351 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7352 else
7353 return default_breakpoint_kind_from_pc (pcptr);
7354}
7355
7356/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7357
7358static const gdb_byte *
7359linux_sw_breakpoint_from_kind (int kind, int *size)
7360{
7361 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7362
7363 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7364}
7365
7366/* Implementation of the target_ops method
7367 "breakpoint_kind_from_current_state". */
7368
7369static int
7370linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7371{
7372 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7373 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7374 else
7375 return linux_breakpoint_kind_from_pc (pcptr);
7376}
7377
7378/* Default implementation of linux_target_ops method "set_pc" for
7379 32-bit pc register which is literally named "pc". */
7380
7381void
7382linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7383{
7384 uint32_t newpc = pc;
7385
7386 supply_register_by_name (regcache, "pc", &newpc);
7387}
7388
7389/* Default implementation of linux_target_ops method "get_pc" for
7390 32-bit pc register which is literally named "pc". */
7391
7392CORE_ADDR
7393linux_get_pc_32bit (struct regcache *regcache)
7394{
7395 uint32_t pc;
7396
7397 collect_register_by_name (regcache, "pc", &pc);
7398 if (debug_threads)
7399 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7400 return pc;
7401}
7402
7403/* Default implementation of linux_target_ops method "set_pc" for
7404 64-bit pc register which is literally named "pc". */
7405
7406void
7407linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7408{
7409 uint64_t newpc = pc;
7410
7411 supply_register_by_name (regcache, "pc", &newpc);
7412}
7413
7414/* Default implementation of linux_target_ops method "get_pc" for
7415 64-bit pc register which is literally named "pc". */
7416
7417CORE_ADDR
7418linux_get_pc_64bit (struct regcache *regcache)
7419{
7420 uint64_t pc;
7421
7422 collect_register_by_name (regcache, "pc", &pc);
7423 if (debug_threads)
7424 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7425 return pc;
7426}
7427
7428
7429static struct target_ops linux_target_ops = {
7430 linux_create_inferior,
7431 linux_post_create_inferior,
7432 linux_attach,
7433 linux_kill,
7434 linux_detach,
7435 linux_mourn,
7436 linux_join,
7437 linux_thread_alive,
7438 linux_resume,
7439 linux_wait,
7440 linux_fetch_registers,
7441 linux_store_registers,
7442 linux_prepare_to_access_memory,
7443 linux_done_accessing_memory,
7444 linux_read_memory,
7445 linux_write_memory,
7446 linux_look_up_symbols,
7447 linux_request_interrupt,
7448 linux_read_auxv,
7449 linux_supports_z_point_type,
7450 linux_insert_point,
7451 linux_remove_point,
7452 linux_stopped_by_sw_breakpoint,
7453 linux_supports_stopped_by_sw_breakpoint,
7454 linux_stopped_by_hw_breakpoint,
7455 linux_supports_stopped_by_hw_breakpoint,
7456 linux_supports_hardware_single_step,
7457 linux_stopped_by_watchpoint,
7458 linux_stopped_data_address,
7459#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7460 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7461 && defined(PT_TEXT_END_ADDR)
7462 linux_read_offsets,
7463#else
7464 NULL,
7465#endif
7466#ifdef USE_THREAD_DB
7467 thread_db_get_tls_address,
7468#else
7469 NULL,
7470#endif
7471 linux_qxfer_spu,
7472 hostio_last_error_from_errno,
7473 linux_qxfer_osdata,
7474 linux_xfer_siginfo,
7475 linux_supports_non_stop,
7476 linux_async,
7477 linux_start_non_stop,
7478 linux_supports_multi_process,
7479 linux_supports_fork_events,
7480 linux_supports_vfork_events,
7481 linux_supports_exec_events,
7482 linux_handle_new_gdb_connection,
7483#ifdef USE_THREAD_DB
7484 thread_db_handle_monitor_command,
7485#else
7486 NULL,
7487#endif
7488 linux_common_core_of_thread,
7489 linux_read_loadmap,
7490 linux_process_qsupported,
7491 linux_supports_tracepoints,
7492 linux_read_pc,
7493 linux_write_pc,
7494 linux_thread_stopped,
7495 NULL,
7496 linux_pause_all,
7497 linux_unpause_all,
7498 linux_stabilize_threads,
7499 linux_install_fast_tracepoint_jump_pad,
7500 linux_emit_ops,
7501 linux_supports_disable_randomization,
7502 linux_get_min_fast_tracepoint_insn_len,
7503 linux_qxfer_libraries_svr4,
7504 linux_supports_agent,
7505#ifdef HAVE_LINUX_BTRACE
7506 linux_supports_btrace,
7507 linux_enable_btrace,
7508 linux_low_disable_btrace,
7509 linux_low_read_btrace,
7510 linux_low_btrace_conf,
7511#else
7512 NULL,
7513 NULL,
7514 NULL,
7515 NULL,
7516 NULL,
7517#endif
7518 linux_supports_range_stepping,
7519 linux_proc_pid_to_exec_file,
7520 linux_mntns_open_cloexec,
7521 linux_mntns_unlink,
7522 linux_mntns_readlink,
7523 linux_breakpoint_kind_from_pc,
7524 linux_sw_breakpoint_from_kind,
7525 linux_proc_tid_get_name,
7526 linux_breakpoint_kind_from_current_state,
7527 linux_supports_software_single_step,
7528 linux_supports_catch_syscall,
7529 linux_get_ipa_tdesc_idx,
7530};
7531
7532#ifdef HAVE_LINUX_REGSETS
7533void
7534initialize_regsets_info (struct regsets_info *info)
7535{
7536 for (info->num_regsets = 0;
7537 info->regsets[info->num_regsets].size >= 0;
7538 info->num_regsets++)
7539 ;
7540}
7541#endif
7542
7543void
7544initialize_low (void)
7545{
7546 struct sigaction sigchld_action;
7547
7548 memset (&sigchld_action, 0, sizeof (sigchld_action));
7549 set_target_ops (&linux_target_ops);
7550
7551 linux_ptrace_init_warnings ();
7552
7553 sigchld_action.sa_handler = sigchld_handler;
7554 sigemptyset (&sigchld_action.sa_mask);
7555 sigchld_action.sa_flags = SA_RESTART;
7556 sigaction (SIGCHLD, &sigchld_action, NULL);
7557
7558 initialize_low_arch ();
7559
7560 linux_check_ptrace_features ();
7561}