1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN has defined these since at least 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
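/* A usage sketch (illustration only, not part of the original file):
   the wait code stashes the initial stop of a PID it does not know
   yet, and the fork/clone event handler claims it later:

     add_to_pid_list (&stopped_pids, new_pid, status);
     ...
     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       { ... STATUS holds the stop status collected earlier ... }  */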
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
256 int *wstat, int options);
257 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
258 static struct lwp_info *add_lwp (ptid_t ptid);
259 static void linux_mourn (struct process_info *process);
260 static int linux_stopped_by_watchpoint (void);
261 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
262 static int lwp_is_marked_dead (struct lwp_info *lwp);
263 static void proceed_all_lwps (void);
264 static int finish_step_over (struct lwp_info *lwp);
265 static int kill_lwp (unsigned long lwpid, int signo);
266 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
267 static void complete_ongoing_step_over (void);
268 static int linux_low_ptrace_options (int attached);
269
270 /* When the event-loop is doing a step-over, this points at the thread
271 being stepped. */
272 ptid_t step_over_bkpt;
273
274 /* True if the low target can hardware single-step. */
275
276 static int
277 can_hardware_single_step (void)
278 {
279 if (the_low_target.supports_hardware_single_step != NULL)
280 return the_low_target.supports_hardware_single_step ();
281 else
282 return 0;
283 }
284
285 /* True if the low target can software single-step. Such targets
286 implement the GET_NEXT_PCS callback. */
287
288 static int
289 can_software_single_step (void)
290 {
291 return (the_low_target.get_next_pcs != NULL);
292 }
293
294 /* True if the low target supports memory breakpoints. If so, we'll
295 have a GET_PC implementation. */
296
297 static int
298 supports_breakpoints (void)
299 {
300 return (the_low_target.get_pc != NULL);
301 }
302
303 /* Returns true if this target can support fast tracepoints. This
304 does not mean that the in-process agent has been loaded in the
305 inferior. */
306
307 static int
308 supports_fast_tracepoints (void)
309 {
310 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
311 }
312
313 /* True if LWP is stopped in its stepping range. */
314
315 static int
316 lwp_in_step_range (struct lwp_info *lwp)
317 {
318 CORE_ADDR pc = lwp->stop_pc;
319
320 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
321 }
322
323 struct pending_signals
324 {
325 int signal;
326 siginfo_t info;
327 struct pending_signals *prev;
328 };
329
330 /* The read/write ends of the pipe registered as waitable file in the
331 event loop. */
332 static int linux_event_pipe[2] = { -1, -1 };
333
334 /* True if we're currently in async mode. */
335 #define target_is_async_p() (linux_event_pipe[0] != -1)
336
337 static void send_sigstop (struct lwp_info *lwp);
338 static void wait_for_sigstop (void);
339
340 /* Return 1 if HEADER is a 64-bit ELF file, 0 for another ELF class, and -1 if not ELF at all; set *MACHINE to the ELF machine type (EM_NONE if not ELF). */
341
342 static int
343 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
344 {
345 if (header->e_ident[EI_MAG0] == ELFMAG0
346 && header->e_ident[EI_MAG1] == ELFMAG1
347 && header->e_ident[EI_MAG2] == ELFMAG2
348 && header->e_ident[EI_MAG3] == ELFMAG3)
349 {
350 *machine = header->e_machine;
351 return header->e_ident[EI_CLASS] == ELFCLASS64;
353 }
354 *machine = EM_NONE;
355 return -1;
356 }
357
358 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is ELF of another
359 class (or its header cannot be read), and -1 if it cannot be
360 opened or is not an ELF file at all. */
361
362 static int
363 elf_64_file_p (const char *file, unsigned int *machine)
364 {
365 Elf64_Ehdr header;
366 int fd;
367
368 fd = open (file, O_RDONLY);
369 if (fd < 0)
370 return -1;
371
372 if (read (fd, &header, sizeof (header)) != sizeof (header))
373 {
374 close (fd);
375 return 0;
376 }
377 close (fd);
378
379 return elf_64_header_p (&header, machine);
380 }
381
382 /* Accept an integer PID; return true if the executable that PID is
383 running is a 64-bit ELF file. */
384
385 int
386 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
387 {
388 char file[PATH_MAX];
389
390 sprintf (file, "/proc/%d/exe", pid);
391 return elf_64_file_p (file, machine);
392 }
393
394 static void
395 delete_lwp (struct lwp_info *lwp)
396 {
397 struct thread_info *thr = get_lwp_thread (lwp);
398
399 if (debug_threads)
400 debug_printf ("deleting %ld\n", lwpid_of (thr));
401
402 remove_thread (thr);
403 free (lwp->arch_private);
404 free (lwp);
405 }
406
407 /* Add a process to the common process list, and set its private
408 data. */
409
410 static struct process_info *
411 linux_add_process (int pid, int attached)
412 {
413 struct process_info *proc;
414
415 proc = add_process (pid, attached);
416 proc->priv = XCNEW (struct process_info_private);
417
418 if (the_low_target.new_process != NULL)
419 proc->priv->arch_private = the_low_target.new_process ();
420
421 return proc;
422 }
423
424 static CORE_ADDR get_pc (struct lwp_info *lwp);
425
426 /* Call the target arch_setup function on the current thread. */
427
428 static void
429 linux_arch_setup (void)
430 {
431 the_low_target.arch_setup ();
432 }
433
434 /* Call the target arch_setup function on THREAD. */
435
436 static void
437 linux_arch_setup_thread (struct thread_info *thread)
438 {
439 struct thread_info *saved_thread;
440
441 saved_thread = current_thread;
442 current_thread = thread;
443
444 linux_arch_setup ();
445
446 current_thread = saved_thread;
447 }
448
449 /* Handle a GNU/Linux extended wait response. If we see a clone,
450 fork, or vfork event, we need to add the new LWP to our list
451 (and return 0 so as not to report the trap to higher layers).
452 If we see an exec event, we will modify ORIG_EVENT_LWP to point
453 to a new LWP representing the new program. */
454
455 static int
456 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
457 {
458 struct lwp_info *event_lwp = *orig_event_lwp;
459 int event = linux_ptrace_get_extended_event (wstat);
460 struct thread_info *event_thr = get_lwp_thread (event_lwp);
461 struct lwp_info *new_lwp;
462
463 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
464
465 /* All extended events we currently use are mid-syscall. Only
466 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
467 you have to be using PTRACE_SEIZE to get that. */
468 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
469
470 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
471 || (event == PTRACE_EVENT_CLONE))
472 {
473 ptid_t ptid;
474 unsigned long new_pid;
475 int ret, status;
476
477 /* Get the pid of the new lwp. */
478 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
479 &new_pid);
480
481 /* If we haven't already seen the new PID stop, wait for it now. */
482 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
483 {
484 /* The new child has a pending SIGSTOP. We can't affect it until it
485 hits the SIGSTOP, but we're already attached. */
486
487 ret = my_waitpid (new_pid, &status, __WALL);
488
489 if (ret == -1)
490 perror_with_name ("waiting for new child");
491 else if (ret != new_pid)
492 warning ("wait returned unexpected PID %d", ret);
493 else if (!WIFSTOPPED (status))
494 warning ("wait returned unexpected status 0x%x", status);
495 }
496
497 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
498 {
499 struct process_info *parent_proc;
500 struct process_info *child_proc;
501 struct lwp_info *child_lwp;
502 struct thread_info *child_thr;
503 struct target_desc *tdesc;
504
505 ptid = ptid_build (new_pid, new_pid, 0);
506
507 if (debug_threads)
508 {
509 debug_printf ("HEW: Got fork event from LWP %ld, "
510 "new child is %d\n",
511 ptid_get_lwp (ptid_of (event_thr)),
512 ptid_get_pid (ptid));
513 }
514
515 /* Add the new process to the tables and clone the breakpoint
516 lists of the parent. We need to do this even if the new process
517 will be detached, since we will need the process object and the
518 breakpoints to remove any breakpoints from memory when we
519 detach, and the client side will access registers. */
520 child_proc = linux_add_process (new_pid, 0);
521 gdb_assert (child_proc != NULL);
522 child_lwp = add_lwp (ptid);
523 gdb_assert (child_lwp != NULL);
524 child_lwp->stopped = 1;
525 child_lwp->must_set_ptrace_flags = 1;
526 child_lwp->status_pending_p = 0;
527 child_thr = get_lwp_thread (child_lwp);
528 child_thr->last_resume_kind = resume_stop;
529 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
530
531 /* If we're suspending all threads, leave this one suspended
532 too. If the fork/clone parent is stepping over a breakpoint,
533 all other threads have been suspended already. Leave the
534 child suspended too. */
535 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
536 || event_lwp->bp_reinsert != 0)
537 {
538 if (debug_threads)
539 debug_printf ("HEW: leaving child suspended\n");
540 child_lwp->suspended = 1;
541 }
542
543 parent_proc = get_thread_process (event_thr);
544 child_proc->attached = parent_proc->attached;
545 clone_all_breakpoints (&child_proc->breakpoints,
546 &child_proc->raw_breakpoints,
547 parent_proc->breakpoints);
548
549 tdesc = XNEW (struct target_desc);
550 copy_target_description (tdesc, parent_proc->tdesc);
551 child_proc->tdesc = tdesc;
552
553 /* Clone arch-specific process data. */
554 if (the_low_target.new_fork != NULL)
555 the_low_target.new_fork (parent_proc, child_proc);
556
557 /* Save fork info in the parent thread. */
558 if (event == PTRACE_EVENT_FORK)
559 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
560 else if (event == PTRACE_EVENT_VFORK)
561 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
562
563 event_lwp->waitstatus.value.related_pid = ptid;
564
565 /* The status_pending field contains bits denoting the
566 extended event, so when the pending event is handled,
567 the handler will look at lwp->waitstatus. */
568 event_lwp->status_pending_p = 1;
569 event_lwp->status_pending = wstat;
570
571 /* Report the event. */
572 return 0;
573 }
574
575 if (debug_threads)
576 debug_printf ("HEW: Got clone event "
577 "from LWP %ld, new child is LWP %ld\n",
578 lwpid_of (event_thr), new_pid);
579
580 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
581 new_lwp = add_lwp (ptid);
582
583 /* Either we're going to immediately resume the new thread
584 or leave it stopped. linux_resume_one_lwp is a nop if it
585 thinks the thread is currently running, so set this first
586 before calling linux_resume_one_lwp. */
587 new_lwp->stopped = 1;
588
589 /* If we're suspending all threads, leave this one suspended
590 too. If the fork/clone parent is stepping over a breakpoint,
591 all other threads have been suspended already. Leave the
592 child suspended too. */
593 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
594 || event_lwp->bp_reinsert != 0)
595 new_lwp->suspended = 1;
596
597 /* Normally we will get the pending SIGSTOP. But in some cases
598 we might get another signal delivered to the group first.
599 If we do get another signal, be sure not to lose it. */
600 if (WSTOPSIG (status) != SIGSTOP)
601 {
602 new_lwp->stop_expected = 1;
603 new_lwp->status_pending_p = 1;
604 new_lwp->status_pending = status;
605 }
606 else if (report_thread_events)
607 {
608 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
609 new_lwp->status_pending_p = 1;
610 new_lwp->status_pending = status;
611 }
612
613 /* Don't report the event. */
614 return 1;
615 }
616 else if (event == PTRACE_EVENT_VFORK_DONE)
617 {
618 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
619
620 /* Report the event. */
621 return 0;
622 }
623 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
624 {
625 struct process_info *proc;
626 VEC (int) *syscalls_to_catch;
627 ptid_t event_ptid;
628 pid_t event_pid;
629
630 if (debug_threads)
631 {
632 debug_printf ("HEW: Got exec event from LWP %ld\n",
633 lwpid_of (event_thr));
634 }
635
636 /* Get the event ptid. */
637 event_ptid = ptid_of (event_thr);
638 event_pid = ptid_get_pid (event_ptid);
639
640 /* Save the syscall list from the execing process. */
641 proc = get_thread_process (event_thr);
642 syscalls_to_catch = proc->syscalls_to_catch;
643 proc->syscalls_to_catch = NULL;
644
645 /* Delete the execing process and all its threads. */
646 linux_mourn (proc);
647 current_thread = NULL;
648
649 /* Create a new process/lwp/thread. */
650 proc = linux_add_process (event_pid, 0);
651 event_lwp = add_lwp (event_ptid);
652 event_thr = get_lwp_thread (event_lwp);
653 gdb_assert (current_thread == event_thr);
654 linux_arch_setup_thread (event_thr);
655
656 /* Set the event status. */
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
658 event_lwp->waitstatus.value.execd_pathname
659 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
660
661 /* Mark the exec status as pending. */
662 event_lwp->stopped = 1;
663 event_lwp->status_pending_p = 1;
664 event_lwp->status_pending = wstat;
665 event_thr->last_resume_kind = resume_continue;
666 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
667
668 /* Update syscall state in the new lwp, effectively mid-syscall too. */
669 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
670
671 /* Restore the list to catch. Don't rely on the client, which is free
672 to avoid sending a new list when the architecture doesn't change.
673 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
674 proc->syscalls_to_catch = syscalls_to_catch;
675
676 /* Report the event. */
677 *orig_event_lwp = event_lwp;
678 return 0;
679 }
680
681 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
682 }
683
684 /* Return the PC as read from the regcache of LWP, without any
685 adjustment. */
686
687 static CORE_ADDR
688 get_pc (struct lwp_info *lwp)
689 {
690 struct thread_info *saved_thread;
691 struct regcache *regcache;
692 CORE_ADDR pc;
693
694 if (the_low_target.get_pc == NULL)
695 return 0;
696
697 saved_thread = current_thread;
698 current_thread = get_lwp_thread (lwp);
699
700 regcache = get_thread_regcache (current_thread, 1);
701 pc = (*the_low_target.get_pc) (regcache);
702
703 if (debug_threads)
704 debug_printf ("pc is 0x%lx\n", (long) pc);
705
706 current_thread = saved_thread;
707 return pc;
708 }
709
710 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
711 Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
712 return code. */
713
714 static void
715 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
716 {
717 struct thread_info *saved_thread;
718 struct regcache *regcache;
719
720 if (the_low_target.get_syscall_trapinfo == NULL)
721 {
722 /* If we cannot get the syscall trapinfo, report an unknown
723 system call number and -ENOSYS return value. */
724 *sysno = UNKNOWN_SYSCALL;
725 *sysret = -ENOSYS;
726 return;
727 }
728
729 saved_thread = current_thread;
730 current_thread = get_lwp_thread (lwp);
731
732 regcache = get_thread_regcache (current_thread, 1);
733 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
734
735 if (debug_threads)
736 {
737 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
738 *sysno, *sysret);
739 }
740
741 current_thread = saved_thread;
742 }
743
744 static int check_stopped_by_watchpoint (struct lwp_info *child);
745
746 /* Called when the LWP stopped for a signal/trap. If it stopped for a
747 trap check what caused it (breakpoint, watchpoint, trace, etc.),
748 and save the result in the LWP's stop_reason field. If it stopped
749 for a breakpoint, decrement the PC if necessary on the lwp's
750 architecture. Returns true if we now have the LWP's stop PC. */
751
752 static int
753 save_stop_reason (struct lwp_info *lwp)
754 {
755 CORE_ADDR pc;
756 CORE_ADDR sw_breakpoint_pc;
757 struct thread_info *saved_thread;
758 #if USE_SIGTRAP_SIGINFO
759 siginfo_t siginfo;
760 #endif
761
762 if (the_low_target.get_pc == NULL)
763 return 0;
764
765 pc = get_pc (lwp);
766 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
767
768 /* breakpoint_at reads from the current thread. */
769 saved_thread = current_thread;
770 current_thread = get_lwp_thread (lwp);
771
772 #if USE_SIGTRAP_SIGINFO
773 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
774 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
775 {
776 if (siginfo.si_signo == SIGTRAP)
777 {
778 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
779 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
780 {
781 /* The si_code is ambiguous on this arch -- check debug
782 registers. */
783 if (!check_stopped_by_watchpoint (lwp))
784 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
785 }
786 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
787 {
788 /* If we determine the LWP stopped for a SW breakpoint,
789 trust it. Particularly don't check watchpoint
790 registers, because at least on s390, we'd find
791 stopped-by-watchpoint as long as there's a watchpoint
792 set. */
793 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
794 }
795 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
796 {
797 /* This can indicate either a hardware breakpoint or
798 hardware watchpoint. Check debug registers. */
799 if (!check_stopped_by_watchpoint (lwp))
800 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
801 }
802 else if (siginfo.si_code == TRAP_TRACE)
803 {
804 /* We may have single stepped an instruction that
805 triggered a watchpoint. In that case, on some
806 architectures (such as x86), instead of TRAP_HWBKPT,
807 si_code indicates TRAP_TRACE, and we need to check
808 the debug registers separately. */
809 if (!check_stopped_by_watchpoint (lwp))
810 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
811 }
812 }
813 }
814 #else
815 /* We may have just stepped a breakpoint instruction. E.g., in
816 non-stop mode, GDB first tells the thread A to step a range, and
817 then the user inserts a breakpoint inside the range. In that
818 case we need to report the breakpoint PC. */
819 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
820 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
821 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
822
823 if (hardware_breakpoint_inserted_here (pc))
824 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
825
826 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
827 check_stopped_by_watchpoint (lwp);
828 #endif
829
830 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
831 {
832 if (debug_threads)
833 {
834 struct thread_info *thr = get_lwp_thread (lwp);
835
836 debug_printf ("CSBB: %s stopped by software breakpoint\n",
837 target_pid_to_str (ptid_of (thr)));
838 }
839
840 /* Back up the PC if necessary. */
841 if (pc != sw_breakpoint_pc)
842 {
843 struct regcache *regcache
844 = get_thread_regcache (current_thread, 1);
845 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
846 }
847
848 /* Update this so we record the correct stop PC below. */
849 pc = sw_breakpoint_pc;
850 }
851 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
852 {
853 if (debug_threads)
854 {
855 struct thread_info *thr = get_lwp_thread (lwp);
856
857 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
858 target_pid_to_str (ptid_of (thr)));
859 }
860 }
861 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
862 {
863 if (debug_threads)
864 {
865 struct thread_info *thr = get_lwp_thread (lwp);
866
867 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
868 target_pid_to_str (ptid_of (thr)));
869 }
870 }
871 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
872 {
873 if (debug_threads)
874 {
875 struct thread_info *thr = get_lwp_thread (lwp);
876
877 debug_printf ("CSBB: %s stopped by trace\n",
878 target_pid_to_str (ptid_of (thr)));
879 }
880 }
881
882 lwp->stop_pc = pc;
883 current_thread = saved_thread;
884 return 1;
885 }
886
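/* Allocate a new lwp_info for PTID, let the low target attach its
   per-thread data, and register the matching thread.  Returns the
   new LWP.  */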
887 static struct lwp_info *
888 add_lwp (ptid_t ptid)
889 {
890 struct lwp_info *lwp;
891
892 lwp = XCNEW (struct lwp_info);
893
894 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
895
896 if (the_low_target.new_thread != NULL)
897 the_low_target.new_thread (lwp);
898
899 lwp->thread = add_thread (ptid, lwp);
900
901 return lwp;
902 }
903
904 /* Start an inferior process and return its pid.
905 ALLARGS is a vector of program-name and args. */
906
907 static int
908 linux_create_inferior (char *program, char **allargs)
909 {
910 struct lwp_info *new_lwp;
911 int pid;
912 ptid_t ptid;
913 struct cleanup *restore_personality
914 = maybe_disable_address_space_randomization (disable_randomization);
915
916 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
917 pid = vfork ();
918 #else
919 pid = fork ();
920 #endif
921 if (pid < 0)
922 perror_with_name ("fork");
923
924 if (pid == 0)
925 {
926 close_most_fds ();
927 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
928
929 setpgid (0, 0);
930
931 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
932 stdout to stderr so that inferior i/o doesn't corrupt the connection.
933 Also, redirect stdin to /dev/null. */
934 if (remote_connection_is_stdio ())
935 {
936 close (0);
937 open ("/dev/null", O_RDONLY);
938 dup2 (2, 1);
939 if (write (2, "stdin/stdout redirected\n",
940 sizeof ("stdin/stdout redirected\n") - 1) < 0)
941 {
942 /* Errors ignored. */;
943 }
944 }
945
946 execv (program, allargs);
947 if (errno == ENOENT)
948 execvp (program, allargs);
949
950 fprintf (stderr, "Cannot exec %s: %s.\n", program,
951 strerror (errno));
952 fflush (stderr);
953 _exit (0177);
954 }
955
956 do_cleanups (restore_personality);
957
958 linux_add_process (pid, 0);
959
960 ptid = ptid_build (pid, pid, 0);
961 new_lwp = add_lwp (ptid);
962 new_lwp->must_set_ptrace_flags = 1;
963
964 return pid;
965 }
966
967 /* Implement the post_create_inferior target_ops method. */
968
969 static void
970 linux_post_create_inferior (void)
971 {
972 struct lwp_info *lwp = get_thread_lwp (current_thread);
973
974 linux_arch_setup ();
975
976 if (lwp->must_set_ptrace_flags)
977 {
978 struct process_info *proc = current_process ();
979 int options = linux_low_ptrace_options (proc->attached);
980
981 linux_enable_event_reporting (lwpid_of (current_thread), options);
982 lwp->must_set_ptrace_flags = 0;
983 }
984 }
985
986 /* Attach to an inferior process. Returns 0 on success, ERRNO on
987 error. */
988
989 int
990 linux_attach_lwp (ptid_t ptid)
991 {
992 struct lwp_info *new_lwp;
993 int lwpid = ptid_get_lwp (ptid);
994
995 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
996 != 0)
997 return errno;
998
999 new_lwp = add_lwp (ptid);
1000
1001 /* We need to wait for SIGSTOP before being able to make the next
1002 ptrace call on this LWP. */
1003 new_lwp->must_set_ptrace_flags = 1;
1004
1005 if (linux_proc_pid_is_stopped (lwpid))
1006 {
1007 if (debug_threads)
1008 debug_printf ("Attached to a stopped process\n");
1009
1010 /* The process is definitely stopped. It is in a job control
1011 stop, unless the kernel predates the TASK_STOPPED /
1012 TASK_TRACED distinction, in which case it might be in a
1013 ptrace stop. Make sure it is in a ptrace stop; from there we
1014 can kill it, signal it, et cetera.
1015
1016 First make sure there is a pending SIGSTOP. Since we are
1017 already attached, the process cannot transition from stopped
1018 to running without a PTRACE_CONT; so we know this signal will
1019 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1020 probably already in the queue (unless this kernel is old
1021 enough to use TASK_STOPPED for ptrace stops); but since
1022 SIGSTOP is not an RT signal, it can only be queued once. */
1023 kill_lwp (lwpid, SIGSTOP);
1024
1025 /* Finally, resume the stopped process. This will deliver the
1026 SIGSTOP (or a higher priority signal, just like normal
1027 PTRACE_ATTACH), which we'll catch later on. */
1028 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1029 }
1030
1031 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1032 brings it to a halt.
1033
1034 There are several cases to consider here:
1035
1036 1) gdbserver has already attached to the process and is being notified
1037 of a new thread that is being created.
1038 In this case we should ignore that SIGSTOP and resume the
1039 process. This is handled below by setting stop_expected = 1,
1040 and the fact that add_thread sets last_resume_kind ==
1041 resume_continue.
1042
1043 2) This is the first thread (the process thread), and we're attaching
1044 to it via attach_inferior.
1045 In this case we want the process thread to stop.
1046 This is handled by having linux_attach set last_resume_kind ==
1047 resume_stop after we return.
1048
1049 If the pid we are attaching to is also the tgid, we attach to and
1050 stop all the existing threads. Otherwise, we attach to pid and
1051 ignore any other threads in the same group as this pid.
1052
1053 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1054 existing threads.
1055 In this case we want the thread to stop.
1056 FIXME: This case is currently not properly handled.
1057 We should wait for the SIGSTOP but don't. Things work apparently
1058 because enough time passes between when we ptrace (ATTACH) and when
1059 gdb makes the next ptrace call on the thread.
1060
1061 On the other hand, if we are currently trying to stop all threads, we
1062 should treat the new thread as if we had sent it a SIGSTOP. This works
1063 because we are guaranteed that the add_lwp call above added us to the
1064 end of the list, and so the new thread has not yet reached
1065 wait_for_sigstop (but will). */
1066 new_lwp->stop_expected = 1;
1067
1068 return 0;
1069 }
1070
1071 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1072 already attached. Returns true if a new LWP is found, false
1073 otherwise. */
1074
1075 static int
1076 attach_proc_task_lwp_callback (ptid_t ptid)
1077 {
1078 /* Is this a new thread? */
1079 if (find_thread_ptid (ptid) == NULL)
1080 {
1081 int lwpid = ptid_get_lwp (ptid);
1082 int err;
1083
1084 if (debug_threads)
1085 debug_printf ("Found new lwp %d\n", lwpid);
1086
1087 err = linux_attach_lwp (ptid);
1088
1089 /* Be quiet if we simply raced with the thread exiting. EPERM
1090 is returned if the thread's task still exists, and is marked
1091 as exited or zombie, as well as other conditions, so in that
1092 case, confirm the status in /proc/PID/status. */
1093 if (err == ESRCH
1094 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1095 {
1096 if (debug_threads)
1097 {
1098 debug_printf ("Cannot attach to lwp %d: "
1099 "thread is gone (%d: %s)\n",
1100 lwpid, err, strerror (err));
1101 }
1102 }
1103 else if (err != 0)
1104 {
1105 warning (_("Cannot attach to lwp %d: %s"),
1106 lwpid,
1107 linux_ptrace_attach_fail_reason_string (ptid, err));
1108 }
1109
1110 return 1;
1111 }
1112 return 0;
1113 }
1114
1115 static void async_file_mark (void);
1116
1117 /* Attach to PID. If PID is the tgid, attach to it and all
1118 of its threads. */
1119
1120 static int
1121 linux_attach (unsigned long pid)
1122 {
1123 struct process_info *proc;
1124 struct thread_info *initial_thread;
1125 ptid_t ptid = ptid_build (pid, pid, 0);
1126 int err;
1127
1128 /* Attach to PID. We will check for other threads
1129 soon. */
1130 err = linux_attach_lwp (ptid);
1131 if (err != 0)
1132 error ("Cannot attach to process %ld: %s",
1133 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1134
1135 proc = linux_add_process (pid, 1);
1136
1137 /* Don't ignore the initial SIGSTOP if we just attached to this
1138 process. It will be collected by wait shortly. */
1139 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1140 initial_thread->last_resume_kind = resume_stop;
1141
1142 /* We must attach to every LWP. If /proc is mounted, use that to
1143 find them now. On the one hand, the inferior may be using raw
1144 clone instead of using pthreads. On the other hand, even if it
1145 is using pthreads, GDB may not be connected yet (thread_db needs
1146 to do symbol lookups, through qSymbol). Also, thread_db walks
1147 structures in the inferior's address space to find the list of
1148 threads/LWPs, and those structures may well be corrupted. Note
1149 that once thread_db is loaded, we'll still use it to list threads
1150 and associate pthread info with each LWP. */
1151 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1152
1153 /* GDB will shortly read the xml target description for this
1154 process, to figure out the process' architecture. But the target
1155 description is only filled in when the first process/thread in
1156 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1157 that now, otherwise, if GDB is fast enough, it could read the
1158 target description _before_ that initial stop. */
1159 if (non_stop)
1160 {
1161 struct lwp_info *lwp;
1162 int wstat, lwpid;
1163 ptid_t pid_ptid = pid_to_ptid (pid);
1164
1165 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1166 &wstat, __WALL);
1167 gdb_assert (lwpid > 0);
1168
1169 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1170
1171 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1172 {
1173 lwp->status_pending_p = 1;
1174 lwp->status_pending = wstat;
1175 }
1176
1177 initial_thread->last_resume_kind = resume_continue;
1178
1179 async_file_mark ();
1180
1181 gdb_assert (proc->tdesc != NULL);
1182 }
1183
1184 return 0;
1185 }
1186
1187 struct counter
1188 {
1189 int pid;
1190 int count;
1191 };
1192
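/* Callback for find_inferior.  Counts the threads whose pid matches
   COUNTER->pid; returns nonzero (ending the walk) once a second one
   is seen.  */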
1193 static int
1194 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1195 {
1196 struct counter *counter = (struct counter *) args;
1197
1198 if (ptid_get_pid (entry->id) == counter->pid)
1199 {
1200 if (++counter->count > 1)
1201 return 1;
1202 }
1203
1204 return 0;
1205 }
1206
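/* Return nonzero if no more than one thread of process PID is
   known, i.e., the remaining thread is the last one.  */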
1207 static int
1208 last_thread_of_process_p (int pid)
1209 {
1210 struct counter counter = { pid, 0 };
1211
1212 return (find_inferior (&all_threads,
1213 second_thread_of_pid_p, &counter) == NULL);
1214 }
1215
1216 /* Kill LWP. */
1217
1218 static void
1219 linux_kill_one_lwp (struct lwp_info *lwp)
1220 {
1221 struct thread_info *thr = get_lwp_thread (lwp);
1222 int pid = lwpid_of (thr);
1223
1224 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1225 there is no signal context, and ptrace(PTRACE_KILL) (or
1226 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1227 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1228 alternative is to kill with SIGKILL. We only need one SIGKILL
1229 per process, not one for each thread. But since we still support
1230 debugging programs using raw clone without CLONE_THREAD, we send
1231 we send one for each thread. For years, we used PTRACE_KILL
1232 only, so we're being a bit paranoid about some old kernels where
1233 PTRACE_KILL might work better (dubious if there are any such, but
1234 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1235 second, and so we're fine everywhere. */
1236
1237 errno = 0;
1238 kill_lwp (pid, SIGKILL);
1239 if (debug_threads)
1240 {
1241 int save_errno = errno;
1242
1243 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1244 target_pid_to_str (ptid_of (thr)),
1245 save_errno ? strerror (save_errno) : "OK");
1246 }
1247
1248 errno = 0;
1249 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1250 if (debug_threads)
1251 {
1252 int save_errno = errno;
1253
1254 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1255 target_pid_to_str (ptid_of (thr)),
1256 save_errno ? strerror (save_errno) : "OK");
1257 }
1258 }
1259
1260 /* Kill LWP and wait for it to die. */
1261
1262 static void
1263 kill_wait_lwp (struct lwp_info *lwp)
1264 {
1265 struct thread_info *thr = get_lwp_thread (lwp);
1266 int pid = ptid_get_pid (ptid_of (thr));
1267 int lwpid = ptid_get_lwp (ptid_of (thr));
1268 int wstat;
1269 int res;
1270
1271 if (debug_threads)
1272 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1273
1274 do
1275 {
1276 linux_kill_one_lwp (lwp);
1277
1278 /* Make sure it died. Notes:
1279
1280 - The loop is most likely unnecessary.
1281
1282 - We don't use linux_wait_for_event as that could delete lwps
1283 while we're iterating over them. We're not interested in
1284 any pending status at this point, only in making sure all
1285 wait status on the kernel side are collected until the
1286 process is reaped.
1287
1288 - We don't use __WALL here as the __WALL emulation relies on
1289 SIGCHLD, and killing a stopped process doesn't generate
1290 one, nor an exit status.
1291 */
1292 res = my_waitpid (lwpid, &wstat, 0);
1293 if (res == -1 && errno == ECHILD)
1294 res = my_waitpid (lwpid, &wstat, __WCLONE);
1295 } while (res > 0 && WIFSTOPPED (wstat));
1296
1297 /* Even if it was stopped, the child may have already disappeared.
1298 E.g., if it was killed by SIGKILL. */
1299 if (res < 0 && errno != ECHILD)
1300 perror_with_name ("kill_wait_lwp");
1301 }
1302
1303 /* Callback for `find_inferior'. Kills an lwp of a given process,
1304 except the leader. */
1305
1306 static int
1307 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1308 {
1309 struct thread_info *thread = (struct thread_info *) entry;
1310 struct lwp_info *lwp = get_thread_lwp (thread);
1311 int pid = * (int *) args;
1312
1313 if (ptid_get_pid (entry->id) != pid)
1314 return 0;
1315
1316 /* We avoid killing the first thread here, because of a Linux kernel (at
1317 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1318 the children get a chance to be reaped, it will remain a zombie
1319 forever. */
1320
1321 if (lwpid_of (thread) == pid)
1322 {
1323 if (debug_threads)
1324 debug_printf ("lkop: is last of process %s\n",
1325 target_pid_to_str (entry->id));
1326 return 0;
1327 }
1328
1329 kill_wait_lwp (lwp);
1330 return 0;
1331 }
1332
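/* Kill process PID and all of its LWPs, then mourn it.  Returns 0
   on success, -1 if no such process is known.  */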
1333 static int
1334 linux_kill (int pid)
1335 {
1336 struct process_info *process;
1337 struct lwp_info *lwp;
1338
1339 process = find_process_pid (pid);
1340 if (process == NULL)
1341 return -1;
1342
1343 /* If we're killing a running inferior, make sure it is stopped
1344 first, as PTRACE_KILL will not work otherwise. */
1345 stop_all_lwps (0, NULL);
1346
1347 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1348
1349 /* See the comment in linux_kill_one_lwp. We did not kill the first
1350 thread in the list, so do so now. */
1351 lwp = find_lwp_pid (pid_to_ptid (pid));
1352
1353 if (lwp == NULL)
1354 {
1355 if (debug_threads)
1356 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1357 pid);
1358 }
1359 else
1360 kill_wait_lwp (lwp);
1361
1362 the_target->mourn (process);
1363
1364 /* Since we presently can only stop all lwps of all processes, we
1365 need to unstop lwps of other processes. */
1366 unstop_all_lwps (0, NULL);
1367 return 0;
1368 }
1369
1370 /* Get pending signal of THREAD, for detaching purposes. This is the
1371 signal the thread last stopped for, which we need to deliver to the
1372 thread when detaching; otherwise, it'd be suppressed/lost. */
1373
1374 static int
1375 get_detach_signal (struct thread_info *thread)
1376 {
1377 enum gdb_signal signo = GDB_SIGNAL_0;
1378 int status;
1379 struct lwp_info *lp = get_thread_lwp (thread);
1380
1381 if (lp->status_pending_p)
1382 status = lp->status_pending;
1383 else
1384 {
1385 /* If the thread had been suspended by gdbserver, and it stopped
1386 cleanly, then it'll have stopped with SIGSTOP. But we don't
1387 want to deliver that SIGSTOP. */
1388 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1389 || thread->last_status.value.sig == GDB_SIGNAL_0)
1390 return 0;
1391
1392 /* Otherwise, we may need to deliver the signal we
1393 intercepted. */
1394 status = lp->last_status;
1395 }
1396
1397 if (!WIFSTOPPED (status))
1398 {
1399 if (debug_threads)
1400 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1401 target_pid_to_str (ptid_of (thread)));
1402 return 0;
1403 }
1404
1405 /* Extended wait statuses aren't real SIGTRAPs. */
1406 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1407 {
1408 if (debug_threads)
1409 debug_printf ("GPS: lwp %s had stopped with extended "
1410 "status: no pending signal\n",
1411 target_pid_to_str (ptid_of (thread)));
1412 return 0;
1413 }
1414
1415 signo = gdb_signal_from_host (WSTOPSIG (status));
1416
1417 if (program_signals_p && !program_signals[signo])
1418 {
1419 if (debug_threads)
1420 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1421 target_pid_to_str (ptid_of (thread)),
1422 gdb_signal_to_string (signo));
1423 return 0;
1424 }
1425 else if (!program_signals_p
1426 /* If we have no way to know which signals GDB does not
1427 want to have passed to the program, assume
1428 SIGTRAP/SIGINT, which is GDB's default. */
1429 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1430 {
1431 if (debug_threads)
1432 debug_printf ("GPS: lwp %s had signal %s, "
1433 "but we don't know if we should pass it. "
1434 "Default to not.\n",
1435 target_pid_to_str (ptid_of (thread)),
1436 gdb_signal_to_string (signo));
1437 return 0;
1438 }
1439 else
1440 {
1441 if (debug_threads)
1442 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1443 target_pid_to_str (ptid_of (thread)),
1444 gdb_signal_to_string (signo));
1445
1446 return WSTOPSIG (status);
1447 }
1448 }
1449
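/* Callback for find_inferior.  Detaches from THREAD's LWP if it
   belongs to the process whose pid ARGS points to, re-delivering
   any signal the thread last stopped for so it is not lost.  */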
1450 static int
1451 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1452 {
1453 struct thread_info *thread = (struct thread_info *) entry;
1454 struct lwp_info *lwp = get_thread_lwp (thread);
1455 int pid = * (int *) args;
1456 int sig;
1457
1458 if (ptid_get_pid (entry->id) != pid)
1459 return 0;
1460
1461 /* If there is a pending SIGSTOP, get rid of it. */
1462 if (lwp->stop_expected)
1463 {
1464 if (debug_threads)
1465 debug_printf ("Sending SIGCONT to %s\n",
1466 target_pid_to_str (ptid_of (thread)));
1467
1468 kill_lwp (lwpid_of (thread), SIGCONT);
1469 lwp->stop_expected = 0;
1470 }
1471
1472 /* Flush any pending changes to the process's registers. */
1473 regcache_invalidate_thread (thread);
1474
1475 /* Pass on any pending signal for this thread. */
1476 sig = get_detach_signal (thread);
1477
1478 /* Finally, let it resume. */
1479 if (the_low_target.prepare_to_resume != NULL)
1480 the_low_target.prepare_to_resume (lwp);
1481 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1482 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1483 error (_("Can't detach %s: %s"),
1484 target_pid_to_str (ptid_of (thread)),
1485 strerror (errno));
1486
1487 delete_lwp (lwp);
1488 return 0;
1489 }
1490
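/* Detach from process PID, letting its threads resume.  Returns 0
   on success, -1 if no such process is known.  */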
1491 static int
1492 linux_detach (int pid)
1493 {
1494 struct process_info *process;
1495
1496 process = find_process_pid (pid);
1497 if (process == NULL)
1498 return -1;
1499
1500 /* As there's a step over already in progress, let it finish first,
1501 otherwise nesting a stabilize_threads operation on top gets real
1502 messy. */
1503 complete_ongoing_step_over ();
1504
1505 /* Stop all threads before detaching. First, ptrace requires that
1506 the thread is stopped to successfully detach. Second, thread_db
1507 may need to uninstall thread event breakpoints from memory, which
1508 only works with a stopped process anyway. */
1509 stop_all_lwps (0, NULL);
1510
1511 #ifdef USE_THREAD_DB
1512 thread_db_detach (process);
1513 #endif
1514
1515 /* Stabilize threads (move out of jump pads). */
1516 stabilize_threads ();
1517
1518 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1519
1520 the_target->mourn (process);
1521
1522 /* Since we presently can only stop all lwps of all processes, we
1523 need to unstop lwps of other processes. */
1524 unstop_all_lwps (0, NULL);
1525 return 0;
1526 }
1527
1528 /* Remove all LWPs that belong to process PROC from the lwp list. */
1529
1530 static int
1531 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1532 {
1533 struct thread_info *thread = (struct thread_info *) entry;
1534 struct lwp_info *lwp = get_thread_lwp (thread);
1535 struct process_info *process = (struct process_info *) proc;
1536
1537 if (pid_of (thread) == pid_of (process))
1538 delete_lwp (lwp);
1539
1540 return 0;
1541 }
1542
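/* Release everything held for PROCESS: its LWPs, thread_db state,
   and per-process private data.  */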
1543 static void
1544 linux_mourn (struct process_info *process)
1545 {
1546 struct process_info_private *priv;
1547
1548 #ifdef USE_THREAD_DB
1549 thread_db_mourn (process);
1550 #endif
1551
1552 find_inferior (&all_threads, delete_lwp_callback, process);
1553
1554 /* Freeing all private data. */
1555 priv = process->priv;
1556 free (priv->arch_private);
1557 free (priv);
1558 process->priv = NULL;
1559
1560 remove_process (process);
1561 }
1562
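/* Wait for process PID to exit or disappear, reaping its final
   wait status.  */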
1563 static void
1564 linux_join (int pid)
1565 {
1566 int status, ret;
1567
1568 do {
1569 ret = my_waitpid (pid, &status, 0);
1570 if (WIFEXITED (status) || WIFSIGNALED (status))
1571 break;
1572 } while (ret != -1 || errno != ECHILD);
1573 }
1574
1575 /* Return nonzero if the given thread is still alive. */
1576 static int
1577 linux_thread_alive (ptid_t ptid)
1578 {
1579 struct lwp_info *lwp = find_lwp_pid (ptid);
1580
1581 /* We assume we always know if a thread exits. If a whole process
1582 exited but we still haven't been able to report it to GDB, we'll
1583 hold on to the last lwp of the dead process. */
1584 if (lwp != NULL)
1585 return !lwp_is_marked_dead (lwp);
1586 else
1587 return 0;
1588 }
1589
1590 /* Return 1 if this lwp still has an interesting status pending. If
1591 not (e.g., it had stopped for a breakpoint that is gone),
1592 return 0. */
1593
1594 static int
1595 thread_still_has_status_pending_p (struct thread_info *thread)
1596 {
1597 struct lwp_info *lp = get_thread_lwp (thread);
1598
1599 if (!lp->status_pending_p)
1600 return 0;
1601
1602 if (thread->last_resume_kind != resume_stop
1603 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1604 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1605 {
1606 struct thread_info *saved_thread;
1607 CORE_ADDR pc;
1608 int discard = 0;
1609
1610 gdb_assert (lp->last_status != 0);
1611
1612 pc = get_pc (lp);
1613
1614 saved_thread = current_thread;
1615 current_thread = thread;
1616
1617 if (pc != lp->stop_pc)
1618 {
1619 if (debug_threads)
1620 debug_printf ("PC of %ld changed\n",
1621 lwpid_of (thread));
1622 discard = 1;
1623 }
1624
1625 #if !USE_SIGTRAP_SIGINFO
1626 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1627 && !(*the_low_target.breakpoint_at) (pc))
1628 {
1629 if (debug_threads)
1630 debug_printf ("previous SW breakpoint of %ld gone\n",
1631 lwpid_of (thread));
1632 discard = 1;
1633 }
1634 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1635 && !hardware_breakpoint_inserted_here (pc))
1636 {
1637 if (debug_threads)
1638 debug_printf ("previous HW breakpoint of %ld gone\n",
1639 lwpid_of (thread));
1640 discard = 1;
1641 }
1642 #endif
1643
1644 current_thread = saved_thread;
1645
1646 if (discard)
1647 {
1648 if (debug_threads)
1649 debug_printf ("discarding pending breakpoint status\n");
1650 lp->status_pending_p = 0;
1651 return 0;
1652 }
1653 }
1654
1655 return 1;
1656 }
1657
1658 /* Returns true if LWP is resumed from the client's perspective. */
1659
1660 static int
1661 lwp_resumed (struct lwp_info *lwp)
1662 {
1663 struct thread_info *thread = get_lwp_thread (lwp);
1664
1665 if (thread->last_resume_kind != resume_stop)
1666 return 1;
1667
1668 /* Did gdb send us a `vCont;t', but we haven't reported the
1669 corresponding stop to gdb yet? If so, the thread is still
1670 resumed/running from gdb's perspective. */
1671 if (thread->last_resume_kind == resume_stop
1672 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1673 return 1;
1674
1675 return 0;
1676 }
1677
1678 /* Return 1 if this lwp has an interesting status pending. */
1679 static int
1680 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1681 {
1682 struct thread_info *thread = (struct thread_info *) entry;
1683 struct lwp_info *lp = get_thread_lwp (thread);
1684 ptid_t ptid = * (ptid_t *) arg;
1685
1686 /* Check if we're only interested in events from a specific process
1687 or a specific LWP. */
1688 if (!ptid_match (ptid_of (thread), ptid))
1689 return 0;
1690
1691 if (!lwp_resumed (lp))
1692 return 0;
1693
1694 if (lp->status_pending_p
1695 && !thread_still_has_status_pending_p (thread))
1696 {
1697 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1698 return 0;
1699 }
1700
1701 return lp->status_pending_p;
1702 }
1703
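/* Callback for find_inferior.  Returns nonzero if ENTRY's lwpid
   matches the lwp of the ptid DATA points to (or its pid, when no
   lwp is set).  */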
1704 static int
1705 same_lwp (struct inferior_list_entry *entry, void *data)
1706 {
1707 ptid_t ptid = *(ptid_t *) data;
1708 int lwp;
1709
1710 if (ptid_get_lwp (ptid) != 0)
1711 lwp = ptid_get_lwp (ptid);
1712 else
1713 lwp = ptid_get_pid (ptid);
1714
1715 if (ptid_get_lwp (entry->id) == lwp)
1716 return 1;
1717
1718 return 0;
1719 }
1720
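/* Return the lwp_info whose lwpid matches PTID's lwp (or pid, if
   PTID carries no lwp), or NULL if none is known.  */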
1721 struct lwp_info *
1722 find_lwp_pid (ptid_t ptid)
1723 {
1724 struct inferior_list_entry *thread
1725 = find_inferior (&all_threads, same_lwp, &ptid);
1726
1727 if (thread == NULL)
1728 return NULL;
1729
1730 return get_thread_lwp ((struct thread_info *) thread);
1731 }
1732
1733 /* Return the number of known LWPs in the tgid given by PID. */
1734
1735 static int
1736 num_lwps (int pid)
1737 {
1738 struct inferior_list_entry *inf, *tmp;
1739 int count = 0;
1740
1741 ALL_INFERIORS (&all_threads, inf, tmp)
1742 {
1743 if (ptid_get_pid (inf->id) == pid)
1744 count++;
1745 }
1746
1747 return count;
1748 }
1749
1750 /* The arguments passed to iterate_over_lwps. */
1751
1752 struct iterate_over_lwps_args
1753 {
1754 /* The FILTER argument passed to iterate_over_lwps. */
1755 ptid_t filter;
1756
1757 /* The CALLBACK argument passed to iterate_over_lwps. */
1758 iterate_over_lwps_ftype *callback;
1759
1760 /* The DATA argument passed to iterate_over_lwps. */
1761 void *data;
1762 };
1763
1764 /* Callback for find_inferior used by iterate_over_lwps to filter
1765 calls to the callback supplied to that function. Returning a
1766 nonzero value causes find_inferior to stop iterating and return
1767 the current inferior_list_entry. Returning zero indicates that
1768 find_inferior should continue iterating.
1769
1770 static int
1771 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1772 {
1773 struct iterate_over_lwps_args *args
1774 = (struct iterate_over_lwps_args *) args_p;
1775
1776 if (ptid_match (entry->id, args->filter))
1777 {
1778 struct thread_info *thr = (struct thread_info *) entry;
1779 struct lwp_info *lwp = get_thread_lwp (thr);
1780
1781 return (*args->callback) (lwp, args->data);
1782 }
1783
1784 return 0;
1785 }
1786
1787 /* See nat/linux-nat.h. */
1788
1789 struct lwp_info *
1790 iterate_over_lwps (ptid_t filter,
1791 iterate_over_lwps_ftype callback,
1792 void *data)
1793 {
1794 struct iterate_over_lwps_args args = {filter, callback, data};
1795 struct inferior_list_entry *entry;
1796
1797 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1798 if (entry == NULL)
1799 return NULL;
1800
1801 return get_thread_lwp ((struct thread_info *) entry);
1802 }
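/* A sketch of a caller (hypothetical callback, for illustration
   only; returning 0 keeps iterating, nonzero stops and returns that
   LWP):

     static int
     count_lwp_cb (struct lwp_info *lwp, void *data)
     {
       (*(int *) data)++;
       return 0;
     }

     int n = 0;
     iterate_over_lwps (pid_to_ptid (pid), count_lwp_cb, &n);  */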
1803
1804 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1805 their exits until all other threads in the group have exited. */
1806
1807 static void
1808 check_zombie_leaders (void)
1809 {
1810 struct process_info *proc, *tmp;
1811
1812 ALL_PROCESSES (proc, tmp)
1813 {
1814 pid_t leader_pid = pid_of (proc);
1815 struct lwp_info *leader_lp;
1816
1817 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1818
1819 if (debug_threads)
1820 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1821 "num_lwps=%d, zombie=%d\n",
1822 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1823 linux_proc_pid_is_zombie (leader_pid));
1824
1825 if (leader_lp != NULL && !leader_lp->stopped
1826 /* Check if there are other threads in the group, as we may
1827 have raced with the inferior simply exiting. */
1828 && !last_thread_of_process_p (leader_pid)
1829 && linux_proc_pid_is_zombie (leader_pid))
1830 {
1831 /* A leader zombie can mean one of two things:
1832
1833 - It exited, and there's an exit status pending
1834 available, or only the leader exited (not the whole
1835 program). In the latter case, we can't waitpid the
1836 leader's exit status until all other threads are gone.
1837
1838 - There are 3 or more threads in the group, and a thread
1839 other than the leader exec'd. On an exec, the Linux
1840 kernel destroys all other threads (except the execing
1841 one) in the thread group, and resets the execing thread's
1842 tid to the tgid. No exit notification is sent for the
1843 execing thread -- from the ptracer's perspective, it
1844 appears as though the execing thread just vanishes.
1845 Until we reap all other threads except the leader and the
1846 execing thread, the leader will be zombie, and the
1847 execing thread will be in `D (disc sleep)'. As soon as
1848 all other threads are reaped, the execing thread changes
1849 its tid to the tgid, and the previous (zombie) leader
1850 vanishes, giving place to the "new" leader. We could try
1851 distinguishing the exit and exec cases, by waiting once
1852 more, and seeing if something comes out, but it doesn't
1853 sound useful. The previous leader _does_ go away, and
1854 we'll re-add the new one once we see the exec event
1855 (which is just the same as what would happen if the
1856 previous leader did exit voluntarily before some other
1857 thread execs). */
1858
1859 if (debug_threads)
1860 fprintf (stderr,
1861 "CZL: Thread group leader %d zombie "
1862 "(it exited, or another thread execd).\n",
1863 leader_pid);
1864
1865 delete_lwp (leader_lp);
1866 }
1867 }
1868 }
1869
1870 /* Callback for `find_inferior'. Returns the first LWP that is not
1871 stopped. ARG is a PTID filter. */
1872
1873 static int
1874 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1875 {
1876 struct thread_info *thr = (struct thread_info *) entry;
1877 struct lwp_info *lwp;
1878 ptid_t filter = *(ptid_t *) arg;
1879
1880 if (!ptid_match (ptid_of (thr), filter))
1881 return 0;
1882
1883 lwp = get_thread_lwp (thr);
1884 if (!lwp->stopped)
1885 return 1;
1886
1887 return 0;
1888 }
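
#if 0
/* Usage sketch (never compiled): this is how the wait loop further
   below uses the callback -- find_inferior returns the first entry
   for which the callback returned nonzero, so a NULL result means
   every LWP matching WAIT_PTID is stopped.  */
if (find_inferior (&all_threads, not_stopped_callback, &wait_ptid) == NULL)
  {
    /* No resumed LWPs; blocking in waitpid would hang forever.  */
  }
#endif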
1889
1890 /* Increment LWP's suspend count. */
1891
1892 static void
1893 lwp_suspended_inc (struct lwp_info *lwp)
1894 {
1895 lwp->suspended++;
1896
1897 if (debug_threads && lwp->suspended > 4)
1898 {
1899 struct thread_info *thread = get_lwp_thread (lwp);
1900
1901 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1902 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1903 }
1904 }
1905
1906 /* Decrement LWP's suspend count. */
1907
1908 static void
1909 lwp_suspended_decr (struct lwp_info *lwp)
1910 {
1911 lwp->suspended--;
1912
1913 if (lwp->suspended < 0)
1914 {
1915 struct thread_info *thread = get_lwp_thread (lwp);
1916
1917 internal_error (__FILE__, __LINE__,
1918 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1919 lwp->suspended);
1920 }
1921 }
1922
1923 /* This function should only be called if the LWP got a SIGTRAP.
1924
1925 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1926 event was handled, 0 otherwise. */
1927
1928 static int
1929 handle_tracepoints (struct lwp_info *lwp)
1930 {
1931 struct thread_info *tinfo = get_lwp_thread (lwp);
1932 int tpoint_related_event = 0;
1933
1934 gdb_assert (lwp->suspended == 0);
1935
1936 /* If this tracepoint hit causes a tracing stop, we'll immediately
1937 uninsert tracepoints. To do this, we temporarily pause all
1938 threads, unpatch away, and then unpause threads. We need to make
1939 sure the unpausing doesn't resume LWP too. */
1940 lwp_suspended_inc (lwp);
1941
1942 /* And we need to be sure that any all-threads-stopping doesn't try
1943 to move threads out of the jump pads, as it could deadlock the
1944 inferior (LWP could be in the jump pad, maybe even holding the
1945 lock).  */
1946
1947 /* Do any necessary step collect actions. */
1948 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1949
1950 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1951
1952 /* See if we just hit a tracepoint and do its main collect
1953 actions. */
1954 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1955
1956 lwp_suspended_decr (lwp);
1957
1958 gdb_assert (lwp->suspended == 0);
1959 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1960
1961 if (tpoint_related_event)
1962 {
1963 if (debug_threads)
1964 debug_printf ("got a tracepoint event\n");
1965 return 1;
1966 }
1967
1968 return 0;
1969 }
1970
1971 /* Convenience wrapper. Returns nonzero if LWP is presently collecting
1972 a fast tracepoint. */
1973
1974 static int
1975 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1976 struct fast_tpoint_collect_status *status)
1977 {
1978 CORE_ADDR thread_area;
1979 struct thread_info *thread = get_lwp_thread (lwp);
1980
1981 if (the_low_target.get_thread_area == NULL)
1982 return 0;
1983
1984 /* Get the thread area address. This is used to recognize which
1985 thread is which when tracing with the in-process agent library.
1986 We don't read anything from the address, and treat it as opaque;
1987 it's the address itself that we assume is unique per-thread. */
1988 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1989 return 0;
1990
1991 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1992 }
1993
1994 /* The reason we resume in the caller is that we want to be able
1995 to pass lwp->status_pending as WSTAT, and we need to clear
1996 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1997 refuses to resume. */
1998
1999 static int
2000 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2001 {
2002 struct thread_info *saved_thread;
2003
2004 saved_thread = current_thread;
2005 current_thread = get_lwp_thread (lwp);
2006
2007 if ((wstat == NULL
2008 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2009 && supports_fast_tracepoints ()
2010 && agent_loaded_p ())
2011 {
2012 struct fast_tpoint_collect_status status;
2013 int r;
2014
2015 if (debug_threads)
2016 debug_printf ("Checking whether LWP %ld needs to move out of the "
2017 "jump pad.\n",
2018 lwpid_of (current_thread));
2019
2020 r = linux_fast_tracepoint_collecting (lwp, &status);
2021
2022 if (wstat == NULL
2023 || (WSTOPSIG (*wstat) != SIGILL
2024 && WSTOPSIG (*wstat) != SIGFPE
2025 && WSTOPSIG (*wstat) != SIGSEGV
2026 && WSTOPSIG (*wstat) != SIGBUS))
2027 {
2028 lwp->collecting_fast_tracepoint = r;
2029
2030 if (r != 0)
2031 {
2032 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2033 {
2034 /* Haven't executed the original instruction yet.
2035 Set breakpoint there, and wait till it's hit,
2036 then single-step until exiting the jump pad. */
2037 lwp->exit_jump_pad_bkpt
2038 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2039 }
2040
2041 if (debug_threads)
2042 debug_printf ("Checking whether LWP %ld needs to move out of "
2043 "the jump pad...it does\n",
2044 lwpid_of (current_thread));
2045 current_thread = saved_thread;
2046
2047 return 1;
2048 }
2049 }
2050 else
2051 {
2052 /* If we get a synchronous signal while collecting, *and*
2053 while executing the (relocated) original instruction,
2054 reset the PC to point at the tpoint address, before
2055 reporting to GDB. Otherwise, it's an IPA lib bug: just
2056 report the signal to GDB, and pray for the best. */
2057
2058 lwp->collecting_fast_tracepoint = 0;
2059
2060 if (r != 0
2061 && (status.adjusted_insn_addr <= lwp->stop_pc
2062 && lwp->stop_pc < status.adjusted_insn_addr_end))
2063 {
2064 siginfo_t info;
2065 struct regcache *regcache;
2066
2067 /* The si_addr on a few signals references the address
2068 of the faulting instruction. Adjust that as
2069 well. */
2070 if ((WSTOPSIG (*wstat) == SIGILL
2071 || WSTOPSIG (*wstat) == SIGFPE
2072 || WSTOPSIG (*wstat) == SIGBUS
2073 || WSTOPSIG (*wstat) == SIGSEGV)
2074 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2075 (PTRACE_TYPE_ARG3) 0, &info) == 0
2076 /* Final check just to make sure we don't clobber
2077 the siginfo of non-kernel-sent signals. */
2078 && (uintptr_t) info.si_addr == lwp->stop_pc)
2079 {
2080 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2081 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2082 (PTRACE_TYPE_ARG3) 0, &info);
2083 }
2084
2085 regcache = get_thread_regcache (current_thread, 1);
2086 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2087 lwp->stop_pc = status.tpoint_addr;
2088
2089 /* Cancel any fast tracepoint lock this thread was
2090 holding. */
2091 force_unlock_trace_buffer ();
2092 }
2093
2094 if (lwp->exit_jump_pad_bkpt != NULL)
2095 {
2096 if (debug_threads)
2097 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2098 "stopping all threads momentarily.\n");
2099
2100 stop_all_lwps (1, lwp);
2101
2102 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2103 lwp->exit_jump_pad_bkpt = NULL;
2104
2105 unstop_all_lwps (1, lwp);
2106
2107 gdb_assert (lwp->suspended >= 0);
2108 }
2109 }
2110 }
2111
2112 if (debug_threads)
2113 debug_printf ("Checking whether LWP %ld needs to move out of the "
2114 "jump pad...no\n",
2115 lwpid_of (current_thread));
2116
2117 current_thread = saved_thread;
2118 return 0;
2119 }
2120
2121 /* Enqueue one signal in the "signals to report later when out of the
2122 jump pad" list. */
2123
2124 static void
2125 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2126 {
2127 struct pending_signals *p_sig;
2128 struct thread_info *thread = get_lwp_thread (lwp);
2129
2130 if (debug_threads)
2131 debug_printf ("Deferring signal %d for LWP %ld.\n",
2132 WSTOPSIG (*wstat), lwpid_of (thread));
2133
2134 if (debug_threads)
2135 {
2136 struct pending_signals *sig;
2137
2138 for (sig = lwp->pending_signals_to_report;
2139 sig != NULL;
2140 sig = sig->prev)
2141 debug_printf (" Already queued %d\n",
2142 sig->signal);
2143
2144 debug_printf (" (no more currently queued signals)\n");
2145 }
2146
2147 /* Don't enqueue non-RT signals if they are already in the deferred
2148 queue. (SIGSTOP being the easiest signal to see ending up here
2149 twice.) */
2150 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2151 {
2152 struct pending_signals *sig;
2153
2154 for (sig = lwp->pending_signals_to_report;
2155 sig != NULL;
2156 sig = sig->prev)
2157 {
2158 if (sig->signal == WSTOPSIG (*wstat))
2159 {
2160 if (debug_threads)
2161 debug_printf ("Not requeuing already queued non-RT signal %d"
2162 " for LWP %ld\n",
2163 sig->signal,
2164 lwpid_of (thread));
2165 return;
2166 }
2167 }
2168 }
2169
2170 p_sig = XCNEW (struct pending_signals);
2171 p_sig->prev = lwp->pending_signals_to_report;
2172 p_sig->signal = WSTOPSIG (*wstat);
2173
2174 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2175 &p_sig->info);
2176
2177 lwp->pending_signals_to_report = p_sig;
2178 }
2179
2180 /* Dequeue one signal from the "signals to report later when out of
2181 the jump pad" list. */
2182
2183 static int
2184 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2185 {
2186 struct thread_info *thread = get_lwp_thread (lwp);
2187
2188 if (lwp->pending_signals_to_report != NULL)
2189 {
2190 struct pending_signals **p_sig;
2191
2192 p_sig = &lwp->pending_signals_to_report;
2193 while ((*p_sig)->prev != NULL)
2194 p_sig = &(*p_sig)->prev;
2195
2196 *wstat = W_STOPCODE ((*p_sig)->signal);
2197 if ((*p_sig)->info.si_signo != 0)
2198 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2199 &(*p_sig)->info);
2200 free (*p_sig);
2201 *p_sig = NULL;
2202
2203 if (debug_threads)
2204 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2205 WSTOPSIG (*wstat), lwpid_of (thread));
2206
2207 if (debug_threads)
2208 {
2209 struct pending_signals *sig;
2210
2211 for (sig = lwp->pending_signals_to_report;
2212 sig != NULL;
2213 sig = sig->prev)
2214 debug_printf (" Still queued %d\n",
2215 sig->signal);
2216
2217 debug_printf (" (no more queued signals)\n");
2218 }
2219
2220 return 1;
2221 }
2222
2223 return 0;
2224 }
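
#if 0
/* Usage sketch (never compiled; LWP and the SIGUSR1/SIGUSR2 stop
   statuses are hypothetical): the list is pushed at the head but
   drained from the tail, so deferred signals come back out in FIFO
   order.  */
int wstat;

enqueue_one_deferred_signal (lwp, &wstat_usr1);	/* defer SIGUSR1 first */
enqueue_one_deferred_signal (lwp, &wstat_usr2);	/* then SIGUSR2 */

dequeue_one_deferred_signal (lwp, &wstat);	/* WSTOPSIG (wstat) == SIGUSR1 */
dequeue_one_deferred_signal (lwp, &wstat);	/* WSTOPSIG (wstat) == SIGUSR2 */
#endif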
2225
2226 /* Fetch the possibly triggered data watchpoint info and store it in
2227 CHILD.
2228
2229 On some archs, like x86, that use debug registers to set
2230 watchpoints, the way to know which watched address trapped is to
2231 check the register that is used to select which address to watch.
2232 The problem is that between setting the watchpoint and reading
2233 back which data address trapped, the user may change the set of
2234 watchpoints, and, as a consequence, GDB changes the debug
2235 registers in the inferior. To avoid reading back a stale
2236 stopped-data-address when that happens, we cache in CHILD the fact
2237 that a watchpoint trapped, and the corresponding data address, as
2238 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2239 registers meanwhile, we have the cached data we can rely on. */
2240
2241 static int
2242 check_stopped_by_watchpoint (struct lwp_info *child)
2243 {
2244 if (the_low_target.stopped_by_watchpoint != NULL)
2245 {
2246 struct thread_info *saved_thread;
2247
2248 saved_thread = current_thread;
2249 current_thread = get_lwp_thread (child);
2250
2251 if (the_low_target.stopped_by_watchpoint ())
2252 {
2253 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2254
2255 if (the_low_target.stopped_data_address != NULL)
2256 child->stopped_data_address
2257 = the_low_target.stopped_data_address ();
2258 else
2259 child->stopped_data_address = 0;
2260 }
2261
2262 current_thread = saved_thread;
2263 }
2264
2265 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2266 }
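
#if 0
/* Usage sketch (never compiled; REPORT_WATCHPOINT is hypothetical):
   thanks to the caching above, later consumers read the stop reason
   and data address from the LWP itself instead of re-reading possibly
   stale debug registers.  */
if (check_stopped_by_watchpoint (child))
  report_watchpoint (child->stopped_data_address);
#endif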
2267
2268 /* Return the ptrace options that we want to try to enable. */
2269
2270 static int
2271 linux_low_ptrace_options (int attached)
2272 {
2273 int options = 0;
2274
2275 if (!attached)
2276 options |= PTRACE_O_EXITKILL;
2277
2278 if (report_fork_events)
2279 options |= PTRACE_O_TRACEFORK;
2280
2281 if (report_vfork_events)
2282 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2283
2284 if (report_exec_events)
2285 options |= PTRACE_O_TRACEEXEC;
2286
2287 options |= PTRACE_O_TRACESYSGOOD;
2288
2289 return options;
2290 }
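
#if 0
/* Illustrative sketch (never compiled; PID is hypothetical): the
   options computed above are ultimately installed on a stopped tracee
   with PTRACE_SETOPTIONS, which is what linux_enable_event_reporting
   does on our behalf.  */
int options = linux_low_ptrace_options (/* attached */ 0);

ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	(PTRACE_TYPE_ARG4) (uintptr_t) options);
#endif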
2291
2292 /* Do low-level handling of the event, and check if we should go on
2293 and pass it to caller code. Return the affected LWP if we should,
2294 or NULL otherwise. */
2295
2296 static struct lwp_info *
2297 linux_low_filter_event (int lwpid, int wstat)
2298 {
2299 struct lwp_info *child;
2300 struct thread_info *thread;
2301 int have_stop_pc = 0;
2302
2303 child = find_lwp_pid (pid_to_ptid (lwpid));
2304
2305 /* Check for stop events reported by a process we didn't already
2306 know about - anything not already in our LWP list.
2307
2308 If we're expecting to receive stopped processes after
2309 fork, vfork, and clone events, then we'll just add the
2310 new one to our list and go back to waiting for the event
2311 to be reported - the stopped process might be returned
2312 from waitpid before or after the event is.
2313
2314 But note the case of a non-leader thread exec'ing after the
2315 leader having exited, and gone from our lists (because
2316 check_zombie_leaders deleted it). The non-leader thread
2317 changes its tid to the tgid. */
2318
2319 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2320 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2321 {
2322 ptid_t child_ptid;
2323
2324 /* A multi-thread exec after we had seen the leader exiting. */
2325 if (debug_threads)
2326 {
2327 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2328 "after exec.\n", lwpid);
2329 }
2330
2331 child_ptid = ptid_build (lwpid, lwpid, 0);
2332 child = add_lwp (child_ptid);
2333 child->stopped = 1;
2334 current_thread = child->thread;
2335 }
2336
2337 /* If we didn't find a process, one of two things presumably happened:
2338 - A process we started and then detached from has exited. Ignore it.
2339 - A process we are controlling has forked and the new child's stop
2340 was reported to us by the kernel. Save its PID. */
2341 if (child == NULL && WIFSTOPPED (wstat))
2342 {
2343 add_to_pid_list (&stopped_pids, lwpid, wstat);
2344 return NULL;
2345 }
2346 else if (child == NULL)
2347 return NULL;
2348
2349 thread = get_lwp_thread (child);
2350
2351 child->stopped = 1;
2352
2353 child->last_status = wstat;
2354
2355 /* Check if the thread has exited. */
2356 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2357 {
2358 if (debug_threads)
2359 debug_printf ("LLFE: %d exited.\n", lwpid);
2360 /* If there is at least one more LWP, then the exit signal was
2361 not the end of the debugged application and should be
2362 ignored, unless GDB wants to hear about thread exits. */
2363 if (report_thread_events
2364 || last_thread_of_process_p (pid_of (thread)))
2365 {
2366 /* Since events are serialized to the GDB core, we can't
2367 report this one right now. Leave the status pending for
2368 the next time we're able to report it. */
2369 mark_lwp_dead (child, wstat);
2370 return child;
2371 }
2372 else
2373 {
2374 delete_lwp (child);
2375 return NULL;
2376 }
2377 }
2378
2379 gdb_assert (WIFSTOPPED (wstat));
2380
2381 if (WIFSTOPPED (wstat))
2382 {
2383 struct process_info *proc;
2384
2385 /* Architecture-specific setup after inferior is running. */
2386 proc = find_process_pid (pid_of (thread));
2387 if (proc->tdesc == NULL)
2388 {
2389 if (proc->attached)
2390 {
2391 /* This needs to happen after we have attached to the
2392 inferior and it is stopped for the first time, but
2393 before we access any inferior registers. */
2394 linux_arch_setup_thread (thread);
2395 }
2396 else
2397 {
2398 /* The process is started, but GDBserver will do
2399 architecture-specific setup after the program stops at
2400 the first instruction. */
2401 child->status_pending_p = 1;
2402 child->status_pending = wstat;
2403 return child;
2404 }
2405 }
2406 }
2407
2408 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2409 {
2410 struct process_info *proc = find_process_pid (pid_of (thread));
2411 int options = linux_low_ptrace_options (proc->attached);
2412
2413 linux_enable_event_reporting (lwpid, options);
2414 child->must_set_ptrace_flags = 0;
2415 }
2416
2417 /* Always update syscall_state, even if it will be filtered later. */
2418 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2419 {
2420 child->syscall_state
2421 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2422 ? TARGET_WAITKIND_SYSCALL_RETURN
2423 : TARGET_WAITKIND_SYSCALL_ENTRY);
2424 }
2425 else
2426 {
2427 /* Almost all other ptrace-stops are known to be outside of system
2428 calls, with further exceptions in handle_extended_wait. */
2429 child->syscall_state = TARGET_WAITKIND_IGNORE;
2430 }
2431
2432 /* Be careful to not overwrite stop_pc until save_stop_reason is
2433 called. */
2434 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2435 && linux_is_extended_waitstatus (wstat))
2436 {
2437 child->stop_pc = get_pc (child);
2438 if (handle_extended_wait (&child, wstat))
2439 {
2440 /* The event has been handled, so just return without
2441 reporting it. */
2442 return NULL;
2443 }
2444 }
2445
2446 if (linux_wstatus_maybe_breakpoint (wstat))
2447 {
2448 if (save_stop_reason (child))
2449 have_stop_pc = 1;
2450 }
2451
2452 if (!have_stop_pc)
2453 child->stop_pc = get_pc (child);
2454
2455 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2456 && child->stop_expected)
2457 {
2458 if (debug_threads)
2459 debug_printf ("Expected stop.\n");
2460 child->stop_expected = 0;
2461
2462 if (thread->last_resume_kind == resume_stop)
2463 {
2464 /* We want to report the stop to the core. Treat the
2465 SIGSTOP as a normal event. */
2466 if (debug_threads)
2467 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2468 target_pid_to_str (ptid_of (thread)));
2469 }
2470 else if (stopping_threads != NOT_STOPPING_THREADS)
2471 {
2472 /* Stopping threads. We don't want this SIGSTOP to end up
2473 pending. */
2474 if (debug_threads)
2475 debug_printf ("LLW: SIGSTOP caught for %s "
2476 "while stopping threads.\n",
2477 target_pid_to_str (ptid_of (thread)));
2478 return NULL;
2479 }
2480 else
2481 {
2482 /* This is a delayed SIGSTOP. Filter out the event. */
2483 if (debug_threads)
2484 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2485 child->stepping ? "step" : "continue",
2486 target_pid_to_str (ptid_of (thread)));
2487
2488 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2489 return NULL;
2490 }
2491 }
2492
2493 child->status_pending_p = 1;
2494 child->status_pending = wstat;
2495 return child;
2496 }
2497
2498 /* Resume LWPs that are currently stopped without any pending status
2499 to report, but are resumed from the core's perspective. */
2500
2501 static void
2502 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2503 {
2504 struct thread_info *thread = (struct thread_info *) entry;
2505 struct lwp_info *lp = get_thread_lwp (thread);
2506
2507 if (lp->stopped
2508 && !lp->suspended
2509 && !lp->status_pending_p
2510 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2511 {
2512 int step = thread->last_resume_kind == resume_step;
2513
2514 if (debug_threads)
2515 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2516 target_pid_to_str (ptid_of (thread)),
2517 paddress (lp->stop_pc),
2518 step);
2519
2520 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2521 }
2522 }
2523
2524 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2525 match FILTER_PTID (leaving others pending). The PTIDs can be:
2526 minus_one_ptid, to specify any child; a pid PTID, specifying all
2527 lwps of a thread group; or a PTID representing a single lwp. Store
2528 the stop status through the status pointer WSTAT. OPTIONS is
2529 passed to the waitpid call. Return 0 if no event was found and
2530 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2531 were found. Return the PID of the stopped child otherwise. */
2532
2533 static int
2534 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2535 int *wstatp, int options)
2536 {
2537 struct thread_info *event_thread;
2538 struct lwp_info *event_child, *requested_child;
2539 sigset_t block_mask, prev_mask;
2540
2541 retry:
2542 /* N.B. event_thread points to the thread_info struct that contains
2543 event_child. Keep them in sync. */
2544 event_thread = NULL;
2545 event_child = NULL;
2546 requested_child = NULL;
2547
2548 /* Check for a lwp with a pending status. */
2549
2550 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2551 {
2552 event_thread = (struct thread_info *)
2553 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2554 if (event_thread != NULL)
2555 event_child = get_thread_lwp (event_thread);
2556 if (debug_threads && event_thread)
2557 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2558 }
2559 else if (!ptid_equal (filter_ptid, null_ptid))
2560 {
2561 requested_child = find_lwp_pid (filter_ptid);
2562
2563 if (stopping_threads == NOT_STOPPING_THREADS
2564 && requested_child->status_pending_p
2565 && requested_child->collecting_fast_tracepoint)
2566 {
2567 enqueue_one_deferred_signal (requested_child,
2568 &requested_child->status_pending);
2569 requested_child->status_pending_p = 0;
2570 requested_child->status_pending = 0;
2571 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2572 }
2573
2574 if (requested_child->suspended
2575 && requested_child->status_pending_p)
2576 {
2577 internal_error (__FILE__, __LINE__,
2578 "requesting an event out of a"
2579 " suspended child?");
2580 }
2581
2582 if (requested_child->status_pending_p)
2583 {
2584 event_child = requested_child;
2585 event_thread = get_lwp_thread (event_child);
2586 }
2587 }
2588
2589 if (event_child != NULL)
2590 {
2591 if (debug_threads)
2592 debug_printf ("Got an event from pending child %ld (%04x)\n",
2593 lwpid_of (event_thread), event_child->status_pending);
2594 *wstatp = event_child->status_pending;
2595 event_child->status_pending_p = 0;
2596 event_child->status_pending = 0;
2597 current_thread = event_thread;
2598 return lwpid_of (event_thread);
2599 }
2600
2601 /* But if we don't find a pending event, we'll have to wait.
2602
2603 We only enter this loop if no process has a pending wait status.
2604 Thus any action taken in response to a wait status inside this
2605 loop is responding as soon as we detect the status, not after any
2606 pending events. */
2607
2608 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2609 all signals while here. */
2610 sigfillset (&block_mask);
2611 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2612
2613 /* Always pull all events out of the kernel. We'll randomly select
2614 an event LWP out of all that have events, to prevent
2615 starvation. */
2616 while (event_child == NULL)
2617 {
2618 pid_t ret = 0;
2619
2620 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2621 quirks:
2622
2623 - If the thread group leader exits while other threads in the
2624 thread group still exist, waitpid(TGID, ...) hangs. That
2625 waitpid won't return an exit status until the other threads
2626 in the group are reaped.
2627
2628 - When a non-leader thread execs, that thread just vanishes
2629 without reporting an exit (so we'd hang if we waited for it
2630 explicitly in that case). The exec event is reported to
2631 the TGID pid. */
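#if 0
	  /* Counter-example (never compiled): a per-process wait like
	     this would hang on both quirks described above, which is
	     why the real code just below always waits on -1 with
	     WNOHANG and filters the results afterwards.  */
	  ret = my_waitpid (ptid_get_pid (filter_ptid), wstatp, options);
#endif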
2632 errno = 0;
2633 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2634
2635 if (debug_threads)
2636 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2637 ret, errno ? strerror (errno) : "ERRNO-OK");
2638
2639 if (ret > 0)
2640 {
2641 if (debug_threads)
2642 {
2643 debug_printf ("LLW: waitpid %ld received %s\n",
2644 (long) ret, status_to_str (*wstatp));
2645 }
2646
2647 /* Filter all events. IOW, leave all events pending. We'll
2648 randomly select an event LWP out of all that have events
2649 below. */
2650 linux_low_filter_event (ret, *wstatp);
2651 /* Retry until nothing comes out of waitpid. A single
2652 SIGCHLD can indicate more than one child stopped. */
2653 continue;
2654 }
2655
2656 /* Now that we've pulled all events out of the kernel, resume
2657 LWPs that don't have an interesting event to report. */
2658 if (stopping_threads == NOT_STOPPING_THREADS)
2659 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2660
2661 /* ... and find an LWP with a status to report to the core, if
2662 any. */
2663 event_thread = (struct thread_info *)
2664 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2665 if (event_thread != NULL)
2666 {
2667 event_child = get_thread_lwp (event_thread);
2668 *wstatp = event_child->status_pending;
2669 event_child->status_pending_p = 0;
2670 event_child->status_pending = 0;
2671 break;
2672 }
2673
2674 /* Check for zombie thread group leaders. Those can't be reaped
2675 until all other threads in the thread group are. */
2676 check_zombie_leaders ();
2677
2678 /* If there are no resumed children left in the set of LWPs we
2679 want to wait for, bail. We can't just block in
2680 waitpid/sigsuspend, because lwps might have been left stopped
2681 in trace-stop state, and we'd be stuck forever waiting for
2682 their status to change (which would only happen if we resumed
2683 them). Even if WNOHANG is set, this return code is preferred
2684 over 0 (below), as it is more detailed. */
2685 if ((find_inferior (&all_threads,
2686 not_stopped_callback,
2687 &wait_ptid) == NULL))
2688 {
2689 if (debug_threads)
2690 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2691 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2692 return -1;
2693 }
2694
2695 /* No interesting event to report to the caller. */
2696 if ((options & WNOHANG))
2697 {
2698 if (debug_threads)
2699 debug_printf ("WNOHANG set, no event found\n");
2700
2701 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2702 return 0;
2703 }
2704
2705 /* Block until we get an event reported with SIGCHLD. */
2706 if (debug_threads)
2707 debug_printf ("sigsuspend'ing\n");
2708
2709 sigsuspend (&prev_mask);
2710 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2711 goto retry;
2712 }
2713
2714 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2715
2716 current_thread = event_thread;
2717
2718 return lwpid_of (event_thread);
2719 }
2720
2721 /* Wait for an event from child(ren) PTID. PTIDs can be:
2722 minus_one_ptid, to specify any child; a pid PTID, specifying all
2723 lwps of a thread group; or a PTID representing a single lwp. Store
2724 the stop status through the status pointer WSTAT. OPTIONS is
2725 passed to the waitpid call. Return 0 if no event was found and
2726 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2727 were found. Return the PID of the stopped child otherwise. */
2728
2729 static int
2730 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2731 {
2732 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2733 }
2734
2735 /* Count the LWPs that have had events. */
2736
2737 static int
2738 count_events_callback (struct inferior_list_entry *entry, void *data)
2739 {
2740 struct thread_info *thread = (struct thread_info *) entry;
2741 struct lwp_info *lp = get_thread_lwp (thread);
2742 int *count = (int *) data;
2743
2744 gdb_assert (count != NULL);
2745
2746 /* Count only resumed LWPs that have an event pending. */
2747 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2748 && lp->status_pending_p)
2749 (*count)++;
2750
2751 return 0;
2752 }
2753
2754 /* Select the LWP (if any) that is currently being single-stepped. */
2755
2756 static int
2757 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2758 {
2759 struct thread_info *thread = (struct thread_info *) entry;
2760 struct lwp_info *lp = get_thread_lwp (thread);
2761
2762 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2763 && thread->last_resume_kind == resume_step
2764 && lp->status_pending_p)
2765 return 1;
2766 else
2767 return 0;
2768 }
2769
2770 /* Select the Nth LWP that has had an event. */
2771
2772 static int
2773 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2774 {
2775 struct thread_info *thread = (struct thread_info *) entry;
2776 struct lwp_info *lp = get_thread_lwp (thread);
2777 int *selector = (int *) data;
2778
2779 gdb_assert (selector != NULL);
2780
2781 /* Select only resumed LWPs that have an event pending. */
2782 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2783 && lp->status_pending_p)
2784 if ((*selector)-- == 0)
2785 return 1;
2786
2787 return 0;
2788 }
2789
2790 /* Select one LWP out of those that have events pending. */
2791
2792 static void
2793 select_event_lwp (struct lwp_info **orig_lp)
2794 {
2795 int num_events = 0;
2796 int random_selector;
2797 struct thread_info *event_thread = NULL;
2798
2799 /* In all-stop, give preference to the LWP that is being
2800 single-stepped. There will be at most one, and it's the LWP that
2801 the core is most interested in. If we didn't do this, then we'd
2802 have to handle pending step SIGTRAPs somehow in case the core
2803 later continues the previously-stepped thread, otherwise we'd
2804 report the pending SIGTRAP, and the core, not having stepped the
2805 thread, wouldn't understand what the trap was for, and therefore
2806 would report it to the user as a random signal. */
2807 if (!non_stop)
2808 {
2809 event_thread
2810 = (struct thread_info *) find_inferior (&all_threads,
2811 select_singlestep_lwp_callback,
2812 NULL);
2813 if (event_thread != NULL)
2814 {
2815 if (debug_threads)
2816 debug_printf ("SEL: Select single-step %s\n",
2817 target_pid_to_str (ptid_of (event_thread)));
2818 }
2819 }
2820 if (event_thread == NULL)
2821 {
2822 /* No single-stepping LWP. Select one at random, out of those
2823 which have had events. */
2824
2825 /* First see how many events we have. */
2826 find_inferior (&all_threads, count_events_callback, &num_events);
2827 gdb_assert (num_events > 0);
2828
2829 /* Now randomly pick a LWP out of those that have had
2830 events. */
2831 random_selector = (int)
2832 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2833
2834 if (debug_threads && num_events > 1)
2835 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2836 num_events, random_selector);
2837
2838 event_thread
2839 = (struct thread_info *) find_inferior (&all_threads,
2840 select_event_lwp_callback,
2841 &random_selector);
2842 }
2843
2844 if (event_thread != NULL)
2845 {
2846 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2847
2848 /* Switch the event LWP. */
2849 *orig_lp = event_lp;
2850 }
2851 }
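
/* Worked example for the selector formula above: it maps rand () in
   [0, RAND_MAX] uniformly onto [0, num_events). With num_events == 4:

     rand () == 0         =>  random_selector == 0
     rand () == RAND_MAX  =>  (4.0 * RAND_MAX) / (RAND_MAX + 1.0) is
                              just below 4, so random_selector == 3

   so the result can never reach num_events itself.  */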
2852
2853 /* Decrement the suspend count of an LWP. */
2854
2855 static int
2856 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2857 {
2858 struct thread_info *thread = (struct thread_info *) entry;
2859 struct lwp_info *lwp = get_thread_lwp (thread);
2860
2861 /* Ignore EXCEPT. */
2862 if (lwp == except)
2863 return 0;
2864
2865 lwp_suspended_decr (lwp);
2866 return 0;
2867 }
2868
2869 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2870 non-NULL. */
2871
2872 static void
2873 unsuspend_all_lwps (struct lwp_info *except)
2874 {
2875 find_inferior (&all_threads, unsuspend_one_lwp, except);
2876 }
2877
2878 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2879 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2880 void *data);
2881 static int lwp_running (struct inferior_list_entry *entry, void *data);
2882 static ptid_t linux_wait_1 (ptid_t ptid,
2883 struct target_waitstatus *ourstatus,
2884 int target_options);
2885
2886 /* Stabilize threads (move out of jump pads).
2887
2888 If a thread is midway collecting a fast tracepoint, we need to
2889 finish the collection and move it out of the jump pad before
2890 reporting the signal.
2891
2892 This avoids recursion while collecting (when a signal arrives
2893 midway, and the signal handler itself collects), which would trash
2894 the trace buffer. In case the user set a breakpoint in a signal
2895 handler, this avoids the backtrace showing the jump pad, etc.
2896 Most importantly, there are certain things we can't do safely if
2897 threads are stopped in a jump pad (or in its callees). For
2898 example:
2899
2900 - starting a new trace run. A thread still collecting the
2901 previous run could trash the trace buffer when resumed. The trace
2902 buffer control structures would have been reset but the thread had
2903 no way to tell. The thread could even be midway through memcpy'ing
2904 to the buffer, which would mean that when resumed, it would clobber
2905 the trace buffer that had been set for a new run.
2906
2907 - we can't rewrite/reuse the jump pads for new tracepoints
2908 safely. Say you do tstart while a thread is stopped midway through
2909 collecting. When the thread is later resumed, it finishes the
2910 collection, and returns to the jump pad, to execute the original
2911 instruction that was under the tracepoint jump at the time the
2912 older run had been started. If the jump pad had been rewritten
2913 since for something else in the new run, the thread would now
2914 execute the wrong / random instructions. */
2915
2916 static void
2917 linux_stabilize_threads (void)
2918 {
2919 struct thread_info *saved_thread;
2920 struct thread_info *thread_stuck;
2921
2922 thread_stuck
2923 = (struct thread_info *) find_inferior (&all_threads,
2924 stuck_in_jump_pad_callback,
2925 NULL);
2926 if (thread_stuck != NULL)
2927 {
2928 if (debug_threads)
2929 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2930 lwpid_of (thread_stuck));
2931 return;
2932 }
2933
2934 saved_thread = current_thread;
2935
2936 stabilizing_threads = 1;
2937
2938 /* Kick 'em all. */
2939 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2940
2941 /* Loop until all are stopped out of the jump pads. */
2942 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2943 {
2944 struct target_waitstatus ourstatus;
2945 struct lwp_info *lwp;
2946 int wstat;
2947
2948 /* Note that we go through the full wait event loop. While
2949 moving threads out of the jump pad, we need to be able to step
2950 over internal breakpoints and such. */
2951 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2952
2953 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2954 {
2955 lwp = get_thread_lwp (current_thread);
2956
2957 /* Lock it. */
2958 lwp_suspended_inc (lwp);
2959
2960 if (ourstatus.value.sig != GDB_SIGNAL_0
2961 || current_thread->last_resume_kind == resume_stop)
2962 {
2963 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2964 enqueue_one_deferred_signal (lwp, &wstat);
2965 }
2966 }
2967 }
2968
2969 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2970
2971 stabilizing_threads = 0;
2972
2973 current_thread = saved_thread;
2974
2975 if (debug_threads)
2976 {
2977 thread_stuck
2978 = (struct thread_info *) find_inferior (&all_threads,
2979 stuck_in_jump_pad_callback,
2980 NULL);
2981 if (thread_stuck != NULL)
2982 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2983 lwpid_of (thread_stuck));
2984 }
2985 }
2986
2987 /* Convenience function that is called when the kernel reports an
2988 event that is not passed out to GDB. */
2989
2990 static ptid_t
2991 ignore_event (struct target_waitstatus *ourstatus)
2992 {
2993 /* If we got an event, there may still be others, as a single
2994 SIGCHLD can indicate more than one child stopped. This forces
2995 another target_wait call. */
2996 async_file_mark ();
2997
2998 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2999 return null_ptid;
3000 }
3001
3002 /* Convenience function that is called when the kernel reports an exit
3003 event. This decides whether to report the event to GDB as a
3004 process exit event, a thread exit event, or to suppress the
3005 event. */
3006
3007 static ptid_t
3008 filter_exit_event (struct lwp_info *event_child,
3009 struct target_waitstatus *ourstatus)
3010 {
3011 struct thread_info *thread = get_lwp_thread (event_child);
3012 ptid_t ptid = ptid_of (thread);
3013
3014 if (!last_thread_of_process_p (pid_of (thread)))
3015 {
3016 if (report_thread_events)
3017 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3018 else
3019 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3020
3021 delete_lwp (event_child);
3022 }
3023 return ptid;
3024 }
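
/* Summary of the decision above, for reference:

     last thread of process?   report_thread_events?   resulting kind
     yes                       (either)                left as set by the caller
     no                        yes                     TARGET_WAITKIND_THREAD_EXITED
     no                        no                      TARGET_WAITKIND_IGNORE

   In both "no" rows the LWP is also deleted.  */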
3025
3026 /* Returns 1 if GDB is interested in any of EVENT_CHILD's syscalls. */
3027
3028 static int
3029 gdb_catching_syscalls_p (struct lwp_info *event_child)
3030 {
3031 struct thread_info *thread = get_lwp_thread (event_child);
3032 struct process_info *proc = get_thread_process (thread);
3033
3034 return !VEC_empty (int, proc->syscalls_to_catch);
3035 }
3036
3037 /* Returns 1 if GDB is interested in the event_child syscall.
3038 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3039
3040 static int
3041 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3042 {
3043 int i, iter;
3044 int sysno, sysret;
3045 struct thread_info *thread = get_lwp_thread (event_child);
3046 struct process_info *proc = get_thread_process (thread);
3047
3048 if (VEC_empty (int, proc->syscalls_to_catch))
3049 return 0;
3050
3051 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3052 return 1;
3053
3054 get_syscall_trapinfo (event_child, &sysno, &sysret);
3055 for (i = 0;
3056 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3057 i++)
3058 if (iter == sysno)
3059 return 1;
3060
3061 return 0;
3062 }
3063
3064 /* Wait for the process, and return its status. */
3065
3066 static ptid_t
3067 linux_wait_1 (ptid_t ptid,
3068 struct target_waitstatus *ourstatus, int target_options)
3069 {
3070 int w;
3071 struct lwp_info *event_child;
3072 int options;
3073 int pid;
3074 int step_over_finished;
3075 int bp_explains_trap;
3076 int maybe_internal_trap;
3077 int report_to_gdb;
3078 int trace_event;
3079 int in_step_range;
3080 int any_resumed;
3081
3082 if (debug_threads)
3083 {
3084 debug_enter ();
3085 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3086 }
3087
3088 /* Translate generic target options into linux options. */
3089 options = __WALL;
3090 if (target_options & TARGET_WNOHANG)
3091 options |= WNOHANG;
3092
3093 bp_explains_trap = 0;
3094 trace_event = 0;
3095 in_step_range = 0;
3096 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3097
3098 /* Find a resumed LWP, if any. */
3099 if (find_inferior (&all_threads,
3100 status_pending_p_callback,
3101 &minus_one_ptid) != NULL)
3102 any_resumed = 1;
3103 else if ((find_inferior (&all_threads,
3104 not_stopped_callback,
3105 &minus_one_ptid) != NULL))
3106 any_resumed = 1;
3107 else
3108 any_resumed = 0;
3109
3110 if (ptid_equal (step_over_bkpt, null_ptid))
3111 pid = linux_wait_for_event (ptid, &w, options);
3112 else
3113 {
3114 if (debug_threads)
3115 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3116 target_pid_to_str (step_over_bkpt));
3117 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3118 }
3119
3120 if (pid == 0 || (pid == -1 && !any_resumed))
3121 {
3122 gdb_assert (target_options & TARGET_WNOHANG);
3123
3124 if (debug_threads)
3125 {
3126 debug_printf ("linux_wait_1 ret = null_ptid, "
3127 "TARGET_WAITKIND_IGNORE\n");
3128 debug_exit ();
3129 }
3130
3131 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3132 return null_ptid;
3133 }
3134 else if (pid == -1)
3135 {
3136 if (debug_threads)
3137 {
3138 debug_printf ("linux_wait_1 ret = null_ptid, "
3139 "TARGET_WAITKIND_NO_RESUMED\n");
3140 debug_exit ();
3141 }
3142
3143 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3144 return null_ptid;
3145 }
3146
3147 event_child = get_thread_lwp (current_thread);
3148
3149 /* linux_wait_for_event only returns an exit status for the last
3150 child of a process. Report it. */
3151 if (WIFEXITED (w) || WIFSIGNALED (w))
3152 {
3153 if (WIFEXITED (w))
3154 {
3155 ourstatus->kind = TARGET_WAITKIND_EXITED;
3156 ourstatus->value.integer = WEXITSTATUS (w);
3157
3158 if (debug_threads)
3159 {
3160 debug_printf ("linux_wait_1 ret = %s, exited with "
3161 "retcode %d\n",
3162 target_pid_to_str (ptid_of (current_thread)),
3163 WEXITSTATUS (w));
3164 debug_exit ();
3165 }
3166 }
3167 else
3168 {
3169 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3170 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3171
3172 if (debug_threads)
3173 {
3174 debug_printf ("linux_wait_1 ret = %s, terminated with "
3175 "signal %d\n",
3176 target_pid_to_str (ptid_of (current_thread)),
3177 WTERMSIG (w));
3178 debug_exit ();
3179 }
3180 }
3181
3182 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3183 return filter_exit_event (event_child, ourstatus);
3184
3185 return ptid_of (current_thread);
3186 }
3187
3188 /* If a step-over executes a breakpoint instruction, then in the case
3189 of a hardware single step it means a gdb/gdbserver breakpoint had
3190 been planted on top of a permanent breakpoint, while in the case of
3191 a software single step it may just mean that gdbserver hit the
3192 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3193 to point at the breakpoint address.
3194 So in the case of a hardware single step, advance the PC manually
3195 past the breakpoint, and in the case of a software single step,
3196 advance only if it's not the reinsert_breakpoint we are hitting.
3197 This prevents the program from trapping the permanent breakpoint
3198 forever. */
3199 if (!ptid_equal (step_over_bkpt, null_ptid)
3200 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3201 && (event_child->stepping
3202 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3203 {
3204 int increment_pc = 0;
3205 int breakpoint_kind = 0;
3206 CORE_ADDR stop_pc = event_child->stop_pc;
3207
3208 breakpoint_kind =
3209 the_target->breakpoint_kind_from_current_state (&stop_pc);
3210 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3211
3212 if (debug_threads)
3213 {
3214 debug_printf ("step-over for %s executed software breakpoint\n",
3215 target_pid_to_str (ptid_of (current_thread)));
3216 }
3217
3218 if (increment_pc != 0)
3219 {
3220 struct regcache *regcache
3221 = get_thread_regcache (current_thread, 1);
3222
3223 event_child->stop_pc += increment_pc;
3224 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3225
3226 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3227 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3228 }
3229 }
3230
3231 /* If this event was not handled before, and is not a SIGTRAP, we
3232 report it. SIGILL and SIGSEGV are also treated as traps in case
3233 a breakpoint is inserted at the current PC. If this target does
3234 not support internal breakpoints at all, we also report the
3235 SIGTRAP without further processing; it's of no concern to us. */
3236 maybe_internal_trap
3237 = (supports_breakpoints ()
3238 && (WSTOPSIG (w) == SIGTRAP
3239 || ((WSTOPSIG (w) == SIGILL
3240 || WSTOPSIG (w) == SIGSEGV)
3241 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3242
3243 if (maybe_internal_trap)
3244 {
3245 /* Handle anything that requires bookkeeping before deciding to
3246 report the event or continue waiting. */
3247
3248 /* First check if we can explain the SIGTRAP with an internal
3249 breakpoint, or if we should possibly report the event to GDB.
3250 Do this before anything that may remove or insert a
3251 breakpoint. */
3252 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3253
3254 /* We have a SIGTRAP, possibly a step-over dance has just
3255 finished. If so, tweak the state machine accordingly,
3256 reinsert breakpoints and delete any reinsert (software
3257 single-step) breakpoints. */
3258 step_over_finished = finish_step_over (event_child);
3259
3260 /* Now invoke the callbacks of any internal breakpoints there. */
3261 check_breakpoints (event_child->stop_pc);
3262
3263 /* Handle tracepoint data collecting. This may overflow the
3264 trace buffer, and cause a tracing stop, removing
3265 breakpoints. */
3266 trace_event = handle_tracepoints (event_child);
3267
3268 if (bp_explains_trap)
3269 {
3270 /* If we stepped or ran into an internal breakpoint, we've
3271 already handled it. So next time we resume (from this
3272 PC), we should step over it. */
3273 if (debug_threads)
3274 debug_printf ("Hit a gdbserver breakpoint.\n");
3275
3276 if (breakpoint_here (event_child->stop_pc))
3277 event_child->need_step_over = 1;
3278 }
3279 }
3280 else
3281 {
3282 /* We have some other signal, possibly a step-over dance was in
3283 progress, and it should be cancelled too. */
3284 step_over_finished = finish_step_over (event_child);
3285 }
3286
3287 /* We have all the data we need. Either report the event to GDB, or
3288 resume threads and keep waiting for more. */
3289
3290 /* If we're collecting a fast tracepoint, finish the collection and
3291 move out of the jump pad before delivering a signal. See
3292 linux_stabilize_threads. */
3293
3294 if (WIFSTOPPED (w)
3295 && WSTOPSIG (w) != SIGTRAP
3296 && supports_fast_tracepoints ()
3297 && agent_loaded_p ())
3298 {
3299 if (debug_threads)
3300 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3301 "to defer or adjust it.\n",
3302 WSTOPSIG (w), lwpid_of (current_thread));
3303
3304 /* Allow debugging the jump pad itself. */
3305 if (current_thread->last_resume_kind != resume_step
3306 && maybe_move_out_of_jump_pad (event_child, &w))
3307 {
3308 enqueue_one_deferred_signal (event_child, &w);
3309
3310 if (debug_threads)
3311 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3312 WSTOPSIG (w), lwpid_of (current_thread));
3313
3314 linux_resume_one_lwp (event_child, 0, 0, NULL);
3315
3316 return ignore_event (ourstatus);
3317 }
3318 }
3319
3320 if (event_child->collecting_fast_tracepoint)
3321 {
3322 if (debug_threads)
3323 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3324 "Check if we're already there.\n",
3325 lwpid_of (current_thread),
3326 event_child->collecting_fast_tracepoint);
3327
3328 trace_event = 1;
3329
3330 event_child->collecting_fast_tracepoint
3331 = linux_fast_tracepoint_collecting (event_child, NULL);
3332
3333 if (event_child->collecting_fast_tracepoint != 1)
3334 {
3335 /* No longer need this breakpoint. */
3336 if (event_child->exit_jump_pad_bkpt != NULL)
3337 {
3338 if (debug_threads)
3339 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3340 "stopping all threads momentarily.\n");
3341
3342 /* Other running threads could hit this breakpoint.
3343 We don't handle moribund locations like GDB does,
3344 instead we always pause all threads when removing
3345 breakpoints, so that any step-over or
3346 decr_pc_after_break adjustment is always taken
3347 care of while the breakpoint is still
3348 inserted. */
3349 stop_all_lwps (1, event_child);
3350
3351 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3352 event_child->exit_jump_pad_bkpt = NULL;
3353
3354 unstop_all_lwps (1, event_child);
3355
3356 gdb_assert (event_child->suspended >= 0);
3357 }
3358 }
3359
3360 if (event_child->collecting_fast_tracepoint == 0)
3361 {
3362 if (debug_threads)
3363 debug_printf ("fast tracepoint finished "
3364 "collecting successfully.\n");
3365
3366 /* We may have a deferred signal to report. */
3367 if (dequeue_one_deferred_signal (event_child, &w))
3368 {
3369 if (debug_threads)
3370 debug_printf ("dequeued one signal.\n");
3371 }
3372 else
3373 {
3374 if (debug_threads)
3375 debug_printf ("no deferred signals.\n");
3376
3377 if (stabilizing_threads)
3378 {
3379 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3380 ourstatus->value.sig = GDB_SIGNAL_0;
3381
3382 if (debug_threads)
3383 {
3384 debug_printf ("linux_wait_1 ret = %s, stopped "
3385 "while stabilizing threads\n",
3386 target_pid_to_str (ptid_of (current_thread)));
3387 debug_exit ();
3388 }
3389
3390 return ptid_of (current_thread);
3391 }
3392 }
3393 }
3394 }
3395
3396 /* Check whether GDB would be interested in this event. */
3397
3398 /* Check if GDB is interested in this syscall. */
3399 if (WIFSTOPPED (w)
3400 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3401 && !gdb_catch_this_syscall_p (event_child))
3402 {
3403 if (debug_threads)
3404 {
3405 debug_printf ("Ignored syscall for LWP %ld.\n",
3406 lwpid_of (current_thread));
3407 }
3408
3409 linux_resume_one_lwp (event_child, event_child->stepping,
3410 0, NULL);
3411 return ignore_event (ourstatus);
3412 }
3413
3414 /* If GDB is not interested in this signal, don't stop other
3415 threads, and don't report it to GDB. Just resume the inferior
3416 right away. We do this for threading-related signals as well as
3417 any that GDB specifically requested we ignore. But never ignore
3418 SIGSTOP if we sent it ourselves, and do not ignore signals when
3419 stepping - they may require special handling to skip the signal
3420 handler. Also never ignore signals that could be caused by a
3421 breakpoint. */
3422 if (WIFSTOPPED (w)
3423 && current_thread->last_resume_kind != resume_step
3424 && (
3425 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3426 (current_process ()->priv->thread_db != NULL
3427 && (WSTOPSIG (w) == __SIGRTMIN
3428 || WSTOPSIG (w) == __SIGRTMIN + 1))
3429 ||
3430 #endif
3431 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3432 && !(WSTOPSIG (w) == SIGSTOP
3433 && current_thread->last_resume_kind == resume_stop)
3434 && !linux_wstatus_maybe_breakpoint (w))))
3435 {
3436 siginfo_t info, *info_p;
3437
3438 if (debug_threads)
3439 debug_printf ("Ignored signal %d for LWP %ld.\n",
3440 WSTOPSIG (w), lwpid_of (current_thread));
3441
3442 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3443 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3444 info_p = &info;
3445 else
3446 info_p = NULL;
3447
3448 if (step_over_finished)
3449 {
3450 /* We cancelled this thread's step-over above. We still
3451 need to unsuspend all other LWPs, and set them back
3452 running again while the signal handler runs. */
3453 unsuspend_all_lwps (event_child);
3454
3455 /* Enqueue the pending signal info so that proceed_all_lwps
3456 doesn't lose it. */
3457 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3458
3459 proceed_all_lwps ();
3460 }
3461 else
3462 {
3463 linux_resume_one_lwp (event_child, event_child->stepping,
3464 WSTOPSIG (w), info_p);
3465 }
3466 return ignore_event (ourstatus);
3467 }
3468
3469 /* Note that all addresses are always "out of the step range" when
3470 there's no range to begin with. */
3471 in_step_range = lwp_in_step_range (event_child);
3472
3473 /* If GDB wanted this thread to single step, and the thread is out
3474 of the step range, we always want to report the SIGTRAP, and let
3475 GDB handle it. Watchpoints should always be reported. So should
3476 signals we can't explain. A SIGTRAP we can't explain could be a
3477 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3478 we do, we'll be able to handle GDB breakpoints on top of internal
3479 breakpoints, by handling the internal breakpoint and still
3480 reporting the event to GDB. If we don't, we're out of luck, GDB
3481 won't see the breakpoint hit. If we see a single-step event but
3482 the thread should be continuing, don't pass the trap to gdb.
3483 That indicates that we had previously finished a single-step but
3484 left the single-step pending -- see
3485 complete_ongoing_step_over. */
3486 report_to_gdb = (!maybe_internal_trap
3487 || (current_thread->last_resume_kind == resume_step
3488 && !in_step_range)
3489 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3490 || (!in_step_range
3491 && !bp_explains_trap
3492 && !trace_event
3493 && !step_over_finished
3494 && !(current_thread->last_resume_kind == resume_continue
3495 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3496 || (gdb_breakpoint_here (event_child->stop_pc)
3497 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3498 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3499 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3500
3501 run_breakpoint_commands (event_child->stop_pc);
3502
3503 /* We found no reason GDB would want us to stop. We either hit one
3504 of our own breakpoints, or finished an internal step GDB
3505 shouldn't know about. */
3506 if (!report_to_gdb)
3507 {
3508 if (debug_threads)
3509 {
3510 if (bp_explains_trap)
3511 debug_printf ("Hit a gdbserver breakpoint.\n");
3512 if (step_over_finished)
3513 debug_printf ("Step-over finished.\n");
3514 if (trace_event)
3515 debug_printf ("Tracepoint event.\n");
3516 if (lwp_in_step_range (event_child))
3517 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3518 paddress (event_child->stop_pc),
3519 paddress (event_child->step_range_start),
3520 paddress (event_child->step_range_end));
3521 }
3522
3523 /* We're not reporting this breakpoint to GDB, so apply the
3524 decr_pc_after_break adjustment to the inferior's regcache
3525 ourselves. */
3526
3527 if (the_low_target.set_pc != NULL)
3528 {
3529 struct regcache *regcache
3530 = get_thread_regcache (current_thread, 1);
3531 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3532 }
3533
3534 /* We may have finished stepping over a breakpoint. If so,
3535 we've stopped and suspended all LWPs momentarily except the
3536 stepping one. This is where we resume them all again. We're
3537 going to keep waiting, so use proceed, which handles stepping
3538 over the next breakpoint. */
3539 if (debug_threads)
3540 debug_printf ("proceeding all threads.\n");
3541
3542 if (step_over_finished)
3543 unsuspend_all_lwps (event_child);
3544
3545 proceed_all_lwps ();
3546 return ignore_event (ourstatus);
3547 }
3548
3549 if (debug_threads)
3550 {
3551 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3552 {
3553 char *str;
3554
3555 str = target_waitstatus_to_string (&event_child->waitstatus);
3556 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3557 lwpid_of (get_lwp_thread (event_child)), str);
3558 xfree (str);
3559 }
3560 if (current_thread->last_resume_kind == resume_step)
3561 {
3562 if (event_child->step_range_start == event_child->step_range_end)
3563 debug_printf ("GDB wanted to single-step, reporting event.\n");
3564 else if (!lwp_in_step_range (event_child))
3565 debug_printf ("Out of step range, reporting event.\n");
3566 }
3567 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3568 debug_printf ("Stopped by watchpoint.\n");
3569 else if (gdb_breakpoint_here (event_child->stop_pc))
3570 debug_printf ("Stopped by GDB breakpoint.\n");
3571 if (debug_threads)
3572 debug_printf ("Hit a non-gdbserver trap event.\n");
3573 }
3574
3575 /* Alright, we're going to report a stop. */
3576
3577 if (!stabilizing_threads)
3578 {
3579 /* In all-stop, stop all threads. */
3580 if (!non_stop)
3581 stop_all_lwps (0, NULL);
3582
3583 /* If we're not waiting for a specific LWP, choose an event LWP
3584 from among those that have had events. Giving equal priority
3585 to all LWPs that have had events helps prevent
3586 starvation. */
3587 if (ptid_equal (ptid, minus_one_ptid))
3588 {
3589 event_child->status_pending_p = 1;
3590 event_child->status_pending = w;
3591
3592 select_event_lwp (&event_child);
3593
3594 /* current_thread and event_child must stay in sync. */
3595 current_thread = get_lwp_thread (event_child);
3596
3597 event_child->status_pending_p = 0;
3598 w = event_child->status_pending;
3599 }
3600
3601 if (step_over_finished)
3602 {
3603 if (!non_stop)
3604 {
3605 /* If we were doing a step-over, all other threads but
3606 the stepping one had been paused in start_step_over,
3607 with their suspend counts incremented. We don't want
3608 to do a full unstop/unpause, because we're in
3609 all-stop mode (so we want threads stopped), but we
3610 still need to unsuspend the other threads, to
3611 decrement their `suspended' count back. */
3612 unsuspend_all_lwps (event_child);
3613 }
3614 else
3615 {
3616 /* If we just finished a step-over, then all threads had
3617 been momentarily paused. In all-stop, that's fine,
3618 we want threads stopped by now anyway. In non-stop,
3619 we need to re-resume threads that GDB wanted to be
3620 running. */
3621 unstop_all_lwps (1, event_child);
3622 }
3623 }
3624
3625 /* Stabilize threads (move out of jump pads). */
3626 if (!non_stop)
3627 stabilize_threads ();
3628 }
3629 else
3630 {
3631 /* If we just finished a step-over, then all threads had been
3632 momentarily paused. In all-stop, that's fine, we want
3633 threads stopped by now anyway. In non-stop, we need to
3634 re-resume threads that GDB wanted to be running. */
3635 if (step_over_finished)
3636 unstop_all_lwps (1, event_child);
3637 }
3638
3639 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3640 {
3641 /* If the reported event is an exit, fork, vfork or exec, let
3642 GDB know. */
3643 *ourstatus = event_child->waitstatus;
3644 /* Clear the event lwp's waitstatus since we handled it already. */
3645 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3646 }
3647 else
3648 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3649
3650 /* Now that we've selected our final event LWP, un-adjust its PC if
3651 it was a software breakpoint, and the client doesn't know we can
3652 adjust the breakpoint ourselves. */
3653 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3654 && !swbreak_feature)
3655 {
3656 int decr_pc = the_low_target.decr_pc_after_break;
3657
3658 if (decr_pc != 0)
3659 {
3660 struct regcache *regcache
3661 = get_thread_regcache (current_thread, 1);
3662 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3663 }
3664 }
3665
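 /* Editorial illustration, not part of the original source: on x86,
    "int3" traps with the PC one byte past the breakpoint instruction,
    so decr_pc_after_break is 1 there.  When the SIGTRAP was first
    processed, the stop PC was computed as

      stop_pc = reported_pc - decr_pc;

    so re-adding decr_pc above simply hands a client that lacks the
    "swbreak" stop-reason feature the raw PC the kernel reported,
    letting the client apply its own adjustment.  */
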
3666 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3667 {
3668 int sysret;
3669
3670 get_syscall_trapinfo (event_child,
3671 &ourstatus->value.syscall_number, &sysret);
3672 ourstatus->kind = event_child->syscall_state;
3673 }
3674 else if (current_thread->last_resume_kind == resume_stop
3675 && WSTOPSIG (w) == SIGSTOP)
3676 {
3677 /* A thread that has been requested to stop by GDB with vCont;t
3678 stopped cleanly, so report it as stopped with signal 0.  The
3679 use of SIGSTOP is an implementation detail. */
3680 ourstatus->value.sig = GDB_SIGNAL_0;
3681 }
3682 else if (current_thread->last_resume_kind == resume_stop
3683 && WSTOPSIG (w) != SIGSTOP)
3684 {
3685 /* A thread that has been requested to stop by GDB with vCont;t,
3686 but stopped for some other reason. */
3687 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3688 }
3689 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3690 {
3691 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3692 }
3693
3694 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3695
3696 if (debug_threads)
3697 {
3698 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3699 target_pid_to_str (ptid_of (current_thread)),
3700 ourstatus->kind, ourstatus->value.sig);
3701 debug_exit ();
3702 }
3703
3704 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3705 return filter_exit_event (event_child, ourstatus);
3706
3707 return ptid_of (current_thread);
3708 }
3709
3710 /* Get rid of any pending event in the pipe. */
3711 static void
3712 async_file_flush (void)
3713 {
3714 int ret;
3715 char buf;
3716
3717 do
3718 ret = read (linux_event_pipe[0], &buf, 1);
3719 while (ret >= 0 || (ret == -1 && errno == EINTR));
3720 }
3721
3722 /* Put something in the pipe, so the event loop wakes up. */
3723 static void
3724 async_file_mark (void)
3725 {
3726 int ret;
3727
3728 async_file_flush ();
3729
3730 do
3731 ret = write (linux_event_pipe[1], "+", 1);
3732 while (ret == 0 || (ret == -1 && errno == EINTR));
3733
3734 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3735 be awakened anyway. */
3736 }
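
#if 0
/* Editorial sketch, not part of the original source: the two
   functions above assume linux_event_pipe was created non-blocking,
   roughly as below (example_make_event_pipe is a hypothetical name).
   The flush loop relies on read failing with EAGAIN once the pipe is
   drained, and the mark write must not block if the pipe is full.  */

static int
example_make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    return -1;
  return 0;
}
#endif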
3737
3738 static ptid_t
3739 linux_wait (ptid_t ptid,
3740 struct target_waitstatus *ourstatus, int target_options)
3741 {
3742 ptid_t event_ptid;
3743
3744 /* Flush the async file first. */
3745 if (target_is_async_p ())
3746 async_file_flush ();
3747
3748 do
3749 {
3750 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3751 }
3752 while ((target_options & TARGET_WNOHANG) == 0
3753 && ptid_equal (event_ptid, null_ptid)
3754 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3755
3756 /* If at least one stop was reported, there may be more. A single
3757 SIGCHLD can signal more than one child stop. */
3758 if (target_is_async_p ()
3759 && (target_options & TARGET_WNOHANG) != 0
3760 && !ptid_equal (event_ptid, null_ptid))
3761 async_file_mark ();
3762
3763 return event_ptid;
3764 }
3765
3766 /* Send a signal to an LWP. */
3767
3768 static int
3769 kill_lwp (unsigned long lwpid, int signo)
3770 {
3771 int ret;
3772
3773 errno = 0;
3774 ret = syscall (__NR_tkill, lwpid, signo);
3775 if (errno == ENOSYS)
3776 {
3777 /* If tkill fails, then we are not using nptl threads, a
3778 configuration we no longer support. */
3779 perror_with_name (("tkill"));
3780 }
3781 return ret;
3782 }
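
#if 0
/* Editorial sketch, not part of the original source: unlike kill(),
   which targets a whole process, tkill directs a signal at one
   specific thread, which is exactly what stopping an individual LWP
   requires.  A signal number of 0 performs only the existence and
   permission checks, so a hypothetical liveness probe looks like
   this.  */

static int
example_lwp_exists (unsigned long lwpid)
{
  return syscall (__NR_tkill, lwpid, 0) == 0;
}
#endif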
3783
3784 void
3785 linux_stop_lwp (struct lwp_info *lwp)
3786 {
3787 send_sigstop (lwp);
3788 }
3789
3790 static void
3791 send_sigstop (struct lwp_info *lwp)
3792 {
3793 int pid;
3794
3795 pid = lwpid_of (get_lwp_thread (lwp));
3796
3797 /* If we already have a pending stop signal for this process, don't
3798 send another. */
3799 if (lwp->stop_expected)
3800 {
3801 if (debug_threads)
3802 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3803
3804 return;
3805 }
3806
3807 if (debug_threads)
3808 debug_printf ("Sending sigstop to lwp %d\n", pid);
3809
3810 lwp->stop_expected = 1;
3811 kill_lwp (pid, SIGSTOP);
3812 }
3813
3814 static int
3815 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3816 {
3817 struct thread_info *thread = (struct thread_info *) entry;
3818 struct lwp_info *lwp = get_thread_lwp (thread);
3819
3820 /* Ignore EXCEPT. */
3821 if (lwp == except)
3822 return 0;
3823
3824 if (lwp->stopped)
3825 return 0;
3826
3827 send_sigstop (lwp);
3828 return 0;
3829 }
3830
3831 /* Increment the suspend count of an LWP, and stop it, if not stopped
3832 yet. */
3833 static int
3834 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3835 void *except)
3836 {
3837 struct thread_info *thread = (struct thread_info *) entry;
3838 struct lwp_info *lwp = get_thread_lwp (thread);
3839
3840 /* Ignore EXCEPT. */
3841 if (lwp == except)
3842 return 0;
3843
3844 lwp_suspended_inc (lwp);
3845
3846 return send_sigstop_callback (entry, except);
3847 }
3848
3849 static void
3850 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3851 {
3852 /* Store the exit status for later. */
3853 lwp->status_pending_p = 1;
3854 lwp->status_pending = wstat;
3855
3856 /* Store in waitstatus as well, as there's nothing else to process
3857 for this event. */
3858 if (WIFEXITED (wstat))
3859 {
3860 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3861 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3862 }
3863 else if (WIFSIGNALED (wstat))
3864 {
3865 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3866 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3867 }
3868
3869 /* Prevent trying to stop it. */
3870 lwp->stopped = 1;
3871
3872 /* No further stops are expected from a dead lwp. */
3873 lwp->stop_expected = 0;
3874 }
3875
3876 /* Return true if LWP has exited already, and has a pending exit event
3877 to report to GDB. */
3878
3879 static int
3880 lwp_is_marked_dead (struct lwp_info *lwp)
3881 {
3882 return (lwp->status_pending_p
3883 && (WIFEXITED (lwp->status_pending)
3884 || WIFSIGNALED (lwp->status_pending)));
3885 }
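
#if 0
/* Editorial sketch, not part of the original source: the mutually
   exclusive ways a raw waitpid status decodes, as relied upon by
   mark_lwp_dead and lwp_is_marked_dead above (hypothetical helper;
   needs <stdio.h>).  */

static void
example_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif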
3886
3887 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3888
3889 static void
3890 wait_for_sigstop (void)
3891 {
3892 struct thread_info *saved_thread;
3893 ptid_t saved_tid;
3894 int wstat;
3895 int ret;
3896
3897 saved_thread = current_thread;
3898 if (saved_thread != NULL)
3899 saved_tid = saved_thread->entry.id;
3900 else
3901 saved_tid = null_ptid; /* avoid bogus unused warning */
3902
3903 if (debug_threads)
3904 debug_printf ("wait_for_sigstop: pulling events\n");
3905
3906 /* Passing NULL_PTID as filter indicates we want all events to be
3907 left pending. Eventually this returns when there are no
3908 unwaited-for children left. */
3909 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3910 &wstat, __WALL);
3911 gdb_assert (ret == -1);
3912
3913 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3914 current_thread = saved_thread;
3915 else
3916 {
3917 if (debug_threads)
3918 debug_printf ("Previously current thread died.\n");
3919
3920 /* We can't change the current inferior behind GDB's back,
3921 otherwise, a subsequent command may apply to the wrong
3922 process. */
3923 current_thread = NULL;
3924 }
3925 }
3926
3927 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3928 move it out, because we need to report the stop event to GDB. For
3929 example, if the user puts a breakpoint in the jump pad, it's
3930 because she wants to debug it. */
3931
3932 static int
3933 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3934 {
3935 struct thread_info *thread = (struct thread_info *) entry;
3936 struct lwp_info *lwp = get_thread_lwp (thread);
3937
3938 if (lwp->suspended != 0)
3939 {
3940 internal_error (__FILE__, __LINE__,
3941 "LWP %ld is suspended, suspended=%d\n",
3942 lwpid_of (thread), lwp->suspended);
3943 }
3944 gdb_assert (lwp->stopped);
3945
3946 /* Allow debugging the jump pad, gdb_collect, etc.  */
3947 return (supports_fast_tracepoints ()
3948 && agent_loaded_p ()
3949 && (gdb_breakpoint_here (lwp->stop_pc)
3950 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3951 || thread->last_resume_kind == resume_step)
3952 && linux_fast_tracepoint_collecting (lwp, NULL));
3953 }
3954
3955 static void
3956 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3957 {
3958 struct thread_info *thread = (struct thread_info *) entry;
3959 struct thread_info *saved_thread;
3960 struct lwp_info *lwp = get_thread_lwp (thread);
3961 int *wstat;
3962
3963 if (lwp->suspended != 0)
3964 {
3965 internal_error (__FILE__, __LINE__,
3966 "LWP %ld is suspended, suspended=%d\n",
3967 lwpid_of (thread), lwp->suspended);
3968 }
3969 gdb_assert (lwp->stopped);
3970
3971 /* For gdb_breakpoint_here. */
3972 saved_thread = current_thread;
3973 current_thread = thread;
3974
3975 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3976
3977 /* Allow debugging the jump pad, gdb_collect, etc. */
3978 if (!gdb_breakpoint_here (lwp->stop_pc)
3979 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3980 && thread->last_resume_kind != resume_step
3981 && maybe_move_out_of_jump_pad (lwp, wstat))
3982 {
3983 if (debug_threads)
3984 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3985 lwpid_of (thread));
3986
3987 if (wstat)
3988 {
3989 lwp->status_pending_p = 0;
3990 enqueue_one_deferred_signal (lwp, wstat);
3991
3992 if (debug_threads)
3993 debug_printf ("Signal %d for LWP %ld deferred "
3994 "(in jump pad)\n",
3995 WSTOPSIG (*wstat), lwpid_of (thread));
3996 }
3997
3998 linux_resume_one_lwp (lwp, 0, 0, NULL);
3999 }
4000 else
4001 lwp_suspended_inc (lwp);
4002
4003 current_thread = saved_thread;
4004 }
4005
4006 static int
4007 lwp_running (struct inferior_list_entry *entry, void *data)
4008 {
4009 struct thread_info *thread = (struct thread_info *) entry;
4010 struct lwp_info *lwp = get_thread_lwp (thread);
4011
4012 if (lwp_is_marked_dead (lwp))
4013 return 0;
4014 if (lwp->stopped)
4015 return 0;
4016 return 1;
4017 }
4018
4019 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4020 If SUSPEND, then also increase the suspend count of every LWP,
4021 except EXCEPT. */
4022
4023 static void
4024 stop_all_lwps (int suspend, struct lwp_info *except)
4025 {
4026 /* Should not be called recursively. */
4027 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4028
4029 if (debug_threads)
4030 {
4031 debug_enter ();
4032 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4033 suspend ? "stop-and-suspend" : "stop",
4034 except != NULL
4035 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4036 : "none");
4037 }
4038
4039 stopping_threads = (suspend
4040 ? STOPPING_AND_SUSPENDING_THREADS
4041 : STOPPING_THREADS);
4042
4043 if (suspend)
4044 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4045 else
4046 find_inferior (&all_threads, send_sigstop_callback, except);
4047 wait_for_sigstop ();
4048 stopping_threads = NOT_STOPPING_THREADS;
4049
4050 if (debug_threads)
4051 {
4052 debug_printf ("stop_all_lwps done, setting stopping_threads "
4053 "back to !stopping\n");
4054 debug_exit ();
4055 }
4056 }
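
#if 0
/* Editorial sketch, not part of the original source: stripped of
   gdbserver's bookkeeping, the stop-the-world step above amounts to
   walking /proc/PID/task and sending each LWP a SIGSTOP, then
   reaping the stop notifications with waitpid as wait_for_sigstop
   does (hypothetical helper; needs <stdio.h> and <stdlib.h>; error
   handling omitted).  */

static void
example_sigstop_all (int pid)
{
  char path[64];
  DIR *dir;
  struct dirent *de;

  snprintf (path, sizeof (path), "/proc/%d/task", pid);
  dir = opendir (path);
  if (dir == NULL)
    return;
  while ((de = readdir (dir)) != NULL)
    if (de->d_name[0] != '.')
      syscall (__NR_tkill, atoi (de->d_name), SIGSTOP);
  closedir (dir);
}
#endif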
4057
4058 /* Enqueue one signal in the chain of signals which need to be
4059 delivered to this process on next resume. */
4060
4061 static void
4062 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4063 {
4064 struct pending_signals *p_sig = XNEW (struct pending_signals);
4065
4066 p_sig->prev = lwp->pending_signals;
4067 p_sig->signal = signal;
4068 if (info == NULL)
4069 memset (&p_sig->info, 0, sizeof (siginfo_t));
4070 else
4071 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4072 lwp->pending_signals = p_sig;
4073 }
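
#if 0
/* Editorial sketch, not part of the original source: the list above
   is pushed at the head via `prev' links, so the oldest signal sits
   at the tail.  Dequeuing therefore walks to the end, which is what
   keeps delivery in FIFO order (compare the dequeue loop in
   linux_resume_one_lwp_throw below).  Hypothetical helper; assumes a
   non-empty list.  */

static int
example_dequeue_oldest_signal (struct pending_signals **head)
{
  struct pending_signals **p = head;
  int signal;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}
#endif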
4074
4075 /* Install breakpoints for software single stepping. */
4076
4077 static void
4078 install_software_single_step_breakpoints (struct lwp_info *lwp)
4079 {
4080 int i;
4081 CORE_ADDR pc;
4082 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4083 VEC (CORE_ADDR) *next_pcs = NULL;
4084 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4085
4086 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4087
4088 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4089 set_reinsert_breakpoint (pc);
4090
4091 do_cleanups (old_chain);
4092 }
4093
4094 /* Single-step the LWP via hardware or software single step.
4095 Return 1 if hardware single-stepping, 0 if software
4096 single-stepping or if single-stepping is not supported. */
4097
4098 static int
4099 single_step (struct lwp_info* lwp)
4100 {
4101 int step = 0;
4102
4103 if (can_hardware_single_step ())
4104 {
4105 step = 1;
4106 }
4107 else if (can_software_single_step ())
4108 {
4109 install_software_single_step_breakpoints (lwp);
4110 step = 0;
4111 }
4112 else
4113 {
4114 if (debug_threads)
4115 debug_printf ("stepping is not implemented on this target\n");
4116 }
4117
4118 return step;
4119 }
4120
4121 /* The signal can be delivered to the inferior if we are not trying to
4122 reinsert a breakpoint for software single step and not trying to
4123 finish a fast tracepoint collect.  Since a signal can be delivered
4124 during the step-over, the program may enter the signal handler and
4125 trap again after returning from it.  We can live with such
4126 spurious double traps. */
4127
4128 static int
4129 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4130 {
4131 return (!(lwp->bp_reinsert != 0 && can_software_single_step ())
4132 && !lwp->collecting_fast_tracepoint);
4133 }
4134
4135 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4136 SIGNAL is nonzero, give it that signal. */
4137
4138 static void
4139 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4140 int step, int signal, siginfo_t *info)
4141 {
4142 struct thread_info *thread = get_lwp_thread (lwp);
4143 struct thread_info *saved_thread;
4144 int fast_tp_collecting;
4145 int ptrace_request;
4146 struct process_info *proc = get_thread_process (thread);
4147
4148 /* Note that the target description may not be initialised
4149 (proc->tdesc == NULL) at this point, because the program hasn't
4150 stopped at its first instruction yet, i.e., while GDBserver skips
4151 the extra traps from the wrapper program (see option --wrapper).
4152 Code in this function that requires register access must be
4153 guarded by a proc->tdesc != NULL check or similar. */
4154
4155 if (lwp->stopped == 0)
4156 return;
4157
4158 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4159
4160 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4161
4162 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4163
4164 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4165 user used the "jump" command, or "set $pc = foo"). */
4166 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4167 {
4168 /* Collecting 'while-stepping' actions doesn't make sense
4169 anymore. */
4170 release_while_stepping_state_list (thread);
4171 }
4172
4173 /* If we have pending signals or status, and a new signal, enqueue the
4174 signal. Also enqueue the signal if it can't be delivered to the
4175 inferior right now. */
4176 if (signal != 0
4177 && (lwp->status_pending_p
4178 || lwp->pending_signals != NULL
4179 || !lwp_signal_can_be_delivered (lwp)))
4180 {
4181 enqueue_pending_signal (lwp, signal, info);
4182
4183 /* Postpone any pending signal. It was enqueued above. */
4184 signal = 0;
4185 }
4186
4187 if (lwp->status_pending_p)
4188 {
4189 if (debug_threads)
4190 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4191 " has pending status\n",
4192 lwpid_of (thread), step ? "step" : "continue",
4193 lwp->stop_expected ? "expected" : "not expected");
4194 return;
4195 }
4196
4197 saved_thread = current_thread;
4198 current_thread = thread;
4199
4200 /* This bit needs some thinking about. If we get a signal that
4201 we must report while a single-step reinsert is still pending,
4202 we often end up resuming the thread. It might be better to
4203 (ew) allow a stack of pending events; then we could be sure that
4204 the reinsert happened right away and not lose any signals.
4205
4206 Making this stack would also shrink the window in which breakpoints are
4207 uninserted (see comment in linux_wait_for_lwp) but not enough for
4208 complete correctness, so it won't solve that problem. It may be
4209 worthwhile just to solve this one, however. */
4210 if (lwp->bp_reinsert != 0)
4211 {
4212 if (debug_threads)
4213 debug_printf (" pending reinsert at 0x%s\n",
4214 paddress (lwp->bp_reinsert));
4215
4216 if (can_hardware_single_step ())
4217 {
4218 if (fast_tp_collecting == 0)
4219 {
4220 if (step == 0)
4221 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4222 if (lwp->suspended)
4223 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4224 lwp->suspended);
4225 }
4226
4227 step = 1;
4228 }
4229 }
4230
4231 if (fast_tp_collecting == 1)
4232 {
4233 if (debug_threads)
4234 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4235 " (exit-jump-pad-bkpt)\n",
4236 lwpid_of (thread));
4237 }
4238 else if (fast_tp_collecting == 2)
4239 {
4240 if (debug_threads)
4241 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4242 " single-stepping\n",
4243 lwpid_of (thread));
4244
4245 if (can_hardware_single_step ())
4246 step = 1;
4247 else
4248 {
4249 internal_error (__FILE__, __LINE__,
4250 "moving out of jump pad single-stepping"
4251 " not implemented on this target");
4252 }
4253 }
4254
4255 /* If we have while-stepping actions in this thread, set it stepping.
4256 If we have a signal to deliver, it may or may not be set to
4257 SIG_IGN, we don't know. Assume so, and allow collecting
4258 while-stepping into a signal handler. A possible smart thing to
4259 do would be to set an internal breakpoint at the signal return
4260 address, continue, and carry on catching this while-stepping
4261 action only when that breakpoint is hit. A future
4262 enhancement. */
4263 if (thread->while_stepping != NULL)
4264 {
4265 if (debug_threads)
4266 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4267 lwpid_of (thread));
4268
4269 step = single_step (lwp);
4270 }
4271
4272 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4273 {
4274 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4275
4276 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4277
4278 if (debug_threads)
4279 {
4280 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4281 (long) lwp->stop_pc);
4282 }
4283 }
4284
4285 /* If we have pending signals, consume one if it can be delivered to
4286 the inferior. */
4287 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4288 {
4289 struct pending_signals **p_sig;
4290
4291 p_sig = &lwp->pending_signals;
4292 while ((*p_sig)->prev != NULL)
4293 p_sig = &(*p_sig)->prev;
4294
4295 signal = (*p_sig)->signal;
4296 if ((*p_sig)->info.si_signo != 0)
4297 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4298 &(*p_sig)->info);
4299
4300 free (*p_sig);
4301 *p_sig = NULL;
4302 }
4303
4304 if (debug_threads)
4305 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4306 lwpid_of (thread), step ? "step" : "continue", signal,
4307 lwp->stop_expected ? "expected" : "not expected");
4308
4309 if (the_low_target.prepare_to_resume != NULL)
4310 the_low_target.prepare_to_resume (lwp);
4311
4312 regcache_invalidate_thread (thread);
4313 errno = 0;
4314 lwp->stepping = step;
4315 if (step)
4316 ptrace_request = PTRACE_SINGLESTEP;
4317 else if (gdb_catching_syscalls_p (lwp))
4318 ptrace_request = PTRACE_SYSCALL;
4319 else
4320 ptrace_request = PTRACE_CONT;
4321 ptrace (ptrace_request,
4322 lwpid_of (thread),
4323 (PTRACE_TYPE_ARG3) 0,
4324 /* Coerce to a uintptr_t first to avoid potential gcc warning
4325 of coercing an 8 byte integer to a 4 byte pointer. */
4326 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4327
4328 current_thread = saved_thread;
4329 if (errno)
4330 perror_with_name ("resuming thread");
4331
4332 /* Successfully resumed. Clear state that no longer makes sense,
4333 and mark the LWP as running. Must not do this before resuming
4334 otherwise if that fails other code will be confused. E.g., we'd
4335 later try to stop the LWP and hang forever waiting for a stop
4336 status. Note that we must not throw after this is cleared,
4337 otherwise handle_zombie_lwp_error would get confused. */
4338 lwp->stopped = 0;
4339 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4340 }
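
#if 0
/* Editorial sketch, not part of the original source: the bare kernel
   interface behind the resume above.  The fourth ptrace argument is
   the signal to inject into the tracee (0 for none); PTRACE_SYSCALL
   resumes like PTRACE_CONT but additionally stops at every syscall
   entry and exit (hypothetical helper).  */

static long
example_resume_lwp (pid_t tid, int step, int catch_syscalls, int sig)
{
  int op;

  if (step)
    op = PTRACE_SINGLESTEP;
  else if (catch_syscalls)
    op = PTRACE_SYSCALL;
  else
    op = PTRACE_CONT;

  return ptrace (op, tid, (void *) 0, (void *) (uintptr_t) sig);
}
#endif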
4341
4342 /* Called when we try to resume a stopped LWP and that errors out. If
4343 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4344 or about to become), discard the error, clear any pending status
4345 the LWP may have, and return true (we'll collect the exit status
4346 soon enough). Otherwise, return false. */
4347
4348 static int
4349 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4350 {
4351 struct thread_info *thread = get_lwp_thread (lp);
4352
4353 /* If we get an error after resuming the LWP successfully, we'd
4354 confuse !T state for the LWP being gone. */
4355 gdb_assert (lp->stopped);
4356
4357 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4358 because even if ptrace failed with ESRCH, the tracee may be "not
4359 yet fully dead", but already refusing ptrace requests. In that
4360 case the tracee has 'R (Running)' state for a little bit
4361 (observed in Linux 3.18). See also the note on ESRCH in the
4362 ptrace(2) man page. Instead, check whether the LWP has any state
4363 other than ptrace-stopped. */
4364
4365 /* Don't assume anything if /proc/PID/status can't be read. */
4366 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4367 {
4368 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4369 lp->status_pending_p = 0;
4370 return 1;
4371 }
4372 return 0;
4373 }
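
#if 0
/* Editorial sketch, not part of the original source: the check above
   essentially reads the "State:" line of /proc/PID/status and tests
   for 't' (tracing stop; note that older kernels report 'T' for both
   job-control and ptrace stops).  Hypothetical helper; needs
   <stdio.h> and <string.h>; returns -1 if the file can't be read.  */

static int
example_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int ret = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        const char *p = line + 6;

        while (*p == ' ' || *p == '\t')
          p++;
        ret = (*p == 't');
        break;
      }

  fclose (f);
  return ret;
}
#endif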
4374
4375 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4376 disappears while we try to resume it. */
4377
4378 static void
4379 linux_resume_one_lwp (struct lwp_info *lwp,
4380 int step, int signal, siginfo_t *info)
4381 {
4382 TRY
4383 {
4384 linux_resume_one_lwp_throw (lwp, step, signal, info);
4385 }
4386 CATCH (ex, RETURN_MASK_ERROR)
4387 {
4388 if (!check_ptrace_stopped_lwp_gone (lwp))
4389 throw_exception (ex);
4390 }
4391 END_CATCH
4392 }
4393
4394 struct thread_resume_array
4395 {
4396 struct thread_resume *resume;
4397 size_t n;
4398 };
4399
4400 /* This function is called once per thread via find_inferior.
4401 ARG is a pointer to a thread_resume_array struct.
4402 We look up the thread specified by ENTRY in ARG, and mark the thread
4403 with a pointer to the appropriate resume request.
4404
4405 This algorithm is O(threads * resume elements), but resume elements
4406 is small (and will remain small at least until GDB supports thread
4407 suspension). */
4408
4409 static int
4410 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4411 {
4412 struct thread_info *thread = (struct thread_info *) entry;
4413 struct lwp_info *lwp = get_thread_lwp (thread);
4414 int ndx;
4415 struct thread_resume_array *r;
4416
4417 r = (struct thread_resume_array *) arg;
4418
4419 for (ndx = 0; ndx < r->n; ndx++)
4420 {
4421 ptid_t ptid = r->resume[ndx].thread;
4422 if (ptid_equal (ptid, minus_one_ptid)
4423 || ptid_equal (ptid, entry->id)
4424 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4425 of PID'. */
4426 || (ptid_get_pid (ptid) == pid_of (thread)
4427 && (ptid_is_pid (ptid)
4428 || ptid_get_lwp (ptid) == -1)))
4429 {
4430 if (r->resume[ndx].kind == resume_stop
4431 && thread->last_resume_kind == resume_stop)
4432 {
4433 if (debug_threads)
4434 debug_printf ("already %s LWP %ld at GDB's request\n",
4435 (thread->last_status.kind
4436 == TARGET_WAITKIND_STOPPED)
4437 ? "stopped"
4438 : "stopping",
4439 lwpid_of (thread));
4440
4441 continue;
4442 }
4443
4444 lwp->resume = &r->resume[ndx];
4445 thread->last_resume_kind = lwp->resume->kind;
4446
4447 lwp->step_range_start = lwp->resume->step_range_start;
4448 lwp->step_range_end = lwp->resume->step_range_end;
4449
4450 /* If we had a deferred signal to report, dequeue one now.
4451 This can happen if LWP gets more than one signal while
4452 trying to get out of a jump pad. */
4453 if (lwp->stopped
4454 && !lwp->status_pending_p
4455 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4456 {
4457 lwp->status_pending_p = 1;
4458
4459 if (debug_threads)
4460 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4461 "leaving status pending.\n",
4462 WSTOPSIG (lwp->status_pending),
4463 lwpid_of (thread));
4464 }
4465
4466 return 0;
4467 }
4468 }
4469
4470 /* No resume action for this thread. */
4471 lwp->resume = NULL;
4472
4473 return 0;
4474 }
4475
4476 /* find_inferior callback for linux_resume.
4477 Set *FLAG_P if this lwp has an interesting status pending. */
4478
4479 static int
4480 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4481 {
4482 struct thread_info *thread = (struct thread_info *) entry;
4483 struct lwp_info *lwp = get_thread_lwp (thread);
4484
4485 /* LWPs which will not be resumed are not interesting, because
4486 we might not wait for them next time through linux_wait. */
4487 if (lwp->resume == NULL)
4488 return 0;
4489
4490 if (thread_still_has_status_pending_p (thread))
4491 * (int *) flag_p = 1;
4492
4493 return 0;
4494 }
4495
4496 /* Return 1 if this lwp that GDB wants running is stopped at an
4497 internal breakpoint that we need to step over. It assumes that any
4498 required STOP_PC adjustment has already been propagated to the
4499 inferior's regcache. */
4500
4501 static int
4502 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4503 {
4504 struct thread_info *thread = (struct thread_info *) entry;
4505 struct lwp_info *lwp = get_thread_lwp (thread);
4506 struct thread_info *saved_thread;
4507 CORE_ADDR pc;
4508 struct process_info *proc = get_thread_process (thread);
4509
4510 /* GDBserver is still skipping the extra traps from the wrapper
4511 program, so there is no step-over to do. */
4512 if (proc->tdesc == NULL)
4513 return 0;
4514
4515 /* LWPs which will not be resumed are not interesting, because we
4516 might not wait for them next time through linux_wait. */
4517
4518 if (!lwp->stopped)
4519 {
4520 if (debug_threads)
4521 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4522 lwpid_of (thread));
4523 return 0;
4524 }
4525
4526 if (thread->last_resume_kind == resume_stop)
4527 {
4528 if (debug_threads)
4529 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4530 " stopped\n",
4531 lwpid_of (thread));
4532 return 0;
4533 }
4534
4535 gdb_assert (lwp->suspended >= 0);
4536
4537 if (lwp->suspended)
4538 {
4539 if (debug_threads)
4540 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4541 lwpid_of (thread));
4542 return 0;
4543 }
4544
4545 if (!lwp->need_step_over)
4546 {
4547 if (debug_threads)
4548 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4549 }
4550
4551 if (lwp->status_pending_p)
4552 {
4553 if (debug_threads)
4554 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4555 " status.\n",
4556 lwpid_of (thread));
4557 return 0;
4558 }
4559
4560 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4561 or we have. */
4562 pc = get_pc (lwp);
4563
4564 /* If the PC has changed since we stopped, then don't do anything,
4565 and let the breakpoint/tracepoint be hit. This happens if, for
4566 instance, GDB handled the decr_pc_after_break subtraction itself,
4567 GDB is OOL stepping this thread, or the user has issued a "jump"
4568 command, or poked thread's registers herself. */
4569 if (pc != lwp->stop_pc)
4570 {
4571 if (debug_threads)
4572 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4573 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4574 lwpid_of (thread),
4575 paddress (lwp->stop_pc), paddress (pc));
4576
4577 lwp->need_step_over = 0;
4578 return 0;
4579 }
4580
4581 saved_thread = current_thread;
4582 current_thread = thread;
4583
4584 /* We can only step over breakpoints we know about. */
4585 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4586 {
4587 /* Don't step over a breakpoint that GDB expects to hit,
4588 though.  If the condition is being evaluated on the target's side
4589 and it evaluates to false, step over this breakpoint as well. */
4590 if (gdb_breakpoint_here (pc)
4591 && gdb_condition_true_at_breakpoint (pc)
4592 && gdb_no_commands_at_breakpoint (pc))
4593 {
4594 if (debug_threads)
4595 debug_printf ("Need step over [LWP %ld]? yes, but found"
4596 " GDB breakpoint at 0x%s; skipping step over\n",
4597 lwpid_of (thread), paddress (pc));
4598
4599 current_thread = saved_thread;
4600 return 0;
4601 }
4602 else
4603 {
4604 if (debug_threads)
4605 debug_printf ("Need step over [LWP %ld]? yes, "
4606 "found breakpoint at 0x%s\n",
4607 lwpid_of (thread), paddress (pc));
4608
4609 /* We've found an lwp that needs stepping over --- return 1 so
4610 that find_inferior stops looking. */
4611 current_thread = saved_thread;
4612
4613 /* If the step over is cancelled, this is set again. */
4614 lwp->need_step_over = 0;
4615 return 1;
4616 }
4617 }
4618
4619 current_thread = saved_thread;
4620
4621 if (debug_threads)
4622 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4623 " at 0x%s\n",
4624 lwpid_of (thread), paddress (pc));
4625
4626 return 0;
4627 }
4628
4629 /* Start a step-over operation on LWP.  When LWP is stopped at a
4630 breakpoint, we need to move the breakpoint out of the way to make
4631 progress.  If we let other threads run while we do that, they may
4632 pass by the breakpoint location and miss hitting it. To avoid
4633 that, a step-over momentarily stops all threads while LWP is
4634 single-stepped by either hardware or software while the breakpoint
4635 is temporarily uninserted from the inferior. When the single-step
4636 finishes, we reinsert the breakpoint, and let all threads that are
4637 supposed to be running, run again. */
4638
4639 static int
4640 start_step_over (struct lwp_info *lwp)
4641 {
4642 struct thread_info *thread = get_lwp_thread (lwp);
4643 struct thread_info *saved_thread;
4644 CORE_ADDR pc;
4645 int step;
4646
4647 if (debug_threads)
4648 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4649 lwpid_of (thread));
4650
4651 stop_all_lwps (1, lwp);
4652
4653 if (lwp->suspended != 0)
4654 {
4655 internal_error (__FILE__, __LINE__,
4656 "LWP %ld suspended=%d\n", lwpid_of (thread),
4657 lwp->suspended);
4658 }
4659
4660 if (debug_threads)
4661 debug_printf ("Done stopping all threads for step-over.\n");
4662
4663 /* Note, we should always reach here with an already adjusted PC,
4664 either by GDB (if we're resuming due to GDB's request), or by our
4665 caller, if we just finished handling an internal breakpoint GDB
4666 shouldn't care about. */
4667 pc = get_pc (lwp);
4668
4669 saved_thread = current_thread;
4670 current_thread = thread;
4671
4672 lwp->bp_reinsert = pc;
4673 uninsert_breakpoints_at (pc);
4674 uninsert_fast_tracepoint_jumps_at (pc);
4675
4676 step = single_step (lwp);
4677
4678 current_thread = saved_thread;
4679
4680 linux_resume_one_lwp (lwp, step, 0, NULL);
4681
4682 /* Require next event from this LWP. */
4683 step_over_bkpt = thread->entry.id;
4684 return 1;
4685 }
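
/* Editorial outline, not part of the original source; the complete
   step-over sequence started above is:

     1. stop_all_lwps (1, lwp)         - freeze and suspend the others;
     2. uninsert_breakpoints_at (pc)   - restore the original insn;
     3. single_step (lwp) plus resume  - execute just that insn;
     4. finish_step_over (below)       - reinsert the breakpoint;
     5. unsuspend/proceed the rest     - back in linux_wait_1.  */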
4686
4687 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
4688 start_step_over, if still there, and delete any reinsert
4689 breakpoints we've set, on targets without hardware single-step. */
4690
4691 static int
4692 finish_step_over (struct lwp_info *lwp)
4693 {
4694 if (lwp->bp_reinsert != 0)
4695 {
4696 if (debug_threads)
4697 debug_printf ("Finished step over.\n");
4698
4699 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4700 may be no breakpoint to reinsert there by now. */
4701 reinsert_breakpoints_at (lwp->bp_reinsert);
4702 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4703
4704 lwp->bp_reinsert = 0;
4705
4706 /* Delete any software-single-step reinsert breakpoints. No
4707 longer needed. We don't have to worry about other threads
4708 hitting this trap, and later not being able to explain it,
4709 because we were stepping over a breakpoint, and we hold all
4710 threads but LWP stopped while doing that. */
4711 if (!can_hardware_single_step ())
4712 delete_reinsert_breakpoints ();
4713
4714 step_over_bkpt = null_ptid;
4715 return 1;
4716 }
4717 else
4718 return 0;
4719 }
4720
4721 /* If there's a step over in progress, wait until all threads stop
4722 (that is, until the stepping thread finishes its step), and
4723 unsuspend all lwps. The stepping thread ends with its status
4724 pending, which is processed later when we get back to processing
4725 events. */
4726
4727 static void
4728 complete_ongoing_step_over (void)
4729 {
4730 if (!ptid_equal (step_over_bkpt, null_ptid))
4731 {
4732 struct lwp_info *lwp;
4733 int wstat;
4734 int ret;
4735
4736 if (debug_threads)
4737 debug_printf ("detach: step over in progress, finish it first\n");
4738
4739 /* Passing NULL_PTID as filter indicates we want all events to
4740 be left pending. Eventually this returns when there are no
4741 unwaited-for children left. */
4742 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4743 &wstat, __WALL);
4744 gdb_assert (ret == -1);
4745
4746 lwp = find_lwp_pid (step_over_bkpt);
4747 if (lwp != NULL)
4748 finish_step_over (lwp);
4749 step_over_bkpt = null_ptid;
4750 unsuspend_all_lwps (lwp);
4751 }
4752 }
4753
4754 /* This function is called once per thread. We check the thread's resume
4755 request, which will tell us whether to resume, step, or leave the thread
4756 stopped; and what signal, if any, it should be sent.
4757
4758 For threads which we aren't explicitly told otherwise, we preserve
4759 the stepping flag; this is used for stepping over gdbserver-placed
4760 breakpoints.
4761
4762 If pending_flags was set in any thread, we queue any needed
4763 signals, since we won't actually resume. We already have a pending
4764 event to report, so we don't need to preserve any step requests;
4765 they should be re-issued if necessary. */
4766
4767 static int
4768 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4769 {
4770 struct thread_info *thread = (struct thread_info *) entry;
4771 struct lwp_info *lwp = get_thread_lwp (thread);
4772 int step;
4773 int leave_all_stopped = * (int *) arg;
4774 int leave_pending;
4775
4776 if (lwp->resume == NULL)
4777 return 0;
4778
4779 if (lwp->resume->kind == resume_stop)
4780 {
4781 if (debug_threads)
4782 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4783
4784 if (!lwp->stopped)
4785 {
4786 if (debug_threads)
4787 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4788
4789 /* Stop the thread, and wait for the event asynchronously,
4790 through the event loop. */
4791 send_sigstop (lwp);
4792 }
4793 else
4794 {
4795 if (debug_threads)
4796 debug_printf ("already stopped LWP %ld\n",
4797 lwpid_of (thread));
4798
4799 /* The LWP may have been stopped in an internal event that
4800 was not meant to be notified back to GDB (e.g., gdbserver
4801 breakpoint), so we should be reporting a stop event in
4802 this case too. */
4803
4804 /* If the thread already has a pending SIGSTOP, this is a
4805 no-op. Otherwise, something later will presumably resume
4806 the thread and this will cause it to cancel any pending
4807 operation, due to last_resume_kind == resume_stop. If
4808 the thread already has a pending status to report, we
4809 will still report it the next time we wait - see
4810 status_pending_p_callback. */
4811
4812 /* If we already have a pending signal to report, then
4813 there's no need to queue a SIGSTOP, as this means we're
4814 midway through moving the LWP out of the jumppad, and we
4815 will report the pending signal as soon as that is
4816 finished. */
4817 if (lwp->pending_signals_to_report == NULL)
4818 send_sigstop (lwp);
4819 }
4820
4821 /* For stop requests, we're done. */
4822 lwp->resume = NULL;
4823 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4824 return 0;
4825 }
4826
4827 /* If this thread which is about to be resumed has a pending status,
4828 then don't resume it - we can just report the pending status.
4829 Likewise if it is suspended, because e.g., another thread is
4830 stepping past a breakpoint. Make sure to queue any signals that
4831 would otherwise be sent.  In all-stop mode, we make this decision
4832 based on whether *any* thread has a pending status.  If there's a
4833 thread that needs the step-over-breakpoint dance, then don't
4834 resume any other thread but that particular one. */
4835 leave_pending = (lwp->suspended
4836 || lwp->status_pending_p
4837 || leave_all_stopped);
4838
4839 if (!leave_pending)
4840 {
4841 if (debug_threads)
4842 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4843
4844 step = (lwp->resume->kind == resume_step);
4845 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4846 }
4847 else
4848 {
4849 if (debug_threads)
4850 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4851
4852 /* If we have a new signal, enqueue the signal. */
4853 if (lwp->resume->sig != 0)
4854 {
4855 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4856
4857 p_sig->prev = lwp->pending_signals;
4858 p_sig->signal = lwp->resume->sig;
4859
4860 /* If this is the same signal we were previously stopped by,
4861 make sure to queue its siginfo. We can ignore the return
4862 value of ptrace; if it fails, we'll skip
4863 PTRACE_SETSIGINFO. */
4864 if (WIFSTOPPED (lwp->last_status)
4865 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4866 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4867 &p_sig->info);
4868
4869 lwp->pending_signals = p_sig;
4870 }
4871 }
4872
4873 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4874 lwp->resume = NULL;
4875 return 0;
4876 }
4877
4878 static void
4879 linux_resume (struct thread_resume *resume_info, size_t n)
4880 {
4881 struct thread_resume_array array = { resume_info, n };
4882 struct thread_info *need_step_over = NULL;
4883 int any_pending;
4884 int leave_all_stopped;
4885
4886 if (debug_threads)
4887 {
4888 debug_enter ();
4889 debug_printf ("linux_resume:\n");
4890 }
4891
4892 find_inferior (&all_threads, linux_set_resume_request, &array);
4893
4894 /* If there is a thread which would otherwise be resumed, which has
4895 a pending status, then don't resume any threads - we can just
4896 report the pending status. Make sure to queue any signals that
4897 would otherwise be sent.  In non-stop mode, we'll apply this
4898 logic to each thread individually.  We consume all pending events
4899 before considering whether to start a step-over (in all-stop). */
4900 any_pending = 0;
4901 if (!non_stop)
4902 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4903
4904 /* If there is a thread which would otherwise be resumed, which is
4905 stopped at a breakpoint that needs stepping over, then don't
4906 resume any threads - have it step over the breakpoint with all
4907 other threads stopped, then resume all threads again. Make sure
4908 to queue any signals that would otherwise be delivered or
4909 queued. */
4910 if (!any_pending && supports_breakpoints ())
4911 need_step_over
4912 = (struct thread_info *) find_inferior (&all_threads,
4913 need_step_over_p, NULL);
4914
4915 leave_all_stopped = (need_step_over != NULL || any_pending);
4916
4917 if (debug_threads)
4918 {
4919 if (need_step_over != NULL)
4920 debug_printf ("Not resuming all, need step over\n");
4921 else if (any_pending)
4922 debug_printf ("Not resuming, all-stop and found "
4923 "an LWP with pending status\n");
4924 else
4925 debug_printf ("Resuming, no pending status or step over needed\n");
4926 }
4927
4928 /* Even if we're leaving threads stopped, queue all signals we'd
4929 otherwise deliver. */
4930 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4931
4932 if (need_step_over)
4933 start_step_over (get_thread_lwp (need_step_over));
4934
4935 if (debug_threads)
4936 {
4937 debug_printf ("linux_resume done\n");
4938 debug_exit ();
4939 }
4940
4941 /* We may have events that were pending that can/should be sent to
4942 the client now. Trigger a linux_wait call. */
4943 if (target_is_async_p ())
4944 async_file_mark ();
4945 }
4946
4947 /* This function is called once per thread. We check the thread's
4948 last resume request, which will tell us whether to resume, step, or
4949 leave the thread stopped. Any signal the client requested to be
4950 delivered has already been enqueued at this point.
4951
4952 If any thread that GDB wants running is stopped at an internal
4953 breakpoint that needs stepping over, we start a step-over operation
4954 on that particular thread, and leave all others stopped. */
4955
4956 static int
4957 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4958 {
4959 struct thread_info *thread = (struct thread_info *) entry;
4960 struct lwp_info *lwp = get_thread_lwp (thread);
4961 int step;
4962
4963 if (lwp == except)
4964 return 0;
4965
4966 if (debug_threads)
4967 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4968
4969 if (!lwp->stopped)
4970 {
4971 if (debug_threads)
4972 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4973 return 0;
4974 }
4975
4976 if (thread->last_resume_kind == resume_stop
4977 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4978 {
4979 if (debug_threads)
4980 debug_printf ("   client wants LWP %ld to remain stopped\n",
4981 lwpid_of (thread));
4982 return 0;
4983 }
4984
4985 if (lwp->status_pending_p)
4986 {
4987 if (debug_threads)
4988 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4989 lwpid_of (thread));
4990 return 0;
4991 }
4992
4993 gdb_assert (lwp->suspended >= 0);
4994
4995 if (lwp->suspended)
4996 {
4997 if (debug_threads)
4998 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4999 return 0;
5000 }
5001
5002 if (thread->last_resume_kind == resume_stop
5003 && lwp->pending_signals_to_report == NULL
5004 && lwp->collecting_fast_tracepoint == 0)
5005 {
5006 /* We haven't reported this LWP as stopped yet (otherwise the
5007 last_status.kind check above would have caught it and we
5008 wouldn't reach here).  This LWP may have been momentarily
5009 paused by a stop_all_lwps call while handling, for example,
5010 another LWP's step-over.  In that case, the expected SIGSTOP
5011 that was queued at vCont;t handling time will have already
5012 been consumed by wait_for_sigstop, and so we need to requeue
5013 another one here. Note that if the LWP already has a SIGSTOP
5014 pending, this is a no-op. */
5015
5016 if (debug_threads)
5017 debug_printf ("Client wants LWP %ld to stop. "
5018 "Making sure it has a SIGSTOP pending\n",
5019 lwpid_of (thread));
5020
5021 send_sigstop (lwp);
5022 }
5023
5024 if (thread->last_resume_kind == resume_step)
5025 {
5026 if (debug_threads)
5027 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5028 lwpid_of (thread));
5029 step = 1;
5030 }
5031 else if (lwp->bp_reinsert != 0)
5032 {
5033 if (debug_threads)
5034 debug_printf (" stepping LWP %ld, reinsert set\n",
5035 lwpid_of (thread));
5036 step = 1;
5037 }
5038 else
5039 step = 0;
5040
5041 linux_resume_one_lwp (lwp, step, 0, NULL);
5042 return 0;
5043 }
5044
5045 static int
5046 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5047 {
5048 struct thread_info *thread = (struct thread_info *) entry;
5049 struct lwp_info *lwp = get_thread_lwp (thread);
5050
5051 if (lwp == except)
5052 return 0;
5053
5054 lwp_suspended_decr (lwp);
5055
5056 return proceed_one_lwp (entry, except);
5057 }
5058
5059 /* When we finish a step-over, set threads running again. If there's
5060 another thread that may need a step-over, now's the time to start
5061 it. Eventually, we'll move all threads past their breakpoints. */
5062
5063 static void
5064 proceed_all_lwps (void)
5065 {
5066 struct thread_info *need_step_over;
5067
5068 /* If there is a thread which would otherwise be resumed, which is
5069 stopped at a breakpoint that needs stepping over, then don't
5070 resume any threads - have it step over the breakpoint with all
5071 other threads stopped, then resume all threads again. */
5072
5073 if (supports_breakpoints ())
5074 {
5075 need_step_over
5076 = (struct thread_info *) find_inferior (&all_threads,
5077 need_step_over_p, NULL);
5078
5079 if (need_step_over != NULL)
5080 {
5081 if (debug_threads)
5082 debug_printf ("proceed_all_lwps: found "
5083 "thread %ld needing a step-over\n",
5084 lwpid_of (need_step_over));
5085
5086 start_step_over (get_thread_lwp (need_step_over));
5087 return;
5088 }
5089 }
5090
5091 if (debug_threads)
5092 debug_printf ("Proceeding, no step-over needed\n");
5093
5094 find_inferior (&all_threads, proceed_one_lwp, NULL);
5095 }
5096
5097 /* Stopped LWPs that the client wanted to be running, that don't have
5098 pending statuses, are set to run again, except for EXCEPT, if not
5099 NULL. This undoes a stop_all_lwps call. */
5100
5101 static void
5102 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5103 {
5104 if (debug_threads)
5105 {
5106 debug_enter ();
5107 if (except)
5108 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5109 lwpid_of (get_lwp_thread (except)));
5110 else
5111 debug_printf ("unstopping all lwps\n");
5112 }
5113
5114 if (unsuspend)
5115 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5116 else
5117 find_inferior (&all_threads, proceed_one_lwp, except);
5118
5119 if (debug_threads)
5120 {
5121 debug_printf ("unstop_all_lwps done\n");
5122 debug_exit ();
5123 }
5124 }
5125
5126
5127 #ifdef HAVE_LINUX_REGSETS
5128
5129 #define use_linux_regsets 1
5130
5131 /* Returns true if REGSET has been disabled. */
5132
5133 static int
5134 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5135 {
5136 return (info->disabled_regsets != NULL
5137 && info->disabled_regsets[regset - info->regsets]);
5138 }
5139
5140 /* Disable REGSET. */
5141
5142 static void
5143 disable_regset (struct regsets_info *info, struct regset_info *regset)
5144 {
5145 int dr_offset;
5146
5147 dr_offset = regset - info->regsets;
5148 if (info->disabled_regsets == NULL)
5149 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5150 info->disabled_regsets[dr_offset] = 1;
5151 }
5152
5153 static int
5154 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5155 struct regcache *regcache)
5156 {
5157 struct regset_info *regset;
5158 int saw_general_regs = 0;
5159 int pid;
5160 struct iovec iov;
5161
5162 pid = lwpid_of (current_thread);
5163 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5164 {
5165 void *buf, *data;
5166 int nt_type, res;
5167
5168 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5169 continue;
5170
5171 buf = xmalloc (regset->size);
5172
5173 nt_type = regset->nt_type;
5174 if (nt_type)
5175 {
5176 iov.iov_base = buf;
5177 iov.iov_len = regset->size;
5178 data = (void *) &iov;
5179 }
5180 else
5181 data = buf;
5182
5183 #ifndef __sparc__
5184 res = ptrace (regset->get_request, pid,
5185 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5186 #else
5187 res = ptrace (regset->get_request, pid, data, nt_type);
5188 #endif
5189 if (res < 0)
5190 {
5191 if (errno == EIO)
5192 {
5193 /* If we get EIO on a regset, do not try it again for
5194 this process mode. */
5195 disable_regset (regsets_info, regset);
5196 }
5197 else if (errno == ENODATA)
5198 {
5199 /* ENODATA may be returned if the regset is currently
5200 not "active". This can happen in normal operation,
5201 so suppress the warning in this case. */
5202 }
5203 else
5204 {
5205 char s[256];
5206 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5207 pid);
5208 perror (s);
5209 }
5210 }
5211 else
5212 {
5213 if (regset->type == GENERAL_REGS)
5214 saw_general_regs = 1;
5215 regset->store_function (regcache, buf);
5216 }
5217 free (buf);
5218 }
5219 if (saw_general_regs)
5220 return 0;
5221 else
5222 return 1;
5223 }
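
#if 0
/* Editorial sketch, not part of the original source: the regset
   transfer the loop above performs, spelled out for the common
   NT_PRSTATUS (general purpose registers) case on a Linux kernel
   with PTRACE_GETREGSET (2.6.34 and later).  The kernel writes the
   amount transferred back into iov.iov_len.  Hypothetical helper;
   needs <elf.h> for NT_PRSTATUS and <sys/user.h> for
   struct user_regs_struct.  */

static long
example_fetch_gregs (int tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif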
5224
5225 static int
5226 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5227 struct regcache *regcache)
5228 {
5229 struct regset_info *regset;
5230 int saw_general_regs = 0;
5231 int pid;
5232 struct iovec iov;
5233
5234 pid = lwpid_of (current_thread);
5235 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5236 {
5237 void *buf, *data;
5238 int nt_type, res;
5239
5240 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5241 || regset->fill_function == NULL)
5242 continue;
5243
5244 buf = xmalloc (regset->size);
5245
5246 /* First fill the buffer with the current register set contents,
5247 in case there are any items in the kernel's regset that are
5248 not in gdbserver's regcache. */
5249
5250 nt_type = regset->nt_type;
5251 if (nt_type)
5252 {
5253 iov.iov_base = buf;
5254 iov.iov_len = regset->size;
5255 data = (void *) &iov;
5256 }
5257 else
5258 data = buf;
5259
5260 #ifndef __sparc__
5261 res = ptrace (regset->get_request, pid,
5262 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5263 #else
5264 res = ptrace (regset->get_request, pid, data, nt_type);
5265 #endif
5266
5267 if (res == 0)
5268 {
5269 /* Then overlay our cached registers on that. */
5270 regset->fill_function (regcache, buf);
5271
5272 /* Only now do we write the register set. */
5273 #ifndef __sparc__
5274 res = ptrace (regset->set_request, pid,
5275 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5276 #else
5277 res = ptrace (regset->set_request, pid, data, nt_type);
5278 #endif
5279 }
5280
5281 if (res < 0)
5282 {
5283 if (errno == EIO)
5284 {
5285 /* If we get EIO on a regset, do not try it again for
5286 this process mode. */
5287 disable_regset (regsets_info, regset);
5288 }
5289 else if (errno == ESRCH)
5290 {
5291 /* At this point, ESRCH should mean the process is
5292 already gone, in which case we simply ignore attempts
5293 to change its registers. See also the related
5294 comment in linux_resume_one_lwp. */
5295 free (buf);
5296 return 0;
5297 }
5298 else
5299 {
5300 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5301 }
5302 }
5303 else if (regset->type == GENERAL_REGS)
5304 saw_general_regs = 1;
5305 free (buf);
5306 }
5307 if (saw_general_regs)
5308 return 0;
5309 else
5310 return 1;
5311 }
5312
5313 #else /* !HAVE_LINUX_REGSETS */
5314
5315 #define use_linux_regsets 0
5316 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5317 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5318
5319 #endif
5320
5321 /* Return 1 if register REGNO is supported by one of the regset ptrace
5322 calls or 0 if it has to be transferred individually. */
5323
5324 static int
5325 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5326 {
5327 unsigned char mask = 1 << (regno % 8);
5328 size_t index = regno / 8;
5329
5330 return (use_linux_regsets
5331 && (regs_info->regset_bitmap == NULL
5332 || (regs_info->regset_bitmap[index] & mask) != 0));
5333 }
5334
5335 #ifdef HAVE_LINUX_USRREGS
5336
5337 static int
5338 register_addr (const struct usrregs_info *usrregs, int regnum)
5339 {
5340 int addr;
5341
5342 if (regnum < 0 || regnum >= usrregs->num_regs)
5343 error ("Invalid register number %d.", regnum);
5344
5345 addr = usrregs->regmap[regnum];
5346
5347 return addr;
5348 }
5349
5350 /* Fetch one register. */
5351 static void
5352 fetch_register (const struct usrregs_info *usrregs,
5353 struct regcache *regcache, int regno)
5354 {
5355 CORE_ADDR regaddr;
5356 int i, size;
5357 char *buf;
5358 int pid;
5359
5360 if (regno >= usrregs->num_regs)
5361 return;
5362 if ((*the_low_target.cannot_fetch_register) (regno))
5363 return;
5364
5365 regaddr = register_addr (usrregs, regno);
5366 if (regaddr == -1)
5367 return;
5368
5369 size = ((register_size (regcache->tdesc, regno)
5370 + sizeof (PTRACE_XFER_TYPE) - 1)
5371 & -sizeof (PTRACE_XFER_TYPE));
5372 buf = (char *) alloca (size);
5373
5374 pid = lwpid_of (current_thread);
5375 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5376 {
5377 errno = 0;
5378 *(PTRACE_XFER_TYPE *) (buf + i) =
5379 ptrace (PTRACE_PEEKUSER, pid,
5380 /* Coerce to a uintptr_t first to avoid potential gcc warning
5381 of coercing an 8 byte integer to a 4 byte pointer. */
5382 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5383 regaddr += sizeof (PTRACE_XFER_TYPE);
5384 if (errno != 0)
5385 error ("reading register %d: %s", regno, strerror (errno));
5386 }
5387
5388 if (the_low_target.supply_ptrace_register)
5389 the_low_target.supply_ptrace_register (regcache, regno, buf);
5390 else
5391 supply_register (regcache, regno, buf);
5392 }
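
#if 0
/* Editorial sketch, not part of the original source: why the loop
   above must clear errno first.  PTRACE_PEEKUSER returns the peeked
   word in the *return value*, so -1 is both a valid register datum
   and the error indicator; only errno disambiguates (hypothetical
   helper).  */

static int
example_peek_user_word (int tid, long offset, long *word)
{
  errno = 0;
  *word = ptrace (PTRACE_PEEKUSER, tid, (void *) offset, (void *) 0);
  return errno != 0 ? -1 : 0;
}
#endif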
5393
5394 /* Store one register. */
5395 static void
5396 store_register (const struct usrregs_info *usrregs,
5397 struct regcache *regcache, int regno)
5398 {
5399 CORE_ADDR regaddr;
5400 int i, size;
5401 char *buf;
5402 int pid;
5403
5404 if (regno >= usrregs->num_regs)
5405 return;
5406 if ((*the_low_target.cannot_store_register) (regno))
5407 return;
5408
5409 regaddr = register_addr (usrregs, regno);
5410 if (regaddr == -1)
5411 return;
5412
5413 size = ((register_size (regcache->tdesc, regno)
5414 + sizeof (PTRACE_XFER_TYPE) - 1)
5415 & -sizeof (PTRACE_XFER_TYPE));
5416 buf = (char *) alloca (size);
5417 memset (buf, 0, size);
5418
5419 if (the_low_target.collect_ptrace_register)
5420 the_low_target.collect_ptrace_register (regcache, regno, buf);
5421 else
5422 collect_register (regcache, regno, buf);
5423
5424 pid = lwpid_of (current_thread);
5425 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5426 {
5427 errno = 0;
5428 ptrace (PTRACE_POKEUSER, pid,
5429 /* Coerce to a uintptr_t first to avoid potential gcc warning
5430 about coercing an 8 byte integer to a 4 byte pointer. */
5431 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5432 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5433 if (errno != 0)
5434 {
5435 /* At this point, ESRCH should mean the process is
5436 already gone, in which case we simply ignore attempts
5437 to change its registers. See also the related
5438 comment in linux_resume_one_lwp. */
5439 if (errno == ESRCH)
5440 return;
5441
5442 if ((*the_low_target.cannot_store_register) (regno) == 0)
5443 error ("writing register %d: %s", regno, strerror (errno));
5444 }
5445 regaddr += sizeof (PTRACE_XFER_TYPE);
5446 }
5447 }
5448
5449 /* Fetch all registers, or just one, from the child process.
5450 If REGNO is -1, do this for all registers, skipping any that are
5451 assumed to have been retrieved by regsets_fetch_inferior_registers,
5452 unless ALL is non-zero.
5453 Otherwise, REGNO specifies which register (so we can save time). */
5454 static void
5455 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5456 struct regcache *regcache, int regno, int all)
5457 {
5458 struct usrregs_info *usr = regs_info->usrregs;
5459
5460 if (regno == -1)
5461 {
5462 for (regno = 0; regno < usr->num_regs; regno++)
5463 if (all || !linux_register_in_regsets (regs_info, regno))
5464 fetch_register (usr, regcache, regno);
5465 }
5466 else
5467 fetch_register (usr, regcache, regno);
5468 }
5469
5470 /* Store our register values back into the inferior.
5471 If REGNO is -1, do this for all registers, skipping any that are
5472 assumed to have been saved by regsets_store_inferior_registers,
5473 unless ALL is non-zero.
5474 Otherwise, REGNO specifies which register (so we can save time). */
5475 static void
5476 usr_store_inferior_registers (const struct regs_info *regs_info,
5477 struct regcache *regcache, int regno, int all)
5478 {
5479 struct usrregs_info *usr = regs_info->usrregs;
5480
5481 if (regno == -1)
5482 {
5483 for (regno = 0; regno < usr->num_regs; regno++)
5484 if (all || !linux_register_in_regsets (regs_info, regno))
5485 store_register (usr, regcache, regno);
5486 }
5487 else
5488 store_register (usr, regcache, regno);
5489 }
5490
5491 #else /* !HAVE_LINUX_USRREGS */
5492
5493 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5494 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5495
5496 #endif
5497
5498
5499 static void
5500 linux_fetch_registers (struct regcache *regcache, int regno)
5501 {
5502 int use_regsets;
5503 int all = 0;
5504 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5505
5506 if (regno == -1)
5507 {
5508 if (the_low_target.fetch_register != NULL
5509 && regs_info->usrregs != NULL)
5510 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5511 (*the_low_target.fetch_register) (regcache, regno);
5512
5513 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5514 if (regs_info->usrregs != NULL)
5515 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5516 }
5517 else
5518 {
5519 if (the_low_target.fetch_register != NULL
5520 && (*the_low_target.fetch_register) (regcache, regno))
5521 return;
5522
5523 use_regsets = linux_register_in_regsets (regs_info, regno);
5524 if (use_regsets)
5525 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5526 regcache);
5527 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5528 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5529 }
5530 }
5531
5532 static void
5533 linux_store_registers (struct regcache *regcache, int regno)
5534 {
5535 int use_regsets;
5536 int all = 0;
5537 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5538
5539 if (regno == -1)
5540 {
5541 all = regsets_store_inferior_registers (regs_info->regsets_info,
5542 regcache);
5543 if (regs_info->usrregs != NULL)
5544 usr_store_inferior_registers (regs_info, regcache, regno, all);
5545 }
5546 else
5547 {
5548 use_regsets = linux_register_in_regsets (regs_info, regno);
5549 if (use_regsets)
5550 all = regsets_store_inferior_registers (regs_info->regsets_info,
5551 regcache);
5552 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5553 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5554 }
5555 }
5556
5557
5558 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5559 to debugger memory starting at MYADDR. */
5560
5561 static int
5562 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5563 {
5564 int pid = lwpid_of (current_thread);
5565 register PTRACE_XFER_TYPE *buffer;
5566 register CORE_ADDR addr;
5567 register int count;
5568 char filename[64];
5569 register int i;
5570 int ret;
5571 int fd;
5572
5573 /* Try using /proc. Don't bother for one word. */
5574 if (len >= 3 * sizeof (long))
5575 {
5576 int bytes;
5577
5578 /* We could keep this file open and cache it - possibly one per
5579 thread. That requires some juggling, but is even faster. */
5580 sprintf (filename, "/proc/%d/mem", pid);
5581 fd = open (filename, O_RDONLY | O_LARGEFILE);
5582 if (fd == -1)
5583 goto no_proc;
5584
5585 /* If pread64 is available, use it. It's faster if the kernel
5586 supports it (only one syscall), and it's 64-bit safe even on
5587 32-bit platforms (for instance, SPARC debugging a SPARC64
5588 application). */
5589 #ifdef HAVE_PREAD64
5590 bytes = pread64 (fd, myaddr, len, memaddr);
5591 #else
5592 bytes = -1;
5593 if (lseek (fd, memaddr, SEEK_SET) != -1)
5594 bytes = read (fd, myaddr, len);
5595 #endif
5596
5597 close (fd);
5598 if (bytes == len)
5599 return 0;
5600
5601 /* Some data was read; we'll try to get the rest with ptrace.  */
5602 if (bytes > 0)
5603 {
5604 memaddr += bytes;
5605 myaddr += bytes;
5606 len -= bytes;
5607 }
5608 }
5609
5610 no_proc:
5611 /* Round starting address down to longword boundary. */
5612 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5613 /* Round ending address up; get number of longwords that makes. */
5614 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5615 / sizeof (PTRACE_XFER_TYPE));
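/* As a worked example of the rounding above, assuming (hypothetically)
   an 8-byte PTRACE_XFER_TYPE: for memaddr 0x1003 and len 10, addr
   rounds down to 0x1000 and count is ((0x100d - 0x1000) + 7) / 8 = 2,
   so the two words at 0x1000 and 0x1008 cover every requested byte.  */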
5616 /* Allocate buffer of that many longwords. */
5617 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5618
5619 /* Read all the longwords.  */
5620 errno = 0;
5621 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5622 {
5623 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5624 about coercing an 8 byte integer to a 4 byte pointer. */
5625 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5626 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5627 (PTRACE_TYPE_ARG4) 0);
5628 if (errno)
5629 break;
5630 }
5631 ret = errno;
5632
5633 /* Copy appropriate bytes out of the buffer. */
5634 if (i > 0)
5635 {
5636 i *= sizeof (PTRACE_XFER_TYPE);
5637 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5638 memcpy (myaddr,
5639 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5640 i < len ? i : len);
5641 }
5642
5643 return ret;
5644 }
5645
5646 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5647 memory at MEMADDR. On failure (cannot write to the inferior)
5648 returns the value of errno. Always succeeds if LEN is zero. */
5649
5650 static int
5651 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5652 {
5653 register int i;
5654 /* Round starting address down to longword boundary. */
5655 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5656 /* Round ending address up; get number of longwords that makes. */
5657 register int count
5658 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5659 / sizeof (PTRACE_XFER_TYPE);
5660
5661 /* Allocate buffer of that many longwords. */
5662 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5663
5664 int pid = lwpid_of (current_thread);
5665
5666 if (len == 0)
5667 {
5668 /* Zero length write always succeeds. */
5669 return 0;
5670 }
5671
5672 if (debug_threads)
5673 {
5674 /* Dump up to four bytes. */
5675 char str[4 * 2 + 1];
5676 char *p = str;
5677 int dump = len < 4 ? len : 4;
5678
5679 for (i = 0; i < dump; i++)
5680 {
5681 sprintf (p, "%02x", myaddr[i]);
5682 p += 2;
5683 }
5684 *p = '\0';
5685
5686 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5687 str, (long) memaddr, pid);
5688 }
5689
5690 /* Fill start and end extra bytes of buffer with existing memory data. */
5691
5692 errno = 0;
5693 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5694 about coercing an 8 byte integer to a 4 byte pointer. */
5695 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5696 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5697 (PTRACE_TYPE_ARG4) 0);
5698 if (errno)
5699 return errno;
5700
5701 if (count > 1)
5702 {
5703 errno = 0;
5704 buffer[count - 1]
5705 = ptrace (PTRACE_PEEKTEXT, pid,
5706 /* Coerce to a uintptr_t first to avoid potential gcc warning
5707 about coercing an 8 byte integer to a 4 byte pointer. */
5708 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5709 * sizeof (PTRACE_XFER_TYPE)),
5710 (PTRACE_TYPE_ARG4) 0);
5711 if (errno)
5712 return errno;
5713 }
5714
5715 /* Copy data to be written over corresponding part of buffer. */
5716
5717 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5718 myaddr, len);
5719
5720 /* Write the entire buffer. */
5721
5722 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5723 {
5724 errno = 0;
5725 ptrace (PTRACE_POKETEXT, pid,
5726 /* Coerce to a uintptr_t first to avoid potential gcc warning
5727 about coercing an 8 byte integer to a 4 byte pointer. */
5728 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5729 (PTRACE_TYPE_ARG4) buffer[i]);
5730 if (errno)
5731 return errno;
5732 }
5733
5734 return 0;
5735 }
5736
5737 static void
5738 linux_look_up_symbols (void)
5739 {
5740 #ifdef USE_THREAD_DB
5741 struct process_info *proc = current_process ();
5742
5743 if (proc->priv->thread_db != NULL)
5744 return;
5745
5746 thread_db_init ();
5747 #endif
5748 }
5749
5750 static void
5751 linux_request_interrupt (void)
5752 {
5753 extern unsigned long signal_pid;
5754
5755 /* Send a SIGINT to the process group.  This acts just as if the user
5756 had typed a ^C on the controlling terminal.  */
5757 kill (-signal_pid, SIGINT);
5758 }
5759
5760 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5761 to debugger memory starting at MYADDR. */
5762
5763 static int
5764 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5765 {
5766 char filename[PATH_MAX];
5767 int fd, n;
5768 int pid = lwpid_of (current_thread);
5769
5770 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5771
5772 fd = open (filename, O_RDONLY);
5773 if (fd < 0)
5774 return -1;
5775
5776 if (offset != (CORE_ADDR) 0
5777 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5778 n = -1;
5779 else
5780 n = read (fd, myaddr, len);
5781
5782 close (fd);
5783
5784 return n;
5785 }
5786
5787 /* These breakpoint- and watchpoint-related wrapper functions simply
5788 pass on the function call if the target has registered a
5789 corresponding function.  */
5790
5791 static int
5792 linux_supports_z_point_type (char z_type)
5793 {
5794 return (the_low_target.supports_z_point_type != NULL
5795 && the_low_target.supports_z_point_type (z_type));
5796 }
5797
5798 static int
5799 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5800 int size, struct raw_breakpoint *bp)
5801 {
5802 if (type == raw_bkpt_type_sw)
5803 return insert_memory_breakpoint (bp);
5804 else if (the_low_target.insert_point != NULL)
5805 return the_low_target.insert_point (type, addr, size, bp);
5806 else
5807 /* Unsupported (see target.h). */
5808 return 1;
5809 }
5810
5811 static int
5812 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5813 int size, struct raw_breakpoint *bp)
5814 {
5815 if (type == raw_bkpt_type_sw)
5816 return remove_memory_breakpoint (bp);
5817 else if (the_low_target.remove_point != NULL)
5818 return the_low_target.remove_point (type, addr, size, bp);
5819 else
5820 /* Unsupported (see target.h). */
5821 return 1;
5822 }
5823
5824 /* Implement the to_stopped_by_sw_breakpoint target_ops
5825 method. */
5826
5827 static int
5828 linux_stopped_by_sw_breakpoint (void)
5829 {
5830 struct lwp_info *lwp = get_thread_lwp (current_thread);
5831
5832 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5833 }
5834
5835 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5836 method. */
5837
5838 static int
5839 linux_supports_stopped_by_sw_breakpoint (void)
5840 {
5841 return USE_SIGTRAP_SIGINFO;
5842 }
5843
5844 /* Implement the to_stopped_by_hw_breakpoint target_ops
5845 method. */
5846
5847 static int
5848 linux_stopped_by_hw_breakpoint (void)
5849 {
5850 struct lwp_info *lwp = get_thread_lwp (current_thread);
5851
5852 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5853 }
5854
5855 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5856 method. */
5857
5858 static int
5859 linux_supports_stopped_by_hw_breakpoint (void)
5860 {
5861 return USE_SIGTRAP_SIGINFO;
5862 }
5863
5864 /* Implement the supports_hardware_single_step target_ops method. */
5865
5866 static int
5867 linux_supports_hardware_single_step (void)
5868 {
5869 return can_hardware_single_step ();
5870 }
5871
5872 static int
5873 linux_supports_software_single_step (void)
5874 {
5875 return can_software_single_step ();
5876 }
5877
5878 static int
5879 linux_stopped_by_watchpoint (void)
5880 {
5881 struct lwp_info *lwp = get_thread_lwp (current_thread);
5882
5883 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5884 }
5885
5886 static CORE_ADDR
5887 linux_stopped_data_address (void)
5888 {
5889 struct lwp_info *lwp = get_thread_lwp (current_thread);
5890
5891 return lwp->stopped_data_address;
5892 }
5893
5894 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5895 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5896 && defined(PT_TEXT_END_ADDR)
5897
5898 /* This is only used for targets that define PT_TEXT_ADDR,
5899 PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5900 target is assumed to have a different way of acquiring this
5901 information, such as loadmaps.  */
5902
5903 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5904 to tell gdb about. */
5905
5906 static int
5907 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5908 {
5909 unsigned long text, text_end, data;
5910 int pid = lwpid_of (current_thread);
5911
5912 errno = 0;
5913
5914 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5915 (PTRACE_TYPE_ARG4) 0);
5916 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5917 (PTRACE_TYPE_ARG4) 0);
5918 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5919 (PTRACE_TYPE_ARG4) 0);
5920
5921 if (errno == 0)
5922 {
5923 /* Both text and data offsets produced at compile-time (and so
5924 used by gdb) are relative to the beginning of the program,
5925 with the data segment immediately following the text segment.
5926 However, the actual runtime layout in memory may put the data
5927 somewhere else, so when we send gdb a data base-address, we
5928 use the real data base address and subtract the compile-time
5929 data base-address from it (which is just the length of the
5930 text segment). BSS immediately follows data in both
5931 cases. */
5932 *text_p = text;
5933 *data_p = data - (text_end - text);
5934
5935 return 1;
5936 }
5937 return 0;
5938 }
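/* As a purely illustrative example of the math above: if the text
   segment runs from 0x10000 (text) to 0x18000 (text_end) and the data
   segment was loaded at 0x20000 (data), we report *text_p = 0x10000
   and *data_p = 0x20000 - (0x18000 - 0x10000) = 0x18000, i.e. the
   runtime data address minus the length of the text segment.  */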
5939 #endif
5940
5941 static int
5942 linux_qxfer_osdata (const char *annex,
5943 unsigned char *readbuf, unsigned const char *writebuf,
5944 CORE_ADDR offset, int len)
5945 {
5946 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5947 }
5948
5949 /* Convert a native/host siginfo object into/from a siginfo object in
5950 the layout of the inferior's architecture.  A DIRECTION of 1 converts
from INF_SIGINFO (inferior layout) to SIGINFO (native layout); 0
converts the other way.  */
5951
5952 static void
5953 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5954 {
5955 int done = 0;
5956
5957 if (the_low_target.siginfo_fixup != NULL)
5958 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5959
5960 /* If there was no callback, or the callback didn't do anything,
5961 then just do a straight memcpy. */
5962 if (!done)
5963 {
5964 if (direction == 1)
5965 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5966 else
5967 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5968 }
5969 }
5970
5971 static int
5972 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5973 unsigned const char *writebuf, CORE_ADDR offset, int len)
5974 {
5975 int pid;
5976 siginfo_t siginfo;
5977 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5978
5979 if (current_thread == NULL)
5980 return -1;
5981
5982 pid = lwpid_of (current_thread);
5983
5984 if (debug_threads)
5985 debug_printf ("%s siginfo for lwp %d.\n",
5986 readbuf != NULL ? "Reading" : "Writing",
5987 pid);
5988
5989 if (offset >= sizeof (siginfo))
5990 return -1;
5991
5992 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5993 return -1;
5994
5995 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5996 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5997 inferior with a 64-bit GDBSERVER should look the same as debugging it
5998 with a 32-bit GDBSERVER, we need to convert it. */
5999 siginfo_fixup (&siginfo, inf_siginfo, 0);
6000
6001 if (offset + len > sizeof (siginfo))
6002 len = sizeof (siginfo) - offset;
6003
6004 if (readbuf != NULL)
6005 memcpy (readbuf, inf_siginfo + offset, len);
6006 else
6007 {
6008 memcpy (inf_siginfo + offset, writebuf, len);
6009
6010 /* Convert back to ptrace layout before flushing it out. */
6011 siginfo_fixup (&siginfo, inf_siginfo, 1);
6012
6013 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6014 return -1;
6015 }
6016
6017 return len;
6018 }
6019
6020 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6021 it lets us notice when children change state; and it acts as the
6022 handler for the sigsuspend in my_waitpid.  */
6023
6024 static void
6025 sigchld_handler (int signo)
6026 {
6027 int old_errno = errno;
6028
6029 if (debug_threads)
6030 {
6031 do
6032 {
6033 /* fprintf is not async-signal-safe, so call write
6034 directly. */
6035 if (write (2, "sigchld_handler\n",
6036 sizeof ("sigchld_handler\n") - 1) < 0)
6037 break; /* just ignore */
6038 } while (0);
6039 }
6040
6041 if (target_is_async_p ())
6042 async_file_mark (); /* trigger a linux_wait */
6043
6044 errno = old_errno;
6045 }
6046
6047 static int
6048 linux_supports_non_stop (void)
6049 {
6050 return 1;
6051 }
6052
6053 static int
6054 linux_async (int enable)
6055 {
6056 int previous = target_is_async_p ();
6057
6058 if (debug_threads)
6059 debug_printf ("linux_async (%d), previous=%d\n",
6060 enable, previous);
6061
6062 if (previous != enable)
6063 {
6064 sigset_t mask;
6065 sigemptyset (&mask);
6066 sigaddset (&mask, SIGCHLD);
6067
6068 sigprocmask (SIG_BLOCK, &mask, NULL);
6069
6070 if (enable)
6071 {
6072 if (pipe (linux_event_pipe) == -1)
6073 {
6074 linux_event_pipe[0] = -1;
6075 linux_event_pipe[1] = -1;
6076 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6077
6078 warning ("creating event pipe failed.");
6079 return previous;
6080 }
6081
6082 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6083 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6084
6085 /* Register the event loop handler. */
6086 add_file_handler (linux_event_pipe[0],
6087 handle_target_event, NULL);
6088
6089 /* Always trigger a linux_wait. */
6090 async_file_mark ();
6091 }
6092 else
6093 {
6094 delete_file_handler (linux_event_pipe[0]);
6095
6096 close (linux_event_pipe[0]);
6097 close (linux_event_pipe[1]);
6098 linux_event_pipe[0] = -1;
6099 linux_event_pipe[1] = -1;
6100 }
6101
6102 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6103 }
6104
6105 return previous;
6106 }
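/* The event pipe set up above follows the classic self-pipe pattern:
   the SIGCHLD handler calls async_file_mark, which (as defined earlier
   in this file) writes a token into the write end of linux_event_pipe,
   and the event loop then sees linux_event_pipe[0] become readable and
   dispatches handle_target_event.  */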
6107
6108 static int
6109 linux_start_non_stop (int nonstop)
6110 {
6111 /* Register or unregister from event-loop accordingly. */
6112 linux_async (nonstop);
6113
6114 if (target_is_async_p () != (nonstop != 0))
6115 return -1;
6116
6117 return 0;
6118 }
6119
6120 static int
6121 linux_supports_multi_process (void)
6122 {
6123 return 1;
6124 }
6125
6126 /* Check if fork events are supported. */
6127
6128 static int
6129 linux_supports_fork_events (void)
6130 {
6131 return linux_supports_tracefork ();
6132 }
6133
6134 /* Check if vfork events are supported. */
6135
6136 static int
6137 linux_supports_vfork_events (void)
6138 {
6139 return linux_supports_tracefork ();
6140 }
6141
6142 /* Check if exec events are supported. */
6143
6144 static int
6145 linux_supports_exec_events (void)
6146 {
6147 return linux_supports_traceexec ();
6148 }
6149
6150 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6151 options for the specified lwp. */
6152
6153 static int
6154 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6155 void *args)
6156 {
6157 struct thread_info *thread = (struct thread_info *) entry;
6158 struct lwp_info *lwp = get_thread_lwp (thread);
6159
6160 if (!lwp->stopped)
6161 {
6162 /* Stop the lwp so we can modify its ptrace options. */
6163 lwp->must_set_ptrace_flags = 1;
6164 linux_stop_lwp (lwp);
6165 }
6166 else
6167 {
6168 /* Already stopped; go ahead and set the ptrace options. */
6169 struct process_info *proc = find_process_pid (pid_of (thread));
6170 int options = linux_low_ptrace_options (proc->attached);
6171
6172 linux_enable_event_reporting (lwpid_of (thread), options);
6173 lwp->must_set_ptrace_flags = 0;
6174 }
6175
6176 return 0;
6177 }
6178
6179 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6180 ptrace flags for all inferiors. This is in case the new GDB connection
6181 doesn't support the same set of events that the previous one did. */
6182
6183 static void
6184 linux_handle_new_gdb_connection (void)
6185 {
6186 pid_t pid;
6187
6188 /* Request that all the lwps reset their ptrace options. */
6189 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6190 }
6191
6192 static int
6193 linux_supports_disable_randomization (void)
6194 {
6195 #ifdef HAVE_PERSONALITY
6196 return 1;
6197 #else
6198 return 0;
6199 #endif
6200 }
6201
6202 static int
6203 linux_supports_agent (void)
6204 {
6205 return 1;
6206 }
6207
6208 static int
6209 linux_supports_range_stepping (void)
6210 {
6211 if (*the_low_target.supports_range_stepping == NULL)
6212 return 0;
6213
6214 return (*the_low_target.supports_range_stepping) ();
6215 }
6216
6217 /* Enumerate spufs IDs for process PID. */
6218 static int
6219 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6220 {
6221 int pos = 0;
6222 int written = 0;
6223 char path[128];
6224 DIR *dir;
6225 struct dirent *entry;
6226
6227 sprintf (path, "/proc/%ld/fd", pid);
6228 dir = opendir (path);
6229 if (!dir)
6230 return -1;
6231
6232 rewinddir (dir);
6233 while ((entry = readdir (dir)) != NULL)
6234 {
6235 struct stat st;
6236 struct statfs stfs;
6237 int fd;
6238
6239 fd = atoi (entry->d_name);
6240 if (!fd)
6241 continue;
6242
6243 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6244 if (stat (path, &st) != 0)
6245 continue;
6246 if (!S_ISDIR (st.st_mode))
6247 continue;
6248
6249 if (statfs (path, &stfs) != 0)
6250 continue;
6251 if (stfs.f_type != SPUFS_MAGIC)
6252 continue;
6253
6254 if (pos >= offset && pos + 4 <= offset + len)
6255 {
6256 *(unsigned int *)(buf + pos - offset) = fd;
6257 written += 4;
6258 }
6259 pos += 4;
6260 }
6261
6262 closedir (dir);
6263 return written;
6264 }
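/* The reply is thus a packed array of 32-bit spufs context IDs (the
   numeric names of /proc/PID/fd entries that refer to directories on
   spufs), of which the caller receives the LEN-byte window starting
   at OFFSET.  */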
6265
6266 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6267 object type, using the /proc file system. */
6268 static int
6269 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6270 unsigned const char *writebuf,
6271 CORE_ADDR offset, int len)
6272 {
6273 long pid = lwpid_of (current_thread);
6274 char buf[128];
6275 int fd = 0;
6276 int ret = 0;
6277
6278 if (!writebuf && !readbuf)
6279 return -1;
6280
6281 if (!*annex)
6282 {
6283 if (!readbuf)
6284 return -1;
6285 else
6286 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6287 }
6288
6289 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6290 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6291 if (fd <= 0)
6292 return -1;
6293
6294 if (offset != 0
6295 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6296 {
6297 close (fd);
6298 return 0;
6299 }
6300
6301 if (writebuf)
6302 ret = write (fd, writebuf, (size_t) len);
6303 else
6304 ret = read (fd, readbuf, (size_t) len);
6305
6306 close (fd);
6307 return ret;
6308 }
6309
6310 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6311 struct target_loadseg
6312 {
6313 /* Core address to which the segment is mapped. */
6314 Elf32_Addr addr;
6315 /* VMA recorded in the program header. */
6316 Elf32_Addr p_vaddr;
6317 /* Size of this segment in memory. */
6318 Elf32_Word p_memsz;
6319 };
6320
6321 # if defined PT_GETDSBT
6322 struct target_loadmap
6323 {
6324 /* Protocol version number, must be zero. */
6325 Elf32_Word version;
6326 /* Pointer to the DSBT table, its size, and the DSBT index. */
6327 unsigned *dsbt_table;
6328 unsigned dsbt_size, dsbt_index;
6329 /* Number of segments in this map. */
6330 Elf32_Word nsegs;
6331 /* The actual memory map. */
6332 struct target_loadseg segs[/*nsegs*/];
6333 };
6334 # define LINUX_LOADMAP PT_GETDSBT
6335 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6336 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6337 # else
6338 struct target_loadmap
6339 {
6340 /* Protocol version number, must be zero. */
6341 Elf32_Half version;
6342 /* Number of segments in this map. */
6343 Elf32_Half nsegs;
6344 /* The actual memory map. */
6345 struct target_loadseg segs[/*nsegs*/];
6346 };
6347 # define LINUX_LOADMAP PTRACE_GETFDPIC
6348 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6349 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6350 # endif
6351
6352 static int
6353 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6354 unsigned char *myaddr, unsigned int len)
6355 {
6356 int pid = lwpid_of (current_thread);
6357 int addr = -1;
6358 struct target_loadmap *data = NULL;
6359 unsigned int actual_length, copy_length;
6360
6361 if (strcmp (annex, "exec") == 0)
6362 addr = (int) LINUX_LOADMAP_EXEC;
6363 else if (strcmp (annex, "interp") == 0)
6364 addr = (int) LINUX_LOADMAP_INTERP;
6365 else
6366 return -1;
6367
6368 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6369 return -1;
6370
6371 if (data == NULL)
6372 return -1;
6373
6374 actual_length = sizeof (struct target_loadmap)
6375 + sizeof (struct target_loadseg) * data->nsegs;
6376
6377 if (offset < 0 || offset > actual_length)
6378 return -1;
6379
6380 copy_length = actual_length - offset < len ? actual_length - offset : len;
6381 memcpy (myaddr, (char *) data + offset, copy_length);
6382 return copy_length;
6383 }
6384 #else
6385 # define linux_read_loadmap NULL
6386 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6387
6388 static void
6389 linux_process_qsupported (char **features, int count)
6390 {
6391 if (the_low_target.process_qsupported != NULL)
6392 the_low_target.process_qsupported (features, count);
6393 }
6394
6395 static int
6396 linux_supports_catch_syscall (void)
6397 {
6398 return (the_low_target.get_syscall_trapinfo != NULL
6399 && linux_supports_tracesysgood ());
6400 }
6401
6402 static int
6403 linux_get_ipa_tdesc_idx (void)
6404 {
6405 if (the_low_target.get_ipa_tdesc_idx == NULL)
6406 return 0;
6407
6408 return (*the_low_target.get_ipa_tdesc_idx) ();
6409 }
6410
6411 static int
6412 linux_supports_tracepoints (void)
6413 {
6414 if (*the_low_target.supports_tracepoints == NULL)
6415 return 0;
6416
6417 return (*the_low_target.supports_tracepoints) ();
6418 }
6419
6420 static CORE_ADDR
6421 linux_read_pc (struct regcache *regcache)
6422 {
6423 if (the_low_target.get_pc == NULL)
6424 return 0;
6425
6426 return (*the_low_target.get_pc) (regcache);
6427 }
6428
6429 static void
6430 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6431 {
6432 gdb_assert (the_low_target.set_pc != NULL);
6433
6434 (*the_low_target.set_pc) (regcache, pc);
6435 }
6436
6437 static int
6438 linux_thread_stopped (struct thread_info *thread)
6439 {
6440 return get_thread_lwp (thread)->stopped;
6441 }
6442
6443 /* This exposes stop-all-threads functionality to other modules. */
6444
6445 static void
6446 linux_pause_all (int freeze)
6447 {
6448 stop_all_lwps (freeze, NULL);
6449 }
6450
6451 /* This exposes unstop-all-threads functionality to other gdbserver
6452 modules. */
6453
6454 static void
6455 linux_unpause_all (int unfreeze)
6456 {
6457 unstop_all_lwps (unfreeze, NULL);
6458 }
6459
6460 static int
6461 linux_prepare_to_access_memory (void)
6462 {
6463 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6464 running LWP. */
6465 if (non_stop)
6466 linux_pause_all (1);
6467 return 0;
6468 }
6469
6470 static void
6471 linux_done_accessing_memory (void)
6472 {
6473 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6474 running LWP. */
6475 if (non_stop)
6476 linux_unpause_all (1);
6477 }
6478
6479 static int
6480 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6481 CORE_ADDR collector,
6482 CORE_ADDR lockaddr,
6483 ULONGEST orig_size,
6484 CORE_ADDR *jump_entry,
6485 CORE_ADDR *trampoline,
6486 ULONGEST *trampoline_size,
6487 unsigned char *jjump_pad_insn,
6488 ULONGEST *jjump_pad_insn_size,
6489 CORE_ADDR *adjusted_insn_addr,
6490 CORE_ADDR *adjusted_insn_addr_end,
6491 char *err)
6492 {
6493 return (*the_low_target.install_fast_tracepoint_jump_pad)
6494 (tpoint, tpaddr, collector, lockaddr, orig_size,
6495 jump_entry, trampoline, trampoline_size,
6496 jjump_pad_insn, jjump_pad_insn_size,
6497 adjusted_insn_addr, adjusted_insn_addr_end,
6498 err);
6499 }
6500
6501 static struct emit_ops *
6502 linux_emit_ops (void)
6503 {
6504 if (the_low_target.emit_ops != NULL)
6505 return (*the_low_target.emit_ops) ();
6506 else
6507 return NULL;
6508 }
6509
6510 static int
6511 linux_get_min_fast_tracepoint_insn_len (void)
6512 {
6513 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6514 }
6515
6516 /* Extract the AT_PHDR and AT_PHNUM values from the inferior's auxv,
storing them in *PHDR_MEMADDR and *NUM_PHDR.  Return 0 on success.  */
6517
6518 static int
6519 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6520 CORE_ADDR *phdr_memaddr, int *num_phdr)
6521 {
6522 char filename[PATH_MAX];
6523 int fd;
6524 const int auxv_size = is_elf64
6525 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6526 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6527
6528 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6529
6530 fd = open (filename, O_RDONLY);
6531 if (fd < 0)
6532 return 1;
6533
6534 *phdr_memaddr = 0;
6535 *num_phdr = 0;
6536 while (read (fd, buf, auxv_size) == auxv_size
6537 && (*phdr_memaddr == 0 || *num_phdr == 0))
6538 {
6539 if (is_elf64)
6540 {
6541 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6542
6543 switch (aux->a_type)
6544 {
6545 case AT_PHDR:
6546 *phdr_memaddr = aux->a_un.a_val;
6547 break;
6548 case AT_PHNUM:
6549 *num_phdr = aux->a_un.a_val;
6550 break;
6551 }
6552 }
6553 else
6554 {
6555 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6556
6557 switch (aux->a_type)
6558 {
6559 case AT_PHDR:
6560 *phdr_memaddr = aux->a_un.a_val;
6561 break;
6562 case AT_PHNUM:
6563 *num_phdr = aux->a_un.a_val;
6564 break;
6565 }
6566 }
6567 }
6568
6569 close (fd);
6570
6571 if (*phdr_memaddr == 0 || *num_phdr == 0)
6572 {
6573 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6574 "phdr_memaddr = %ld, phdr_num = %d",
6575 (long) *phdr_memaddr, *num_phdr);
6576 return 2;
6577 }
6578
6579 return 0;
6580 }
6581
6582 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6583
6584 static CORE_ADDR
6585 get_dynamic (const int pid, const int is_elf64)
6586 {
6587 CORE_ADDR phdr_memaddr, relocation;
6588 int num_phdr, i;
6589 unsigned char *phdr_buf;
6590 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6591
6592 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6593 return 0;
6594
6595 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6596 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6597
6598 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6599 return 0;
6600
6601 /* Compute relocation: it is expected to be 0 for "regular" executables,
6602 non-zero for PIE ones. */
6603 relocation = -1;
6604 for (i = 0; relocation == -1 && i < num_phdr; i++)
6605 if (is_elf64)
6606 {
6607 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6608
6609 if (p->p_type == PT_PHDR)
6610 relocation = phdr_memaddr - p->p_vaddr;
6611 }
6612 else
6613 {
6614 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6615
6616 if (p->p_type == PT_PHDR)
6617 relocation = phdr_memaddr - p->p_vaddr;
6618 }
6619
6620 if (relocation == -1)
6621 {
6622 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6623 all real-world executables, including PIE executables, always have
6624 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
6625 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6626 provides DT_DEBUG anyway (fpc binaries are statically linked).
6627
6628 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6629
6630 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6631
6632 return 0;
6633 }
6634
6635 for (i = 0; i < num_phdr; i++)
6636 {
6637 if (is_elf64)
6638 {
6639 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6640
6641 if (p->p_type == PT_DYNAMIC)
6642 return p->p_vaddr + relocation;
6643 }
6644 else
6645 {
6646 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6647
6648 if (p->p_type == PT_DYNAMIC)
6649 return p->p_vaddr + relocation;
6650 }
6651 }
6652
6653 return 0;
6654 }
6655
6656 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6657 can be 0 if the inferior does not yet have the library list initialized.
6658 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6659 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6660
6661 static CORE_ADDR
6662 get_r_debug (const int pid, const int is_elf64)
6663 {
6664 CORE_ADDR dynamic_memaddr;
6665 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6666 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6667 CORE_ADDR map = -1;
6668
6669 dynamic_memaddr = get_dynamic (pid, is_elf64);
6670 if (dynamic_memaddr == 0)
6671 return map;
6672
6673 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6674 {
6675 if (is_elf64)
6676 {
6677 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6678 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6679 union
6680 {
6681 Elf64_Xword map;
6682 unsigned char buf[sizeof (Elf64_Xword)];
6683 }
6684 rld_map;
6685 #endif
6686 #ifdef DT_MIPS_RLD_MAP
6687 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6688 {
6689 if (linux_read_memory (dyn->d_un.d_val,
6690 rld_map.buf, sizeof (rld_map.buf)) == 0)
6691 return rld_map.map;
6692 else
6693 break;
6694 }
6695 #endif /* DT_MIPS_RLD_MAP */
6696 #ifdef DT_MIPS_RLD_MAP_REL
6697 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6698 {
6699 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6700 rld_map.buf, sizeof (rld_map.buf)) == 0)
6701 return rld_map.map;
6702 else
6703 break;
6704 }
6705 #endif /* DT_MIPS_RLD_MAP_REL */
6706
6707 if (dyn->d_tag == DT_DEBUG && map == -1)
6708 map = dyn->d_un.d_val;
6709
6710 if (dyn->d_tag == DT_NULL)
6711 break;
6712 }
6713 else
6714 {
6715 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6716 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6717 union
6718 {
6719 Elf32_Word map;
6720 unsigned char buf[sizeof (Elf32_Word)];
6721 }
6722 rld_map;
6723 #endif
6724 #ifdef DT_MIPS_RLD_MAP
6725 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6726 {
6727 if (linux_read_memory (dyn->d_un.d_val,
6728 rld_map.buf, sizeof (rld_map.buf)) == 0)
6729 return rld_map.map;
6730 else
6731 break;
6732 }
6733 #endif /* DT_MIPS_RLD_MAP */
6734 #ifdef DT_MIPS_RLD_MAP_REL
6735 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6736 {
6737 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6738 rld_map.buf, sizeof (rld_map.buf)) == 0)
6739 return rld_map.map;
6740 else
6741 break;
6742 }
6743 #endif /* DT_MIPS_RLD_MAP_REL */
6744
6745 if (dyn->d_tag == DT_DEBUG && map == -1)
6746 map = dyn->d_un.d_val;
6747
6748 if (dyn->d_tag == DT_NULL)
6749 break;
6750 }
6751
6752 dynamic_memaddr += dyn_size;
6753 }
6754
6755 return map;
6756 }
6757
6758 /* Read one pointer from MEMADDR in the inferior. */
6759
6760 static int
6761 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6762 {
6763 int ret;
6764
6765 /* Go through a union so this works on either big or little endian
6766 hosts, when the inferior's pointer size is smaller than the size
6767 of CORE_ADDR.  It is assumed that the inferior's endianness is the
6768 same as the superior's.  */
6769 union
6770 {
6771 CORE_ADDR core_addr;
6772 unsigned int ui;
6773 unsigned char uc;
6774 } addr;
6775
6776 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6777 if (ret == 0)
6778 {
6779 if (ptr_size == sizeof (CORE_ADDR))
6780 *ptr = addr.core_addr;
6781 else if (ptr_size == sizeof (unsigned int))
6782 *ptr = addr.ui;
6783 else
6784 gdb_assert_not_reached ("unhandled pointer size");
6785 }
6786 return ret;
6787 }
6788
6789 struct link_map_offsets
6790 {
6791 /* Offset and size of r_debug.r_version. */
6792 int r_version_offset;
6793
6794 /* Offset and size of r_debug.r_map. */
6795 int r_map_offset;
6796
6797 /* Offset to l_addr field in struct link_map. */
6798 int l_addr_offset;
6799
6800 /* Offset to l_name field in struct link_map. */
6801 int l_name_offset;
6802
6803 /* Offset to l_ld field in struct link_map. */
6804 int l_ld_offset;
6805
6806 /* Offset to l_next field in struct link_map. */
6807 int l_next_offset;
6808
6809 /* Offset to l_prev field in struct link_map. */
6810 int l_prev_offset;
6811 };
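/* For reference, these offsets describe the layout of structures
   equivalent to the SVR4 <link.h> declarations, roughly (exact field
   types vary by ABI):

     struct r_debug { int r_version; struct link_map *r_map; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
                       struct link_map *l_next, *l_prev; };

   The 32-bit and 64-bit tables in linux_qxfer_libraries_svr4 below
   hard-code these field offsets for each pointer size.  */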
6812
6813 /* Construct qXfer:libraries-svr4:read reply. */
6814
6815 static int
6816 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6817 unsigned const char *writebuf,
6818 CORE_ADDR offset, int len)
6819 {
6820 char *document;
6821 unsigned document_len;
6822 struct process_info_private *const priv = current_process ()->priv;
6823 char filename[PATH_MAX];
6824 int pid, is_elf64;
6825
6826 static const struct link_map_offsets lmo_32bit_offsets =
6827 {
6828 0, /* r_version offset. */
6829 4, /* r_debug.r_map offset. */
6830 0, /* l_addr offset in link_map. */
6831 4, /* l_name offset in link_map. */
6832 8, /* l_ld offset in link_map. */
6833 12, /* l_next offset in link_map. */
6834 16 /* l_prev offset in link_map. */
6835 };
6836
6837 static const struct link_map_offsets lmo_64bit_offsets =
6838 {
6839 0, /* r_version offset. */
6840 8, /* r_debug.r_map offset. */
6841 0, /* l_addr offset in link_map. */
6842 8, /* l_name offset in link_map. */
6843 16, /* l_ld offset in link_map. */
6844 24, /* l_next offset in link_map. */
6845 32 /* l_prev offset in link_map. */
6846 };
6847 const struct link_map_offsets *lmo;
6848 unsigned int machine;
6849 int ptr_size;
6850 CORE_ADDR lm_addr = 0, lm_prev = 0;
6851 int allocated = 1024;
6852 char *p;
6853 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6854 int header_done = 0;
6855
6856 if (writebuf != NULL)
6857 return -2;
6858 if (readbuf == NULL)
6859 return -1;
6860
6861 pid = lwpid_of (current_thread);
6862 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6863 is_elf64 = elf_64_file_p (filename, &machine);
6864 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6865 ptr_size = is_elf64 ? 8 : 4;
6866
6867 while (annex[0] != '\0')
6868 {
6869 const char *sep;
6870 CORE_ADDR *addrp;
6871 int len;
6872
6873 sep = strchr (annex, '=');
6874 if (sep == NULL)
6875 break;
6876
6877 len = sep - annex;
6878 if (len == 5 && startswith (annex, "start"))
6879 addrp = &lm_addr;
6880 else if (len == 4 && startswith (annex, "prev"))
6881 addrp = &lm_prev;
6882 else
6883 {
6884 annex = strchr (sep, ';');
6885 if (annex == NULL)
6886 break;
6887 annex++;
6888 continue;
6889 }
6890
6891 annex = decode_address_to_semicolon (addrp, sep + 1);
6892 }
6893
6894 if (lm_addr == 0)
6895 {
6896 int r_version = 0;
6897
6898 if (priv->r_debug == 0)
6899 priv->r_debug = get_r_debug (pid, is_elf64);
6900
6901 /* We failed to find DT_DEBUG.  That situation will not change
6902 for this inferior, so do not retry.  Report it to GDB as E01;
6903 see GDB's solib-svr4.c for the reasons.  */
6904 if (priv->r_debug == (CORE_ADDR) -1)
6905 return -1;
6906
6907 if (priv->r_debug != 0)
6908 {
6909 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6910 (unsigned char *) &r_version,
6911 sizeof (r_version)) != 0
6912 || r_version != 1)
6913 {
6914 warning ("unexpected r_debug version %d", r_version);
6915 }
6916 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6917 &lm_addr, ptr_size) != 0)
6918 {
6919 warning ("unable to read r_map from 0x%lx",
6920 (long) priv->r_debug + lmo->r_map_offset);
6921 }
6922 }
6923 }
6924
6925 document = (char *) xmalloc (allocated);
6926 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6927 p = document + strlen (document);
6928
6929 while (lm_addr
6930 && read_one_ptr (lm_addr + lmo->l_name_offset,
6931 &l_name, ptr_size) == 0
6932 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6933 &l_addr, ptr_size) == 0
6934 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6935 &l_ld, ptr_size) == 0
6936 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6937 &l_prev, ptr_size) == 0
6938 && read_one_ptr (lm_addr + lmo->l_next_offset,
6939 &l_next, ptr_size) == 0)
6940 {
6941 unsigned char libname[PATH_MAX];
6942
6943 if (lm_prev != l_prev)
6944 {
6945 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6946 (long) lm_prev, (long) l_prev);
6947 break;
6948 }
6949
6950 /* Ignore the first entry even if it has a valid name, as the first
6951 entry corresponds to the main executable.  The first entry should not
6952 be skipped if the dynamic loader was loaded late by a static
6953 executable (see the solib-svr4.c parameter ignore_first).  But in
6954 that case the main executable does not have PT_DYNAMIC present, and
6955 this function has already exited above due to a failed get_r_debug.  */
6956 if (lm_prev == 0)
6957 {
6958 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6959 p = p + strlen (p);
6960 }
6961 else
6962 {
6963 /* Not checking for error because reading may stop before
6964 we've got PATH_MAX worth of characters. */
6965 libname[0] = '\0';
6966 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6967 libname[sizeof (libname) - 1] = '\0';
6968 if (libname[0] != '\0')
6969 {
6970 /* 6x the size for xml_escape_text below. */
6971 size_t len = 6 * strlen ((char *) libname);
6972 char *name;
6973
6974 if (!header_done)
6975 {
6976 /* Terminate `<library-list-svr4'. */
6977 *p++ = '>';
6978 header_done = 1;
6979 }
6980
6981 while (allocated < p - document + len + 200)
6982 {
6983 /* Expand to guarantee sufficient storage. */
6984 uintptr_t document_len = p - document;
6985
6986 document = (char *) xrealloc (document, 2 * allocated);
6987 allocated *= 2;
6988 p = document + document_len;
6989 }
6990
6991 name = xml_escape_text ((char *) libname);
6992 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6993 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6994 name, (unsigned long) lm_addr,
6995 (unsigned long) l_addr, (unsigned long) l_ld);
6996 free (name);
6997 }
6998 }
6999
7000 lm_prev = lm_addr;
7001 lm_addr = l_next;
7002 }
7003
7004 if (!header_done)
7005 {
7006 /* Empty list; terminate `<library-list-svr4'. */
7007 strcpy (p, "/>");
7008 }
7009 else
7010 strcpy (p, "</library-list-svr4>");
7011
7012 document_len = strlen (document);
7013 if (offset < document_len)
7014 document_len -= offset;
7015 else
7016 document_len = 0;
7017 if (len > document_len)
7018 len = document_len;
7019
7020 memcpy (readbuf, document + offset, len);
7021 xfree (document);
7022
7023 return len;
7024 }
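/* For illustration only (addresses are made up, and the real reply is
   not line-wrapped), the document built above might look like:

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd9d0">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fc4000"
              l_addr="0x7ffff7a0d000" l_ld="0x7ffff7dd1ba0"/>
     </library-list-svr4>

   GDB requests successive windows of this document via
   qXfer:libraries-svr4:read.  */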
7025
7026 #ifdef HAVE_LINUX_BTRACE
7027
7028 /* See to_disable_btrace target method. */
7029
7030 static int
7031 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7032 {
7033 enum btrace_error err;
7034
7035 err = linux_disable_btrace (tinfo);
7036 return (err == BTRACE_ERR_NONE ? 0 : -1);
7037 }
7038
7039 /* Encode an Intel Processor Trace configuration. */
7040
7041 static void
7042 linux_low_encode_pt_config (struct buffer *buffer,
7043 const struct btrace_data_pt_config *config)
7044 {
7045 buffer_grow_str (buffer, "<pt-config>\n");
7046
7047 switch (config->cpu.vendor)
7048 {
7049 case CV_INTEL:
7050 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7051 "model=\"%u\" stepping=\"%u\"/>\n",
7052 config->cpu.family, config->cpu.model,
7053 config->cpu.stepping);
7054 break;
7055
7056 default:
7057 break;
7058 }
7059
7060 buffer_grow_str (buffer, "</pt-config>\n");
7061 }
7062
7063 /* Encode a raw buffer. */
7064
7065 static void
7066 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7067 unsigned int size)
7068 {
7069 if (size == 0)
7070 return;
7071
7072 /* We use hex encoding - see common/rsp-low.h. */
7073 buffer_grow_str (buffer, "<raw>\n");
7074
7075 while (size-- > 0)
7076 {
7077 char elem[2];
7078
7079 elem[0] = tohex ((*data >> 4) & 0xf);
7080 elem[1] = tohex (*data++ & 0xf);
7081
7082 buffer_grow (buffer, elem, 2);
7083 }
7084
7085 buffer_grow_str (buffer, "</raw>\n");
7086 }
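/* For instance, the two raw bytes { 0xde, 0xad } would be encoded as
   the four characters "dead" between the <raw> tags.  */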
7087
7088 /* See to_read_btrace target method. */
7089
7090 static int
7091 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7092 enum btrace_read_type type)
7093 {
7094 struct btrace_data btrace;
7095 struct btrace_block *block;
7096 enum btrace_error err;
7097 int i;
7098
7099 btrace_data_init (&btrace);
7100
7101 err = linux_read_btrace (&btrace, tinfo, type);
7102 if (err != BTRACE_ERR_NONE)
7103 {
7104 if (err == BTRACE_ERR_OVERFLOW)
7105 buffer_grow_str0 (buffer, "E.Overflow.");
7106 else
7107 buffer_grow_str0 (buffer, "E.Generic Error.");
7108
7109 goto err;
7110 }
7111
7112 switch (btrace.format)
7113 {
7114 case BTRACE_FORMAT_NONE:
7115 buffer_grow_str0 (buffer, "E.No Trace.");
7116 goto err;
7117
7118 case BTRACE_FORMAT_BTS:
7119 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7120 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7121
7122 for (i = 0;
7123 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7124 i++)
7125 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7126 paddress (block->begin), paddress (block->end));
7127
7128 buffer_grow_str0 (buffer, "</btrace>\n");
7129 break;
7130
7131 case BTRACE_FORMAT_PT:
7132 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7133 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7134 buffer_grow_str (buffer, "<pt>\n");
7135
7136 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7137
7138 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7139 btrace.variant.pt.size);
7140
7141 buffer_grow_str (buffer, "</pt>\n");
7142 buffer_grow_str0 (buffer, "</btrace>\n");
7143 break;
7144
7145 default:
7146 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7147 goto err;
7148 }
7149
7150 btrace_data_fini (&btrace);
7151 return 0;
7152
7153 err:
7154 btrace_data_fini (&btrace);
7155 return -1;
7156 }
7157
7158 /* See to_btrace_conf target method. */
7159
7160 static int
7161 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7162 struct buffer *buffer)
7163 {
7164 const struct btrace_config *conf;
7165
7166 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7167 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7168
7169 conf = linux_btrace_conf (tinfo);
7170 if (conf != NULL)
7171 {
7172 switch (conf->format)
7173 {
7174 case BTRACE_FORMAT_NONE:
7175 break;
7176
7177 case BTRACE_FORMAT_BTS:
7178 buffer_xml_printf (buffer, "<bts");
7179 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7180 buffer_xml_printf (buffer, " />\n");
7181 break;
7182
7183 case BTRACE_FORMAT_PT:
7184 buffer_xml_printf (buffer, "<pt");
7185 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7186 buffer_xml_printf (buffer, "/>\n");
7187 break;
7188 }
7189 }
7190
7191 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7192 return 0;
7193 }
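/* For example, for a BTS configuration with a 64KiB buffer, the reply
   built above would be:

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
     <bts size="0x10000" />
     </btrace-conf>
*/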
7194 #endif /* HAVE_LINUX_BTRACE */
7195
7196 /* See nat/linux-nat.h. */
7197
7198 ptid_t
7199 current_lwp_ptid (void)
7200 {
7201 return ptid_of (current_thread);
7202 }
7203
7204 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7205
7206 static int
7207 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7208 {
7209 if (the_low_target.breakpoint_kind_from_pc != NULL)
7210 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7211 else
7212 return default_breakpoint_kind_from_pc (pcptr);
7213 }
7214
7215 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7216
7217 static const gdb_byte *
7218 linux_sw_breakpoint_from_kind (int kind, int *size)
7219 {
7220 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7221
7222 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7223 }
7224
7225 /* Implementation of the target_ops method
7226 "breakpoint_kind_from_current_state". */
7227
7228 static int
7229 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7230 {
7231 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7232 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7233 else
7234 return linux_breakpoint_kind_from_pc (pcptr);
7235 }
7236
7237 /* Default implementation of linux_target_ops method "set_pc" for a
7238 32-bit pc register that is literally named "pc".  */
7239
7240 void
7241 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7242 {
7243 uint32_t newpc = pc;
7244
7245 supply_register_by_name (regcache, "pc", &newpc);
7246 }
7247
7248 /* Default implementation of linux_target_ops method "get_pc" for a
7249 32-bit pc register that is literally named "pc".  */
7250
7251 CORE_ADDR
7252 linux_get_pc_32bit (struct regcache *regcache)
7253 {
7254 uint32_t pc;
7255
7256 collect_register_by_name (regcache, "pc", &pc);
7257 if (debug_threads)
7258 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7259 return pc;
7260 }
7261
7262 /* Default implementation of linux_target_ops method "set_pc" for a
7263 64-bit pc register that is literally named "pc".  */
7264
7265 void
7266 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7267 {
7268 uint64_t newpc = pc;
7269
7270 supply_register_by_name (regcache, "pc", &newpc);
7271 }
7272
7273 /* Default implementation of linux_target_ops method "get_pc" for a
7274 64-bit pc register that is literally named "pc".  */
7275
7276 CORE_ADDR
7277 linux_get_pc_64bit (struct regcache *regcache)
7278 {
7279 uint64_t pc;
7280
7281 collect_register_by_name (regcache, "pc", &pc);
7282 if (debug_threads)
7283 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7284 return pc;
7285 }
7286
7287
7288 static struct target_ops linux_target_ops = {
7289 linux_create_inferior,
7290 linux_post_create_inferior,
7291 linux_attach,
7292 linux_kill,
7293 linux_detach,
7294 linux_mourn,
7295 linux_join,
7296 linux_thread_alive,
7297 linux_resume,
7298 linux_wait,
7299 linux_fetch_registers,
7300 linux_store_registers,
7301 linux_prepare_to_access_memory,
7302 linux_done_accessing_memory,
7303 linux_read_memory,
7304 linux_write_memory,
7305 linux_look_up_symbols,
7306 linux_request_interrupt,
7307 linux_read_auxv,
7308 linux_supports_z_point_type,
7309 linux_insert_point,
7310 linux_remove_point,
7311 linux_stopped_by_sw_breakpoint,
7312 linux_supports_stopped_by_sw_breakpoint,
7313 linux_stopped_by_hw_breakpoint,
7314 linux_supports_stopped_by_hw_breakpoint,
7315 linux_supports_hardware_single_step,
7316 linux_stopped_by_watchpoint,
7317 linux_stopped_data_address,
7318 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7319 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7320 && defined(PT_TEXT_END_ADDR)
7321 linux_read_offsets,
7322 #else
7323 NULL,
7324 #endif
7325 #ifdef USE_THREAD_DB
7326 thread_db_get_tls_address,
7327 #else
7328 NULL,
7329 #endif
7330 linux_qxfer_spu,
7331 hostio_last_error_from_errno,
7332 linux_qxfer_osdata,
7333 linux_xfer_siginfo,
7334 linux_supports_non_stop,
7335 linux_async,
7336 linux_start_non_stop,
7337 linux_supports_multi_process,
7338 linux_supports_fork_events,
7339 linux_supports_vfork_events,
7340 linux_supports_exec_events,
7341 linux_handle_new_gdb_connection,
7342 #ifdef USE_THREAD_DB
7343 thread_db_handle_monitor_command,
7344 #else
7345 NULL,
7346 #endif
7347 linux_common_core_of_thread,
7348 linux_read_loadmap,
7349 linux_process_qsupported,
7350 linux_supports_tracepoints,
7351 linux_read_pc,
7352 linux_write_pc,
7353 linux_thread_stopped,
7354 NULL,
7355 linux_pause_all,
7356 linux_unpause_all,
7357 linux_stabilize_threads,
7358 linux_install_fast_tracepoint_jump_pad,
7359 linux_emit_ops,
7360 linux_supports_disable_randomization,
7361 linux_get_min_fast_tracepoint_insn_len,
7362 linux_qxfer_libraries_svr4,
7363 linux_supports_agent,
7364 #ifdef HAVE_LINUX_BTRACE
7365 linux_supports_btrace,
7366 linux_enable_btrace,
7367 linux_low_disable_btrace,
7368 linux_low_read_btrace,
7369 linux_low_btrace_conf,
7370 #else
7371 NULL,
7372 NULL,
7373 NULL,
7374 NULL,
7375 NULL,
7376 #endif
7377 linux_supports_range_stepping,
7378 linux_proc_pid_to_exec_file,
7379 linux_mntns_open_cloexec,
7380 linux_mntns_unlink,
7381 linux_mntns_readlink,
7382 linux_breakpoint_kind_from_pc,
7383 linux_sw_breakpoint_from_kind,
7384 linux_proc_tid_get_name,
7385 linux_breakpoint_kind_from_current_state,
7386 linux_supports_software_single_step,
7387 linux_supports_catch_syscall,
7388 linux_get_ipa_tdesc_idx,
7389 };
7390
7391 #ifdef HAVE_LINUX_REGSETS
7392 void
7393 initialize_regsets_info (struct regsets_info *info)
7394 {
7395 for (info->num_regsets = 0;
7396 info->regsets[info->num_regsets].size >= 0;
7397 info->num_regsets++)
7398 ;
7399 }
7400 #endif
7401
7402 void
7403 initialize_low (void)
7404 {
7405 struct sigaction sigchld_action;
7406
7407 memset (&sigchld_action, 0, sizeof (sigchld_action));
7408 set_target_ops (&linux_target_ops);
7409
7410 linux_ptrace_init_warnings ();
7411
7412 sigchld_action.sa_handler = sigchld_handler;
7413 sigemptyset (&sigchld_action.sa_mask);
7414 sigchld_action.sa_flags = SA_RESTART;
7415 sigaction (SIGCHLD, &sigchld_action, NULL);
7416
7417 initialize_low_arch ();
7418
7419 linux_check_ptrace_features ();
7420 }