/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

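/* A signal reported by the kernel while an LWP was stopped, queued
   for (re-)delivery when the LWP is next resumed.  */
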
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

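/* Remove LWP's thread from the thread list and free the lwp and its
   arch-specific data.  */
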
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list.
   We return 1 if the event should not be reported to higher layers,
   and 0 if it should be reported.  If we see an exec event, we will
   modify ORIG_EVENT_LWP to point to a new LWP representing the new
   program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave the reinsert breakpoints there, the child
                 will hit them, so uninsert the reinsert breakpoints
                 from the parent (and the child).  Once the vfork child
                 is done, reinsert them back in the parent.  */
              uninsert_reinsert_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = XNEW (struct target_desc);
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* If the parent thread is doing step-over with reinsert
             breakpoints, the list of reinsert breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once the
             vforked child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_reinsert_breakpoints (child_thr);

              gdb_assert (has_reinsert_breakpoints (event_thr));
              gdb_assert (!has_reinsert_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_reinsert_breakpoints (event_thr);

          gdb_assert (has_reinsert_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

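/* Allocate and initialize a new lwp_info for PTID, register it on
   the thread list, and give the low target a chance to set up
   per-thread data.  */
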
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

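/* Callback for find_inferior.  Counts the threads belonging to
   COUNTER->pid; returns non-zero (stopping the iteration) as soon as
   a second one is seen.  */
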
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

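/* Return non-zero if process PID has exactly one thread left on the
   thread list.  */
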
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

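/* Kill the whole process PID: stop all its LWPs, kill each of them
   (leader last), and mourn the process.  */
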
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (ptid_get_pid (entry->id) == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

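/* Detach from process PID: detach every one of its non-leader LWPs
   first, then the leader itself, and mourn the process.  */
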
static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

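/* Forget everything we know about PROCESS: delete its LWPs, free its
   private data, and remove it from the process list.  */
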
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

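/* Join with process PID: block until it has exited or been killed by
   a signal, or until it is no longer our child.  */
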
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */

static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */

static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

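/* Callback for find_inferior.  Returns non-zero if ENTRY's lwpid
   matches the lwp (or, if that is unset, the pid) of the ptid
   pointed to by DATA.  */
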
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

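/* Find the lwp on the thread list whose lwpid matches PTID's lwp
   field (or PTID's pid, if the lwp field is unset).  Returns NULL if
   no match is found.  */
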
1829 struct lwp_info *
1830 find_lwp_pid (ptid_t ptid)
1831 {
1832 struct inferior_list_entry *thread
1833 = find_inferior (&all_threads, same_lwp, &ptid);
1834
1835 if (thread == NULL)
1836 return NULL;
1837
1838 return get_thread_lwp ((struct thread_info *) thread);
1839 }
1840
1841 /* Return the number of known LWPs in the tgid given by PID. */
1842
1843 static int
1844 num_lwps (int pid)
1845 {
1846 struct inferior_list_entry *inf, *tmp;
1847 int count = 0;
1848
1849 ALL_INFERIORS (&all_threads, inf, tmp)
1850 {
1851 if (ptid_get_pid (inf->id) == pid)
1852 count++;
1853 }
1854
1855 return count;
1856 }
1857
1858 /* The arguments passed to iterate_over_lwps. */
1859
1860 struct iterate_over_lwps_args
1861 {
1862 /* The FILTER argument passed to iterate_over_lwps. */
1863 ptid_t filter;
1864
1865 /* The CALLBACK argument passed to iterate_over_lwps. */
1866 iterate_over_lwps_ftype *callback;
1867
1868 /* The DATA argument passed to iterate_over_lwps. */
1869 void *data;
1870 };
1871
1872 /* Callback for find_inferior used by iterate_over_lwps to filter
1873 calls to the callback supplied to that function. Returning a
1874 nonzero value causes find_inferiors to stop iterating and return
1875 the current inferior_list_entry. Returning zero indicates that
1876 find_inferiors should continue iterating. */
1877
1878 static int
1879 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1880 {
1881 struct iterate_over_lwps_args *args
1882 = (struct iterate_over_lwps_args *) args_p;
1883
1884 if (ptid_match (entry->id, args->filter))
1885 {
1886 struct thread_info *thr = (struct thread_info *) entry;
1887 struct lwp_info *lwp = get_thread_lwp (thr);
1888
1889 return (*args->callback) (lwp, args->data);
1890 }
1891
1892 return 0;
1893 }
1894
1895 /* See nat/linux-nat.h. */
1896
1897 struct lwp_info *
1898 iterate_over_lwps (ptid_t filter,
1899 iterate_over_lwps_ftype callback,
1900 void *data)
1901 {
1902 struct iterate_over_lwps_args args = {filter, callback, data};
1903 struct inferior_list_entry *entry;
1904
1905 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1906 if (entry == NULL)
1907 return NULL;
1908
1909 return get_thread_lwp ((struct thread_info *) entry);
1910 }
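
/* Illustrative sketch (editor's addition): a hypothetical callback
   that counts the LWPs matching FILTER.  Iteration stops early only
   when the callback returns nonzero, so returning 0 visits every
   match:

     static int
     count_lwp (struct lwp_info *lwp, void *data)
     {
       (*(int *) data)++;
       return 0;
     }

     int n = 0;
     iterate_over_lwps (pid_to_ptid (pid), count_lwp, &n);  */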
1911
1912 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1913 their exits until all other threads in the group have exited. */
1914
1915 static void
1916 check_zombie_leaders (void)
1917 {
1918 struct process_info *proc, *tmp;
1919
1920 ALL_PROCESSES (proc, tmp)
1921 {
1922 pid_t leader_pid = pid_of (proc);
1923 struct lwp_info *leader_lp;
1924
1925 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1926
1927 if (debug_threads)
1928 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1929 "num_lwps=%d, zombie=%d\n",
1930 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1931 linux_proc_pid_is_zombie (leader_pid));
1932
1933 if (leader_lp != NULL && !leader_lp->stopped
1934 /* Check if there are other threads in the group, as we may
1935 have raced with the inferior simply exiting. */
1936 && !last_thread_of_process_p (leader_pid)
1937 && linux_proc_pid_is_zombie (leader_pid))
1938 {
1939 /* A leader zombie can mean one of two things:
1940
1941 - It exited, and there's an exit status pending and
1942 available, or only the leader exited (not the whole
1943 program). In the latter case, we can't waitpid the
1944 leader's exit status until all other threads are gone.
1945
1946 - There are 3 or more threads in the group, and a thread
1947 other than the leader exec'd. On an exec, the Linux
1948 kernel destroys all other threads (except the execing
1949 one) in the thread group, and resets the execing thread's
1950 tid to the tgid. No exit notification is sent for the
1951 execing thread -- from the ptracer's perspective, it
1952 appears as though the execing thread just vanishes.
1953 Until we reap all other threads except the leader and the
1954 execing thread, the leader will be zombie, and the
1955 execing thread will be in `D (disk sleep)'. As soon as
1956 all other threads are reaped, the execing thread changes
1957 its tid to the tgid, and the previous (zombie) leader
1958 vanishes, giving way to the "new" leader. We could try
1959 distinguishing the exit and exec cases, by waiting once
1960 more, and seeing if something comes out, but it doesn't
1961 sound useful. The previous leader _does_ go away, and
1962 we'll re-add the new one once we see the exec event
1963 (which is just the same as what would happen if the
1964 previous leader did exit voluntarily before some other
1965 thread execs). */
1966
1967 if (debug_threads)
1968 fprintf (stderr,
1969 "CZL: Thread group leader %d zombie "
1970 "(it exited, or another thread execd).\n",
1971 leader_pid);
1972
1973 delete_lwp (leader_lp);
1974 }
1975 }
1976 }
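
/* Editor's note, summarizing the two cases above as /proc would show
   them (a sketch, not exhaustive):

     exit case:  leader Z (zombie), all other threads already gone
     exec case:  leader Z (zombie), execing thread D (disk sleep)

   In both cases the zombie leader can't be reaped yet, so it is
   simply deleted from our list; in the exec case the "new" leader is
   re-added when the PTRACE_EVENT_EXEC report arrives (see
   linux_low_filter_event).  */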
1977
1978 /* Callback for `find_inferior'. Returns the first LWP that is not
1979 stopped. ARG is a PTID filter. */
1980
1981 static int
1982 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1983 {
1984 struct thread_info *thr = (struct thread_info *) entry;
1985 struct lwp_info *lwp;
1986 ptid_t filter = *(ptid_t *) arg;
1987
1988 if (!ptid_match (ptid_of (thr), filter))
1989 return 0;
1990
1991 lwp = get_thread_lwp (thr);
1992 if (!lwp->stopped)
1993 return 1;
1994
1995 return 0;
1996 }
1997
1998 /* Increment LWP's suspend count. */
1999
2000 static void
2001 lwp_suspended_inc (struct lwp_info *lwp)
2002 {
2003 lwp->suspended++;
2004
2005 if (debug_threads && lwp->suspended > 4)
2006 {
2007 struct thread_info *thread = get_lwp_thread (lwp);
2008
2009 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2010 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2011 }
2012 }
2013
2014 /* Decrement LWP's suspend count. */
2015
2016 static void
2017 lwp_suspended_decr (struct lwp_info *lwp)
2018 {
2019 lwp->suspended--;
2020
2021 if (lwp->suspended < 0)
2022 {
2023 struct thread_info *thread = get_lwp_thread (lwp);
2024
2025 internal_error (__FILE__, __LINE__,
2026 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2027 lwp->suspended);
2028 }
2029 }
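
/* Illustrative sketch (editor's addition): the counts must be
   balanced, typically bracketing a region during which some global
   operation (pausing or unpausing all threads) must not resume this
   LWP:

     lwp_suspended_inc (lwp);
     ... work that may stop or unstop all threads ...
     lwp_suspended_decr (lwp);

   handle_tracepoints below is one such caller; an unbalanced
   decrement trips the internal_error above.  */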
2030
2031 /* This function should only be called if the LWP got a SIGTRAP.
2032
2033 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2034 event was handled, 0 otherwise. */
2035
2036 static int
2037 handle_tracepoints (struct lwp_info *lwp)
2038 {
2039 struct thread_info *tinfo = get_lwp_thread (lwp);
2040 int tpoint_related_event = 0;
2041
2042 gdb_assert (lwp->suspended == 0);
2043
2044 /* If this tracepoint hit causes a tracing stop, we'll immediately
2045 uninsert tracepoints. To do this, we temporarily pause all
2046 threads, unpatch away, and then unpause threads. We need to make
2047 sure the unpausing doesn't resume LWP too. */
2048 lwp_suspended_inc (lwp);
2049
2050 /* And we need to be sure that any all-threads-stopping doesn't try
2051 to move threads out of the jump pads, as it could deadlock the
2052 inferior (LWP could be in the jump pad, maybe even holding the
2053 lock). */
2054
2055 /* Do any necessary step collect actions. */
2056 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2057
2058 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2059
2060 /* See if we just hit a tracepoint and do its main collect
2061 actions. */
2062 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2063
2064 lwp_suspended_decr (lwp);
2065
2066 gdb_assert (lwp->suspended == 0);
2067 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2068
2069 if (tpoint_related_event)
2070 {
2071 if (debug_threads)
2072 debug_printf ("got a tracepoint event\n");
2073 return 1;
2074 }
2075
2076 return 0;
2077 }
2078
2079 /* Convenience wrapper. Returns true if LWP is presently collecting a
2080 fast tracepoint. */
2081
2082 static int
2083 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2084 struct fast_tpoint_collect_status *status)
2085 {
2086 CORE_ADDR thread_area;
2087 struct thread_info *thread = get_lwp_thread (lwp);
2088
2089 if (the_low_target.get_thread_area == NULL)
2090 return 0;
2091
2092 /* Get the thread area address. This is used to recognize which
2093 thread is which when tracing with the in-process agent library.
2094 We don't read anything from the address, and treat it as opaque;
2095 it's the address itself that we assume is unique per-thread. */
2096 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2097 return 0;
2098
2099 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2100 }
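
/* Editor's note: the thread area pointer is used purely as an opaque
   per-thread key.  A minimal sketch of the check, with hypothetical
   locals:

     CORE_ADDR thread_area;
     if ((*the_low_target.get_thread_area) (lwpid, &thread_area) == 0)
       r = fast_tracepoint_collecting (thread_area, stop_pc, &status);

   where fast_tracepoint_collecting (tracepoint.c) reports whether
   STOP_PC falls inside a jump pad and how far the collection got.  */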
2101
2102 /* The reason we resume in the caller is that we want to be able
2103 to pass lwp->status_pending as WSTAT, and we need to clear
2104 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2105 refuses to resume. */
2106
2107 static int
2108 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2109 {
2110 struct thread_info *saved_thread;
2111
2112 saved_thread = current_thread;
2113 current_thread = get_lwp_thread (lwp);
2114
2115 if ((wstat == NULL
2116 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2117 && supports_fast_tracepoints ()
2118 && agent_loaded_p ())
2119 {
2120 struct fast_tpoint_collect_status status;
2121 int r;
2122
2123 if (debug_threads)
2124 debug_printf ("Checking whether LWP %ld needs to move out of the "
2125 "jump pad.\n",
2126 lwpid_of (current_thread));
2127
2128 r = linux_fast_tracepoint_collecting (lwp, &status);
2129
2130 if (wstat == NULL
2131 || (WSTOPSIG (*wstat) != SIGILL
2132 && WSTOPSIG (*wstat) != SIGFPE
2133 && WSTOPSIG (*wstat) != SIGSEGV
2134 && WSTOPSIG (*wstat) != SIGBUS))
2135 {
2136 lwp->collecting_fast_tracepoint = r;
2137
2138 if (r != 0)
2139 {
2140 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2141 {
2142 /* Haven't executed the original instruction yet.
2143 Set breakpoint there, and wait till it's hit,
2144 then single-step until exiting the jump pad. */
2145 lwp->exit_jump_pad_bkpt
2146 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2147 }
2148
2149 if (debug_threads)
2150 debug_printf ("Checking whether LWP %ld needs to move out of "
2151 "the jump pad...it does\n",
2152 lwpid_of (current_thread));
2153 current_thread = saved_thread;
2154
2155 return 1;
2156 }
2157 }
2158 else
2159 {
2160 /* If we get a synchronous signal while collecting, *and*
2161 while executing the (relocated) original instruction,
2162 reset the PC to point at the tpoint address, before
2163 reporting to GDB. Otherwise, it's an IPA lib bug: just
2164 report the signal to GDB, and pray for the best. */
2165
2166 lwp->collecting_fast_tracepoint = 0;
2167
2168 if (r != 0
2169 && (status.adjusted_insn_addr <= lwp->stop_pc
2170 && lwp->stop_pc < status.adjusted_insn_addr_end))
2171 {
2172 siginfo_t info;
2173 struct regcache *regcache;
2174
2175 /* The si_addr on a few signals references the address
2176 of the faulting instruction. Adjust that as
2177 well. */
2178 if ((WSTOPSIG (*wstat) == SIGILL
2179 || WSTOPSIG (*wstat) == SIGFPE
2180 || WSTOPSIG (*wstat) == SIGBUS
2181 || WSTOPSIG (*wstat) == SIGSEGV)
2182 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2183 (PTRACE_TYPE_ARG3) 0, &info) == 0
2184 /* Final check just to make sure we don't clobber
2185 the siginfo of non-kernel-sent signals. */
2186 && (uintptr_t) info.si_addr == lwp->stop_pc)
2187 {
2188 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2189 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2190 (PTRACE_TYPE_ARG3) 0, &info);
2191 }
2192
2193 regcache = get_thread_regcache (current_thread, 1);
2194 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2195 lwp->stop_pc = status.tpoint_addr;
2196
2197 /* Cancel any fast tracepoint lock this thread was
2198 holding. */
2199 force_unlock_trace_buffer ();
2200 }
2201
2202 if (lwp->exit_jump_pad_bkpt != NULL)
2203 {
2204 if (debug_threads)
2205 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2206 "stopping all threads momentarily.\n");
2207
2208 stop_all_lwps (1, lwp);
2209
2210 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2211 lwp->exit_jump_pad_bkpt = NULL;
2212
2213 unstop_all_lwps (1, lwp);
2214
2215 gdb_assert (lwp->suspended >= 0);
2216 }
2217 }
2218 }
2219
2220 if (debug_threads)
2221 debug_printf ("Checking whether LWP %ld needs to move out of the "
2222 "jump pad...no\n",
2223 lwpid_of (current_thread));
2224
2225 current_thread = saved_thread;
2226 return 0;
2227 }
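
/* Editor's summary of the contract above (not in the original):
   return 1 when the LWP must first finish its fast tracepoint
   collection, in which case the caller defers the signal and keeps
   the LWP running; return 0 when the event can be reported right
   away.  In the synchronous-fault case the PC is also rewound from
   the jump pad to the tracepoint address, so GDB sees a coherent
   stop.  */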
2228
2229 /* Enqueue one signal in the "signals to report later when out of the
2230 jump pad" list. */
2231
2232 static void
2233 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2234 {
2235 struct pending_signals *p_sig;
2236 struct thread_info *thread = get_lwp_thread (lwp);
2237
2238 if (debug_threads)
2239 debug_printf ("Deferring signal %d for LWP %ld.\n",
2240 WSTOPSIG (*wstat), lwpid_of (thread));
2241
2242 if (debug_threads)
2243 {
2244 struct pending_signals *sig;
2245
2246 for (sig = lwp->pending_signals_to_report;
2247 sig != NULL;
2248 sig = sig->prev)
2249 debug_printf (" Already queued %d\n",
2250 sig->signal);
2251
2252 debug_printf (" (no more currently queued signals)\n");
2253 }
2254
2255 /* Don't enqueue non-RT signals if they are already in the deferred
2256 queue. (SIGSTOP being the easiest signal to see ending up here
2257 twice.) */
2258 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2259 {
2260 struct pending_signals *sig;
2261
2262 for (sig = lwp->pending_signals_to_report;
2263 sig != NULL;
2264 sig = sig->prev)
2265 {
2266 if (sig->signal == WSTOPSIG (*wstat))
2267 {
2268 if (debug_threads)
2269 debug_printf ("Not requeuing already queued non-RT signal %d"
2270 " for LWP %ld\n",
2271 sig->signal,
2272 lwpid_of (thread));
2273 return;
2274 }
2275 }
2276 }
2277
2278 p_sig = XCNEW (struct pending_signals);
2279 p_sig->prev = lwp->pending_signals_to_report;
2280 p_sig->signal = WSTOPSIG (*wstat);
2281
2282 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2283 &p_sig->info);
2284
2285 lwp->pending_signals_to_report = p_sig;
2286 }
2287
2288 /* Dequeue one signal from the "signals to report later when out of
2289 the jump pad" list. */
2290
2291 static int
2292 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2293 {
2294 struct thread_info *thread = get_lwp_thread (lwp);
2295
2296 if (lwp->pending_signals_to_report != NULL)
2297 {
2298 struct pending_signals **p_sig;
2299
2300 p_sig = &lwp->pending_signals_to_report;
2301 while ((*p_sig)->prev != NULL)
2302 p_sig = &(*p_sig)->prev;
2303
2304 *wstat = W_STOPCODE ((*p_sig)->signal);
2305 if ((*p_sig)->info.si_signo != 0)
2306 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2307 &(*p_sig)->info);
2308 free (*p_sig);
2309 *p_sig = NULL;
2310
2311 if (debug_threads)
2312 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2313 WSTOPSIG (*wstat), lwpid_of (thread));
2314
2315 if (debug_threads)
2316 {
2317 struct pending_signals *sig;
2318
2319 for (sig = lwp->pending_signals_to_report;
2320 sig != NULL;
2321 sig = sig->prev)
2322 debug_printf (" Still queued %d\n",
2323 sig->signal);
2324
2325 debug_printf (" (no more queued signals)\n");
2326 }
2327
2328 return 1;
2329 }
2330
2331 return 0;
2332 }
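
/* Illustrative sketch (editor's addition): the pending list is
   singly linked through `prev', newest entry at the head, so dequeue
   walks to the tail and pops the oldest entry.  After deferring
   SIGUSR1 and then SIGUSR2:

     head -> { SIGUSR2 } -prev-> { SIGUSR1 } -prev-> NULL

   dequeue_one_deferred_signal reports SIGUSR1 first, preserving the
   order in which the signals were intercepted.  */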
2333
2334 /* Fetch the possibly triggered data watchpoint info and store it in
2335 CHILD.
2336
2337 On some archs, like x86, that use debug registers to set
2338 watchpoints, it's possible that the way to know which watched
2339 address trapped is to check the register that is used to select
2340 which address to watch. The problem is that between setting the
2341 watchpoint and reading back which data address trapped, the user
2342 may change the set of watchpoints and, as a consequence, GDB
2343 changes the debug registers in the inferior. To avoid reading
2344 back a stale stopped-data-address when that happens, we cache in
2345 CHILD the fact that a watchpoint trapped, and the corresponding
2346 data address, as soon as we see CHILD stop with a SIGTRAP. If GDB
2347 rewrites the debug registers meanwhile, the cached data still holds. */
2348
2349 static int
2350 check_stopped_by_watchpoint (struct lwp_info *child)
2351 {
2352 if (the_low_target.stopped_by_watchpoint != NULL)
2353 {
2354 struct thread_info *saved_thread;
2355
2356 saved_thread = current_thread;
2357 current_thread = get_lwp_thread (child);
2358
2359 if (the_low_target.stopped_by_watchpoint ())
2360 {
2361 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2362
2363 if (the_low_target.stopped_data_address != NULL)
2364 child->stopped_data_address
2365 = the_low_target.stopped_data_address ();
2366 else
2367 child->stopped_data_address = 0;
2368 }
2369
2370 current_thread = saved_thread;
2371 }
2372
2373 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2374 }
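
/* Editor's note: a timeline of the race the comment above guards
   against, assuming debug-register style watchpoints:

     t0  watchpoint W1 fires; CHILD stops with a SIGTRAP
     t1  GDB deletes W1 and inserts W2, rewriting the debug registers
     t2  gdbserver asks the registers which address trapped

   Without the caching done at t0, the answer read at t2 would
   describe W2, not the W1 hit that actually stopped CHILD.  */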
2375
2376 /* Return the ptrace options that we want to try to enable. */
2377
2378 static int
2379 linux_low_ptrace_options (int attached)
2380 {
2381 int options = 0;
2382
2383 if (!attached)
2384 options |= PTRACE_O_EXITKILL;
2385
2386 if (report_fork_events)
2387 options |= PTRACE_O_TRACEFORK;
2388
2389 if (report_vfork_events)
2390 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2391
2392 if (report_exec_events)
2393 options |= PTRACE_O_TRACEEXEC;
2394
2395 options |= PTRACE_O_TRACESYSGOOD;
2396
2397 return options;
2398 }
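
/* Worked example (editor's addition): for a process gdbserver
   spawned itself (ATTACHED == 0), with fork and exec reporting
   enabled but vfork reporting disabled, the result is

     PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK
       | PTRACE_O_TRACEEXEC | PTRACE_O_TRACESYSGOOD.

   PTRACE_O_TRACESYSGOOD is set unconditionally so that syscall stops
   arrive as SIGTRAP | 0x80 (SYSCALL_SIGTRAP) and can be told apart
   from ordinary SIGTRAPs.  */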
2399
2400 /* Do low-level handling of the event, and check if we should go on
2401 and pass it to caller code. Return the affected lwp if we are, or
2402 NULL otherwise. */
2403
2404 static struct lwp_info *
2405 linux_low_filter_event (int lwpid, int wstat)
2406 {
2407 struct lwp_info *child;
2408 struct thread_info *thread;
2409 int have_stop_pc = 0;
2410
2411 child = find_lwp_pid (pid_to_ptid (lwpid));
2412
2413 /* Check for stop events reported by a process we didn't already
2414 know about - anything not already in our LWP list.
2415
2416 If we're expecting to receive stopped processes after
2417 fork, vfork, and clone events, then we'll just add the
2418 new one to our list and go back to waiting for the event
2419 to be reported - the stopped process might be returned
2420 from waitpid before or after the event is.
2421
2422 But note the case of a non-leader thread exec'ing after the
2423 leader has exited and gone from our lists (because
2424 check_zombie_leaders deleted it). The non-leader thread
2425 changes its tid to the tgid. */
2426
2427 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2428 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2429 {
2430 ptid_t child_ptid;
2431
2432 /* A multi-thread exec after we had seen the leader exiting. */
2433 if (debug_threads)
2434 {
2435 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2436 "after exec.\n", lwpid);
2437 }
2438
2439 child_ptid = ptid_build (lwpid, lwpid, 0);
2440 child = add_lwp (child_ptid);
2441 child->stopped = 1;
2442 current_thread = child->thread;
2443 }
2444
2445 /* If we didn't find a process, one of two things presumably happened:
2446 - A process we started and then detached from has exited. Ignore it.
2447 - A process we are controlling has forked and the new child's stop
2448 was reported to us by the kernel. Save its PID. */
2449 if (child == NULL && WIFSTOPPED (wstat))
2450 {
2451 add_to_pid_list (&stopped_pids, lwpid, wstat);
2452 return NULL;
2453 }
2454 else if (child == NULL)
2455 return NULL;
2456
2457 thread = get_lwp_thread (child);
2458
2459 child->stopped = 1;
2460
2461 child->last_status = wstat;
2462
2463 /* Check if the thread has exited. */
2464 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2465 {
2466 if (debug_threads)
2467 debug_printf ("LLFE: %d exited.\n", lwpid);
2468
2469 if (finish_step_over (child))
2470 {
2471 /* Unsuspend all other LWPs, and set them back running again. */
2472 unsuspend_all_lwps (child);
2473 }
2474
2475 /* If there is at least one more LWP, then the exit signal was
2476 not the end of the debugged application and should be
2477 ignored, unless GDB wants to hear about thread exits. */
2478 if (report_thread_events
2479 || last_thread_of_process_p (pid_of (thread)))
2480 {
2481 /* Events are serialized to the GDB core, and we can't
2482 report this one right now. Leave the status pending for
2483 the next time we're able to report it. */
2484 mark_lwp_dead (child, wstat);
2485 return child;
2486 }
2487 else
2488 {
2489 delete_lwp (child);
2490 return NULL;
2491 }
2492 }
2493
2494 gdb_assert (WIFSTOPPED (wstat));
2495
2496 if (WIFSTOPPED (wstat))
2497 {
2498 struct process_info *proc;
2499
2500 /* Architecture-specific setup after inferior is running. */
2501 proc = find_process_pid (pid_of (thread));
2502 if (proc->tdesc == NULL)
2503 {
2504 if (proc->attached)
2505 {
2506 /* This needs to happen after we have attached to the
2507 inferior and it is stopped for the first time, but
2508 before we access any inferior registers. */
2509 linux_arch_setup_thread (thread);
2510 }
2511 else
2512 {
2513 /* The process is started, but GDBserver will do
2514 architecture-specific setup after the program stops at
2515 the first instruction. */
2516 child->status_pending_p = 1;
2517 child->status_pending = wstat;
2518 return child;
2519 }
2520 }
2521 }
2522
2523 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2524 {
2525 struct process_info *proc = find_process_pid (pid_of (thread));
2526 int options = linux_low_ptrace_options (proc->attached);
2527
2528 linux_enable_event_reporting (lwpid, options);
2529 child->must_set_ptrace_flags = 0;
2530 }
2531
2532 /* Always update syscall_state, even if it will be filtered later. */
2533 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2534 {
2535 child->syscall_state
2536 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2537 ? TARGET_WAITKIND_SYSCALL_RETURN
2538 : TARGET_WAITKIND_SYSCALL_ENTRY);
2539 }
2540 else
2541 {
2542 /* Almost all other ptrace-stops are known to be outside of system
2543 calls, with further exceptions in handle_extended_wait. */
2544 child->syscall_state = TARGET_WAITKIND_IGNORE;
2545 }
2546
2547 /* Be careful to not overwrite stop_pc until save_stop_reason is
2548 called. */
2549 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2550 && linux_is_extended_waitstatus (wstat))
2551 {
2552 child->stop_pc = get_pc (child);
2553 if (handle_extended_wait (&child, wstat))
2554 {
2555 /* The event has been handled, so just return without
2556 reporting it. */
2557 return NULL;
2558 }
2559 }
2560
2561 if (linux_wstatus_maybe_breakpoint (wstat))
2562 {
2563 if (save_stop_reason (child))
2564 have_stop_pc = 1;
2565 }
2566
2567 if (!have_stop_pc)
2568 child->stop_pc = get_pc (child);
2569
2570 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2571 && child->stop_expected)
2572 {
2573 if (debug_threads)
2574 debug_printf ("Expected stop.\n");
2575 child->stop_expected = 0;
2576
2577 if (thread->last_resume_kind == resume_stop)
2578 {
2579 /* We want to report the stop to the core. Treat the
2580 SIGSTOP as a normal event. */
2581 if (debug_threads)
2582 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2583 target_pid_to_str (ptid_of (thread)));
2584 }
2585 else if (stopping_threads != NOT_STOPPING_THREADS)
2586 {
2587 /* Stopping threads. We don't want this SIGSTOP to end up
2588 pending. */
2589 if (debug_threads)
2590 debug_printf ("LLW: SIGSTOP caught for %s "
2591 "while stopping threads.\n",
2592 target_pid_to_str (ptid_of (thread)));
2593 return NULL;
2594 }
2595 else
2596 {
2597 /* This is a delayed SIGSTOP. Filter out the event. */
2598 if (debug_threads)
2599 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2600 child->stepping ? "step" : "continue",
2601 target_pid_to_str (ptid_of (thread)));
2602
2603 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2604 return NULL;
2605 }
2606 }
2607
2608 child->status_pending_p = 1;
2609 child->status_pending = wstat;
2610 return child;
2611 }
2612
2613 /* Return 1 if THREAD is doing a hardware single step. */
2614
2615 static int
2616 maybe_hw_step (struct thread_info *thread)
2617 {
2618 if (can_hardware_single_step ())
2619 return 1;
2620 else
2621 {
2622 /* GDBserver must insert a reinsert breakpoint for software
2623 single step. */
2624 gdb_assert (has_reinsert_breakpoints (thread));
2625 return 0;
2626 }
2627 }
2628
2629 /* Resume LWPs that are currently stopped without any pending status
2630 to report, but are resumed from the core's perspective. */
2631
2632 static void
2633 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2634 {
2635 struct thread_info *thread = (struct thread_info *) entry;
2636 struct lwp_info *lp = get_thread_lwp (thread);
2637
2638 if (lp->stopped
2639 && !lp->suspended
2640 && !lp->status_pending_p
2641 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2642 {
2643 int step = thread->last_resume_kind == resume_step;
2644
2645 if (debug_threads)
2646 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2647 target_pid_to_str (ptid_of (thread)),
2648 paddress (lp->stop_pc),
2649 step);
2650
2651 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2652 }
2653 }
2654
2655 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2656 match FILTER_PTID (leaving others pending). The PTIDs can be:
2657 minus_one_ptid, to specify any child; a pid PTID, specifying all
2658 lwps of a thread group; or a PTID representing a single lwp. Store
2659 the stop status through the status pointer WSTAT. OPTIONS is
2660 passed to the waitpid call. Return 0 if no event was found and
2661 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2662 was found. Return the PID of the stopped child otherwise. */
2663
2664 static int
2665 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2666 int *wstatp, int options)
2667 {
2668 struct thread_info *event_thread;
2669 struct lwp_info *event_child, *requested_child;
2670 sigset_t block_mask, prev_mask;
2671
2672 retry:
2673 /* N.B. event_thread points to the thread_info struct that contains
2674 event_child. Keep them in sync. */
2675 event_thread = NULL;
2676 event_child = NULL;
2677 requested_child = NULL;
2678
2679 /* Check for a lwp with a pending status. */
2680
2681 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2682 {
2683 event_thread = (struct thread_info *)
2684 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2685 if (event_thread != NULL)
2686 event_child = get_thread_lwp (event_thread);
2687 if (debug_threads && event_thread)
2688 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2689 }
2690 else if (!ptid_equal (filter_ptid, null_ptid))
2691 {
2692 requested_child = find_lwp_pid (filter_ptid);
2693
2694 if (stopping_threads == NOT_STOPPING_THREADS
2695 && requested_child->status_pending_p
2696 && requested_child->collecting_fast_tracepoint)
2697 {
2698 enqueue_one_deferred_signal (requested_child,
2699 &requested_child->status_pending);
2700 requested_child->status_pending_p = 0;
2701 requested_child->status_pending = 0;
2702 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2703 }
2704
2705 if (requested_child->suspended
2706 && requested_child->status_pending_p)
2707 {
2708 internal_error (__FILE__, __LINE__,
2709 "requesting an event out of a"
2710 " suspended child?");
2711 }
2712
2713 if (requested_child->status_pending_p)
2714 {
2715 event_child = requested_child;
2716 event_thread = get_lwp_thread (event_child);
2717 }
2718 }
2719
2720 if (event_child != NULL)
2721 {
2722 if (debug_threads)
2723 debug_printf ("Got an event from pending child %ld (%04x)\n",
2724 lwpid_of (event_thread), event_child->status_pending);
2725 *wstatp = event_child->status_pending;
2726 event_child->status_pending_p = 0;
2727 event_child->status_pending = 0;
2728 current_thread = event_thread;
2729 return lwpid_of (event_thread);
2730 }
2731
2732 /* But if we don't find a pending event, we'll have to wait.
2733
2734 We only enter this loop if no process has a pending wait status.
2735 Thus any action taken in response to a wait status inside this
2736 loop is responding as soon as we detect the status, not after any
2737 pending events. */
2738
2739 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2740 all signals while here. */
2741 sigfillset (&block_mask);
2742 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2743
2744 /* Always pull all events out of the kernel. We'll randomly select
2745 an event LWP out of all that have events, to prevent
2746 starvation. */
2747 while (event_child == NULL)
2748 {
2749 pid_t ret = 0;
2750
2751 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2752 quirks:
2753
2754 - If the thread group leader exits while other threads in the
2755 thread group still exist, waitpid(TGID, ...) hangs. That
2756 waitpid won't return an exit status until the other threads
2757 in the group are reaped.
2758
2759 - When a non-leader thread execs, that thread just vanishes
2760 without reporting an exit (so we'd hang if we waited for it
2761 explicitly in that case). The exec event is reported to
2762 the TGID pid. */
2763 errno = 0;
2764 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2765
2766 if (debug_threads)
2767 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2768 ret, errno ? strerror (errno) : "ERRNO-OK");
2769
2770 if (ret > 0)
2771 {
2772 if (debug_threads)
2773 {
2774 debug_printf ("LLW: waitpid %ld received %s\n",
2775 (long) ret, status_to_str (*wstatp));
2776 }
2777
2778 /* Filter all events. IOW, leave all events pending. We'll
2779 randomly select an event LWP out of all that have events
2780 below. */
2781 linux_low_filter_event (ret, *wstatp);
2782 /* Retry until nothing comes out of waitpid. A single
2783 SIGCHLD can indicate more than one child stopped. */
2784 continue;
2785 }
2786
2787 /* Now that we've pulled all events out of the kernel, resume
2788 LWPs that don't have an interesting event to report. */
2789 if (stopping_threads == NOT_STOPPING_THREADS)
2790 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2791
2792 /* ... and find an LWP with a status to report to the core, if
2793 any. */
2794 event_thread = (struct thread_info *)
2795 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2796 if (event_thread != NULL)
2797 {
2798 event_child = get_thread_lwp (event_thread);
2799 *wstatp = event_child->status_pending;
2800 event_child->status_pending_p = 0;
2801 event_child->status_pending = 0;
2802 break;
2803 }
2804
2805 /* Check for zombie thread group leaders. Those can't be reaped
2806 until all other threads in the thread group are. */
2807 check_zombie_leaders ();
2808
2809 /* If there are no resumed children left in the set of LWPs we
2810 want to wait for, bail. We can't just block in
2811 waitpid/sigsuspend, because lwps might have been left stopped
2812 in trace-stop state, and we'd be stuck forever waiting for
2813 their status to change (which would only happen if we resumed
2814 them). Even if WNOHANG is set, this return code is preferred
2815 over 0 (below), as it is more detailed. */
2816 if ((find_inferior (&all_threads,
2817 not_stopped_callback,
2818 &wait_ptid) == NULL))
2819 {
2820 if (debug_threads)
2821 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2822 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2823 return -1;
2824 }
2825
2826 /* No interesting event to report to the caller. */
2827 if ((options & WNOHANG))
2828 {
2829 if (debug_threads)
2830 debug_printf ("WNOHANG set, no event found\n");
2831
2832 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2833 return 0;
2834 }
2835
2836 /* Block until we get an event reported with SIGCHLD. */
2837 if (debug_threads)
2838 debug_printf ("sigsuspend'ing\n");
2839
2840 sigsuspend (&prev_mask);
2841 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2842 goto retry;
2843 }
2844
2845 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2846
2847 current_thread = event_thread;
2848
2849 return lwpid_of (event_thread);
2850 }
2851
2852 /* Wait for an event from child(ren) PTID. PTIDs can be:
2853 minus_one_ptid, to specify any child; a pid PTID, specifying all
2854 lwps of a thread group; or a PTID representing a single lwp. Store
2855 the stop status through the status pointer WSTAT. OPTIONS is
2856 passed to the waitpid call. Return 0 if no event was found and
2857 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2858 was found. Return the PID of the stopped child otherwise. */
2859
2860 static int
2861 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2862 {
2863 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2864 }
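
/* Editor's sketch of the race-free wait pattern used by
   linux_wait_for_event_filtered above, reduced to its bones (a
   generic illustration, not gdbserver code):

     sigset_t block, prev;
     sigfillset (&block);
     sigprocmask (SIG_BLOCK, &block, &prev);
     for (;;)
       {
         pid_t p = waitpid (-1, &status, WNOHANG | __WALL);
         if (p > 0)
           break;
         sigsuspend (&prev);
       }
     sigprocmask (SIG_SETMASK, &prev, NULL);

   Because SIGCHLD stays blocked between the waitpid and the
   sigsuspend, a status change in that window makes the sigsuspend
   return immediately instead of being missed.  */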
2865
2866 /* Count the LWPs that have had events. */
2867
2868 static int
2869 count_events_callback (struct inferior_list_entry *entry, void *data)
2870 {
2871 struct thread_info *thread = (struct thread_info *) entry;
2872 struct lwp_info *lp = get_thread_lwp (thread);
2873 int *count = (int *) data;
2874
2875 gdb_assert (count != NULL);
2876
2877 /* Count only resumed LWPs that have an event pending. */
2878 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2879 && lp->status_pending_p)
2880 (*count)++;
2881
2882 return 0;
2883 }
2884
2885 /* Select the LWP (if any) that is currently being single-stepped. */
2886
2887 static int
2888 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2889 {
2890 struct thread_info *thread = (struct thread_info *) entry;
2891 struct lwp_info *lp = get_thread_lwp (thread);
2892
2893 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2894 && thread->last_resume_kind == resume_step
2895 && lp->status_pending_p)
2896 return 1;
2897 else
2898 return 0;
2899 }
2900
2901 /* Select the Nth LWP that has had an event. */
2902
2903 static int
2904 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2905 {
2906 struct thread_info *thread = (struct thread_info *) entry;
2907 struct lwp_info *lp = get_thread_lwp (thread);
2908 int *selector = (int *) data;
2909
2910 gdb_assert (selector != NULL);
2911
2912 /* Select only resumed LWPs that have an event pending. */
2913 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2914 && lp->status_pending_p)
2915 if ((*selector)-- == 0)
2916 return 1;
2917
2918 return 0;
2919 }
2920
2921 /* Select one LWP out of those that have events pending. */
2922
2923 static void
2924 select_event_lwp (struct lwp_info **orig_lp)
2925 {
2926 int num_events = 0;
2927 int random_selector;
2928 struct thread_info *event_thread = NULL;
2929
2930 /* In all-stop, give preference to the LWP that is being
2931 single-stepped. There will be at most one, and it's the LWP that
2932 the core is most interested in. If we didn't do this, then we'd
2933 have to handle pending step SIGTRAPs somehow in case the core
2934 later continues the previously-stepped thread, otherwise we'd
2935 report the pending SIGTRAP, and the core, not having stepped the
2936 thread, wouldn't understand what the trap was for, and therefore
2937 would report it to the user as a random signal. */
2938 if (!non_stop)
2939 {
2940 event_thread
2941 = (struct thread_info *) find_inferior (&all_threads,
2942 select_singlestep_lwp_callback,
2943 NULL);
2944 if (event_thread != NULL)
2945 {
2946 if (debug_threads)
2947 debug_printf ("SEL: Select single-step %s\n",
2948 target_pid_to_str (ptid_of (event_thread)));
2949 }
2950 }
2951 if (event_thread == NULL)
2952 {
2953 /* No single-stepping LWP. Select one at random, out of those
2954 which have had events. */
2955
2956 /* First see how many events we have. */
2957 find_inferior (&all_threads, count_events_callback, &num_events);
2958 gdb_assert (num_events > 0);
2959
2960 /* Now randomly pick a LWP out of those that have had
2961 events. */
2962 random_selector = (int)
2963 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2964
2965 if (debug_threads && num_events > 1)
2966 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2967 num_events, random_selector);
2968
2969 event_thread
2970 = (struct thread_info *) find_inferior (&all_threads,
2971 select_event_lwp_callback,
2972 &random_selector);
2973 }
2974
2975 if (event_thread != NULL)
2976 {
2977 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2978
2979 /* Switch the event LWP. */
2980 *orig_lp = event_lp;
2981 }
2982 }
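
/* Editor's note on the random pick above: with NUM_EVENTS candidates,

     random_selector = (int) ((num_events * (double) rand ())
                              / (RAND_MAX + 1.0));

   maps rand ()'s 0..RAND_MAX range uniformly onto 0..NUM_EVENTS-1,
   and select_event_lwp_callback returns the candidate whose index
   matches, counting only resumed LWPs with an event pending.  */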
2983
2984 /* Decrement the suspend count of an LWP. */
2985
2986 static int
2987 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2988 {
2989 struct thread_info *thread = (struct thread_info *) entry;
2990 struct lwp_info *lwp = get_thread_lwp (thread);
2991
2992 /* Ignore EXCEPT. */
2993 if (lwp == except)
2994 return 0;
2995
2996 lwp_suspended_decr (lwp);
2997 return 0;
2998 }
2999
3000 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3001 non-NULL. */
3002
3003 static void
3004 unsuspend_all_lwps (struct lwp_info *except)
3005 {
3006 find_inferior (&all_threads, unsuspend_one_lwp, except);
3007 }
3008
3009 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3010 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3011 void *data);
3012 static int lwp_running (struct inferior_list_entry *entry, void *data);
3013 static ptid_t linux_wait_1 (ptid_t ptid,
3014 struct target_waitstatus *ourstatus,
3015 int target_options);
3016
3017 /* Stabilize threads (move out of jump pads).
3018
3019 If a thread is midway collecting a fast tracepoint, we need to
3020 finish the collection and move it out of the jump pad before
3021 reporting the signal.
3022
3023 This avoids recursion while collecting (when a signal arrives
3024 midway, and the signal handler itself collects), which would trash
3025 the trace buffer. In case the user set a breakpoint in a signal
3026 handler, this avoids the backtrace showing the jump pad, etc.
3027 Most importantly, there are certain things we can't do safely if
3028 threads are stopped in a jump pad (or in its callees). For
3029 example:
3030
3031 - starting a new trace run. A thread still collecting the
3032 previous run could trash the trace buffer when resumed. The trace
3033 buffer control structures would have been reset, but the thread had
3034 no way to tell. The thread could even be midway through memcpy'ing to the
3035 buffer, which would mean that when resumed, it would clobber the
3036 trace buffer that had been set for a new run.
3037
3038 - we can't rewrite/reuse the jump pads for new tracepoints
3039 safely. Say you do tstart while a thread is stopped midway while
3040 collecting. When the thread is later resumed, it finishes the
3041 collection, and returns to the jump pad, to execute the original
3042 instruction that was under the tracepoint jump at the time the
3043 older run had been started. If the jump pad had been rewritten
3044 since for something else in the new run, the thread would now
3045 execute the wrong / random instructions. */
3046
3047 static void
3048 linux_stabilize_threads (void)
3049 {
3050 struct thread_info *saved_thread;
3051 struct thread_info *thread_stuck;
3052
3053 thread_stuck
3054 = (struct thread_info *) find_inferior (&all_threads,
3055 stuck_in_jump_pad_callback,
3056 NULL);
3057 if (thread_stuck != NULL)
3058 {
3059 if (debug_threads)
3060 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3061 lwpid_of (thread_stuck));
3062 return;
3063 }
3064
3065 saved_thread = current_thread;
3066
3067 stabilizing_threads = 1;
3068
3069 /* Kick 'em all. */
3070 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3071
3072 /* Loop until all are stopped out of the jump pads. */
3073 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3074 {
3075 struct target_waitstatus ourstatus;
3076 struct lwp_info *lwp;
3077 int wstat;
3078
3079 /* Note that we go through the full wait event loop. While
3080 moving threads out of the jump pad, we need to be able to step
3081 over internal breakpoints and such. */
3082 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3083
3084 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3085 {
3086 lwp = get_thread_lwp (current_thread);
3087
3088 /* Lock it. */
3089 lwp_suspended_inc (lwp);
3090
3091 if (ourstatus.value.sig != GDB_SIGNAL_0
3092 || current_thread->last_resume_kind == resume_stop)
3093 {
3094 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3095 enqueue_one_deferred_signal (lwp, &wstat);
3096 }
3097 }
3098 }
3099
3100 unsuspend_all_lwps (NULL);
3101
3102 stabilizing_threads = 0;
3103
3104 current_thread = saved_thread;
3105
3106 if (debug_threads)
3107 {
3108 thread_stuck
3109 = (struct thread_info *) find_inferior (&all_threads,
3110 stuck_in_jump_pad_callback,
3111 NULL);
3112 if (thread_stuck != NULL)
3113 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3114 lwpid_of (thread_stuck));
3115 }
3116 }
3117
3118 /* Convenience function that is called when the kernel reports an
3119 event that is not passed out to GDB. */
3120
3121 static ptid_t
3122 ignore_event (struct target_waitstatus *ourstatus)
3123 {
3124 /* If we got an event, there may still be others, as a single
3125 SIGCHLD can indicate more than one child stopped. This forces
3126 another target_wait call. */
3127 async_file_mark ();
3128
3129 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3130 return null_ptid;
3131 }
3132
3133 /* Convenience function that is called when the kernel reports an exit
3134 event. This decides whether to report the event to GDB as a
3135 process exit event, a thread exit event, or to suppress the
3136 event. */
3137
3138 static ptid_t
3139 filter_exit_event (struct lwp_info *event_child,
3140 struct target_waitstatus *ourstatus)
3141 {
3142 struct thread_info *thread = get_lwp_thread (event_child);
3143 ptid_t ptid = ptid_of (thread);
3144
3145 if (!last_thread_of_process_p (pid_of (thread)))
3146 {
3147 if (report_thread_events)
3148 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3149 else
3150 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3151
3152 delete_lwp (event_child);
3153 }
3154 return ptid;
3155 }
3156
3157 /* Returns 1 if GDB is interested in any event_child syscalls. */
3158
3159 static int
3160 gdb_catching_syscalls_p (struct lwp_info *event_child)
3161 {
3162 struct thread_info *thread = get_lwp_thread (event_child);
3163 struct process_info *proc = get_thread_process (thread);
3164
3165 return !VEC_empty (int, proc->syscalls_to_catch);
3166 }
3167
3168 /* Returns 1 if GDB is interested in the event_child syscall.
3169 Only to be called when the stop signal is SYSCALL_SIGTRAP. */
3170
3171 static int
3172 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3173 {
3174 int i, iter;
3175 int sysno;
3176 struct thread_info *thread = get_lwp_thread (event_child);
3177 struct process_info *proc = get_thread_process (thread);
3178
3179 if (VEC_empty (int, proc->syscalls_to_catch))
3180 return 0;
3181
3182 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3183 return 1;
3184
3185 get_syscall_trapinfo (event_child, &sysno);
3186 for (i = 0;
3187 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3188 i++)
3189 if (iter == sysno)
3190 return 1;
3191
3192 return 0;
3193 }
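
/* Illustrative sketch (editor's addition): syscalls_to_catch uses
   ANY_SYSCALL as a catch-all sentinel in slot 0, so hypothetical
   setups for "catch every syscall" versus "catch only syscall 231
   (exit_group on x86-64, say)" would look like:

     VEC_safe_push (int, proc->syscalls_to_catch, ANY_SYSCALL);
     VEC_safe_push (int, proc->syscalls_to_catch, 231);

   gdb_catch_this_syscall_p checks slot 0 first, so the sentinel
   short-circuits the per-number scan.  */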
3194
3195 /* Wait for the process; return the status. */
3196
3197 static ptid_t
3198 linux_wait_1 (ptid_t ptid,
3199 struct target_waitstatus *ourstatus, int target_options)
3200 {
3201 int w;
3202 struct lwp_info *event_child;
3203 int options;
3204 int pid;
3205 int step_over_finished;
3206 int bp_explains_trap;
3207 int maybe_internal_trap;
3208 int report_to_gdb;
3209 int trace_event;
3210 int in_step_range;
3211 int any_resumed;
3212
3213 if (debug_threads)
3214 {
3215 debug_enter ();
3216 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3217 }
3218
3219 /* Translate generic target options into linux options. */
3220 options = __WALL;
3221 if (target_options & TARGET_WNOHANG)
3222 options |= WNOHANG;
3223
3224 bp_explains_trap = 0;
3225 trace_event = 0;
3226 in_step_range = 0;
3227 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3228
3229 /* Find a resumed LWP, if any. */
3230 if (find_inferior (&all_threads,
3231 status_pending_p_callback,
3232 &minus_one_ptid) != NULL)
3233 any_resumed = 1;
3234 else if ((find_inferior (&all_threads,
3235 not_stopped_callback,
3236 &minus_one_ptid) != NULL))
3237 any_resumed = 1;
3238 else
3239 any_resumed = 0;
3240
3241 if (ptid_equal (step_over_bkpt, null_ptid))
3242 pid = linux_wait_for_event (ptid, &w, options);
3243 else
3244 {
3245 if (debug_threads)
3246 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3247 target_pid_to_str (step_over_bkpt));
3248 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3249 }
3250
3251 if (pid == 0 || (pid == -1 && !any_resumed))
3252 {
3253 gdb_assert (target_options & TARGET_WNOHANG);
3254
3255 if (debug_threads)
3256 {
3257 debug_printf ("linux_wait_1 ret = null_ptid, "
3258 "TARGET_WAITKIND_IGNORE\n");
3259 debug_exit ();
3260 }
3261
3262 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3263 return null_ptid;
3264 }
3265 else if (pid == -1)
3266 {
3267 if (debug_threads)
3268 {
3269 debug_printf ("linux_wait_1 ret = null_ptid, "
3270 "TARGET_WAITKIND_NO_RESUMED\n");
3271 debug_exit ();
3272 }
3273
3274 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3275 return null_ptid;
3276 }
3277
3278 event_child = get_thread_lwp (current_thread);
3279
3280 /* linux_wait_for_event only returns an exit status for the last
3281 child of a process. Report it. */
3282 if (WIFEXITED (w) || WIFSIGNALED (w))
3283 {
3284 if (WIFEXITED (w))
3285 {
3286 ourstatus->kind = TARGET_WAITKIND_EXITED;
3287 ourstatus->value.integer = WEXITSTATUS (w);
3288
3289 if (debug_threads)
3290 {
3291 debug_printf ("linux_wait_1 ret = %s, exited with "
3292 "retcode %d\n",
3293 target_pid_to_str (ptid_of (current_thread)),
3294 WEXITSTATUS (w));
3295 debug_exit ();
3296 }
3297 }
3298 else
3299 {
3300 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3301 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("linux_wait_1 ret = %s, terminated with "
3306 "signal %d\n",
3307 target_pid_to_str (ptid_of (current_thread)),
3308 WTERMSIG (w));
3309 debug_exit ();
3310 }
3311 }
3312
3313 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3314 return filter_exit_event (event_child, ourstatus);
3315
3316 return ptid_of (current_thread);
3317 }
3318
3319 /* If a step-over executes a breakpoint instruction, then in the
3320 case of a hardware single step it means a gdb/gdbserver breakpoint
3321 had been planted on top of a permanent breakpoint, while in the
3322 case of a software single step it may just mean that gdbserver hit
3323 the reinsert breakpoint. The PC has been adjusted by
3324 save_stop_reason to point at the breakpoint address.
3325 So in the case of a hardware single step, advance the PC manually
3326 past the breakpoint; in the case of a software single step, advance
3327 only if it's not the reinsert_breakpoint we are hitting.
3328 This prevents the program from trapping a permanent breakpoint
3329 forever. */
3330 if (!ptid_equal (step_over_bkpt, null_ptid)
3331 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3332 && (event_child->stepping
3333 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3334 {
3335 int increment_pc = 0;
3336 int breakpoint_kind = 0;
3337 CORE_ADDR stop_pc = event_child->stop_pc;
3338
3339 breakpoint_kind
3340 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3341 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3342
3343 if (debug_threads)
3344 {
3345 debug_printf ("step-over for %s executed software breakpoint\n",
3346 target_pid_to_str (ptid_of (current_thread)));
3347 }
3348
3349 if (increment_pc != 0)
3350 {
3351 struct regcache *regcache
3352 = get_thread_regcache (current_thread, 1);
3353
3354 event_child->stop_pc += increment_pc;
3355 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3356
3357 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3358 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3359 }
3360 }
3361
3362 /* If this event was not handled before, and is not a SIGTRAP, we
3363 report it. SIGILL and SIGSEGV are also treated as traps in case
3364 a breakpoint is inserted at the current PC. If this target does
3365 not support internal breakpoints at all, we also report the
3366 SIGTRAP without further processing; it's of no concern to us. */
3367 maybe_internal_trap
3368 = (supports_breakpoints ()
3369 && (WSTOPSIG (w) == SIGTRAP
3370 || ((WSTOPSIG (w) == SIGILL
3371 || WSTOPSIG (w) == SIGSEGV)
3372 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3373
3374 if (maybe_internal_trap)
3375 {
3376 /* Handle anything that requires bookkeeping before deciding to
3377 report the event or continue waiting. */
3378
3379 /* First check if we can explain the SIGTRAP with an internal
3380 breakpoint, or if we should possibly report the event to GDB.
3381 Do this before anything that may remove or insert a
3382 breakpoint. */
3383 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3384
3385 /* We have a SIGTRAP, possibly a step-over dance has just
3386 finished. If so, tweak the state machine accordingly,
3387 reinsert breakpoints and delete any reinsert (software
3388 single-step) breakpoints. */
3389 step_over_finished = finish_step_over (event_child);
3390
3391 /* Now invoke the callbacks of any internal breakpoints there. */
3392 check_breakpoints (event_child->stop_pc);
3393
3394 /* Handle tracepoint data collecting. This may overflow the
3395 trace buffer, and cause a tracing stop, removing
3396 breakpoints. */
3397 trace_event = handle_tracepoints (event_child);
3398
3399 if (bp_explains_trap)
3400 {
3401 if (debug_threads)
3402 debug_printf ("Hit a gdbserver breakpoint.\n");
3403 }
3404 }
3405 else
3406 {
3407 /* We have some other signal, possibly a step-over dance was in
3408 progress, and it should be cancelled too. */
3409 step_over_finished = finish_step_over (event_child);
3410 }
3411
3412 /* We have all the data we need. Either report the event to GDB, or
3413 resume threads and keep waiting for more. */
3414
3415 /* If we're collecting a fast tracepoint, finish the collection and
3416 move out of the jump pad before delivering a signal. See
3417 linux_stabilize_threads. */
3418
3419 if (WIFSTOPPED (w)
3420 && WSTOPSIG (w) != SIGTRAP
3421 && supports_fast_tracepoints ()
3422 && agent_loaded_p ())
3423 {
3424 if (debug_threads)
3425 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3426 "to defer or adjust it.\n",
3427 WSTOPSIG (w), lwpid_of (current_thread));
3428
3429 /* Allow debugging the jump pad itself. */
3430 if (current_thread->last_resume_kind != resume_step
3431 && maybe_move_out_of_jump_pad (event_child, &w))
3432 {
3433 enqueue_one_deferred_signal (event_child, &w);
3434
3435 if (debug_threads)
3436 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3437 WSTOPSIG (w), lwpid_of (current_thread));
3438
3439 linux_resume_one_lwp (event_child, 0, 0, NULL);
3440
3441 return ignore_event (ourstatus);
3442 }
3443 }
3444
3445 if (event_child->collecting_fast_tracepoint)
3446 {
3447 if (debug_threads)
3448 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3449 "Check if we're already there.\n",
3450 lwpid_of (current_thread),
3451 event_child->collecting_fast_tracepoint);
3452
3453 trace_event = 1;
3454
3455 event_child->collecting_fast_tracepoint
3456 = linux_fast_tracepoint_collecting (event_child, NULL);
3457
3458 if (event_child->collecting_fast_tracepoint != 1)
3459 {
3460 /* No longer need this breakpoint. */
3461 if (event_child->exit_jump_pad_bkpt != NULL)
3462 {
3463 if (debug_threads)
3464 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3465 "stopping all threads momentarily.\n");
3466
3467 /* Other running threads could hit this breakpoint.
3468 We don't handle moribund locations like GDB does,
3469 instead we always pause all threads when removing
3470 breakpoints, so that any step-over or
3471 decr_pc_after_break adjustment is always taken
3472 care of while the breakpoint is still
3473 inserted. */
3474 stop_all_lwps (1, event_child);
3475
3476 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3477 event_child->exit_jump_pad_bkpt = NULL;
3478
3479 unstop_all_lwps (1, event_child);
3480
3481 gdb_assert (event_child->suspended >= 0);
3482 }
3483 }
3484
3485 if (event_child->collecting_fast_tracepoint == 0)
3486 {
3487 if (debug_threads)
3488 debug_printf ("fast tracepoint finished "
3489 "collecting successfully.\n");
3490
3491 /* We may have a deferred signal to report. */
3492 if (dequeue_one_deferred_signal (event_child, &w))
3493 {
3494 if (debug_threads)
3495 debug_printf ("dequeued one signal.\n");
3496 }
3497 else
3498 {
3499 if (debug_threads)
3500 debug_printf ("no deferred signals.\n");
3501
3502 if (stabilizing_threads)
3503 {
3504 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3505 ourstatus->value.sig = GDB_SIGNAL_0;
3506
3507 if (debug_threads)
3508 {
3509 debug_printf ("linux_wait_1 ret = %s, stopped "
3510 "while stabilizing threads\n",
3511 target_pid_to_str (ptid_of (current_thread)));
3512 debug_exit ();
3513 }
3514
3515 return ptid_of (current_thread);
3516 }
3517 }
3518 }
3519 }
3520
3521 /* Check whether GDB would be interested in this event. */
3522
3523 /* Check if GDB is interested in this syscall. */
3524 if (WIFSTOPPED (w)
3525 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3526 && !gdb_catch_this_syscall_p (event_child))
3527 {
3528 if (debug_threads)
3529 {
3530 debug_printf ("Ignored syscall for LWP %ld.\n",
3531 lwpid_of (current_thread));
3532 }
3533
3534 linux_resume_one_lwp (event_child, event_child->stepping,
3535 0, NULL);
3536 return ignore_event (ourstatus);
3537 }
3538
3539 /* If GDB is not interested in this signal, don't stop other
3540 threads, and don't report it to GDB. Just resume the inferior
3541 right away. We do this for threading-related signals as well as
3542 any that GDB specifically requested we ignore. But never ignore
3543 SIGSTOP if we sent it ourselves, and do not ignore signals when
3544 stepping - they may require special handling to skip the signal
3545 handler. Also never ignore signals that could be caused by a
3546 breakpoint. */
3547 if (WIFSTOPPED (w)
3548 && current_thread->last_resume_kind != resume_step
3549 && (
3550 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3551 (current_process ()->priv->thread_db != NULL
3552 && (WSTOPSIG (w) == __SIGRTMIN
3553 || WSTOPSIG (w) == __SIGRTMIN + 1))
3554 ||
3555 #endif
3556 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3557 && !(WSTOPSIG (w) == SIGSTOP
3558 && current_thread->last_resume_kind == resume_stop)
3559 && !linux_wstatus_maybe_breakpoint (w))))
3560 {
3561 siginfo_t info, *info_p;
3562
3563 if (debug_threads)
3564 debug_printf ("Ignored signal %d for LWP %ld.\n",
3565 WSTOPSIG (w), lwpid_of (current_thread));
3566
3567 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3568 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3569 info_p = &info;
3570 else
3571 info_p = NULL;
3572
3573 if (step_over_finished)
3574 {
3575 /* We cancelled this thread's step-over above. We still
3576 need to unsuspend all other LWPs, and set them back
3577 running again while the signal handler runs. */
3578 unsuspend_all_lwps (event_child);
3579
3580 /* Enqueue the pending signal info so that proceed_all_lwps
3581 doesn't lose it. */
3582 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3583
3584 proceed_all_lwps ();
3585 }
3586 else
3587 {
3588 linux_resume_one_lwp (event_child, event_child->stepping,
3589 WSTOPSIG (w), info_p);
3590 }
3591 return ignore_event (ourstatus);
3592 }
3593
3594 /* Note that all addresses are always "out of the step range" when
3595 there's no range to begin with. */
3596 in_step_range = lwp_in_step_range (event_child);
3597
3598 /* If GDB wanted this thread to single step, and the thread is out
3599 of the step range, we always want to report the SIGTRAP, and let
3600 GDB handle it. Watchpoints should always be reported. So should
3601 signals we can't explain. A SIGTRAP we can't explain could be a
3602 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3603 we do, we'd be able to handle GDB breakpoints on top of internal
3604 breakpoints, by handling the internal breakpoint and still
3605 reporting the event to GDB. If we don't, we're out of luck; GDB
3606 won't see the breakpoint hit. If we see a single-step event but
3607 the thread should be continuing, don't pass the trap to gdb.
3608 That indicates that we had previously finished a single-step but
3609 left the single-step pending -- see
3610 complete_ongoing_step_over. */
3611 report_to_gdb = (!maybe_internal_trap
3612 || (current_thread->last_resume_kind == resume_step
3613 && !in_step_range)
3614 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3615 || (!in_step_range
3616 && !bp_explains_trap
3617 && !trace_event
3618 && !step_over_finished
3619 && !(current_thread->last_resume_kind == resume_continue
3620 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3621 || (gdb_breakpoint_here (event_child->stop_pc)
3622 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3623 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3624 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3625
3626 run_breakpoint_commands (event_child->stop_pc);
3627
3628 /* We found no reason GDB would want us to stop. We either hit one
3629 of our own breakpoints, or finished an internal step GDB
3630 shouldn't know about. */
3631 if (!report_to_gdb)
3632 {
3633 if (debug_threads)
3634 {
3635 if (bp_explains_trap)
3636 debug_printf ("Hit a gdbserver breakpoint.\n");
3637 if (step_over_finished)
3638 debug_printf ("Step-over finished.\n");
3639 if (trace_event)
3640 debug_printf ("Tracepoint event.\n");
3641 if (lwp_in_step_range (event_child))
3642 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3643 paddress (event_child->stop_pc),
3644 paddress (event_child->step_range_start),
3645 paddress (event_child->step_range_end));
3646 }
3647
3648 /* We're not reporting this breakpoint to GDB, so apply the
3649 decr_pc_after_break adjustment to the inferior's regcache
3650 ourselves. */
3651
3652 if (the_low_target.set_pc != NULL)
3653 {
3654 struct regcache *regcache
3655 = get_thread_regcache (current_thread, 1);
3656 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3657 }
3658
3659 /* We may have finished stepping over a breakpoint. If so,
3660 we've stopped and suspended all LWPs momentarily except the
3661 stepping one. This is where we resume them all again. We're
3662 going to keep waiting, so use proceed, which handles stepping
3663 over the next breakpoint. */
3664 if (debug_threads)
3665 debug_printf ("proceeding all threads.\n");
3666
3667 if (step_over_finished)
3668 unsuspend_all_lwps (event_child);
3669
3670 proceed_all_lwps ();
3671 return ignore_event (ourstatus);
3672 }
3673
3674 if (debug_threads)
3675 {
3676 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3677 {
3678 char *str;
3679
3680 str = target_waitstatus_to_string (&event_child->waitstatus);
3681 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3682 lwpid_of (get_lwp_thread (event_child)), str);
3683 xfree (str);
3684 }
3685 if (current_thread->last_resume_kind == resume_step)
3686 {
3687 if (event_child->step_range_start == event_child->step_range_end)
3688 debug_printf ("GDB wanted to single-step, reporting event.\n");
3689 else if (!lwp_in_step_range (event_child))
3690 debug_printf ("Out of step range, reporting event.\n");
3691 }
3692 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3693 debug_printf ("Stopped by watchpoint.\n");
3694 else if (gdb_breakpoint_here (event_child->stop_pc))
3695 debug_printf ("Stopped by GDB breakpoint.\n");
3696 debug_printf ("Hit a non-gdbserver trap event.\n");
3698 }
3699
3700 /* Alright, we're going to report a stop. */
3701
3702 if (!stabilizing_threads)
3703 {
3704 /* In all-stop, stop all threads. */
3705 if (!non_stop)
3706 stop_all_lwps (0, NULL);
3707
3708 /* If we're not waiting for a specific LWP, choose an event LWP
3709 from among those that have had events. Giving equal priority
3710 to all LWPs that have had events helps prevent
3711 starvation. */
3712 if (ptid_equal (ptid, minus_one_ptid))
3713 {
3714 event_child->status_pending_p = 1;
3715 event_child->status_pending = w;
3716
3717 select_event_lwp (&event_child);
3718
3719 /* current_thread and event_child must stay in sync. */
3720 current_thread = get_lwp_thread (event_child);
3721
3722 event_child->status_pending_p = 0;
3723 w = event_child->status_pending;
3724 }
3725
3726 if (step_over_finished)
3727 {
3728 if (!non_stop)
3729 {
3730 /* If we were doing a step-over, all other threads but
3731 the stepping one had been paused in start_step_over,
3732 with their suspend counts incremented. We don't want
3733 to do a full unstop/unpause, because we're in
3734 all-stop mode (so we want threads stopped), but we
3735 still need to unsuspend the other threads, to
3736 decrement their `suspended' count back. */
3737 unsuspend_all_lwps (event_child);
3738 }
3739 else
3740 {
3741 /* If we just finished a step-over, then all threads had
3742 been momentarily paused. In all-stop, that's fine,
3743 we want threads stopped by now anyway. In non-stop,
3744 we need to re-resume threads that GDB wanted to be
3745 running. */
3746 unstop_all_lwps (1, event_child);
3747 }
3748 }
3749
3750 /* Stabilize threads (move out of jump pads). */
3751 if (!non_stop)
3752 stabilize_threads ();
3753 }
3754 else
3755 {
3756 /* If we just finished a step-over, then all threads had been
3757 momentarily paused. In all-stop, that's fine, we want
3758 threads stopped by now anyway. In non-stop, we need to
3759 re-resume threads that GDB wanted to be running. */
3760 if (step_over_finished)
3761 unstop_all_lwps (1, event_child);
3762 }
3763
3764 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3765 {
3766 /* If the reported event is an exit, fork, vfork or exec, let
3767 GDB know. */
3768 *ourstatus = event_child->waitstatus;
3769 /* Clear the event lwp's waitstatus since we handled it already. */
3770 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3771 }
3772 else
3773 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3774
3775 /* Now that we've selected our final event LWP, un-adjust its PC if
3776 it was a software breakpoint, and the client doesn't know we can
3777 adjust the breakpoint ourselves. */
3778 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3779 && !swbreak_feature)
3780 {
3781 int decr_pc = the_low_target.decr_pc_after_break;
3782
3783 if (decr_pc != 0)
3784 {
3785 struct regcache *regcache
3786 = get_thread_regcache (current_thread, 1);
3787 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3788 }
3789 }
3790
3791 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3792 {
3793 get_syscall_trapinfo (event_child,
3794 &ourstatus->value.syscall_number);
3795 ourstatus->kind = event_child->syscall_state;
3796 }
3797 else if (current_thread->last_resume_kind == resume_stop
3798 && WSTOPSIG (w) == SIGSTOP)
3799 {
3800 /* A thread that has been requested to stop by GDB with vCont;t
3801 stopped cleanly, so report it as stopped with SIG0. The use of
3802 SIGSTOP is an implementation detail. */
3803 ourstatus->value.sig = GDB_SIGNAL_0;
3804 }
3805 else if (current_thread->last_resume_kind == resume_stop
3806 && WSTOPSIG (w) != SIGSTOP)
3807 {
3808 /* A thread that has been requested to stop by GDB with vCont;t,
3809 but it stopped for some other reason. */
3810 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3811 }
3812 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3813 {
3814 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3815 }
3816
3817 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3818
3819 if (debug_threads)
3820 {
3821 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3822 target_pid_to_str (ptid_of (current_thread)),
3823 ourstatus->kind, ourstatus->value.sig);
3824 debug_exit ();
3825 }
3826
3827 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3828 return filter_exit_event (event_child, ourstatus);
3829
3830 return ptid_of (current_thread);
3831 }
3832
3833 /* Get rid of any pending event in the pipe. */
3834 static void
3835 async_file_flush (void)
3836 {
3837 int ret;
3838 char buf;
3839
3840 do
3841 ret = read (linux_event_pipe[0], &buf, 1);
3842 while (ret >= 0 || (ret == -1 && errno == EINTR));
3843 }
3844
3845 /* Put something in the pipe, so the event loop wakes up. */
3846 static void
3847 async_file_mark (void)
3848 {
3849 int ret;
3850
3851 async_file_flush ();
3852
3853 do
3854 ret = write (linux_event_pipe[1], "+", 1);
3855 while (ret == 0 || (ret == -1 && errno == EINTR));
3856
3857 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3858 be awakened anyway. */
3859 }
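/* Together, async_file_flush and async_file_mark implement the
   classic self-pipe trick: the event loop select()s on the read end
   of the pipe, and writing one byte wakes it without signal races.
   A minimal standalone sketch, assuming both pipe ends are set
   O_NONBLOCK when created (illustrative, not gdbserver code):

     #include <unistd.h>
     #include <fcntl.h>

     int fds[2];
     char c;

     pipe (fds);
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);
     write (fds[1], "+", 1);            wake up the event loop
     while (read (fds[0], &c, 1) > 0)   drain before blocking again
       ;
*/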
3860
3861 static ptid_t
3862 linux_wait (ptid_t ptid,
3863 struct target_waitstatus *ourstatus, int target_options)
3864 {
3865 ptid_t event_ptid;
3866
3867 /* Flush the async file first. */
3868 if (target_is_async_p ())
3869 async_file_flush ();
3870
3871 do
3872 {
3873 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3874 }
3875 while ((target_options & TARGET_WNOHANG) == 0
3876 && ptid_equal (event_ptid, null_ptid)
3877 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3878
3879 /* If at least one stop was reported, there may be more. A single
3880 SIGCHLD can signal more than one child stop. */
3881 if (target_is_async_p ()
3882 && (target_options & TARGET_WNOHANG) != 0
3883 && !ptid_equal (event_ptid, null_ptid))
3884 async_file_mark ();
3885
3886 return event_ptid;
3887 }
3888
3889 /* Send a signal to an LWP. */
3890
3891 static int
3892 kill_lwp (unsigned long lwpid, int signo)
3893 {
3894 int ret;
3895
3896 errno = 0;
3897 ret = syscall (__NR_tkill, lwpid, signo);
3898 if (errno == ENOSYS)
3899 {
3900 /* If tkill fails, then we are not using nptl threads, a
3901 configuration we no longer support. */
3902 perror_with_name (("tkill"));
3903 }
3904 return ret;
3905 }
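/* tkill directs the signal at one specific LWP, where a plain
   kill () would target the whole thread group. The modern,
   race-safer variant is tgkill, which additionally checks the
   thread-group id so a recycled TID cannot be signalled by mistake.
   Illustrative sketch (assumes <sys/syscall.h>, <unistd.h>,
   <signal.h>):

     syscall (__NR_tgkill, tgid, lwpid, SIGSTOP);
*/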
3906
3907 void
3908 linux_stop_lwp (struct lwp_info *lwp)
3909 {
3910 send_sigstop (lwp);
3911 }
3912
3913 static void
3914 send_sigstop (struct lwp_info *lwp)
3915 {
3916 int pid;
3917
3918 pid = lwpid_of (get_lwp_thread (lwp));
3919
3920 /* If we already have a pending stop signal for this process, don't
3921 send another. */
3922 if (lwp->stop_expected)
3923 {
3924 if (debug_threads)
3925 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3926
3927 return;
3928 }
3929
3930 if (debug_threads)
3931 debug_printf ("Sending sigstop to lwp %d\n", pid);
3932
3933 lwp->stop_expected = 1;
3934 kill_lwp (pid, SIGSTOP);
3935 }
3936
3937 static int
3938 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3939 {
3940 struct thread_info *thread = (struct thread_info *) entry;
3941 struct lwp_info *lwp = get_thread_lwp (thread);
3942
3943 /* Ignore EXCEPT. */
3944 if (lwp == except)
3945 return 0;
3946
3947 if (lwp->stopped)
3948 return 0;
3949
3950 send_sigstop (lwp);
3951 return 0;
3952 }
3953
3954 /* Increment the suspend count of an LWP, and stop it, if not stopped
3955 yet. */
3956 static int
3957 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3958 void *except)
3959 {
3960 struct thread_info *thread = (struct thread_info *) entry;
3961 struct lwp_info *lwp = get_thread_lwp (thread);
3962
3963 /* Ignore EXCEPT. */
3964 if (lwp == except)
3965 return 0;
3966
3967 lwp_suspended_inc (lwp);
3968
3969 return send_sigstop_callback (entry, except);
3970 }
3971
3972 static void
3973 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3974 {
3975 /* Store the exit status for later. */
3976 lwp->status_pending_p = 1;
3977 lwp->status_pending = wstat;
3978
3979 /* Store in waitstatus as well, as there's nothing else to process
3980 for this event. */
3981 if (WIFEXITED (wstat))
3982 {
3983 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3984 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3985 }
3986 else if (WIFSIGNALED (wstat))
3987 {
3988 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3989 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3990 }
3991
3992 /* Prevent trying to stop it. */
3993 lwp->stopped = 1;
3994
3995 /* No further stops are expected from a dead lwp. */
3996 lwp->stop_expected = 0;
3997 }
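/* For reference, a self-contained sketch of the wait-status decoding
   used above (assumes <sys/wait.h> and <stdio.h>; WSTAT comes from
   waitpid):

     if (WIFEXITED (wstat))
       printf ("exited, code %d\n", WEXITSTATUS (wstat));
     else if (WIFSIGNALED (wstat))
       printf ("killed by signal %d\n", WTERMSIG (wstat));
     else if (WIFSTOPPED (wstat))
       printf ("stopped by signal %d\n", WSTOPSIG (wstat));
*/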
3998
3999 /* Return true if LWP has exited already, and has a pending exit event
4000 to report to GDB. */
4001
4002 static int
4003 lwp_is_marked_dead (struct lwp_info *lwp)
4004 {
4005 return (lwp->status_pending_p
4006 && (WIFEXITED (lwp->status_pending)
4007 || WIFSIGNALED (lwp->status_pending)));
4008 }
4009
4010 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4011
4012 static void
4013 wait_for_sigstop (void)
4014 {
4015 struct thread_info *saved_thread;
4016 ptid_t saved_tid;
4017 int wstat;
4018 int ret;
4019
4020 saved_thread = current_thread;
4021 if (saved_thread != NULL)
4022 saved_tid = saved_thread->entry.id;
4023 else
4024 saved_tid = null_ptid; /* avoid bogus unused warning */
4025
4026 if (debug_threads)
4027 debug_printf ("wait_for_sigstop: pulling events\n");
4028
4029 /* Passing NULL_PTID as filter indicates we want all events to be
4030 left pending. Eventually this returns when there are no
4031 unwaited-for children left. */
4032 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4033 &wstat, __WALL);
4034 gdb_assert (ret == -1);
4035
4036 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4037 current_thread = saved_thread;
4038 else
4039 {
4040 if (debug_threads)
4041 debug_printf ("Previously current thread died.\n");
4042
4043 /* We can't change the current inferior behind GDB's back,
4044 otherwise, a subsequent command may apply to the wrong
4045 process. */
4046 current_thread = NULL;
4047 }
4048 }
4049
4050 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4051 move it out, because we need to report the stop event to GDB. For
4052 example, if the user puts a breakpoint in the jump pad, it's
4053 because she wants to debug it. */
4054
4055 static int
4056 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4057 {
4058 struct thread_info *thread = (struct thread_info *) entry;
4059 struct lwp_info *lwp = get_thread_lwp (thread);
4060
4061 if (lwp->suspended != 0)
4062 {
4063 internal_error (__FILE__, __LINE__,
4064 "LWP %ld is suspended, suspended=%d\n",
4065 lwpid_of (thread), lwp->suspended);
4066 }
4067 gdb_assert (lwp->stopped);
4068
4069 /* Allow debugging the jump pad, gdb_collect, etc. */
4070 return (supports_fast_tracepoints ()
4071 && agent_loaded_p ()
4072 && (gdb_breakpoint_here (lwp->stop_pc)
4073 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4074 || thread->last_resume_kind == resume_step)
4075 && linux_fast_tracepoint_collecting (lwp, NULL));
4076 }
4077
4078 static void
4079 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4080 {
4081 struct thread_info *thread = (struct thread_info *) entry;
4082 struct thread_info *saved_thread;
4083 struct lwp_info *lwp = get_thread_lwp (thread);
4084 int *wstat;
4085
4086 if (lwp->suspended != 0)
4087 {
4088 internal_error (__FILE__, __LINE__,
4089 "LWP %ld is suspended, suspended=%d\n",
4090 lwpid_of (thread), lwp->suspended);
4091 }
4092 gdb_assert (lwp->stopped);
4093
4094 /* For gdb_breakpoint_here. */
4095 saved_thread = current_thread;
4096 current_thread = thread;
4097
4098 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4099
4100 /* Allow debugging the jump pad, gdb_collect, etc. */
4101 if (!gdb_breakpoint_here (lwp->stop_pc)
4102 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4103 && thread->last_resume_kind != resume_step
4104 && maybe_move_out_of_jump_pad (lwp, wstat))
4105 {
4106 if (debug_threads)
4107 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4108 lwpid_of (thread));
4109
4110 if (wstat)
4111 {
4112 lwp->status_pending_p = 0;
4113 enqueue_one_deferred_signal (lwp, wstat);
4114
4115 if (debug_threads)
4116 debug_printf ("Signal %d for LWP %ld deferred "
4117 "(in jump pad)\n",
4118 WSTOPSIG (*wstat), lwpid_of (thread));
4119 }
4120
4121 linux_resume_one_lwp (lwp, 0, 0, NULL);
4122 }
4123 else
4124 lwp_suspended_inc (lwp);
4125
4126 current_thread = saved_thread;
4127 }
4128
4129 static int
4130 lwp_running (struct inferior_list_entry *entry, void *data)
4131 {
4132 struct thread_info *thread = (struct thread_info *) entry;
4133 struct lwp_info *lwp = get_thread_lwp (thread);
4134
4135 if (lwp_is_marked_dead (lwp))
4136 return 0;
4137 if (lwp->stopped)
4138 return 0;
4139 return 1;
4140 }
4141
4142 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4143 If SUSPEND, then also increase the suspend count of every LWP,
4144 except EXCEPT. */
4145
4146 static void
4147 stop_all_lwps (int suspend, struct lwp_info *except)
4148 {
4149 /* Should not be called recursively. */
4150 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4151
4152 if (debug_threads)
4153 {
4154 debug_enter ();
4155 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4156 suspend ? "stop-and-suspend" : "stop",
4157 except != NULL
4158 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4159 : "none");
4160 }
4161
4162 stopping_threads = (suspend
4163 ? STOPPING_AND_SUSPENDING_THREADS
4164 : STOPPING_THREADS);
4165
4166 if (suspend)
4167 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4168 else
4169 find_inferior (&all_threads, send_sigstop_callback, except);
4170 wait_for_sigstop ();
4171 stopping_threads = NOT_STOPPING_THREADS;
4172
4173 if (debug_threads)
4174 {
4175 debug_printf ("stop_all_lwps done, setting stopping_threads "
4176 "back to !stopping\n");
4177 debug_exit ();
4178 }
4179 }
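/* In outline, the stop protocol above is (sketch, using only helpers
   defined in this file):

     stopping_threads = STOPPING_THREADS;
     for each running LWP other than EXCEPT:
       send_sigstop (lwp);
     wait_for_sigstop ();
     stopping_threads = NOT_STOPPING_THREADS;

   send_sigstop skips LWPs whose stop_expected flag is already set,
   so at most one SIGSTOP is ever outstanding per LWP.  */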
4180
4181 /* Enqueue one signal in the chain of signals which need to be
4182 delivered to this process on next resume. */
4183
4184 static void
4185 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4186 {
4187 struct pending_signals *p_sig = XNEW (struct pending_signals);
4188
4189 p_sig->prev = lwp->pending_signals;
4190 p_sig->signal = signal;
4191 if (info == NULL)
4192 memset (&p_sig->info, 0, sizeof (siginfo_t));
4193 else
4194 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4195 lwp->pending_signals = p_sig;
4196 }
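/* The chain grows at its head; consumers walk the PREV links to the
   tail so the oldest signal is delivered first.  Sketch of the
   dequeue done in linux_resume_one_lwp_throw below (deliver () is a
   hypothetical stand-in for PTRACE_SETSIGINFO plus the resume):

     struct pending_signals **p = &lwp->pending_signals;

     while ((*p)->prev != NULL)
       p = &(*p)->prev;
     deliver ((*p)->signal);
     free (*p);
     *p = NULL;
*/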
4197
4198 /* Install breakpoints for software single stepping. */
4199
4200 static void
4201 install_software_single_step_breakpoints (struct lwp_info *lwp)
4202 {
4203 int i;
4204 CORE_ADDR pc;
4205 struct thread_info *thread = get_lwp_thread (lwp);
4206 struct regcache *regcache = get_thread_regcache (thread, 1);
4207 VEC (CORE_ADDR) *next_pcs = NULL;
4208 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4209
4210 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4211
4212 current_thread = thread;
4213 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4214
4215 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4216 set_reinsert_breakpoint (pc, current_ptid);
4217
4218 do_cleanups (old_chain);
4219 }
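/* GET_NEXT_PCS may return several addresses: for a conditional
   branch, both the fall-through PC and the branch target are
   possible successors, and a reinsert breakpoint is planted on each
   so the trap fires whichever path executes.  Illustrative sketch
   (names made up):

     next_pcs = { pc + insn_size, branch_target };
     for each ADDR in next_pcs:
       set_reinsert_breakpoint (ADDR, current_ptid);
*/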
4220
4221 /* Single step via hardware or software single step.
4222 Return 1 if hardware single stepping, 0 if software single stepping
4223 or can't single step. */
4224
4225 static int
4226 single_step (struct lwp_info* lwp)
4227 {
4228 int step = 0;
4229
4230 if (can_hardware_single_step ())
4231 {
4232 step = 1;
4233 }
4234 else if (can_software_single_step ())
4235 {
4236 install_software_single_step_breakpoints (lwp);
4237 step = 0;
4238 }
4239 else
4240 {
4241 if (debug_threads)
4242 debug_printf ("stepping is not implemented on this target");
4243 }
4244
4245 return step;
4246 }
4247
4248 /* The signal can be delivered to the inferior if we are not trying to
4249 finish a fast tracepoint collect. Since a signal can be delivered
4250 during the step-over, the program may enter the signal handler and
4251 trap again after returning from it. We can live with the spurious
4252 double traps. */
4253
4254 static int
4255 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4256 {
4257 return !lwp->collecting_fast_tracepoint;
4258 }
4259
4260 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4261 SIGNAL is nonzero, give it that signal. */
4262
4263 static void
4264 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4265 int step, int signal, siginfo_t *info)
4266 {
4267 struct thread_info *thread = get_lwp_thread (lwp);
4268 struct thread_info *saved_thread;
4269 int fast_tp_collecting;
4270 int ptrace_request;
4271 struct process_info *proc = get_thread_process (thread);
4272
4273 /* Note that the target description may not be initialised
4274 (proc->tdesc == NULL) at this point because the program hasn't
4275 stopped at the first instruction yet. This means GDBserver is
4276 skipping the extra traps from the wrapper program (see option
4277 --wrapper). Code in this function that requires register access
4278 should be guarded by a proc->tdesc == NULL check or similar. */
4279
4280 if (lwp->stopped == 0)
4281 return;
4282
4283 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4284
4285 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4286
4287 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4288
4289 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4290 user used the "jump" command, or "set $pc = foo"). */
4291 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4292 {
4293 /* Collecting 'while-stepping' actions doesn't make sense
4294 anymore. */
4295 release_while_stepping_state_list (thread);
4296 }
4297
4298 /* If we have pending signals or status, and a new signal, enqueue the
4299 signal. Also enqueue the signal if it can't be delivered to the
4300 inferior right now. */
4301 if (signal != 0
4302 && (lwp->status_pending_p
4303 || lwp->pending_signals != NULL
4304 || !lwp_signal_can_be_delivered (lwp)))
4305 {
4306 enqueue_pending_signal (lwp, signal, info);
4307
4308 /* Postpone any pending signal. It was enqueued above. */
4309 signal = 0;
4310 }
4311
4312 if (lwp->status_pending_p)
4313 {
4314 if (debug_threads)
4315 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4316 " has pending status\n",
4317 lwpid_of (thread), step ? "step" : "continue",
4318 lwp->stop_expected ? "expected" : "not expected");
4319 return;
4320 }
4321
4322 saved_thread = current_thread;
4323 current_thread = thread;
4324
4325 /* This bit needs some thinking about. If we get a signal that
4326 we must report while a single-step reinsert is still pending,
4327 we often end up resuming the thread. It might be better to
4328 (ew) allow a stack of pending events; then we could be sure that
4329 the reinsert happened right away and not lose any signals.
4330
4331 Making this stack would also shrink the window in which breakpoints are
4332 uninserted (see comment in linux_wait_for_lwp) but not enough for
4333 complete correctness, so it won't solve that problem. It may be
4334 worthwhile just to solve this one, however. */
4335 if (lwp->bp_reinsert != 0)
4336 {
4337 if (debug_threads)
4338 debug_printf (" pending reinsert at 0x%s\n",
4339 paddress (lwp->bp_reinsert));
4340
4341 if (can_hardware_single_step ())
4342 {
4343 if (fast_tp_collecting == 0)
4344 {
4345 if (step == 0)
4346 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4347 if (lwp->suspended)
4348 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4349 lwp->suspended);
4350 }
4351 }
4352
4353 step = maybe_hw_step (thread);
4354 }
4355 else
4356 {
4357 /* If the thread isn't doing step-over, there shouldn't be any
4358 reinsert breakpoints. */
4359 gdb_assert (!has_reinsert_breakpoints (thread));
4360 }
4361
4362 if (fast_tp_collecting == 1)
4363 {
4364 if (debug_threads)
4365 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4366 " (exit-jump-pad-bkpt)\n",
4367 lwpid_of (thread));
4368 }
4369 else if (fast_tp_collecting == 2)
4370 {
4371 if (debug_threads)
4372 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4373 " single-stepping\n",
4374 lwpid_of (thread));
4375
4376 if (can_hardware_single_step ())
4377 step = 1;
4378 else
4379 {
4380 internal_error (__FILE__, __LINE__,
4381 "moving out of jump pad single-stepping"
4382 " not implemented on this target");
4383 }
4384 }
4385
4386 /* If we have while-stepping actions in this thread, set it stepping.
4387 If we have a signal to deliver, it may or may not be set to
4388 SIG_IGN, we don't know. Assume so, and allow collecting
4389 while-stepping into a signal handler. A possible smart thing to
4390 do would be to set an internal breakpoint at the signal return
4391 address, continue, and carry on catching this while-stepping
4392 action only when that breakpoint is hit. A future
4393 enhancement. */
4394 if (thread->while_stepping != NULL)
4395 {
4396 if (debug_threads)
4397 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4398 lwpid_of (thread));
4399
4400 step = single_step (lwp);
4401 }
4402
4403 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4404 {
4405 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4406
4407 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4408
4409 if (debug_threads)
4410 {
4411 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4412 (long) lwp->stop_pc);
4413 }
4414 }
4415
4416 /* If we have pending signals, consume one if it can be delivered to
4417 the inferior. */
4418 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4419 {
4420 struct pending_signals **p_sig;
4421
4422 p_sig = &lwp->pending_signals;
4423 while ((*p_sig)->prev != NULL)
4424 p_sig = &(*p_sig)->prev;
4425
4426 signal = (*p_sig)->signal;
4427 if ((*p_sig)->info.si_signo != 0)
4428 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4429 &(*p_sig)->info);
4430
4431 free (*p_sig);
4432 *p_sig = NULL;
4433 }
4434
4435 if (debug_threads)
4436 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4437 lwpid_of (thread), step ? "step" : "continue", signal,
4438 lwp->stop_expected ? "expected" : "not expected");
4439
4440 if (the_low_target.prepare_to_resume != NULL)
4441 the_low_target.prepare_to_resume (lwp);
4442
4443 regcache_invalidate_thread (thread);
4444 errno = 0;
4445 lwp->stepping = step;
4446 if (step)
4447 ptrace_request = PTRACE_SINGLESTEP;
4448 else if (gdb_catching_syscalls_p (lwp))
4449 ptrace_request = PTRACE_SYSCALL;
4450 else
4451 ptrace_request = PTRACE_CONT;
4452 ptrace (ptrace_request,
4453 lwpid_of (thread),
4454 (PTRACE_TYPE_ARG3) 0,
4455 /* Coerce to a uintptr_t first to avoid potential gcc warning
4456 of coercing an 8 byte integer to a 4 byte pointer. */
4457 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4458
4459 current_thread = saved_thread;
4460 if (errno)
4461 perror_with_name ("resuming thread");
4462
4463 /* Successfully resumed. Clear state that no longer makes sense,
4464 and mark the LWP as running. Must not do this before resuming
4465 otherwise if that fails other code will be confused. E.g., we'd
4466 later try to stop the LWP and hang forever waiting for a stop
4467 status. Note that we must not throw after this is cleared,
4468 otherwise handle_zombie_lwp_error would get confused. */
4469 lwp->stopped = 0;
4470 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4471 }
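/* For reference, the three resume flavors chosen above map onto
   plain ptrace requests (illustrative):

     ptrace (PTRACE_SINGLESTEP, pid, 0, 0);   one insn, then stop
     ptrace (PTRACE_SYSCALL, pid, 0, 0);      stop at syscall entry/exit
     ptrace (PTRACE_CONT, pid, 0, SIGUSR1);   resume, delivering SIGUSR1

   A nonzero last argument asks the kernel to deliver that signal to
   the tracee on resume.  */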
4472
4473 /* Called when we try to resume a stopped LWP and that errors out. If
4474 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4475 or about to become), discard the error, clear any pending status
4476 the LWP may have, and return true (we'll collect the exit status
4477 soon enough). Otherwise, return false. */
4478
4479 static int
4480 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4481 {
4482 struct thread_info *thread = get_lwp_thread (lp);
4483
4484 /* If we get an error after resuming the LWP successfully, we'd
4485 confuse !T state for the LWP being gone. */
4486 gdb_assert (lp->stopped);
4487
4488 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4489 because even if ptrace failed with ESRCH, the tracee may be "not
4490 yet fully dead", but already refusing ptrace requests. In that
4491 case the tracee has 'R (Running)' state for a little bit
4492 (observed in Linux 3.18). See also the note on ESRCH in the
4493 ptrace(2) man page. Instead, check whether the LWP has any state
4494 other than ptrace-stopped. */
4495
4496 /* Don't assume anything if /proc/PID/status can't be read. */
4497 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4498 {
4499 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4500 lp->status_pending_p = 0;
4501 return 1;
4502 }
4503 return 0;
4504 }
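/* Illustrative sketch of that /proc probe; the real helper lives in
   nat/linux-procfs.c, and the file format is described in proc(5):

     char path[64];
     FILE *f;

     snprintf (path, sizeof (path), "/proc/%ld/status", lwpid);
     f = fopen (path, "r");
     ... scan for the "State:" line; 't' (or, on older kernels,
     "T (tracing stop)") means the LWP is still ptrace-stopped ...
*/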
4505
4506 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4507 disappears while we try to resume it. */
4508
4509 static void
4510 linux_resume_one_lwp (struct lwp_info *lwp,
4511 int step, int signal, siginfo_t *info)
4512 {
4513 TRY
4514 {
4515 linux_resume_one_lwp_throw (lwp, step, signal, info);
4516 }
4517 CATCH (ex, RETURN_MASK_ERROR)
4518 {
4519 if (!check_ptrace_stopped_lwp_gone (lwp))
4520 throw_exception (ex);
4521 }
4522 END_CATCH
4523 }
4524
4525 struct thread_resume_array
4526 {
4527 struct thread_resume *resume;
4528 size_t n;
4529 };
4530
4531 /* This function is called once per thread via find_inferior.
4532 ARG is a pointer to a thread_resume_array struct.
4533 We look up the thread specified by ENTRY in ARG, and mark the thread
4534 with a pointer to the appropriate resume request.
4535
4536 This algorithm is O(threads * resume elements), but resume elements
4537 is small (and will remain small at least until GDB supports thread
4538 suspension). */
4539
4540 static int
4541 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4542 {
4543 struct thread_info *thread = (struct thread_info *) entry;
4544 struct lwp_info *lwp = get_thread_lwp (thread);
4545 int ndx;
4546 struct thread_resume_array *r;
4547
4548 r = (struct thread_resume_array *) arg;
4549
4550 for (ndx = 0; ndx < r->n; ndx++)
4551 {
4552 ptid_t ptid = r->resume[ndx].thread;
4553 if (ptid_equal (ptid, minus_one_ptid)
4554 || ptid_equal (ptid, entry->id)
4555 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4556 of PID'. */
4557 || (ptid_get_pid (ptid) == pid_of (thread)
4558 && (ptid_is_pid (ptid)
4559 || ptid_get_lwp (ptid) == -1)))
4560 {
4561 if (r->resume[ndx].kind == resume_stop
4562 && thread->last_resume_kind == resume_stop)
4563 {
4564 if (debug_threads)
4565 debug_printf ("already %s LWP %ld at GDB's request\n",
4566 (thread->last_status.kind
4567 == TARGET_WAITKIND_STOPPED)
4568 ? "stopped"
4569 : "stopping",
4570 lwpid_of (thread));
4571
4572 continue;
4573 }
4574
4575 lwp->resume = &r->resume[ndx];
4576 thread->last_resume_kind = lwp->resume->kind;
4577
4578 lwp->step_range_start = lwp->resume->step_range_start;
4579 lwp->step_range_end = lwp->resume->step_range_end;
4580
4581 /* If we had a deferred signal to report, dequeue one now.
4582 This can happen if LWP gets more than one signal while
4583 trying to get out of a jump pad. */
4584 if (lwp->stopped
4585 && !lwp->status_pending_p
4586 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4587 {
4588 lwp->status_pending_p = 1;
4589
4590 if (debug_threads)
4591 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4592 "leaving status pending.\n",
4593 WSTOPSIG (lwp->status_pending),
4594 lwpid_of (thread));
4595 }
4596
4597 return 0;
4598 }
4599 }
4600
4601 /* No resume action for this thread. */
4602 lwp->resume = NULL;
4603
4604 return 0;
4605 }
4606
4607 /* find_inferior callback for linux_resume.
4608 Set *FLAG_P if this lwp has an interesting status pending. */
4609
4610 static int
4611 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4612 {
4613 struct thread_info *thread = (struct thread_info *) entry;
4614 struct lwp_info *lwp = get_thread_lwp (thread);
4615
4616 /* LWPs which will not be resumed are not interesting, because
4617 we might not wait for them next time through linux_wait. */
4618 if (lwp->resume == NULL)
4619 return 0;
4620
4621 if (thread_still_has_status_pending_p (thread))
4622 * (int *) flag_p = 1;
4623
4624 return 0;
4625 }
4626
4627 /* Return 1 if this lwp that GDB wants running is stopped at an
4628 internal breakpoint that we need to step over. It assumes that any
4629 required STOP_PC adjustment has already been propagated to the
4630 inferior's regcache. */
4631
4632 static int
4633 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4634 {
4635 struct thread_info *thread = (struct thread_info *) entry;
4636 struct lwp_info *lwp = get_thread_lwp (thread);
4637 struct thread_info *saved_thread;
4638 CORE_ADDR pc;
4639 struct process_info *proc = get_thread_process (thread);
4640
4641 /* GDBserver is skipping the extra traps from the wrapper program;
4642 no step-over is needed. */
4643 if (proc->tdesc == NULL)
4644 return 0;
4645
4646 /* LWPs which will not be resumed are not interesting, because we
4647 might not wait for them next time through linux_wait. */
4648
4649 if (!lwp->stopped)
4650 {
4651 if (debug_threads)
4652 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4653 lwpid_of (thread));
4654 return 0;
4655 }
4656
4657 if (thread->last_resume_kind == resume_stop)
4658 {
4659 if (debug_threads)
4660 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4661 " stopped\n",
4662 lwpid_of (thread));
4663 return 0;
4664 }
4665
4666 gdb_assert (lwp->suspended >= 0);
4667
4668 if (lwp->suspended)
4669 {
4670 if (debug_threads)
4671 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4672 lwpid_of (thread));
4673 return 0;
4674 }
4675
4676 if (lwp->status_pending_p)
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4680 " status.\n",
4681 lwpid_of (thread));
4682 return 0;
4683 }
4684
4685 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4686 or we have. */
4687 pc = get_pc (lwp);
4688
4689 /* If the PC has changed since we stopped, then don't do anything,
4690 and let the breakpoint/tracepoint be hit. This happens if, for
4691 instance, GDB handled the decr_pc_after_break subtraction itself,
4692 GDB is OOL stepping this thread, or the user has issued a "jump"
4693 command, or poked the thread's registers herself. */
4694 if (pc != lwp->stop_pc)
4695 {
4696 if (debug_threads)
4697 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4698 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4699 lwpid_of (thread),
4700 paddress (lwp->stop_pc), paddress (pc));
4701 return 0;
4702 }
4703
4704 /* On software single step target, resume the inferior with signal
4705 rather than stepping over. */
4706 if (can_software_single_step ()
4707 && lwp->pending_signals != NULL
4708 && lwp_signal_can_be_delivered (lwp))
4709 {
4710 if (debug_threads)
4711 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4712 " signals.\n",
4713 lwpid_of (thread));
4714
4715 return 0;
4716 }
4717
4718 saved_thread = current_thread;
4719 current_thread = thread;
4720
4721 /* We can only step over breakpoints we know about. */
4722 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4723 {
4724 /* Don't step over a breakpoint that GDB expects to hit
4725 though. If the condition is being evaluated on the target's side
4726 and it evaluates to false, step over this breakpoint as well. */
4727 if (gdb_breakpoint_here (pc)
4728 && gdb_condition_true_at_breakpoint (pc)
4729 && gdb_no_commands_at_breakpoint (pc))
4730 {
4731 if (debug_threads)
4732 debug_printf ("Need step over [LWP %ld]? yes, but found"
4733 " GDB breakpoint at 0x%s; skipping step over\n",
4734 lwpid_of (thread), paddress (pc));
4735
4736 current_thread = saved_thread;
4737 return 0;
4738 }
4739 else
4740 {
4741 if (debug_threads)
4742 debug_printf ("Need step over [LWP %ld]? yes, "
4743 "found breakpoint at 0x%s\n",
4744 lwpid_of (thread), paddress (pc));
4745
4746 /* We've found an lwp that needs stepping over --- return 1 so
4747 that find_inferior stops looking. */
4748 current_thread = saved_thread;
4749
4750 return 1;
4751 }
4752 }
4753
4754 current_thread = saved_thread;
4755
4756 if (debug_threads)
4757 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4758 " at 0x%s\n",
4759 lwpid_of (thread), paddress (pc));
4760
4761 return 0;
4762 }
4763
4764 /* Start a step-over operation on LWP. When LWP is stopped at a
4765 breakpoint, to make progress we need to get the breakpoint out
4766 of the way. If we let other threads run while we do that, they may
4767 pass by the breakpoint location and miss hitting it. To avoid
4768 that, a step-over momentarily stops all threads while LWP is
4769 single-stepped by either hardware or software while the breakpoint
4770 is temporarily uninserted from the inferior. When the single-step
4771 finishes, we reinsert the breakpoint, and let all threads that are
4772 supposed to be running, run again. */
4773
4774 static int
4775 start_step_over (struct lwp_info *lwp)
4776 {
4777 struct thread_info *thread = get_lwp_thread (lwp);
4778 struct thread_info *saved_thread;
4779 CORE_ADDR pc;
4780 int step;
4781
4782 if (debug_threads)
4783 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4784 lwpid_of (thread));
4785
4786 stop_all_lwps (1, lwp);
4787
4788 if (lwp->suspended != 0)
4789 {
4790 internal_error (__FILE__, __LINE__,
4791 "LWP %ld suspended=%d\n", lwpid_of (thread),
4792 lwp->suspended);
4793 }
4794
4795 if (debug_threads)
4796 debug_printf ("Done stopping all threads for step-over.\n");
4797
4798 /* Note, we should always reach here with an already adjusted PC,
4799 either by GDB (if we're resuming due to GDB's request), or by our
4800 caller, if we just finished handling an internal breakpoint GDB
4801 shouldn't care about. */
4802 pc = get_pc (lwp);
4803
4804 saved_thread = current_thread;
4805 current_thread = thread;
4806
4807 lwp->bp_reinsert = pc;
4808 uninsert_breakpoints_at (pc);
4809 uninsert_fast_tracepoint_jumps_at (pc);
4810
4811 step = single_step (lwp);
4812
4813 current_thread = saved_thread;
4814
4815 linux_resume_one_lwp (lwp, step, 0, NULL);
4816
4817 /* Require next event from this LWP. */
4818 step_over_bkpt = thread->entry.id;
4819 return 1;
4820 }
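/* The whole step-over dance, tying this function to finish_step_over
   below (sketch using this file's helpers):

     stop_all_lwps (1, lwp);          pause and suspend the others
     uninsert_breakpoints_at (pc);    lift the breakpoint
     linux_resume_one_lwp (lwp, single_step (lwp), 0, NULL);
     ... wait until the step reports back ...
     finish_step_over (lwp);          reinsert, clear bp_reinsert
     unsuspend_all_lwps (lwp);        let the rest run again
*/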
4821
4822 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4823 start_step_over, if still there, and delete any reinsert
4824 breakpoints we've set, on non-hardware single-step targets. */
4825
4826 static int
4827 finish_step_over (struct lwp_info *lwp)
4828 {
4829 if (lwp->bp_reinsert != 0)
4830 {
4831 struct thread_info *saved_thread = current_thread;
4832
4833 if (debug_threads)
4834 debug_printf ("Finished step over.\n");
4835
4836 current_thread = get_lwp_thread (lwp);
4837
4838 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4839 may be no breakpoint to reinsert there by now. */
4840 reinsert_breakpoints_at (lwp->bp_reinsert);
4841 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4842
4843 lwp->bp_reinsert = 0;
4844
4845 /* Delete any software-single-step reinsert breakpoints. No
4846 longer needed. We don't have to worry about other threads
4847 hitting this trap, and later not being able to explain it,
4848 because we were stepping over a breakpoint, and we hold all
4849 threads but LWP stopped while doing that. */
4850 if (!can_hardware_single_step ())
4851 {
4852 gdb_assert (has_reinsert_breakpoints (current_thread));
4853 delete_reinsert_breakpoints (current_thread);
4854 }
4855
4856 step_over_bkpt = null_ptid;
4857 current_thread = saved_thread;
4858 return 1;
4859 }
4860 else
4861 return 0;
4862 }
4863
4864 /* If there's a step over in progress, wait until all threads stop
4865 (that is, until the stepping thread finishes its step), and
4866 unsuspend all lwps. The stepping thread ends with its status
4867 pending, which is processed later when we get back to processing
4868 events. */
4869
4870 static void
4871 complete_ongoing_step_over (void)
4872 {
4873 if (!ptid_equal (step_over_bkpt, null_ptid))
4874 {
4875 struct lwp_info *lwp;
4876 int wstat;
4877 int ret;
4878
4879 if (debug_threads)
4880 debug_printf ("detach: step over in progress, finish it first\n");
4881
4882 /* Passing NULL_PTID as filter indicates we want all events to
4883 be left pending. Eventually this returns when there are no
4884 unwaited-for children left. */
4885 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4886 &wstat, __WALL);
4887 gdb_assert (ret == -1);
4888
4889 lwp = find_lwp_pid (step_over_bkpt);
4890 if (lwp != NULL)
4891 finish_step_over (lwp);
4892 step_over_bkpt = null_ptid;
4893 unsuspend_all_lwps (lwp);
4894 }
4895 }
4896
4897 /* This function is called once per thread. We check the thread's resume
4898 request, which will tell us whether to resume, step, or leave the thread
4899 stopped; and what signal, if any, it should be sent.
4900
4901 For threads which we aren't explicitly told otherwise, we preserve
4902 the stepping flag; this is used for stepping over gdbserver-placed
4903 breakpoints.
4904
4905 If pending_flags was set in any thread, we queue any needed
4906 signals, since we won't actually resume. We already have a pending
4907 event to report, so we don't need to preserve any step requests;
4908 they should be re-issued if necessary. */
4909
4910 static int
4911 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4912 {
4913 struct thread_info *thread = (struct thread_info *) entry;
4914 struct lwp_info *lwp = get_thread_lwp (thread);
4915 int step;
4916 int leave_all_stopped = * (int *) arg;
4917 int leave_pending;
4918
4919 if (lwp->resume == NULL)
4920 return 0;
4921
4922 if (lwp->resume->kind == resume_stop)
4923 {
4924 if (debug_threads)
4925 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4926
4927 if (!lwp->stopped)
4928 {
4929 if (debug_threads)
4930 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4931
4932 /* Stop the thread, and wait for the event asynchronously,
4933 through the event loop. */
4934 send_sigstop (lwp);
4935 }
4936 else
4937 {
4938 if (debug_threads)
4939 debug_printf ("already stopped LWP %ld\n",
4940 lwpid_of (thread));
4941
4942 /* The LWP may have been stopped in an internal event that
4943 was not meant to be notified back to GDB (e.g., gdbserver
4944 breakpoint), so we should be reporting a stop event in
4945 this case too. */
4946
4947 /* If the thread already has a pending SIGSTOP, this is a
4948 no-op. Otherwise, something later will presumably resume
4949 the thread and this will cause it to cancel any pending
4950 operation, due to last_resume_kind == resume_stop. If
4951 the thread already has a pending status to report, we
4952 will still report it the next time we wait - see
4953 status_pending_p_callback. */
4954
4955 /* If we already have a pending signal to report, then
4956 there's no need to queue a SIGSTOP, as this means we're
4957 midway through moving the LWP out of the jumppad, and we
4958 will report the pending signal as soon as that is
4959 finished. */
4960 if (lwp->pending_signals_to_report == NULL)
4961 send_sigstop (lwp);
4962 }
4963
4964 /* For stop requests, we're done. */
4965 lwp->resume = NULL;
4966 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4967 return 0;
4968 }
4969
4970 /* If this thread which is about to be resumed has a pending status,
4971 then don't resume it - we can just report the pending status.
4972 Likewise if it is suspended, because e.g., another thread is
4973 stepping past a breakpoint. Make sure to queue any signals that
4974 would otherwise be sent. In all-stop mode, we do this decision
4975 based on if *any* thread has a pending status. If there's a
4976 thread that needs the step-over-breakpoint dance, then don't
4977 resume any other thread but that particular one. */
4978 leave_pending = (lwp->suspended
4979 || lwp->status_pending_p
4980 || leave_all_stopped);
4981
4982 if (!leave_pending)
4983 {
4984 if (debug_threads)
4985 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4986
4987 step = (lwp->resume->kind == resume_step);
4988 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4989 }
4990 else
4991 {
4992 if (debug_threads)
4993 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4994
4995 /* If we have a new signal, enqueue the signal. */
4996 if (lwp->resume->sig != 0)
4997 {
4998 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4999
5000 p_sig->prev = lwp->pending_signals;
5001 p_sig->signal = lwp->resume->sig;
5002
5003 /* If this is the same signal we were previously stopped by,
5004 make sure to queue its siginfo. We can ignore the return
5005 value of ptrace; if it fails, we'll skip
5006 PTRACE_SETSIGINFO. */
5007 if (WIFSTOPPED (lwp->last_status)
5008 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
5009 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
5010 &p_sig->info);
5011
5012 lwp->pending_signals = p_sig;
5013 }
5014 }
5015
5016 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5017 lwp->resume = NULL;
5018 return 0;
5019 }
5020
5021 static void
5022 linux_resume (struct thread_resume *resume_info, size_t n)
5023 {
5024 struct thread_resume_array array = { resume_info, n };
5025 struct thread_info *need_step_over = NULL;
5026 int any_pending;
5027 int leave_all_stopped;
5028
5029 if (debug_threads)
5030 {
5031 debug_enter ();
5032 debug_printf ("linux_resume:\n");
5033 }
5034
5035 find_inferior (&all_threads, linux_set_resume_request, &array);
5036
5037 /* If there is a thread which would otherwise be resumed, which has
5038 a pending status, then don't resume any threads - we can just
5039 report the pending status. Make sure to queue any signals that
5040 would otherwise be sent. In non-stop mode, we'll apply this
5041 logic to each thread individually. We consume all pending events
5042 before considering whether to start a step-over (in all-stop). */
5043 any_pending = 0;
5044 if (!non_stop)
5045 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5046
5047 /* If there is a thread which would otherwise be resumed, which is
5048 stopped at a breakpoint that needs stepping over, then don't
5049 resume any threads - have it step over the breakpoint with all
5050 other threads stopped, then resume all threads again. Make sure
5051 to queue any signals that would otherwise be delivered or
5052 queued. */
5053 if (!any_pending && supports_breakpoints ())
5054 need_step_over
5055 = (struct thread_info *) find_inferior (&all_threads,
5056 need_step_over_p, NULL);
5057
5058 leave_all_stopped = (need_step_over != NULL || any_pending);
5059
5060 if (debug_threads)
5061 {
5062 if (need_step_over != NULL)
5063 debug_printf ("Not resuming all, need step over\n");
5064 else if (any_pending)
5065 debug_printf ("Not resuming, all-stop and found "
5066 "an LWP with pending status\n");
5067 else
5068 debug_printf ("Resuming, no pending status or step over needed\n");
5069 }
5070
5071 /* Even if we're leaving threads stopped, queue all signals we'd
5072 otherwise deliver. */
5073 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5074
5075 if (need_step_over)
5076 start_step_over (get_thread_lwp (need_step_over));
5077
5078 if (debug_threads)
5079 {
5080 debug_printf ("linux_resume done\n");
5081 debug_exit ();
5082 }
5083
5084 /* We may have events that were pending that can/should be sent to
5085 the client now. Trigger a linux_wait call. */
5086 if (target_is_async_p ())
5087 async_file_mark ();
5088 }
5089
5090 /* This function is called once per thread. We check the thread's
5091 last resume request, which will tell us whether to resume, step, or
5092 leave the thread stopped. Any signal the client requested to be
5093 delivered has already been enqueued at this point.
5094
5095 If any thread that GDB wants running is stopped at an internal
5096 breakpoint that needs stepping over, we start a step-over operation
5097 on that particular thread, and leave all others stopped. */
5098
5099 static int
5100 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5101 {
5102 struct thread_info *thread = (struct thread_info *) entry;
5103 struct lwp_info *lwp = get_thread_lwp (thread);
5104 int step;
5105
5106 if (lwp == except)
5107 return 0;
5108
5109 if (debug_threads)
5110 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5111
5112 if (!lwp->stopped)
5113 {
5114 if (debug_threads)
5115 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5116 return 0;
5117 }
5118
5119 if (thread->last_resume_kind == resume_stop
5120 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5121 {
5122 if (debug_threads)
5123 debug_printf (" client wants LWP %ld to remain stopped\n",
5124 lwpid_of (thread));
5125 return 0;
5126 }
5127
5128 if (lwp->status_pending_p)
5129 {
5130 if (debug_threads)
5131 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5132 lwpid_of (thread));
5133 return 0;
5134 }
5135
5136 gdb_assert (lwp->suspended >= 0);
5137
5138 if (lwp->suspended)
5139 {
5140 if (debug_threads)
5141 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5142 return 0;
5143 }
5144
5145 if (thread->last_resume_kind == resume_stop
5146 && lwp->pending_signals_to_report == NULL
5147 && lwp->collecting_fast_tracepoint == 0)
5148 {
5149 /* We haven't reported this LWP as stopped yet (otherwise, the
5150 last_status.kind check above would catch it, and we wouldn't
5151 reach here). This LWP may have been momentarily paused by a
5152 stop_all_lwps call while handling, for example, another LWP's
5153 step-over. In that case, the pending expected SIGSTOP signal
5154 that was queued at vCont;t handling time will have already
5155 been consumed by wait_for_sigstop, and so we need to requeue
5156 another one here. Note that if the LWP already has a SIGSTOP
5157 pending, this is a no-op. */
5158
5159 if (debug_threads)
5160 debug_printf ("Client wants LWP %ld to stop. "
5161 "Making sure it has a SIGSTOP pending\n",
5162 lwpid_of (thread));
5163
5164 send_sigstop (lwp);
5165 }
5166
5167 if (thread->last_resume_kind == resume_step)
5168 {
5169 if (debug_threads)
5170 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5171 lwpid_of (thread));
5172 step = 1;
5173 }
5174 else if (lwp->bp_reinsert != 0)
5175 {
5176 if (debug_threads)
5177 debug_printf (" stepping LWP %ld, reinsert set\n",
5178 lwpid_of (thread));
5179
5180 step = maybe_hw_step (thread);
5181 }
5182 else
5183 step = 0;
5184
5185 linux_resume_one_lwp (lwp, step, 0, NULL);
5186 return 0;
5187 }
5188
5189 static int
5190 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5191 {
5192 struct thread_info *thread = (struct thread_info *) entry;
5193 struct lwp_info *lwp = get_thread_lwp (thread);
5194
5195 if (lwp == except)
5196 return 0;
5197
5198 lwp_suspended_decr (lwp);
5199
5200 return proceed_one_lwp (entry, except);
5201 }
5202
5203 /* When we finish a step-over, set threads running again. If there's
5204 another thread that may need a step-over, now's the time to start
5205 it. Eventually, we'll move all threads past their breakpoints. */
5206
5207 static void
5208 proceed_all_lwps (void)
5209 {
5210 struct thread_info *need_step_over;
5211
5212 /* If there is a thread which would otherwise be resumed, which is
5213 stopped at a breakpoint that needs stepping over, then don't
5214 resume any threads - have it step over the breakpoint with all
5215 other threads stopped, then resume all threads again. */
5216
5217 if (supports_breakpoints ())
5218 {
5219 need_step_over
5220 = (struct thread_info *) find_inferior (&all_threads,
5221 need_step_over_p, NULL);
5222
5223 if (need_step_over != NULL)
5224 {
5225 if (debug_threads)
5226 debug_printf ("proceed_all_lwps: found "
5227 "thread %ld needing a step-over\n",
5228 lwpid_of (need_step_over));
5229
5230 start_step_over (get_thread_lwp (need_step_over));
5231 return;
5232 }
5233 }
5234
5235 if (debug_threads)
5236 debug_printf ("Proceeding, no step-over needed\n");
5237
5238 find_inferior (&all_threads, proceed_one_lwp, NULL);
5239 }
5240
5241 /* Stopped LWPs that the client wanted running and that have no
5242 pending status are set running again, except for EXCEPT, if not
5243 NULL. This undoes a stop_all_lwps call. */
5244
5245 static void
5246 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5247 {
5248 if (debug_threads)
5249 {
5250 debug_enter ();
5251 if (except)
5252 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5253 lwpid_of (get_lwp_thread (except)));
5254 else
5255 debug_printf ("unstopping all lwps\n");
5256 }
5257
5258 if (unsuspend)
5259 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5260 else
5261 find_inferior (&all_threads, proceed_one_lwp, except);
5262
5263 if (debug_threads)
5264 {
5265 debug_printf ("unstop_all_lwps done\n");
5266 debug_exit ();
5267 }
5268 }
5269
5270
5271 #ifdef HAVE_LINUX_REGSETS
5272
5273 #define use_linux_regsets 1
5274
5275 /* Returns true if REGSET has been disabled. */
5276
5277 static int
5278 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5279 {
5280 return (info->disabled_regsets != NULL
5281 && info->disabled_regsets[regset - info->regsets]);
5282 }
5283
5284 /* Disable REGSET. */
5285
5286 static void
5287 disable_regset (struct regsets_info *info, struct regset_info *regset)
5288 {
5289 int dr_offset;
5290
5291 dr_offset = regset - info->regsets;
5292 if (info->disabled_regsets == NULL)
5293 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5294 info->disabled_regsets[dr_offset] = 1;
5295 }
5296
5297 static int
5298 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5299 struct regcache *regcache)
5300 {
5301 struct regset_info *regset;
5302 int saw_general_regs = 0;
5303 int pid;
5304 struct iovec iov;
5305
5306 pid = lwpid_of (current_thread);
5307 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5308 {
5309 void *buf, *data;
5310 int nt_type, res;
5311
5312 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5313 continue;
5314
5315 buf = xmalloc (regset->size);
5316
5317 nt_type = regset->nt_type;
5318 if (nt_type)
5319 {
5320 iov.iov_base = buf;
5321 iov.iov_len = regset->size;
5322 data = (void *) &iov;
5323 }
5324 else
5325 data = buf;
5326
5327 #ifndef __sparc__
5328 res = ptrace (regset->get_request, pid,
5329 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5330 #else
5331 res = ptrace (regset->get_request, pid, data, nt_type);
5332 #endif
5333 if (res < 0)
5334 {
5335 if (errno == EIO)
5336 {
5337 /* If we get EIO on a regset, do not try it again for
5338 this process mode. */
5339 disable_regset (regsets_info, regset);
5340 }
5341 else if (errno == ENODATA)
5342 {
5343 /* ENODATA may be returned if the regset is currently
5344 not "active". This can happen in normal operation,
5345 so suppress the warning in this case. */
5346 }
5347 else
5348 {
5349 char s[256];
5350 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5351 pid);
5352 perror (s);
5353 }
5354 }
5355 else
5356 {
5357 if (regset->type == GENERAL_REGS)
5358 saw_general_regs = 1;
5359 regset->store_function (regcache, buf);
5360 }
5361 free (buf);
5362 }
5363 if (saw_general_regs)
5364 return 0;
5365 else
5366 return 1;
5367 }
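/* Standalone sketch of the iovec-based regset read performed above
   (assumes <sys/ptrace.h>, <sys/uio.h>, <sys/user.h>, <elf.h>;
   x86-64 shown, other architectures use their own register struct):

     struct user_regs_struct regs;
     struct iovec iov = { &regs, sizeof (regs) };

     if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
       ... success; iov.iov_len holds the byte count the kernel
       actually filled in ...
*/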
5368
5369 static int
5370 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5371 struct regcache *regcache)
5372 {
5373 struct regset_info *regset;
5374 int saw_general_regs = 0;
5375 int pid;
5376 struct iovec iov;
5377
5378 pid = lwpid_of (current_thread);
5379 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5380 {
5381 void *buf, *data;
5382 int nt_type, res;
5383
5384 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5385 || regset->fill_function == NULL)
5386 continue;
5387
5388 buf = xmalloc (regset->size);
5389
5390 /* First fill the buffer with the current register set contents,
5391 in case there are any items in the kernel's regset that are
5392 not in gdbserver's regcache. */
5393
5394 nt_type = regset->nt_type;
5395 if (nt_type)
5396 {
5397 iov.iov_base = buf;
5398 iov.iov_len = regset->size;
5399 data = (void *) &iov;
5400 }
5401 else
5402 data = buf;
5403
5404 #ifndef __sparc__
5405 res = ptrace (regset->get_request, pid,
5406 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5407 #else
5408 res = ptrace (regset->get_request, pid, data, nt_type);
5409 #endif
5410
5411 if (res == 0)
5412 {
5413 /* Then overlay our cached registers on that. */
5414 regset->fill_function (regcache, buf);
5415
5416 /* Only now do we write the register set. */
5417 #ifndef __sparc__
5418 res = ptrace (regset->set_request, pid,
5419 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5420 #else
5421 res = ptrace (regset->set_request, pid, data, nt_type);
5422 #endif
5423 }
5424
5425 if (res < 0)
5426 {
5427 if (errno == EIO)
5428 {
5429 /* If we get EIO on a regset, do not try it again for
5430 this process mode. */
5431 disable_regset (regsets_info, regset);
5432 }
5433 else if (errno == ESRCH)
5434 {
5435 /* At this point, ESRCH should mean the process is
5436 already gone, in which case we simply ignore attempts
5437 to change its registers. See also the related
5438 comment in linux_resume_one_lwp. */
5439 free (buf);
5440 return 0;
5441 }
5442 else
5443 {
5444 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5445 }
5446 }
5447 else if (regset->type == GENERAL_REGS)
5448 saw_general_regs = 1;
5449 free (buf);
5450 }
5451 if (saw_general_regs)
5452 return 0;
5453 else
5454 return 1;
5455 }
5456
5457 #else /* !HAVE_LINUX_REGSETS */
5458
5459 #define use_linux_regsets 0
5460 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5461 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5462
5463 #endif
5464
5465 /* Return 1 if register REGNO is supported by one of the regset ptrace
5466 calls or 0 if it has to be transferred individually. */
5467
5468 static int
5469 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5470 {
5471 unsigned char mask = 1 << (regno % 8);
5472 size_t index = regno / 8;
5473
5474 return (use_linux_regsets
5475 && (regs_info->regset_bitmap == NULL
5476 || (regs_info->regset_bitmap[index] & mask) != 0));
5477 }
5478
5479 #ifdef HAVE_LINUX_USRREGS
5480
5481 static int
5482 register_addr (const struct usrregs_info *usrregs, int regnum)
5483 {
5484 int addr;
5485
5486 if (regnum < 0 || regnum >= usrregs->num_regs)
5487 error ("Invalid register number %d.", regnum);
5488
5489 addr = usrregs->regmap[regnum];
5490
5491 return addr;
5492 }
5493
5494 /* Fetch one register. */
5495 static void
5496 fetch_register (const struct usrregs_info *usrregs,
5497 struct regcache *regcache, int regno)
5498 {
5499 CORE_ADDR regaddr;
5500 int i, size;
5501 char *buf;
5502 int pid;
5503
5504 if (regno >= usrregs->num_regs)
5505 return;
5506 if ((*the_low_target.cannot_fetch_register) (regno))
5507 return;
5508
5509 regaddr = register_addr (usrregs, regno);
5510 if (regaddr == -1)
5511 return;
5512
5513 size = ((register_size (regcache->tdesc, regno)
5514 + sizeof (PTRACE_XFER_TYPE) - 1)
5515 & -sizeof (PTRACE_XFER_TYPE));
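/* E.g. with an 8-byte PTRACE_XFER_TYPE, a 10-byte register rounds up
   as (10 + 8 - 1) & -8 == 17 & ~7 == 16, i.e. two ptrace words
   (illustrative numbers).  */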
5516 buf = (char *) alloca (size);
5517
5518 pid = lwpid_of (current_thread);
5519 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5520 {
5521 errno = 0;
5522 *(PTRACE_XFER_TYPE *) (buf + i) =
5523 ptrace (PTRACE_PEEKUSER, pid,
5524 /* Coerce to a uintptr_t first to avoid potential gcc warning
5525 about coercing an 8 byte integer to a 4 byte pointer. */
5526 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5527 regaddr += sizeof (PTRACE_XFER_TYPE);
5528 if (errno != 0)
5529 error ("reading register %d: %s", regno, strerror (errno));
5530 }
5531
5532 if (the_low_target.supply_ptrace_register)
5533 the_low_target.supply_ptrace_register (regcache, regno, buf);
5534 else
5535 supply_register (regcache, regno, buf);
5536 }
5537
5538 /* Store one register. */
5539 static void
5540 store_register (const struct usrregs_info *usrregs,
5541 struct regcache *regcache, int regno)
5542 {
5543 CORE_ADDR regaddr;
5544 int i, size;
5545 char *buf;
5546 int pid;
5547
5548 if (regno >= usrregs->num_regs)
5549 return;
5550 if ((*the_low_target.cannot_store_register) (regno))
5551 return;
5552
5553 regaddr = register_addr (usrregs, regno);
5554 if (regaddr == -1)
5555 return;
5556
5557 size = ((register_size (regcache->tdesc, regno)
5558 + sizeof (PTRACE_XFER_TYPE) - 1)
5559 & -sizeof (PTRACE_XFER_TYPE));
5560 buf = (char *) alloca (size);
5561 memset (buf, 0, size);
5562
5563 if (the_low_target.collect_ptrace_register)
5564 the_low_target.collect_ptrace_register (regcache, regno, buf);
5565 else
5566 collect_register (regcache, regno, buf);
5567
5568 pid = lwpid_of (current_thread);
5569 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5570 {
5571 errno = 0;
5572 ptrace (PTRACE_POKEUSER, pid,
5573 /* Coerce to a uintptr_t first to avoid potential gcc warning
5574 about coercing an 8 byte integer to a 4 byte pointer. */
5575 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5576 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5577 if (errno != 0)
5578 {
5579 /* At this point, ESRCH should mean the process is
5580 already gone, in which case we simply ignore attempts
5581 to change its registers. See also the related
5582 comment in linux_resume_one_lwp. */
5583 if (errno == ESRCH)
5584 return;
5585
5586 if ((*the_low_target.cannot_store_register) (regno) == 0)
5587 error ("writing register %d: %s", regno, strerror (errno));
5588 }
5589 regaddr += sizeof (PTRACE_XFER_TYPE);
5590 }
5591 }
5592
5593 /* Fetch all registers, or just one, from the child process.
5594 If REGNO is -1, do this for all registers, skipping any that are
5595 assumed to have been retrieved by regsets_fetch_inferior_registers,
5596 unless ALL is non-zero.
5597 Otherwise, REGNO specifies which register (so we can save time). */
5598 static void
5599 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5600 struct regcache *regcache, int regno, int all)
5601 {
5602 struct usrregs_info *usr = regs_info->usrregs;
5603
5604 if (regno == -1)
5605 {
5606 for (regno = 0; regno < usr->num_regs; regno++)
5607 if (all || !linux_register_in_regsets (regs_info, regno))
5608 fetch_register (usr, regcache, regno);
5609 }
5610 else
5611 fetch_register (usr, regcache, regno);
5612 }
5613
5614 /* Store our register values back into the inferior.
5615 If REGNO is -1, do this for all registers, skipping any that are
5616 assumed to have been saved by regsets_store_inferior_registers,
5617 unless ALL is non-zero.
5618 Otherwise, REGNO specifies which register (so we can save time). */
5619 static void
5620 usr_store_inferior_registers (const struct regs_info *regs_info,
5621 struct regcache *regcache, int regno, int all)
5622 {
5623 struct usrregs_info *usr = regs_info->usrregs;
5624
5625 if (regno == -1)
5626 {
5627 for (regno = 0; regno < usr->num_regs; regno++)
5628 if (all || !linux_register_in_regsets (regs_info, regno))
5629 store_register (usr, regcache, regno);
5630 }
5631 else
5632 store_register (usr, regcache, regno);
5633 }
5634
5635 #else /* !HAVE_LINUX_USRREGS */
5636
5637 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5638 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5639
5640 #endif
5641
5642
5643 static void
5644 linux_fetch_registers (struct regcache *regcache, int regno)
5645 {
5646 int use_regsets;
5647 int all = 0;
5648 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5649
5650 if (regno == -1)
5651 {
5652 if (the_low_target.fetch_register != NULL
5653 && regs_info->usrregs != NULL)
5654 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5655 (*the_low_target.fetch_register) (regcache, regno);
5656
5657 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5658 if (regs_info->usrregs != NULL)
5659 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5660 }
5661 else
5662 {
5663 if (the_low_target.fetch_register != NULL
5664 && (*the_low_target.fetch_register) (regcache, regno))
5665 return;
5666
5667 use_regsets = linux_register_in_regsets (regs_info, regno);
5668 if (use_regsets)
5669 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5670 regcache);
5671 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5672 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5673 }
5674 }
5675
5676 static void
5677 linux_store_registers (struct regcache *regcache, int regno)
5678 {
5679 int use_regsets;
5680 int all = 0;
5681 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5682
5683 if (regno == -1)
5684 {
5685 all = regsets_store_inferior_registers (regs_info->regsets_info,
5686 regcache);
5687 if (regs_info->usrregs != NULL)
5688 usr_store_inferior_registers (regs_info, regcache, regno, all);
5689 }
5690 else
5691 {
5692 use_regsets = linux_register_in_regsets (regs_info, regno);
5693 if (use_regsets)
5694 all = regsets_store_inferior_registers (regs_info->regsets_info,
5695 regcache);
5696 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5697 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5698 }
5699 }
5700
5701
5702 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5703 to debugger memory starting at MYADDR. */
5704
5705 static int
5706 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5707 {
5708 int pid = lwpid_of (current_thread);
5709 register PTRACE_XFER_TYPE *buffer;
5710 register CORE_ADDR addr;
5711 register int count;
5712 char filename[64];
5713 register int i;
5714 int ret;
5715 int fd;
5716
5717 /* Try using /proc. Don't bother for one word. */
5718 if (len >= 3 * sizeof (long))
5719 {
5720 int bytes;
5721
5722 /* We could keep this file open and cache it - possibly one per
5723 thread. That requires some juggling, but is even faster. */
5724 sprintf (filename, "/proc/%d/mem", pid);
5725 fd = open (filename, O_RDONLY | O_LARGEFILE);
5726 if (fd == -1)
5727 goto no_proc;
5728
5729 /* If pread64 is available, use it. It's faster if the kernel
5730 supports it (only one syscall), and it's 64-bit safe even on
5731 32-bit platforms (for instance, SPARC debugging a SPARC64
5732 application). */
5733 #ifdef HAVE_PREAD64
5734 bytes = pread64 (fd, myaddr, len, memaddr);
5735 #else
5736 bytes = -1;
5737 if (lseek (fd, memaddr, SEEK_SET) != -1)
5738 bytes = read (fd, myaddr, len);
5739 #endif
5740
5741 close (fd);
5742 if (bytes == len)
5743 return 0;
5744
5745 /* Some data was read; we'll try to get the rest with ptrace. */
5746 if (bytes > 0)
5747 {
5748 memaddr += bytes;
5749 myaddr += bytes;
5750 len -= bytes;
5751 }
5752 }
5753
5754 no_proc:
5755 /* Round starting address down to longword boundary. */
5756 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5757 /* Round ending address up; get number of longwords that makes. */
5758 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5759 / sizeof (PTRACE_XFER_TYPE));
5760 /* Allocate buffer of that many longwords. */
5761 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5762
5763 /* Read all the longwords. */
5764 errno = 0;
5765 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5766 {
5767 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5768 about coercing an 8 byte integer to a 4 byte pointer. */
5769 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5770 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5771 (PTRACE_TYPE_ARG4) 0);
5772 if (errno)
5773 break;
5774 }
5775 ret = errno;
5776
5777 /* Copy appropriate bytes out of the buffer. */
5778 if (i > 0)
5779 {
5780 i *= sizeof (PTRACE_XFER_TYPE);
5781 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5782 memcpy (myaddr,
5783 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5784 i < len ? i : len);
5785 }
5786
5787 return ret;
5788 }
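/* Illustration only: the /proc fast path above boils down to this
   hypothetical standalone sketch (with _FILE_OFFSET_BITS=64, plain
   pread plays the role of pread64).  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes at ADDR in stopped tracee PID into BUF.  Returns the
   byte count actually read, or -1 on error.  */
static ssize_t
proc_read_mem (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif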
5789
5790 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5791 memory at MEMADDR. On failure (cannot write to the inferior)
5792 returns the value of errno. Always succeeds if LEN is zero. */
5793
5794 static int
5795 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5796 {
5797 register int i;
5798 /* Round starting address down to longword boundary. */
5799 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5800 /* Round ending address up; get number of longwords that makes. */
5801 register int count
5802 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5803 / sizeof (PTRACE_XFER_TYPE);
5804
5805 /* Allocate buffer of that many longwords. */
5806 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5807
5808 int pid = lwpid_of (current_thread);
5809
5810 if (len == 0)
5811 {
5812 /* Zero length write always succeeds. */
5813 return 0;
5814 }
5815
5816 if (debug_threads)
5817 {
5818 /* Dump up to four bytes. */
5819 char str[4 * 2 + 1];
5820 char *p = str;
5821 int dump = len < 4 ? len : 4;
5822
5823 for (i = 0; i < dump; i++)
5824 {
5825 sprintf (p, "%02x", myaddr[i]);
5826 p += 2;
5827 }
5828 *p = '\0';
5829
5830 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5831 str, (long) memaddr, pid);
5832 }
5833
5834 /* Fill start and end extra bytes of buffer with existing memory data. */
5835
5836 errno = 0;
5837 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5838 about coercing an 8 byte integer to a 4 byte pointer. */
5839 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5840 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5841 (PTRACE_TYPE_ARG4) 0);
5842 if (errno)
5843 return errno;
5844
5845 if (count > 1)
5846 {
5847 errno = 0;
5848 buffer[count - 1]
5849 = ptrace (PTRACE_PEEKTEXT, pid,
5850 /* Coerce to a uintptr_t first to avoid potential gcc warning
5851 about coercing an 8 byte integer to a 4 byte pointer. */
5852 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5853 * sizeof (PTRACE_XFER_TYPE)),
5854 (PTRACE_TYPE_ARG4) 0);
5855 if (errno)
5856 return errno;
5857 }
5858
5859 /* Copy data to be written over corresponding part of buffer. */
5860
5861 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5862 myaddr, len);
5863
5864 /* Write the entire buffer. */
5865
5866 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5867 {
5868 errno = 0;
5869 ptrace (PTRACE_POKETEXT, pid,
5870 /* Coerce to a uintptr_t first to avoid potential gcc warning
5871 about coercing an 8 byte integer to a 4 byte pointer. */
5872 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5873 (PTRACE_TYPE_ARG4) buffer[i]);
5874 if (errno)
5875 return errno;
5876 }
5877
5878 return 0;
5879 }
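/* A worked example of the boundary handling above (illustrative
   numbers): with 8-byte ptrace words, writing LEN == 2 bytes at an
   address whose low three bits are 7 spans COUNT == 2 words.  Both
   words are peeked first, the two new bytes are overlaid at offset 7
   of the buffer, and 16 bytes are poked back, preserving the other
   14 bytes of existing memory.  */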
5880
5881 static void
5882 linux_look_up_symbols (void)
5883 {
5884 #ifdef USE_THREAD_DB
5885 struct process_info *proc = current_process ();
5886
5887 if (proc->priv->thread_db != NULL)
5888 return;
5889
5890 thread_db_init ();
5891 #endif
5892 }
5893
5894 static void
5895 linux_request_interrupt (void)
5896 {
5897 extern unsigned long signal_pid;
5898
5899 /* Send a SIGINT to the whole process group (hence the negative PID); this
5900 acts just as if the user had typed a ^C on the controlling terminal. */
5901 kill (-signal_pid, SIGINT);
5902 }
5903
5904 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5905 to debugger memory starting at MYADDR. */
5906
5907 static int
5908 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5909 {
5910 char filename[PATH_MAX];
5911 int fd, n;
5912 int pid = lwpid_of (current_thread);
5913
5914 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5915
5916 fd = open (filename, O_RDONLY);
5917 if (fd < 0)
5918 return -1;
5919
5920 if (offset != (CORE_ADDR) 0
5921 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5922 n = -1;
5923 else
5924 n = read (fd, myaddr, len);
5925
5926 close (fd);
5927
5928 return n;
5929 }
5930
5931 /* These breakpoint- and watchpoint-related wrapper functions simply
5932 pass the call on if the target has registered a
5933 corresponding function. */
5934
5935 static int
5936 linux_supports_z_point_type (char z_type)
5937 {
5938 return (the_low_target.supports_z_point_type != NULL
5939 && the_low_target.supports_z_point_type (z_type));
5940 }
5941
5942 static int
5943 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5944 int size, struct raw_breakpoint *bp)
5945 {
5946 if (type == raw_bkpt_type_sw)
5947 return insert_memory_breakpoint (bp);
5948 else if (the_low_target.insert_point != NULL)
5949 return the_low_target.insert_point (type, addr, size, bp);
5950 else
5951 /* Unsupported (see target.h). */
5952 return 1;
5953 }
5954
5955 static int
5956 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5957 int size, struct raw_breakpoint *bp)
5958 {
5959 if (type == raw_bkpt_type_sw)
5960 return remove_memory_breakpoint (bp);
5961 else if (the_low_target.remove_point != NULL)
5962 return the_low_target.remove_point (type, addr, size, bp);
5963 else
5964 /* Unsupported (see target.h). */
5965 return 1;
5966 }
5967
5968 /* Implement the to_stopped_by_sw_breakpoint target_ops
5969 method. */
5970
5971 static int
5972 linux_stopped_by_sw_breakpoint (void)
5973 {
5974 struct lwp_info *lwp = get_thread_lwp (current_thread);
5975
5976 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5977 }
5978
5979 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5980 method. */
5981
5982 static int
5983 linux_supports_stopped_by_sw_breakpoint (void)
5984 {
5985 return USE_SIGTRAP_SIGINFO;
5986 }
5987
5988 /* Implement the to_stopped_by_hw_breakpoint target_ops
5989 method. */
5990
5991 static int
5992 linux_stopped_by_hw_breakpoint (void)
5993 {
5994 struct lwp_info *lwp = get_thread_lwp (current_thread);
5995
5996 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5997 }
5998
5999 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6000 method. */
6001
6002 static int
6003 linux_supports_stopped_by_hw_breakpoint (void)
6004 {
6005 return USE_SIGTRAP_SIGINFO;
6006 }
6007
6008 /* Implement the supports_hardware_single_step target_ops method. */
6009
6010 static int
6011 linux_supports_hardware_single_step (void)
6012 {
6013 return can_hardware_single_step ();
6014 }
6015
6016 static int
6017 linux_supports_software_single_step (void)
6018 {
6019 return can_software_single_step ();
6020 }
6021
6022 static int
6023 linux_stopped_by_watchpoint (void)
6024 {
6025 struct lwp_info *lwp = get_thread_lwp (current_thread);
6026
6027 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6028 }
6029
6030 static CORE_ADDR
6031 linux_stopped_data_address (void)
6032 {
6033 struct lwp_info *lwp = get_thread_lwp (current_thread);
6034
6035 return lwp->stopped_data_address;
6036 }
6037
6038 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6039 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6040 && defined(PT_TEXT_END_ADDR)
6041
6042 /* This is only used for targets that define PT_TEXT_ADDR,
6043 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6044 target presumably has other ways of acquiring this information, such
6045 as loadmaps. */
6046
6047 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6048 to tell gdb about. */
6049
6050 static int
6051 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6052 {
6053 unsigned long text, text_end, data;
6054 int pid = lwpid_of (current_thread);
6055
6056 errno = 0;
6057
6058 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6059 (PTRACE_TYPE_ARG4) 0);
6060 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6061 (PTRACE_TYPE_ARG4) 0);
6062 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6063 (PTRACE_TYPE_ARG4) 0);
6064
6065 if (errno == 0)
6066 {
6067 /* Both text and data offsets produced at compile-time (and so
6068 used by gdb) are relative to the beginning of the program,
6069 with the data segment immediately following the text segment.
6070 However, the actual runtime layout in memory may put the data
6071 somewhere else, so when we send gdb a data base-address, we
6072 use the real data base address and subtract the compile-time
6073 data base-address from it (which is just the length of the
6074 text segment). BSS immediately follows data in both
6075 cases. */
6076 *text_p = text;
6077 *data_p = data - (text_end - text);
6078
6079 return 1;
6080 }
6081 return 0;
6082 }
6083 #endif
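/* A worked example of the offset math above (illustrative numbers):
   if the kernel reports text = 0x100000, text_end = 0x108000 and
   data = 0x200000, the text segment is 0x8000 bytes long and we
   report *data_p = 0x200000 - 0x8000 = 0x1f8000, so that gdb's
   compile-time data offsets (which start right after the text
   segment) relocate to the real runtime data addresses.  */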
6084
6085 static int
6086 linux_qxfer_osdata (const char *annex,
6087 unsigned char *readbuf, unsigned const char *writebuf,
6088 CORE_ADDR offset, int len)
6089 {
6090 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6091 }
6092
6093 /* Convert a native/host siginfo object into/from the siginfo in the
6094 layout of the inferior's architecture (DIRECTION 0: native to inferior; 1: the reverse). */
6095
6096 static void
6097 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6098 {
6099 int done = 0;
6100
6101 if (the_low_target.siginfo_fixup != NULL)
6102 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6103
6104 /* If there was no callback, or the callback didn't do anything,
6105 then just do a straight memcpy. */
6106 if (!done)
6107 {
6108 if (direction == 1)
6109 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6110 else
6111 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6112 }
6113 }
6114
6115 static int
6116 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6117 unsigned const char *writebuf, CORE_ADDR offset, int len)
6118 {
6119 int pid;
6120 siginfo_t siginfo;
6121 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6122
6123 if (current_thread == NULL)
6124 return -1;
6125
6126 pid = lwpid_of (current_thread);
6127
6128 if (debug_threads)
6129 debug_printf ("%s siginfo for lwp %d.\n",
6130 readbuf != NULL ? "Reading" : "Writing",
6131 pid);
6132
6133 if (offset >= sizeof (siginfo))
6134 return -1;
6135
6136 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6137 return -1;
6138
6139 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6140 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6141 inferior with a 64-bit GDBSERVER should look the same as debugging it
6142 with a 32-bit GDBSERVER, we need to convert it. */
6143 siginfo_fixup (&siginfo, inf_siginfo, 0);
6144
6145 if (offset + len > sizeof (siginfo))
6146 len = sizeof (siginfo) - offset;
6147
6148 if (readbuf != NULL)
6149 memcpy (readbuf, inf_siginfo + offset, len);
6150 else
6151 {
6152 memcpy (inf_siginfo + offset, writebuf, len);
6153
6154 /* Convert back to ptrace layout before flushing it out. */
6155 siginfo_fixup (&siginfo, inf_siginfo, 1);
6156
6157 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6158 return -1;
6159 }
6160
6161 return len;
6162 }
6163
6164 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6165 it lets us notice when children change state, and it acts as the
6166 handler for the sigsuspend in my_waitpid. */
6167
6168 static void
6169 sigchld_handler (int signo)
6170 {
6171 int old_errno = errno;
6172
6173 if (debug_threads)
6174 {
6175 do
6176 {
6177 /* fprintf is not async-signal-safe, so call write
6178 directly. */
6179 if (write (2, "sigchld_handler\n",
6180 sizeof ("sigchld_handler\n") - 1) < 0)
6181 break; /* just ignore */
6182 } while (0);
6183 }
6184
6185 if (target_is_async_p ())
6186 async_file_mark (); /* trigger a linux_wait */
6187
6188 errno = old_errno;
6189 }
6190
6191 static int
6192 linux_supports_non_stop (void)
6193 {
6194 return 1;
6195 }
6196
6197 static int
6198 linux_async (int enable)
6199 {
6200 int previous = target_is_async_p ();
6201
6202 if (debug_threads)
6203 debug_printf ("linux_async (%d), previous=%d\n",
6204 enable, previous);
6205
6206 if (previous != enable)
6207 {
6208 sigset_t mask;
6209 sigemptyset (&mask);
6210 sigaddset (&mask, SIGCHLD);
6211
6212 sigprocmask (SIG_BLOCK, &mask, NULL);
6213
6214 if (enable)
6215 {
6216 if (pipe (linux_event_pipe) == -1)
6217 {
6218 linux_event_pipe[0] = -1;
6219 linux_event_pipe[1] = -1;
6220 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6221
6222 warning ("creating event pipe failed.");
6223 return previous;
6224 }
6225
6226 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6227 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6228
6229 /* Register the event loop handler. */
6230 add_file_handler (linux_event_pipe[0],
6231 handle_target_event, NULL);
6232
6233 /* Always trigger a linux_wait. */
6234 async_file_mark ();
6235 }
6236 else
6237 {
6238 delete_file_handler (linux_event_pipe[0]);
6239
6240 close (linux_event_pipe[0]);
6241 close (linux_event_pipe[1]);
6242 linux_event_pipe[0] = -1;
6243 linux_event_pipe[1] = -1;
6244 }
6245
6246 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6247 }
6248
6249 return previous;
6250 }
6251
6252 static int
6253 linux_start_non_stop (int nonstop)
6254 {
6255 /* Register or unregister from event-loop accordingly. */
6256 linux_async (nonstop);
6257
6258 if (target_is_async_p () != (nonstop != 0))
6259 return -1;
6260
6261 return 0;
6262 }
6263
6264 static int
6265 linux_supports_multi_process (void)
6266 {
6267 return 1;
6268 }
6269
6270 /* Check if fork events are supported. */
6271
6272 static int
6273 linux_supports_fork_events (void)
6274 {
6275 return linux_supports_tracefork ();
6276 }
6277
6278 /* Check if vfork events are supported. */
6279
6280 static int
6281 linux_supports_vfork_events (void)
6282 {
6283 return linux_supports_tracefork ();
6284 }
6285
6286 /* Check if exec events are supported. */
6287
6288 static int
6289 linux_supports_exec_events (void)
6290 {
6291 return linux_supports_traceexec ();
6292 }
6293
6294 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6295 options for the specified lwp. */
6296
6297 static int
6298 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6299 void *args)
6300 {
6301 struct thread_info *thread = (struct thread_info *) entry;
6302 struct lwp_info *lwp = get_thread_lwp (thread);
6303
6304 if (!lwp->stopped)
6305 {
6306 /* Stop the lwp so we can modify its ptrace options. */
6307 lwp->must_set_ptrace_flags = 1;
6308 linux_stop_lwp (lwp);
6309 }
6310 else
6311 {
6312 /* Already stopped; go ahead and set the ptrace options. */
6313 struct process_info *proc = find_process_pid (pid_of (thread));
6314 int options = linux_low_ptrace_options (proc->attached);
6315
6316 linux_enable_event_reporting (lwpid_of (thread), options);
6317 lwp->must_set_ptrace_flags = 0;
6318 }
6319
6320 return 0;
6321 }
6322
6323 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6324 ptrace flags for all inferiors. This is in case the new GDB connection
6325 doesn't support the same set of events that the previous one did. */
6326
6327 static void
6328 linux_handle_new_gdb_connection (void)
6329 {
6330 pid_t pid;
6331
6332 /* Request that all the lwps reset their ptrace options. */
6333 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6334 }
6335
6336 static int
6337 linux_supports_disable_randomization (void)
6338 {
6339 #ifdef HAVE_PERSONALITY
6340 return 1;
6341 #else
6342 return 0;
6343 #endif
6344 }
6345
6346 static int
6347 linux_supports_agent (void)
6348 {
6349 return 1;
6350 }
6351
6352 static int
6353 linux_supports_range_stepping (void)
6354 {
6355 if (the_low_target.supports_range_stepping == NULL)
6356 return 0;
6357
6358 return (*the_low_target.supports_range_stepping) ();
6359 }
6360
6361 /* Enumerate spufs IDs for process PID. */
6362 static int
6363 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6364 {
6365 int pos = 0;
6366 int written = 0;
6367 char path[128];
6368 DIR *dir;
6369 struct dirent *entry;
6370
6371 sprintf (path, "/proc/%ld/fd", pid);
6372 dir = opendir (path);
6373 if (!dir)
6374 return -1;
6375
6376 rewinddir (dir);
6377 while ((entry = readdir (dir)) != NULL)
6378 {
6379 struct stat st;
6380 struct statfs stfs;
6381 int fd;
6382
6383 fd = atoi (entry->d_name);
6384 if (!fd)
6385 continue;
6386
6387 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6388 if (stat (path, &st) != 0)
6389 continue;
6390 if (!S_ISDIR (st.st_mode))
6391 continue;
6392
6393 if (statfs (path, &stfs) != 0)
6394 continue;
6395 if (stfs.f_type != SPUFS_MAGIC)
6396 continue;
6397
6398 if (pos >= offset && pos + 4 <= offset + len)
6399 {
6400 *(unsigned int *)(buf + pos - offset) = fd;
6401 written += 4;
6402 }
6403 pos += 4;
6404 }
6405
6406 closedir (dir);
6407 return written;
6408 }
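/* In the loop above, each spufs context contributes one 4-byte fd
   number: POS walks the full conceptual buffer, while only values
   that fall inside the [OFFSET, OFFSET + LEN) window are stored into
   BUF, which lets the caller page through the ID list in several
   reads.  */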
6409
6410 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6411 object type, using the /proc file system. */
6412 static int
6413 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6414 unsigned const char *writebuf,
6415 CORE_ADDR offset, int len)
6416 {
6417 long pid = lwpid_of (current_thread);
6418 char buf[128];
6419 int fd = 0;
6420 int ret = 0;
6421
6422 if (!writebuf && !readbuf)
6423 return -1;
6424
6425 if (!*annex)
6426 {
6427 if (!readbuf)
6428 return -1;
6429 else
6430 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6431 }
6432
6433 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6434 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6435 if (fd <= 0)
6436 return -1;
6437
6438 if (offset != 0
6439 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6440 {
6441 close (fd);
6442 return 0;
6443 }
6444
6445 if (writebuf)
6446 ret = write (fd, writebuf, (size_t) len);
6447 else
6448 ret = read (fd, readbuf, (size_t) len);
6449
6450 close (fd);
6451 return ret;
6452 }
6453
6454 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6455 struct target_loadseg
6456 {
6457 /* Core address to which the segment is mapped. */
6458 Elf32_Addr addr;
6459 /* VMA recorded in the program header. */
6460 Elf32_Addr p_vaddr;
6461 /* Size of this segment in memory. */
6462 Elf32_Word p_memsz;
6463 };
6464
6465 # if defined PT_GETDSBT
6466 struct target_loadmap
6467 {
6468 /* Protocol version number, must be zero. */
6469 Elf32_Word version;
6470 /* Pointer to the DSBT table, its size, and the DSBT index. */
6471 unsigned *dsbt_table;
6472 unsigned dsbt_size, dsbt_index;
6473 /* Number of segments in this map. */
6474 Elf32_Word nsegs;
6475 /* The actual memory map. */
6476 struct target_loadseg segs[/*nsegs*/];
6477 };
6478 # define LINUX_LOADMAP PT_GETDSBT
6479 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6480 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6481 # else
6482 struct target_loadmap
6483 {
6484 /* Protocol version number, must be zero. */
6485 Elf32_Half version;
6486 /* Number of segments in this map. */
6487 Elf32_Half nsegs;
6488 /* The actual memory map. */
6489 struct target_loadseg segs[/*nsegs*/];
6490 };
6491 # define LINUX_LOADMAP PTRACE_GETFDPIC
6492 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6493 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6494 # endif
6495
6496 static int
6497 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6498 unsigned char *myaddr, unsigned int len)
6499 {
6500 int pid = lwpid_of (current_thread);
6501 int addr = -1;
6502 struct target_loadmap *data = NULL;
6503 unsigned int actual_length, copy_length;
6504
6505 if (strcmp (annex, "exec") == 0)
6506 addr = (int) LINUX_LOADMAP_EXEC;
6507 else if (strcmp (annex, "interp") == 0)
6508 addr = (int) LINUX_LOADMAP_INTERP;
6509 else
6510 return -1;
6511
6512 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6513 return -1;
6514
6515 if (data == NULL)
6516 return -1;
6517
6518 actual_length = sizeof (struct target_loadmap)
6519 + sizeof (struct target_loadseg) * data->nsegs;
6520
6521 if (offset < 0 || offset > actual_length)
6522 return -1;
6523
6524 copy_length = actual_length - offset < len ? actual_length - offset : len;
6525 memcpy (myaddr, (char *) data + offset, copy_length);
6526 return copy_length;
6527 }
6528 #else
6529 # define linux_read_loadmap NULL
6530 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6531
6532 static void
6533 linux_process_qsupported (char **features, int count)
6534 {
6535 if (the_low_target.process_qsupported != NULL)
6536 the_low_target.process_qsupported (features, count);
6537 }
6538
6539 static int
6540 linux_supports_catch_syscall (void)
6541 {
6542 return (the_low_target.get_syscall_trapinfo != NULL
6543 && linux_supports_tracesysgood ());
6544 }
6545
6546 static int
6547 linux_get_ipa_tdesc_idx (void)
6548 {
6549 if (the_low_target.get_ipa_tdesc_idx == NULL)
6550 return 0;
6551
6552 return (*the_low_target.get_ipa_tdesc_idx) ();
6553 }
6554
6555 static int
6556 linux_supports_tracepoints (void)
6557 {
6558 if (the_low_target.supports_tracepoints == NULL)
6559 return 0;
6560
6561 return (*the_low_target.supports_tracepoints) ();
6562 }
6563
6564 static CORE_ADDR
6565 linux_read_pc (struct regcache *regcache)
6566 {
6567 if (the_low_target.get_pc == NULL)
6568 return 0;
6569
6570 return (*the_low_target.get_pc) (regcache);
6571 }
6572
6573 static void
6574 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6575 {
6576 gdb_assert (the_low_target.set_pc != NULL);
6577
6578 (*the_low_target.set_pc) (regcache, pc);
6579 }
6580
6581 static int
6582 linux_thread_stopped (struct thread_info *thread)
6583 {
6584 return get_thread_lwp (thread)->stopped;
6585 }
6586
6587 /* This exposes stop-all-threads functionality to other modules. */
6588
6589 static void
6590 linux_pause_all (int freeze)
6591 {
6592 stop_all_lwps (freeze, NULL);
6593 }
6594
6595 /* This exposes unstop-all-threads functionality to other gdbserver
6596 modules. */
6597
6598 static void
6599 linux_unpause_all (int unfreeze)
6600 {
6601 unstop_all_lwps (unfreeze, NULL);
6602 }
6603
6604 static int
6605 linux_prepare_to_access_memory (void)
6606 {
6607 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6608 running LWP. */
6609 if (non_stop)
6610 linux_pause_all (1);
6611 return 0;
6612 }
6613
6614 static void
6615 linux_done_accessing_memory (void)
6616 {
6617 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6618 running LWP. */
6619 if (non_stop)
6620 linux_unpause_all (1);
6621 }
6622
6623 static int
6624 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6625 CORE_ADDR collector,
6626 CORE_ADDR lockaddr,
6627 ULONGEST orig_size,
6628 CORE_ADDR *jump_entry,
6629 CORE_ADDR *trampoline,
6630 ULONGEST *trampoline_size,
6631 unsigned char *jjump_pad_insn,
6632 ULONGEST *jjump_pad_insn_size,
6633 CORE_ADDR *adjusted_insn_addr,
6634 CORE_ADDR *adjusted_insn_addr_end,
6635 char *err)
6636 {
6637 return (*the_low_target.install_fast_tracepoint_jump_pad)
6638 (tpoint, tpaddr, collector, lockaddr, orig_size,
6639 jump_entry, trampoline, trampoline_size,
6640 jjump_pad_insn, jjump_pad_insn_size,
6641 adjusted_insn_addr, adjusted_insn_addr_end,
6642 err);
6643 }
6644
6645 static struct emit_ops *
6646 linux_emit_ops (void)
6647 {
6648 if (the_low_target.emit_ops != NULL)
6649 return (*the_low_target.emit_ops) ();
6650 else
6651 return NULL;
6652 }
6653
6654 static int
6655 linux_get_min_fast_tracepoint_insn_len (void)
6656 {
6657 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6658 }
6659
6660 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6661
6662 static int
6663 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6664 CORE_ADDR *phdr_memaddr, int *num_phdr)
6665 {
6666 char filename[PATH_MAX];
6667 int fd;
6668 const int auxv_size = is_elf64
6669 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6670 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6671
6672 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6673
6674 fd = open (filename, O_RDONLY);
6675 if (fd < 0)
6676 return 1;
6677
6678 *phdr_memaddr = 0;
6679 *num_phdr = 0;
6680 while (read (fd, buf, auxv_size) == auxv_size
6681 && (*phdr_memaddr == 0 || *num_phdr == 0))
6682 {
6683 if (is_elf64)
6684 {
6685 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6686
6687 switch (aux->a_type)
6688 {
6689 case AT_PHDR:
6690 *phdr_memaddr = aux->a_un.a_val;
6691 break;
6692 case AT_PHNUM:
6693 *num_phdr = aux->a_un.a_val;
6694 break;
6695 }
6696 }
6697 else
6698 {
6699 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6700
6701 switch (aux->a_type)
6702 {
6703 case AT_PHDR:
6704 *phdr_memaddr = aux->a_un.a_val;
6705 break;
6706 case AT_PHNUM:
6707 *num_phdr = aux->a_un.a_val;
6708 break;
6709 }
6710 }
6711 }
6712
6713 close (fd);
6714
6715 if (*phdr_memaddr == 0 || *num_phdr == 0)
6716 {
6717 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6718 "phdr_memaddr = %ld, phdr_num = %d",
6719 (long) *phdr_memaddr, *num_phdr);
6720 return 2;
6721 }
6722
6723 return 0;
6724 }
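/* Illustration only: a hypothetical standalone sketch of the same
   AT_PHDR/AT_PHNUM scan, run by a process against its own auxv
   (assumes a 64-bit host).  */
#if 0
#include <elf.h>
#include <stdio.h>

static int
show_own_phdrs (void)
{
  FILE *f = fopen ("/proc/self/auxv", "rb");
  Elf64_auxv_t aux;

  if (f == NULL)
    return 1;

  /* The vector is terminated by an AT_NULL entry.  */
  while (fread (&aux, sizeof aux, 1, f) == 1 && aux.a_type != AT_NULL)
    {
      if (aux.a_type == AT_PHDR)
        printf ("AT_PHDR  = 0x%lx\n", (unsigned long) aux.a_un.a_val);
      else if (aux.a_type == AT_PHNUM)
        printf ("AT_PHNUM = %lu\n", (unsigned long) aux.a_un.a_val);
    }

  fclose (f);
  return 0;
}
#endif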
6725
6726 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6727
6728 static CORE_ADDR
6729 get_dynamic (const int pid, const int is_elf64)
6730 {
6731 CORE_ADDR phdr_memaddr, relocation;
6732 int num_phdr, i;
6733 unsigned char *phdr_buf;
6734 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6735
6736 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6737 return 0;
6738
6739 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6740 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6741
6742 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6743 return 0;
6744
6745 /* Compute relocation: it is expected to be 0 for "regular" executables,
6746 non-zero for PIE ones. */
6747 relocation = -1;
6748 for (i = 0; relocation == -1 && i < num_phdr; i++)
6749 if (is_elf64)
6750 {
6751 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6752
6753 if (p->p_type == PT_PHDR)
6754 relocation = phdr_memaddr - p->p_vaddr;
6755 }
6756 else
6757 {
6758 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6759
6760 if (p->p_type == PT_PHDR)
6761 relocation = phdr_memaddr - p->p_vaddr;
6762 }
6763
6764 if (relocation == -1)
6765 {
6766 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6767 real-world executables, including PIE executables, always have
6768 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6769 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6770 provides DT_DEBUG anyway (fpc binaries are statically linked).
6771
6772 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6773
6774 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6775
6776 return 0;
6777 }
6778
6779 for (i = 0; i < num_phdr; i++)
6780 {
6781 if (is_elf64)
6782 {
6783 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6784
6785 if (p->p_type == PT_DYNAMIC)
6786 return p->p_vaddr + relocation;
6787 }
6788 else
6789 {
6790 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6791
6792 if (p->p_type == PT_DYNAMIC)
6793 return p->p_vaddr + relocation;
6794 }
6795 }
6796
6797 return 0;
6798 }
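/* A worked example of the relocation computed above (illustrative
   numbers): a PIE executable may record PT_PHDR.p_vaddr = 0x40 in its
   program headers while the headers are actually mapped at
   phdr_memaddr = 0x555555554040.  That gives relocation
   = 0x555555554040 - 0x40 = 0x555555554000, which, added to the
   recorded PT_DYNAMIC p_vaddr, yields the runtime address of
   _DYNAMIC.  */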
6799
6800 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6801 can be 0 if the inferior does not yet have the library list initialized.
6802 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6803 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6804
6805 static CORE_ADDR
6806 get_r_debug (const int pid, const int is_elf64)
6807 {
6808 CORE_ADDR dynamic_memaddr;
6809 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6810 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6811 CORE_ADDR map = -1;
6812
6813 dynamic_memaddr = get_dynamic (pid, is_elf64);
6814 if (dynamic_memaddr == 0)
6815 return map;
6816
6817 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6818 {
6819 if (is_elf64)
6820 {
6821 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6822 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6823 union
6824 {
6825 Elf64_Xword map;
6826 unsigned char buf[sizeof (Elf64_Xword)];
6827 }
6828 rld_map;
6829 #endif
6830 #ifdef DT_MIPS_RLD_MAP
6831 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6832 {
6833 if (linux_read_memory (dyn->d_un.d_val,
6834 rld_map.buf, sizeof (rld_map.buf)) == 0)
6835 return rld_map.map;
6836 else
6837 break;
6838 }
6839 #endif /* DT_MIPS_RLD_MAP */
6840 #ifdef DT_MIPS_RLD_MAP_REL
6841 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6842 {
6843 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6844 rld_map.buf, sizeof (rld_map.buf)) == 0)
6845 return rld_map.map;
6846 else
6847 break;
6848 }
6849 #endif /* DT_MIPS_RLD_MAP_REL */
6850
6851 if (dyn->d_tag == DT_DEBUG && map == -1)
6852 map = dyn->d_un.d_val;
6853
6854 if (dyn->d_tag == DT_NULL)
6855 break;
6856 }
6857 else
6858 {
6859 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6860 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6861 union
6862 {
6863 Elf32_Word map;
6864 unsigned char buf[sizeof (Elf32_Word)];
6865 }
6866 rld_map;
6867 #endif
6868 #ifdef DT_MIPS_RLD_MAP
6869 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6870 {
6871 if (linux_read_memory (dyn->d_un.d_val,
6872 rld_map.buf, sizeof (rld_map.buf)) == 0)
6873 return rld_map.map;
6874 else
6875 break;
6876 }
6877 #endif /* DT_MIPS_RLD_MAP */
6878 #ifdef DT_MIPS_RLD_MAP_REL
6879 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6880 {
6881 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6882 rld_map.buf, sizeof (rld_map.buf)) == 0)
6883 return rld_map.map;
6884 else
6885 break;
6886 }
6887 #endif /* DT_MIPS_RLD_MAP_REL */
6888
6889 if (dyn->d_tag == DT_DEBUG && map == -1)
6890 map = dyn->d_un.d_val;
6891
6892 if (dyn->d_tag == DT_NULL)
6893 break;
6894 }
6895
6896 dynamic_memaddr += dyn_size;
6897 }
6898
6899 return map;
6900 }
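/* Illustration only: a hypothetical sketch of how the same DT_DEBUG
   lookup reads when a dynamically linked program walks its own
   _DYNAMIC array (declared in <link.h>), instead of reading another
   process's memory as get_r_debug does above.  */
#if 0
#include <link.h>
#include <stddef.h>

static struct r_debug *
own_r_debug (void)
{
  ElfW(Dyn) *dyn;

  /* The dynamic loader stores the address of its r_debug structure
     in DT_DEBUG's d_ptr.  */
  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return (struct r_debug *) dyn->d_un.d_ptr;

  return NULL;
}
#endif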
6901
6902 /* Read one pointer from MEMADDR in the inferior. */
6903
6904 static int
6905 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6906 {
6907 int ret;
6908
6909 /* Go through a union so this works on either big or little endian
6910 hosts, when the inferior's pointer size is smaller than the size
6911 of CORE_ADDR. It is assumed the inferior's endianness is the
6912 same as the superior's. */
6913 union
6914 {
6915 CORE_ADDR core_addr;
6916 unsigned int ui;
6917 unsigned char uc;
6918 } addr;
6919
6920 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6921 if (ret == 0)
6922 {
6923 if (ptr_size == sizeof (CORE_ADDR))
6924 *ptr = addr.core_addr;
6925 else if (ptr_size == sizeof (unsigned int))
6926 *ptr = addr.ui;
6927 else
6928 gdb_assert_not_reached ("unhandled pointer size");
6929 }
6930 return ret;
6931 }
6932
6933 struct link_map_offsets
6934 {
6935 /* Offset of r_debug.r_version. */
6936 int r_version_offset;
6937
6938 /* Offset of r_debug.r_map. */
6939 int r_map_offset;
6940
6941 /* Offset to l_addr field in struct link_map. */
6942 int l_addr_offset;
6943
6944 /* Offset to l_name field in struct link_map. */
6945 int l_name_offset;
6946
6947 /* Offset to l_ld field in struct link_map. */
6948 int l_ld_offset;
6949
6950 /* Offset to l_next field in struct link_map. */
6951 int l_next_offset;
6952
6953 /* Offset to l_prev field in struct link_map. */
6954 int l_prev_offset;
6955 };
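/* For reference, the offset tables below follow from glibc's
   declarations: in the 64-bit case the `int r_version' occupies bytes
   0-3 of struct r_debug, but the following `struct link_map *r_map'
   pointer is 8-byte aligned, so r_map lands at offset 8; in struct
   link_map the l_addr, l_name, l_ld, l_next and l_prev fields are
   each pointer-sized, hence offsets 0, 8, 16, 24 and 32.  */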
6956
6957 /* Construct qXfer:libraries-svr4:read reply. */
6958
6959 static int
6960 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6961 unsigned const char *writebuf,
6962 CORE_ADDR offset, int len)
6963 {
6964 char *document;
6965 unsigned document_len;
6966 struct process_info_private *const priv = current_process ()->priv;
6967 char filename[PATH_MAX];
6968 int pid, is_elf64;
6969
6970 static const struct link_map_offsets lmo_32bit_offsets =
6971 {
6972 0, /* r_version offset. */
6973 4, /* r_debug.r_map offset. */
6974 0, /* l_addr offset in link_map. */
6975 4, /* l_name offset in link_map. */
6976 8, /* l_ld offset in link_map. */
6977 12, /* l_next offset in link_map. */
6978 16 /* l_prev offset in link_map. */
6979 };
6980
6981 static const struct link_map_offsets lmo_64bit_offsets =
6982 {
6983 0, /* r_version offset. */
6984 8, /* r_debug.r_map offset. */
6985 0, /* l_addr offset in link_map. */
6986 8, /* l_name offset in link_map. */
6987 16, /* l_ld offset in link_map. */
6988 24, /* l_next offset in link_map. */
6989 32 /* l_prev offset in link_map. */
6990 };
6991 const struct link_map_offsets *lmo;
6992 unsigned int machine;
6993 int ptr_size;
6994 CORE_ADDR lm_addr = 0, lm_prev = 0;
6995 int allocated = 1024;
6996 char *p;
6997 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6998 int header_done = 0;
6999
7000 if (writebuf != NULL)
7001 return -2;
7002 if (readbuf == NULL)
7003 return -1;
7004
7005 pid = lwpid_of (current_thread);
7006 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7007 is_elf64 = elf_64_file_p (filename, &machine);
7008 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7009 ptr_size = is_elf64 ? 8 : 4;
7010
7011 while (annex[0] != '\0')
7012 {
7013 const char *sep;
7014 CORE_ADDR *addrp;
7015 int len;
7016
7017 sep = strchr (annex, '=');
7018 if (sep == NULL)
7019 break;
7020
7021 len = sep - annex;
7022 if (len == 5 && startswith (annex, "start"))
7023 addrp = &lm_addr;
7024 else if (len == 4 && startswith (annex, "prev"))
7025 addrp = &lm_prev;
7026 else
7027 {
7028 annex = strchr (sep, ';');
7029 if (annex == NULL)
7030 break;
7031 annex++;
7032 continue;
7033 }
7034
7035 annex = decode_address_to_semicolon (addrp, sep + 1);
7036 }
7037
7038 if (lm_addr == 0)
7039 {
7040 int r_version = 0;
7041
7042 if (priv->r_debug == 0)
7043 priv->r_debug = get_r_debug (pid, is_elf64);
7044
7045 /* We failed to find DT_DEBUG. This situation will not change
7046 for this inferior, so do not retry. Report it to GDB as
7047 E01; see GDB's solib-svr4.c for the reasons. */
7048 if (priv->r_debug == (CORE_ADDR) -1)
7049 return -1;
7050
7051 if (priv->r_debug != 0)
7052 {
7053 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7054 (unsigned char *) &r_version,
7055 sizeof (r_version)) != 0
7056 || r_version != 1)
7057 {
7058 warning ("unexpected r_debug version %d", r_version);
7059 }
7060 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7061 &lm_addr, ptr_size) != 0)
7062 {
7063 warning ("unable to read r_map from 0x%lx",
7064 (long) priv->r_debug + lmo->r_map_offset);
7065 }
7066 }
7067 }
7068
7069 document = (char *) xmalloc (allocated);
7070 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7071 p = document + strlen (document);
7072
7073 while (lm_addr
7074 && read_one_ptr (lm_addr + lmo->l_name_offset,
7075 &l_name, ptr_size) == 0
7076 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7077 &l_addr, ptr_size) == 0
7078 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7079 &l_ld, ptr_size) == 0
7080 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7081 &l_prev, ptr_size) == 0
7082 && read_one_ptr (lm_addr + lmo->l_next_offset,
7083 &l_next, ptr_size) == 0)
7084 {
7085 unsigned char libname[PATH_MAX];
7086
7087 if (lm_prev != l_prev)
7088 {
7089 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7090 (long) lm_prev, (long) l_prev);
7091 break;
7092 }
7093
7094 /* Ignore the first entry even if it has a valid name, as it
7095 corresponds to the main executable. The first entry should not be
7096 skipped if the dynamic loader was loaded late by a static executable
7097 (see the solib-svr4.c parameter ignore_first). But in that case the
7098 main executable does not have PT_DYNAMIC present, and this function
7099 has already exited above due to a failed get_r_debug. */
7100 if (lm_prev == 0)
7101 {
7102 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7103 p = p + strlen (p);
7104 }
7105 else
7106 {
7107 /* Not checking for error because reading may stop before
7108 we've got PATH_MAX worth of characters. */
7109 libname[0] = '\0';
7110 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7111 libname[sizeof (libname) - 1] = '\0';
7112 if (libname[0] != '\0')
7113 {
7114 /* 6x the size for xml_escape_text below. */
7115 size_t len = 6 * strlen ((char *) libname);
7116 char *name;
7117
7118 if (!header_done)
7119 {
7120 /* Terminate `<library-list-svr4'. */
7121 *p++ = '>';
7122 header_done = 1;
7123 }
7124
7125 while (allocated < p - document + len + 200)
7126 {
7127 /* Expand to guarantee sufficient storage. */
7128 uintptr_t document_len = p - document;
7129
7130 document = (char *) xrealloc (document, 2 * allocated);
7131 allocated *= 2;
7132 p = document + document_len;
7133 }
7134
7135 name = xml_escape_text ((char *) libname);
7136 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7137 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7138 name, (unsigned long) lm_addr,
7139 (unsigned long) l_addr, (unsigned long) l_ld);
7140 free (name);
7141 }
7142 }
7143
7144 lm_prev = lm_addr;
7145 lm_addr = l_next;
7146 }
7147
7148 if (!header_done)
7149 {
7150 /* Empty list; terminate `<library-list-svr4'. */
7151 strcpy (p, "/>");
7152 }
7153 else
7154 strcpy (p, "</library-list-svr4>");
7155
7156 document_len = strlen (document);
7157 if (offset < document_len)
7158 document_len -= offset;
7159 else
7160 document_len = 0;
7161 if (len > document_len)
7162 len = document_len;
7163
7164 memcpy (readbuf, document + offset, len);
7165 xfree (document);
7166
7167 return len;
7168 }
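/* For a typical dynamically linked inferior, the reply document built
   above looks roughly like this (whitespace added and addresses
   illustrative; the real document is a single line):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ff7000"
            l_addr="0x7ffff7a0d000" l_ld="0x7ffff7dcdb80"/>
   </library-list-svr4>

   GDB then pages through it using the OFFSET/LEN window applied at
   the end of the function.  */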
7169
7170 #ifdef HAVE_LINUX_BTRACE
7171
7172 /* See to_disable_btrace target method. */
7173
7174 static int
7175 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7176 {
7177 enum btrace_error err;
7178
7179 err = linux_disable_btrace (tinfo);
7180 return (err == BTRACE_ERR_NONE ? 0 : -1);
7181 }
7182
7183 /* Encode an Intel Processor Trace configuration. */
7184
7185 static void
7186 linux_low_encode_pt_config (struct buffer *buffer,
7187 const struct btrace_data_pt_config *config)
7188 {
7189 buffer_grow_str (buffer, "<pt-config>\n");
7190
7191 switch (config->cpu.vendor)
7192 {
7193 case CV_INTEL:
7194 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7195 "model=\"%u\" stepping=\"%u\"/>\n",
7196 config->cpu.family, config->cpu.model,
7197 config->cpu.stepping);
7198 break;
7199
7200 default:
7201 break;
7202 }
7203
7204 buffer_grow_str (buffer, "</pt-config>\n");
7205 }
7206
7207 /* Encode a raw buffer. */
7208
7209 static void
7210 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7211 unsigned int size)
7212 {
7213 if (size == 0)
7214 return;
7215
7216 /* We use hex encoding - see common/rsp-low.h. */
7217 buffer_grow_str (buffer, "<raw>\n");
7218
7219 while (size-- > 0)
7220 {
7221 char elem[2];
7222
7223 elem[0] = tohex ((*data >> 4) & 0xf);
7224 elem[1] = tohex (*data++ & 0xf);
7225
7226 buffer_grow (buffer, elem, 2);
7227 }
7228
7229 buffer_grow_str (buffer, "</raw>\n");
7230 }
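/* E.g. the two raw bytes 0x4d 0x5a are emitted as the four characters
   "4d5a" between the <raw> tags.  */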
7231
7232 /* See to_read_btrace target method. */
7233
7234 static int
7235 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7236 enum btrace_read_type type)
7237 {
7238 struct btrace_data btrace;
7239 struct btrace_block *block;
7240 enum btrace_error err;
7241 int i;
7242
7243 btrace_data_init (&btrace);
7244
7245 err = linux_read_btrace (&btrace, tinfo, type);
7246 if (err != BTRACE_ERR_NONE)
7247 {
7248 if (err == BTRACE_ERR_OVERFLOW)
7249 buffer_grow_str0 (buffer, "E.Overflow.");
7250 else
7251 buffer_grow_str0 (buffer, "E.Generic Error.");
7252
7253 goto err;
7254 }
7255
7256 switch (btrace.format)
7257 {
7258 case BTRACE_FORMAT_NONE:
7259 buffer_grow_str0 (buffer, "E.No Trace.");
7260 goto err;
7261
7262 case BTRACE_FORMAT_BTS:
7263 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7264 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7265
7266 for (i = 0;
7267 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7268 i++)
7269 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7270 paddress (block->begin), paddress (block->end));
7271
7272 buffer_grow_str0 (buffer, "</btrace>\n");
7273 break;
7274
7275 case BTRACE_FORMAT_PT:
7276 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7277 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7278 buffer_grow_str (buffer, "<pt>\n");
7279
7280 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7281
7282 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7283 btrace.variant.pt.size);
7284
7285 buffer_grow_str (buffer, "</pt>\n");
7286 buffer_grow_str0 (buffer, "</btrace>\n");
7287 break;
7288
7289 default:
7290 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7291 goto err;
7292 }
7293
7294 btrace_data_fini (&btrace);
7295 return 0;
7296
7297 err:
7298 btrace_data_fini (&btrace);
7299 return -1;
7300 }
7301
7302 /* See to_btrace_conf target method. */
7303
7304 static int
7305 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7306 struct buffer *buffer)
7307 {
7308 const struct btrace_config *conf;
7309
7310 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7311 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7312
7313 conf = linux_btrace_conf (tinfo);
7314 if (conf != NULL)
7315 {
7316 switch (conf->format)
7317 {
7318 case BTRACE_FORMAT_NONE:
7319 break;
7320
7321 case BTRACE_FORMAT_BTS:
7322 buffer_xml_printf (buffer, "<bts");
7323 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7324 buffer_xml_printf (buffer, " />\n");
7325 break;
7326
7327 case BTRACE_FORMAT_PT:
7328 buffer_xml_printf (buffer, "<pt");
7329 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7330 buffer_xml_printf (buffer, "/>\n");
7331 break;
7332 }
7333 }
7334
7335 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7336 return 0;
7337 }
7338 #endif /* HAVE_LINUX_BTRACE */
7339
7340 /* See nat/linux-nat.h. */
7341
7342 ptid_t
7343 current_lwp_ptid (void)
7344 {
7345 return ptid_of (current_thread);
7346 }
7347
7348 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7349
7350 static int
7351 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7352 {
7353 if (the_low_target.breakpoint_kind_from_pc != NULL)
7354 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7355 else
7356 return default_breakpoint_kind_from_pc (pcptr);
7357 }
7358
7359 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7360
7361 static const gdb_byte *
7362 linux_sw_breakpoint_from_kind (int kind, int *size)
7363 {
7364 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7365
7366 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7367 }
7368
7369 /* Implementation of the target_ops method
7370 "breakpoint_kind_from_current_state". */
7371
7372 static int
7373 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7374 {
7375 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7376 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7377 else
7378 return linux_breakpoint_kind_from_pc (pcptr);
7379 }
7380
7381 /* Default implementation of linux_target_ops method "set_pc" for
7382 a 32-bit pc register that is literally named "pc". */
7383
7384 void
7385 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7386 {
7387 uint32_t newpc = pc;
7388
7389 supply_register_by_name (regcache, "pc", &newpc);
7390 }
7391
7392 /* Default implementation of linux_target_ops method "get_pc" for
7393 a 32-bit pc register that is literally named "pc". */
7394
7395 CORE_ADDR
7396 linux_get_pc_32bit (struct regcache *regcache)
7397 {
7398 uint32_t pc;
7399
7400 collect_register_by_name (regcache, "pc", &pc);
7401 if (debug_threads)
7402 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7403 return pc;
7404 }
7405
7406 /* Default implementation of linux_target_ops method "set_pc" for
7407 a 64-bit pc register that is literally named "pc". */
7408
7409 void
7410 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7411 {
7412 uint64_t newpc = pc;
7413
7414 supply_register_by_name (regcache, "pc", &newpc);
7415 }
7416
7417 /* Default implementation of linux_target_ops method "get_pc" for
7418 a 64-bit pc register that is literally named "pc". */
7419
7420 CORE_ADDR
7421 linux_get_pc_64bit (struct regcache *regcache)
7422 {
7423 uint64_t pc;
7424
7425 collect_register_by_name (regcache, "pc", &pc);
7426 if (debug_threads)
7427 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7428 return pc;
7429 }
7430
7431
7432 static struct target_ops linux_target_ops = {
7433 linux_create_inferior,
7434 linux_post_create_inferior,
7435 linux_attach,
7436 linux_kill,
7437 linux_detach,
7438 linux_mourn,
7439 linux_join,
7440 linux_thread_alive,
7441 linux_resume,
7442 linux_wait,
7443 linux_fetch_registers,
7444 linux_store_registers,
7445 linux_prepare_to_access_memory,
7446 linux_done_accessing_memory,
7447 linux_read_memory,
7448 linux_write_memory,
7449 linux_look_up_symbols,
7450 linux_request_interrupt,
7451 linux_read_auxv,
7452 linux_supports_z_point_type,
7453 linux_insert_point,
7454 linux_remove_point,
7455 linux_stopped_by_sw_breakpoint,
7456 linux_supports_stopped_by_sw_breakpoint,
7457 linux_stopped_by_hw_breakpoint,
7458 linux_supports_stopped_by_hw_breakpoint,
7459 linux_supports_hardware_single_step,
7460 linux_stopped_by_watchpoint,
7461 linux_stopped_data_address,
7462 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7463 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7464 && defined(PT_TEXT_END_ADDR)
7465 linux_read_offsets,
7466 #else
7467 NULL,
7468 #endif
7469 #ifdef USE_THREAD_DB
7470 thread_db_get_tls_address,
7471 #else
7472 NULL,
7473 #endif
7474 linux_qxfer_spu,
7475 hostio_last_error_from_errno,
7476 linux_qxfer_osdata,
7477 linux_xfer_siginfo,
7478 linux_supports_non_stop,
7479 linux_async,
7480 linux_start_non_stop,
7481 linux_supports_multi_process,
7482 linux_supports_fork_events,
7483 linux_supports_vfork_events,
7484 linux_supports_exec_events,
7485 linux_handle_new_gdb_connection,
7486 #ifdef USE_THREAD_DB
7487 thread_db_handle_monitor_command,
7488 #else
7489 NULL,
7490 #endif
7491 linux_common_core_of_thread,
7492 linux_read_loadmap,
7493 linux_process_qsupported,
7494 linux_supports_tracepoints,
7495 linux_read_pc,
7496 linux_write_pc,
7497 linux_thread_stopped,
7498 NULL,
7499 linux_pause_all,
7500 linux_unpause_all,
7501 linux_stabilize_threads,
7502 linux_install_fast_tracepoint_jump_pad,
7503 linux_emit_ops,
7504 linux_supports_disable_randomization,
7505 linux_get_min_fast_tracepoint_insn_len,
7506 linux_qxfer_libraries_svr4,
7507 linux_supports_agent,
7508 #ifdef HAVE_LINUX_BTRACE
7509 linux_supports_btrace,
7510 linux_enable_btrace,
7511 linux_low_disable_btrace,
7512 linux_low_read_btrace,
7513 linux_low_btrace_conf,
7514 #else
7515 NULL,
7516 NULL,
7517 NULL,
7518 NULL,
7519 NULL,
7520 #endif
7521 linux_supports_range_stepping,
7522 linux_proc_pid_to_exec_file,
7523 linux_mntns_open_cloexec,
7524 linux_mntns_unlink,
7525 linux_mntns_readlink,
7526 linux_breakpoint_kind_from_pc,
7527 linux_sw_breakpoint_from_kind,
7528 linux_proc_tid_get_name,
7529 linux_breakpoint_kind_from_current_state,
7530 linux_supports_software_single_step,
7531 linux_supports_catch_syscall,
7532 linux_get_ipa_tdesc_idx,
7533 };
7534
7535 #ifdef HAVE_LINUX_REGSETS
7536 void
7537 initialize_regsets_info (struct regsets_info *info)
7538 {
7539 for (info->num_regsets = 0;
7540 info->regsets[info->num_regsets].size >= 0;
7541 info->num_regsets++)
7542 ;
7543 }
7544 #endif
7545
7546 void
7547 initialize_low (void)
7548 {
7549 struct sigaction sigchld_action;
7550
7551 memset (&sigchld_action, 0, sizeof (sigchld_action));
7552 set_target_ops (&linux_target_ops);
7553
7554 linux_ptrace_init_warnings ();
7555
7556 sigchld_action.sa_handler = sigchld_handler;
7557 sigemptyset (&sigchld_action.sa_mask);
7558 sigchld_action.sa_flags = SA_RESTART;
7559 sigaction (SIGCHLD, &sigchld_action, NULL);
7560
7561 initialize_low_arch ();
7562
7563 linux_check_ptrace_features ();
7564 }