/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
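
/* Illustrative sketch (not part of the original file): on the few
   MMU-less targets that define the constants above, a tracer can read
   the inferior's text/data segment base addresses out of the kernel's
   user area with PTRACE_PEEKUSER, roughly like this (hypothetical
   helper, error handling minimal; PID is assumed to be an attached,
   stopped tracee).  */
#if 0
#include <sys/ptrace.h>
#include <errno.h>

static int
example_read_offsets (int pid, long *text, long *data)
{
  /* PEEK requests return the value, so errno distinguishes -1 data
     from failure.  */
  errno = 0;
  *text = ptrace (PTRACE_PEEKUSER, pid, (void *) PT_TEXT_ADDR, 0);
  if (errno != 0)
    return -1;
  *data = ptrace (PTRACE_PEEKUSER, pid, (void *) PT_DATA_ADDR, 0);
  return errno != 0 ? -1 : 0;
}
#endif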

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
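
/* Illustrative sketch (not part of the original file): the structs
   above are the raw record layout of /proc/PID/auxv, so a 64-bit
   reader might walk the vector like this (hypothetical helper; the
   file name and the AT_NULL (0) terminator are standard Linux/ELF).  */
#if 0
#include <stdio.h>

static void
example_dump_auxv (int pid)
{
  char path[64];
  Elf64_auxv_t ent;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/auxv", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;
  /* Entries are fixed-size records, terminated by AT_NULL (0).  */
  while (fread (&ent, sizeof (ent), 1, f) == 1 && ent.a_type != 0)
    printf ("type=%llu val=0x%llx\n",
	    (unsigned long long) ent.a_type,
	    (unsigned long long) ent.a_un.a_val);
  fclose (f);
}
#endif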

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
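
/* Illustrative sketch (not part of the original file): how the
   stopped_pids list above is typically used.  When waitpid reports a
   stop for a pid we don't know yet, it is parked on the list; when a
   PTRACE_EVENT_FORK/VFORK/CLONE event later names that pid as a new
   child, the parked status is claimed (see handle_extended_wait).  */
#if 0
/* Producer side, in a wait loop, for an unknown stopped PID.  */
add_to_pid_list (&stopped_pids, pid, status);

/* Consumer side, once a fork/clone event tells us who NEW_PID is.  */
int status;
if (pull_pid_from_list (&stopped_pids, new_pid, &status))
  {
    /* The initial stop was already collected; STATUS has it.  */
  }
#endif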

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as a waitable file in
   the event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
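
/* Illustrative sketch (not part of the original file): the pipe above
   implements the classic self-pipe trick.  When a target event is
   ready, one byte is written to the write end; the event loop polls
   the read end, so the write wakes it up.  Hedged pseudo-helper,
   loosely modeled on the async_file_mark function declared later in
   this file.  */
#if 0
static void
example_mark_event (void)
{
  if (target_is_async_p ())
    {
      /* The byte's value is irrelevant; only readability matters.  */
      if (write (linux_event_pipe[1], "+", 1) < 0)
	; /* Errors ignored; a full pipe already means "marked".  */
    }
}
#endif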

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
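
/* Illustrative sketch (not part of the original file): typical use of
   the helper above when deciding whether a tracee runs a 64-bit
   executable (PID is a hypothetical attached process).  */
#if 0
unsigned int machine;

if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1)
  { /* 64-bit inferior; MACHINE holds the ELF e_machine value.  */ }
#endif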

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}
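
/* Illustrative sketch (not part of the original file): a low-target
   get_syscall_trapinfo callback typically just reads the syscall
   number register out of the regcache.  Hedged example loosely
   modeled on the x86-64 convention, where the number is preserved in
   orig_rax across the syscall stop; the register name is
   target-specific.  */
#if 0
static void
example_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  unsigned long long nr;

  /* Fetch the raw 64-bit register value by name from the regcache.  */
  collect_register_by_name (regcache, "orig_rax", &nr);
  *sysno = (int) nr;
}
#endif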

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}
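
/* Illustrative note (not part of the original file): the PC rewind
   above is plain breakpoint-address arithmetic.  On x86, for example,
   the int3 breakpoint instruction is one byte and the trap reports
   the PC *after* it, so decr_pc_after_break is 1:

     insn at 0x1000: int3        <- breakpoint planted here
     reported PC:    0x1001
     sw_breakpoint_pc = 0x1001 - 1 = 0x1000

   Architectures whose traps report the breakpoint address itself use
   decr_pc_after_break == 0, in which case pc == sw_breakpoint_pc and
   the set_pc call is skipped.  */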

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */
static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      restore_original_signals_state ();

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
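
/* Illustrative sketch (not part of the original file): the "pending
   SIGSTOP" discussion above boils down to the classic attach
   sequence; a minimal tracer would collect the initial stop with
   waitpid before issuing further ptrace requests (hypothetical
   stand-alone example, error handling omitted).  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static int
example_attach_and_sync (pid_t lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    return -1;
  /* PTRACE_ATTACH queues a SIGSTOP; the tracee isn't actually in a
     ptrace-stop until waitpid reports it.  */
  if (waitpid (lwpid, &status, __WALL) != lwpid || !WIFSTOPPED (status))
    return -1;
  return 0;
}
#endif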

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (ptid_get_pid (entry->id) == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */
1888
1889 static int
1890 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1891 {
1892 struct iterate_over_lwps_args *args
1893 = (struct iterate_over_lwps_args *) args_p;
1894
1895 if (ptid_match (entry->id, args->filter))
1896 {
1897 struct thread_info *thr = (struct thread_info *) entry;
1898 struct lwp_info *lwp = get_thread_lwp (thr);
1899
1900 return (*args->callback) (lwp, args->data);
1901 }
1902
1903 return 0;
1904 }
1905
1906 /* See nat/linux-nat.h. */
1907
1908 struct lwp_info *
1909 iterate_over_lwps (ptid_t filter,
1910 iterate_over_lwps_ftype callback,
1911 void *data)
1912 {
1913 struct iterate_over_lwps_args args = {filter, callback, data};
1914 struct inferior_list_entry *entry;
1915
1916 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1917 if (entry == NULL)
1918 return NULL;
1919
1920 return get_thread_lwp ((struct thread_info *) entry);
1921 }
1922
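/* Usage sketch (illustrative only; `pid' is an assumed, valid process
   id). Bump the suspend count of every LWP of process PID; the
   callback returns 0 to keep iterating, while a nonzero return would
   stop the walk and make iterate_over_lwps return that LWP:

     static int
     suspend_lwp_cb (struct lwp_info *lwp, void *data)
     {
       lwp_suspended_inc (lwp);
       return 0;
     }

     iterate_over_lwps (pid_to_ptid (pid), suspend_lwp_cb, NULL);
*/
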
1923 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1924 their exits until all other threads in the group have exited. */
1925
1926 static void
1927 check_zombie_leaders (void)
1928 {
1929 struct process_info *proc, *tmp;
1930
1931 ALL_PROCESSES (proc, tmp)
1932 {
1933 pid_t leader_pid = pid_of (proc);
1934 struct lwp_info *leader_lp;
1935
1936 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1937
1938 if (debug_threads)
1939 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1940 "num_lwps=%d, zombie=%d\n",
1941 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1942 linux_proc_pid_is_zombie (leader_pid));
1943
1944 if (leader_lp != NULL && !leader_lp->stopped
1945 /* Check if there are other threads in the group, as we may
1946 have raced with the inferior simply exiting. */
1947 && !last_thread_of_process_p (leader_pid)
1948 && linux_proc_pid_is_zombie (leader_pid))
1949 {
1950 /* A leader zombie can mean one of two things:
1951
1952 - It exited, and there's an exit status pending
1953 available, or only the leader exited (not the whole
1954 program). In the latter case, we can't waitpid the
1955 leader's exit status until all other threads are gone.
1956
1957 - There are 3 or more threads in the group, and a thread
1958 other than the leader exec'd. On an exec, the Linux
1959 kernel destroys all other threads (except the execing
1960 one) in the thread group, and resets the execing thread's
1961 tid to the tgid. No exit notification is sent for the
1962 execing thread -- from the ptracer's perspective, it
1963 appears as though the execing thread just vanishes.
1964 Until we reap all other threads except the leader and the
1965 execing thread, the leader will be zombie, and the
1966 execing thread will be in `D (disc sleep)'. As soon as
1967 all other threads are reaped, the execing thread changes
1968 its tid to the tgid, and the previous (zombie) leader
1969 vanishes, giving place to the "new" leader. We could try
1970 distinguishing the exit and exec cases, by waiting once
1971 more, and seeing if something comes out, but it doesn't
1972 sound useful. The previous leader _does_ go away, and
1973 we'll re-add the new one once we see the exec event
1974 (which is just the same as what would happen if the
1975 previous leader did exit voluntarily before some other
1976 thread execs). */
1977
1978 if (debug_threads)
1979 fprintf (stderr,
1980 "CZL: Thread group leader %d zombie "
1981 "(it exited, or another thread execd).\n",
1982 leader_pid);
1983
1984 delete_lwp (leader_lp);
1985 }
1986 }
1987 }
1988
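/* For reference, a minimal sketch of the kind of /proc probe
   linux_proc_pid_is_zombie performs (the real implementation lives
   in nat/linux-procfs.c). The state letter in /proc/PID/stat follows
   the command name; since the command name may itself contain ')'
   and spaces, scan from the last ')':

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], buf[256];
       FILE *f;
       char *rp;

       snprintf (path, sizeof path, "/proc/%d/stat", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
         return 0;
       if (fgets (buf, sizeof buf, f) == NULL)
         buf[0] = '\0';
       fclose (f);
       rp = strrchr (buf, ')');
       return rp != NULL && rp[1] == ' ' && rp[2] == 'Z';
     }
*/
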
1989 /* Callback for `find_inferior'. Returns the first LWP that is not
1990 stopped. ARG is a PTID filter. */
1991
1992 static int
1993 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1994 {
1995 struct thread_info *thr = (struct thread_info *) entry;
1996 struct lwp_info *lwp;
1997 ptid_t filter = *(ptid_t *) arg;
1998
1999 if (!ptid_match (ptid_of (thr), filter))
2000 return 0;
2001
2002 lwp = get_thread_lwp (thr);
2003 if (!lwp->stopped)
2004 return 1;
2005
2006 return 0;
2007 }
2008
2009 /* Increment LWP's suspend count. */
2010
2011 static void
2012 lwp_suspended_inc (struct lwp_info *lwp)
2013 {
2014 lwp->suspended++;
2015
2016 if (debug_threads && lwp->suspended > 4)
2017 {
2018 struct thread_info *thread = get_lwp_thread (lwp);
2019
2020 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2021 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2022 }
2023 }
2024
2025 /* Decrement LWP's suspend count. */
2026
2027 static void
2028 lwp_suspended_decr (struct lwp_info *lwp)
2029 {
2030 lwp->suspended--;
2031
2032 if (lwp->suspended < 0)
2033 {
2034 struct thread_info *thread = get_lwp_thread (lwp);
2035
2036 internal_error (__FILE__, __LINE__,
2037 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2038 lwp->suspended);
2039 }
2040 }
2041
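/* Typical use (see handle_tracepoints below): pin the event LWP
   across code that may pause and unpause all threads, so that the
   unpause does not set it running again:

     lwp_suspended_inc (lwp);
     ... work that may stop_all_lwps / unstop_all_lwps ...
     lwp_suspended_decr (lwp);
*/
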
2042 /* This function should only be called if the LWP got a SIGTRAP.
2043
2044 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2045 event was handled, 0 otherwise. */
2046
2047 static int
2048 handle_tracepoints (struct lwp_info *lwp)
2049 {
2050 struct thread_info *tinfo = get_lwp_thread (lwp);
2051 int tpoint_related_event = 0;
2052
2053 gdb_assert (lwp->suspended == 0);
2054
2055 /* If this tracepoint hit causes a tracing stop, we'll immediately
2056 uninsert tracepoints. To do this, we temporarily pause all
2057 threads, unpatch away, and then unpause threads. We need to make
2058 sure the unpausing doesn't resume LWP too. */
2059 lwp_suspended_inc (lwp);
2060
2061 /* And we need to be sure that any all-threads-stopping doesn't try
2062 to move threads out of the jump pads, as it could deadlock the
2063 inferior (LWP could be in the jump pad, maybe even holding the
2064 lock). */
2065
2066 /* Do any necessary step collect actions. */
2067 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2068
2069 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2070
2071 /* See if we just hit a tracepoint and do its main collect
2072 actions. */
2073 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2074
2075 lwp_suspended_decr (lwp);
2076
2077 gdb_assert (lwp->suspended == 0);
2078 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2079
2080 if (tpoint_related_event)
2081 {
2082 if (debug_threads)
2083 debug_printf ("got a tracepoint event\n");
2084 return 1;
2085 }
2086
2087 return 0;
2088 }
2089
2090 /* Convenience wrapper. Returns true if LWP is presently collecting a
2091 fast tracepoint. */
2092
2093 static int
2094 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2095 struct fast_tpoint_collect_status *status)
2096 {
2097 CORE_ADDR thread_area;
2098 struct thread_info *thread = get_lwp_thread (lwp);
2099
2100 if (the_low_target.get_thread_area == NULL)
2101 return 0;
2102
2103 /* Get the thread area address. This is used to recognize which
2104 thread is which when tracing with the in-process agent library.
2105 We don't read anything from the address, and treat it as opaque;
2106 it's the address itself that we assume is unique per-thread. */
2107 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2108 return 0;
2109
2110 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2111 }
2112
2113 /* We resume in the caller because we want to be able to pass
2114 lwp->status_pending as WSTAT, and we need to clear
2115 status_pending_p before resuming; otherwise linux_resume_one_lwp
2116 refuses to resume. */
2117
2118 static int
2119 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2120 {
2121 struct thread_info *saved_thread;
2122
2123 saved_thread = current_thread;
2124 current_thread = get_lwp_thread (lwp);
2125
2126 if ((wstat == NULL
2127 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2128 && supports_fast_tracepoints ()
2129 && agent_loaded_p ())
2130 {
2131 struct fast_tpoint_collect_status status;
2132 int r;
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of the "
2136 "jump pad.\n",
2137 lwpid_of (current_thread));
2138
2139 r = linux_fast_tracepoint_collecting (lwp, &status);
2140
2141 if (wstat == NULL
2142 || (WSTOPSIG (*wstat) != SIGILL
2143 && WSTOPSIG (*wstat) != SIGFPE
2144 && WSTOPSIG (*wstat) != SIGSEGV
2145 && WSTOPSIG (*wstat) != SIGBUS))
2146 {
2147 lwp->collecting_fast_tracepoint = r;
2148
2149 if (r != 0)
2150 {
2151 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2152 {
2153 /* Haven't executed the original instruction yet.
2154 Set breakpoint there, and wait till it's hit,
2155 then single-step until exiting the jump pad. */
2156 lwp->exit_jump_pad_bkpt
2157 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2158 }
2159
2160 if (debug_threads)
2161 debug_printf ("Checking whether LWP %ld needs to move out of "
2162 "the jump pad...it does\n",
2163 lwpid_of (current_thread));
2164 current_thread = saved_thread;
2165
2166 return 1;
2167 }
2168 }
2169 else
2170 {
2171 /* If we get a synchronous signal while collecting, *and*
2172 while executing the (relocated) original instruction,
2173 reset the PC to point at the tpoint address, before
2174 reporting to GDB. Otherwise, it's an IPA lib bug: just
2175 report the signal to GDB, and pray for the best. */
2176
2177 lwp->collecting_fast_tracepoint = 0;
2178
2179 if (r != 0
2180 && (status.adjusted_insn_addr <= lwp->stop_pc
2181 && lwp->stop_pc < status.adjusted_insn_addr_end))
2182 {
2183 siginfo_t info;
2184 struct regcache *regcache;
2185
2186 /* The si_addr on a few signals references the address
2187 of the faulting instruction. Adjust that as
2188 well. */
2189 if ((WSTOPSIG (*wstat) == SIGILL
2190 || WSTOPSIG (*wstat) == SIGFPE
2191 || WSTOPSIG (*wstat) == SIGBUS
2192 || WSTOPSIG (*wstat) == SIGSEGV)
2193 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2194 (PTRACE_TYPE_ARG3) 0, &info) == 0
2195 /* Final check just to make sure we don't clobber
2196 the siginfo of non-kernel-sent signals. */
2197 && (uintptr_t) info.si_addr == lwp->stop_pc)
2198 {
2199 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2200 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2201 (PTRACE_TYPE_ARG3) 0, &info);
2202 }
2203
2204 regcache = get_thread_regcache (current_thread, 1);
2205 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2206 lwp->stop_pc = status.tpoint_addr;
2207
2208 /* Cancel any fast tracepoint lock this thread was
2209 holding. */
2210 force_unlock_trace_buffer ();
2211 }
2212
2213 if (lwp->exit_jump_pad_bkpt != NULL)
2214 {
2215 if (debug_threads)
2216 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2217 "stopping all threads momentarily.\n");
2218
2219 stop_all_lwps (1, lwp);
2220
2221 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2222 lwp->exit_jump_pad_bkpt = NULL;
2223
2224 unstop_all_lwps (1, lwp);
2225
2226 gdb_assert (lwp->suspended >= 0);
2227 }
2228 }
2229 }
2230
2231 if (debug_threads)
2232 debug_printf ("Checking whether LWP %ld needs to move out of the "
2233 "jump pad...no\n",
2234 lwpid_of (current_thread));
2235
2236 current_thread = saved_thread;
2237 return 0;
2238 }
2239
2240 /* Enqueue one signal in the "signals to report later when out of the
2241 jump pad" list. */
2242
2243 static void
2244 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2245 {
2246 struct pending_signals *p_sig;
2247 struct thread_info *thread = get_lwp_thread (lwp);
2248
2249 if (debug_threads)
2250 debug_printf ("Deferring signal %d for LWP %ld.\n",
2251 WSTOPSIG (*wstat), lwpid_of (thread));
2252
2253 if (debug_threads)
2254 {
2255 struct pending_signals *sig;
2256
2257 for (sig = lwp->pending_signals_to_report;
2258 sig != NULL;
2259 sig = sig->prev)
2260 debug_printf (" Already queued %d\n",
2261 sig->signal);
2262
2263 debug_printf (" (no more currently queued signals)\n");
2264 }
2265
2266 /* Don't enqueue non-RT signals if they are already in the deferred
2267 queue. (SIGSTOP being the easiest signal to see ending up here
2268 twice) */
2269 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2270 {
2271 struct pending_signals *sig;
2272
2273 for (sig = lwp->pending_signals_to_report;
2274 sig != NULL;
2275 sig = sig->prev)
2276 {
2277 if (sig->signal == WSTOPSIG (*wstat))
2278 {
2279 if (debug_threads)
2280 debug_printf ("Not requeuing already queued non-RT signal %d"
2281 " for LWP %ld\n",
2282 sig->signal,
2283 lwpid_of (thread));
2284 return;
2285 }
2286 }
2287 }
2288
2289 p_sig = XCNEW (struct pending_signals);
2290 p_sig->prev = lwp->pending_signals_to_report;
2291 p_sig->signal = WSTOPSIG (*wstat);
2292
2293 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2294 &p_sig->info);
2295
2296 lwp->pending_signals_to_report = p_sig;
2297 }
2298
2299 /* Dequeue one signal from the "signals to report later when out of
2300 the jump pad" list. */
2301
2302 static int
2303 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2304 {
2305 struct thread_info *thread = get_lwp_thread (lwp);
2306
2307 if (lwp->pending_signals_to_report != NULL)
2308 {
2309 struct pending_signals **p_sig;
2310
2311 p_sig = &lwp->pending_signals_to_report;
2312 while ((*p_sig)->prev != NULL)
2313 p_sig = &(*p_sig)->prev;
2314
2315 *wstat = W_STOPCODE ((*p_sig)->signal);
2316 if ((*p_sig)->info.si_signo != 0)
2317 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2318 &(*p_sig)->info);
2319 free (*p_sig);
2320 *p_sig = NULL;
2321
2322 if (debug_threads)
2323 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2324 WSTOPSIG (*wstat), lwpid_of (thread));
2325
2326 if (debug_threads)
2327 {
2328 struct pending_signals *sig;
2329
2330 for (sig = lwp->pending_signals_to_report;
2331 sig != NULL;
2332 sig = sig->prev)
2333 debug_printf (" Still queued %d\n",
2334 sig->signal);
2335
2336 debug_printf (" (no more queued signals)\n");
2337 }
2338
2339 return 1;
2340 }
2341
2342 return 0;
2343 }
2344
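/* Note on W_STOPCODE above: it manufactures a wait status that the
   standard macros decode again; with glibc it expands to
   ((sig) << 8 | 0x7f), so for example:

     int w = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (w) && WSTOPSIG (w) == SIGUSR1);
*/
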
2345 /* Fetch the possibly triggered data watchpoint info and store it in
2346 CHILD.
2347
2348 On some archs, like x86, that use debug registers to set
2349 watchpoints, the way to know which watched address trapped may be
2350 to check the register that is used to select which address to
2351 watch. The problem is that, between setting the watchpoint
2352 and reading back which data address trapped, the user may change
2353 the set of watchpoints, and, as a consequence, GDB changes the
2354 debug registers in the inferior. To avoid reading back a stale
2355 stopped-data-address when that happens, we cache in LP the fact
2356 that a watchpoint trapped, and the corresponding data address, as
2357 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2358 registers meanwhile, we have the cached data we can rely on. */
2359
2360 static int
2361 check_stopped_by_watchpoint (struct lwp_info *child)
2362 {
2363 if (the_low_target.stopped_by_watchpoint != NULL)
2364 {
2365 struct thread_info *saved_thread;
2366
2367 saved_thread = current_thread;
2368 current_thread = get_lwp_thread (child);
2369
2370 if (the_low_target.stopped_by_watchpoint ())
2371 {
2372 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2373
2374 if (the_low_target.stopped_data_address != NULL)
2375 child->stopped_data_address
2376 = the_low_target.stopped_data_address ();
2377 else
2378 child->stopped_data_address = 0;
2379 }
2380
2381 current_thread = saved_thread;
2382 }
2383
2384 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2385 }
2386
2387 /* Return the ptrace options that we want to try to enable. */
2388
2389 static int
2390 linux_low_ptrace_options (int attached)
2391 {
2392 int options = 0;
2393
2394 if (!attached)
2395 options |= PTRACE_O_EXITKILL;
2396
2397 if (report_fork_events)
2398 options |= PTRACE_O_TRACEFORK;
2399
2400 if (report_vfork_events)
2401 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2402
2403 if (report_exec_events)
2404 options |= PTRACE_O_TRACEEXEC;
2405
2406 options |= PTRACE_O_TRACESYSGOOD;
2407
2408 return options;
2409 }
2410
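/* Sketch of how these options reach the kernel (the real call site
   is linux_enable_event_reporting in nat/linux-ptrace.c; `lwpid' and
   `attached' are assumed values for an already-attached thread):

     int options = linux_low_ptrace_options (attached);

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);
*/
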
2411 /* Do low-level handling of the event, and check if we should go on
2412 and pass it to caller code. Return the affected LWP if we should,
2413 or NULL otherwise. */
2414
2415 static struct lwp_info *
2416 linux_low_filter_event (int lwpid, int wstat)
2417 {
2418 struct lwp_info *child;
2419 struct thread_info *thread;
2420 int have_stop_pc = 0;
2421
2422 child = find_lwp_pid (pid_to_ptid (lwpid));
2423
2424 /* Check for stop events reported by a process we didn't already
2425 know about - anything not already in our LWP list.
2426
2427 If we're expecting to receive stopped processes after
2428 fork, vfork, and clone events, then we'll just add the
2429 new one to our list and go back to waiting for the event
2430 to be reported - the stopped process might be returned
2431 from waitpid before or after the event is.
2432
2433 But note the case of a non-leader thread exec'ing after the
2434 leader having exited, and gone from our lists (because
2435 check_zombie_leaders deleted it). The non-leader thread
2436 changes its tid to the tgid. */
2437
2438 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2439 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2440 {
2441 ptid_t child_ptid;
2442
2443 /* A multi-thread exec after we had seen the leader exiting. */
2444 if (debug_threads)
2445 {
2446 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2447 " after exec.\n", lwpid);
2448 }
2449
2450 child_ptid = ptid_build (lwpid, lwpid, 0);
2451 child = add_lwp (child_ptid);
2452 child->stopped = 1;
2453 current_thread = child->thread;
2454 }
2455
2456 /* If we didn't find a process, one of two things presumably happened:
2457 - A process we started and then detached from has exited. Ignore it.
2458 - A process we are controlling has forked and the new child's stop
2459 was reported to us by the kernel. Save its PID. */
2460 if (child == NULL && WIFSTOPPED (wstat))
2461 {
2462 add_to_pid_list (&stopped_pids, lwpid, wstat);
2463 return NULL;
2464 }
2465 else if (child == NULL)
2466 return NULL;
2467
2468 thread = get_lwp_thread (child);
2469
2470 child->stopped = 1;
2471
2472 child->last_status = wstat;
2473
2474 /* Check if the thread has exited. */
2475 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2476 {
2477 if (debug_threads)
2478 debug_printf ("LLFE: %d exited.\n", lwpid);
2479
2480 if (finish_step_over (child))
2481 {
2482 /* Unsuspend all other LWPs, and set them back running again. */
2483 unsuspend_all_lwps (child);
2484 }
2485
2486 /* If there is at least one more LWP, then the exit signal was
2487 not the end of the debugged application and should be
2488 ignored, unless GDB wants to hear about thread exits. */
2489 if (report_thread_events
2490 || last_thread_of_process_p (pid_of (thread)))
2491 {
2492 /* Events are serialized to the GDB core, and we can't
2493 report this one right now. Leave the status pending for
2494 the next time we're able to report it. */
2495 mark_lwp_dead (child, wstat);
2496 return child;
2497 }
2498 else
2499 {
2500 delete_lwp (child);
2501 return NULL;
2502 }
2503 }
2504
2505 gdb_assert (WIFSTOPPED (wstat));
2506
2507 if (WIFSTOPPED (wstat))
2508 {
2509 struct process_info *proc;
2510
2511 /* Architecture-specific setup after inferior is running. */
2512 proc = find_process_pid (pid_of (thread));
2513 if (proc->tdesc == NULL)
2514 {
2515 if (proc->attached)
2516 {
2517 /* This needs to happen after we have attached to the
2518 inferior and it is stopped for the first time, but
2519 before we access any inferior registers. */
2520 linux_arch_setup_thread (thread);
2521 }
2522 else
2523 {
2524 /* The process is started, but GDBserver will do
2525 architecture-specific setup after the program stops at
2526 the first instruction. */
2527 child->status_pending_p = 1;
2528 child->status_pending = wstat;
2529 return child;
2530 }
2531 }
2532 }
2533
2534 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2535 {
2536 struct process_info *proc = find_process_pid (pid_of (thread));
2537 int options = linux_low_ptrace_options (proc->attached);
2538
2539 linux_enable_event_reporting (lwpid, options);
2540 child->must_set_ptrace_flags = 0;
2541 }
2542
2543 /* Always update syscall_state, even if it will be filtered later. */
2544 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2545 {
2546 child->syscall_state
2547 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2548 ? TARGET_WAITKIND_SYSCALL_RETURN
2549 : TARGET_WAITKIND_SYSCALL_ENTRY);
2550 }
2551 else
2552 {
2553 /* Almost all other ptrace-stops are known to be outside of system
2554 calls, with further exceptions in handle_extended_wait. */
2555 child->syscall_state = TARGET_WAITKIND_IGNORE;
2556 }
2557
2558 /* Be careful to not overwrite stop_pc until save_stop_reason is
2559 called. */
2560 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2561 && linux_is_extended_waitstatus (wstat))
2562 {
2563 child->stop_pc = get_pc (child);
2564 if (handle_extended_wait (&child, wstat))
2565 {
2566 /* The event has been handled, so just return without
2567 reporting it. */
2568 return NULL;
2569 }
2570 }
2571
2572 if (linux_wstatus_maybe_breakpoint (wstat))
2573 {
2574 if (save_stop_reason (child))
2575 have_stop_pc = 1;
2576 }
2577
2578 if (!have_stop_pc)
2579 child->stop_pc = get_pc (child);
2580
2581 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2582 && child->stop_expected)
2583 {
2584 if (debug_threads)
2585 debug_printf ("Expected stop.\n");
2586 child->stop_expected = 0;
2587
2588 if (thread->last_resume_kind == resume_stop)
2589 {
2590 /* We want to report the stop to the core. Treat the
2591 SIGSTOP as a normal event. */
2592 if (debug_threads)
2593 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2594 target_pid_to_str (ptid_of (thread)));
2595 }
2596 else if (stopping_threads != NOT_STOPPING_THREADS)
2597 {
2598 /* Stopping threads. We don't want this SIGSTOP to end up
2599 pending. */
2600 if (debug_threads)
2601 debug_printf ("LLW: SIGSTOP caught for %s "
2602 "while stopping threads.\n",
2603 target_pid_to_str (ptid_of (thread)));
2604 return NULL;
2605 }
2606 else
2607 {
2608 /* This is a delayed SIGSTOP. Filter out the event. */
2609 if (debug_threads)
2610 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2611 child->stepping ? "step" : "continue",
2612 target_pid_to_str (ptid_of (thread)));
2613
2614 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2615 return NULL;
2616 }
2617 }
2618
2619 child->status_pending_p = 1;
2620 child->status_pending = wstat;
2621 return child;
2622 }
2623
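/* Background for the PTRACE_EVENT_EXEC check above: with the
   PTRACE_O_TRACE* options enabled, an extended event stops the
   tracee with SIGTRAP and packs the event code into the high bits of
   the wait status:

     if (WIFSTOPPED (w) && WSTOPSIG (w) == SIGTRAP
         && (w >> 16) == PTRACE_EVENT_EXEC)
       ... an exec just happened ...

   which is what linux_ptrace_get_extended_event extracts. */
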
2624 /* Return true if THREAD is doing hardware single step. */
2625
2626 static int
2627 maybe_hw_step (struct thread_info *thread)
2628 {
2629 if (can_hardware_single_step ())
2630 return 1;
2631 else
2632 {
2633 /* GDBserver must insert single-step breakpoint for software
2634 single step. */
2635 gdb_assert (has_single_step_breakpoints (thread));
2636 return 0;
2637 }
2638 }
2639
2640 /* Resume LWPs that are currently stopped without any pending status
2641 to report, but are resumed from the core's perspective. */
2642
2643 static void
2644 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2645 {
2646 struct thread_info *thread = (struct thread_info *) entry;
2647 struct lwp_info *lp = get_thread_lwp (thread);
2648
2649 if (lp->stopped
2650 && !lp->suspended
2651 && !lp->status_pending_p
2652 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2653 {
2654 int step = 0;
2655
2656 if (thread->last_resume_kind == resume_step)
2657 step = maybe_hw_step (thread);
2658
2659 if (debug_threads)
2660 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2661 target_pid_to_str (ptid_of (thread)),
2662 paddress (lp->stop_pc),
2663 step);
2664
2665 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2666 }
2667 }
2668
2669 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2670 match FILTER_PTID (leaving others pending). The PTIDs can be:
2671 minus_one_ptid, to specify any child; a pid PTID, specifying all
2672 lwps of a thread group; or a PTID representing a single lwp. Store
2673 the stop status through the status pointer WSTAT. OPTIONS is
2674 passed to the waitpid call. Return 0 if no event was found and
2675 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2676 were found. Return the PID of the stopped child otherwise. */
2677
2678 static int
2679 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2680 int *wstatp, int options)
2681 {
2682 struct thread_info *event_thread;
2683 struct lwp_info *event_child, *requested_child;
2684 sigset_t block_mask, prev_mask;
2685
2686 retry:
2687 /* N.B. event_thread points to the thread_info struct that contains
2688 event_child. Keep them in sync. */
2689 event_thread = NULL;
2690 event_child = NULL;
2691 requested_child = NULL;
2692
2693 /* Check for a lwp with a pending status. */
2694
2695 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2696 {
2697 event_thread = (struct thread_info *)
2698 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2699 if (event_thread != NULL)
2700 event_child = get_thread_lwp (event_thread);
2701 if (debug_threads && event_thread)
2702 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2703 }
2704 else if (!ptid_equal (filter_ptid, null_ptid))
2705 {
2706 requested_child = find_lwp_pid (filter_ptid);
2707
2708 if (stopping_threads == NOT_STOPPING_THREADS
2709 && requested_child->status_pending_p
2710 && requested_child->collecting_fast_tracepoint)
2711 {
2712 enqueue_one_deferred_signal (requested_child,
2713 &requested_child->status_pending);
2714 requested_child->status_pending_p = 0;
2715 requested_child->status_pending = 0;
2716 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2717 }
2718
2719 if (requested_child->suspended
2720 && requested_child->status_pending_p)
2721 {
2722 internal_error (__FILE__, __LINE__,
2723 "requesting an event out of a"
2724 " suspended child?");
2725 }
2726
2727 if (requested_child->status_pending_p)
2728 {
2729 event_child = requested_child;
2730 event_thread = get_lwp_thread (event_child);
2731 }
2732 }
2733
2734 if (event_child != NULL)
2735 {
2736 if (debug_threads)
2737 debug_printf ("Got an event from pending child %ld (%04x)\n",
2738 lwpid_of (event_thread), event_child->status_pending);
2739 *wstatp = event_child->status_pending;
2740 event_child->status_pending_p = 0;
2741 event_child->status_pending = 0;
2742 current_thread = event_thread;
2743 return lwpid_of (event_thread);
2744 }
2745
2746 /* But if we don't find a pending event, we'll have to wait.
2747
2748 We only enter this loop if no process has a pending wait status.
2749 Thus any action taken in response to a wait status inside this
2750 loop is responding as soon as we detect the status, not after any
2751 pending events. */
2752
2753 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2754 all signals while here. */
2755 sigfillset (&block_mask);
2756 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2757
2758 /* Always pull all events out of the kernel. We'll randomly select
2759 an event LWP out of all that have events, to prevent
2760 starvation. */
2761 while (event_child == NULL)
2762 {
2763 pid_t ret = 0;
2764
2765 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2766 quirks:
2767
2768 - If the thread group leader exits while other threads in the
2769 thread group still exist, waitpid(TGID, ...) hangs. That
2770 waitpid won't return an exit status until the other threads
2771 in the group are reaped.
2772
2773 - When a non-leader thread execs, that thread just vanishes
2774 without reporting an exit (so we'd hang if we waited for it
2775 explicitly in that case). The exec event is reported to
2776 the TGID pid. */
2777 errno = 0;
2778 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2779
2780 if (debug_threads)
2781 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2782 ret, errno ? strerror (errno) : "ERRNO-OK");
2783
2784 if (ret > 0)
2785 {
2786 if (debug_threads)
2787 {
2788 debug_printf ("LLW: waitpid %ld received %s\n",
2789 (long) ret, status_to_str (*wstatp));
2790 }
2791
2792 /* Filter all events. IOW, leave all events pending. We'll
2793 randomly select an event LWP out of all that have events
2794 below. */
2795 linux_low_filter_event (ret, *wstatp);
2796 /* Retry until nothing comes out of waitpid. A single
2797 SIGCHLD can indicate more than one child stopped. */
2798 continue;
2799 }
2800
2801 /* Now that we've pulled all events out of the kernel, resume
2802 LWPs that don't have an interesting event to report. */
2803 if (stopping_threads == NOT_STOPPING_THREADS)
2804 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2805
2806 /* ... and find an LWP with a status to report to the core, if
2807 any. */
2808 event_thread = (struct thread_info *)
2809 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2810 if (event_thread != NULL)
2811 {
2812 event_child = get_thread_lwp (event_thread);
2813 *wstatp = event_child->status_pending;
2814 event_child->status_pending_p = 0;
2815 event_child->status_pending = 0;
2816 break;
2817 }
2818
2819 /* Check for zombie thread group leaders. Those can't be reaped
2820 until all other threads in the thread group are. */
2821 check_zombie_leaders ();
2822
2823 /* If there are no resumed children left in the set of LWPs we
2824 want to wait for, bail. We can't just block in
2825 waitpid/sigsuspend, because lwps might have been left stopped
2826 in trace-stop state, and we'd be stuck forever waiting for
2827 their status to change (which would only happen if we resumed
2828 them). Even if WNOHANG is set, this return code is preferred
2829 over 0 (below), as it is more detailed. */
2830 if ((find_inferior (&all_threads,
2831 not_stopped_callback,
2832 &wait_ptid) == NULL))
2833 {
2834 if (debug_threads)
2835 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2836 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2837 return -1;
2838 }
2839
2840 /* No interesting event to report to the caller. */
2841 if ((options & WNOHANG))
2842 {
2843 if (debug_threads)
2844 debug_printf ("WNOHANG set, no event found\n");
2845
2846 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2847 return 0;
2848 }
2849
2850 /* Block until we get an event reported with SIGCHLD. */
2851 if (debug_threads)
2852 debug_printf ("sigsuspend'ing\n");
2853
2854 sigsuspend (&prev_mask);
2855 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2856 goto retry;
2857 }
2858
2859 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2860
2861 current_thread = event_thread;
2862
2863 return lwpid_of (event_thread);
2864 }
2865
2866 /* Wait for an event from child(ren) PTID. PTIDs can be:
2867 minus_one_ptid, to specify any child; a pid PTID, specifying all
2868 lwps of a thread group; or a PTID representing a single lwp. Store
2869 the stop status through the status pointer WSTAT. OPTIONS is
2870 passed to the waitpid call. Return 0 if no event was found and
2871 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2872 were found. Return the PID of the stopped child otherwise. */
2873
2874 static int
2875 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2876 {
2877 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2878 }
2879
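/* The three filter shapes accepted by these wait routines
   (illustrative; `pid', `lwpid' and `w' are assumed values):

     linux_wait_for_event (minus_one_ptid, &w, 0);
       - any child

     linux_wait_for_event (pid_to_ptid (pid), &w, 0);
       - all LWPs of one thread group

     linux_wait_for_event (ptid_build (pid, lwpid, 0), &w, 0);
       - one specific LWP
*/
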
2880 /* Count the LWPs that have had events. */
2881
2882 static int
2883 count_events_callback (struct inferior_list_entry *entry, void *data)
2884 {
2885 struct thread_info *thread = (struct thread_info *) entry;
2886 struct lwp_info *lp = get_thread_lwp (thread);
2887 int *count = (int *) data;
2888
2889 gdb_assert (count != NULL);
2890
2891 /* Count only resumed LWPs that have an event pending. */
2892 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2893 && lp->status_pending_p)
2894 (*count)++;
2895
2896 return 0;
2897 }
2898
2899 /* Select the LWP (if any) that is currently being single-stepped. */
2900
2901 static int
2902 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2903 {
2904 struct thread_info *thread = (struct thread_info *) entry;
2905 struct lwp_info *lp = get_thread_lwp (thread);
2906
2907 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2908 && thread->last_resume_kind == resume_step
2909 && lp->status_pending_p)
2910 return 1;
2911 else
2912 return 0;
2913 }
2914
2915 /* Select the Nth LWP that has had an event. */
2916
2917 static int
2918 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2919 {
2920 struct thread_info *thread = (struct thread_info *) entry;
2921 struct lwp_info *lp = get_thread_lwp (thread);
2922 int *selector = (int *) data;
2923
2924 gdb_assert (selector != NULL);
2925
2926 /* Select only resumed LWPs that have an event pending. */
2927 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2928 && lp->status_pending_p)
2929 if ((*selector)-- == 0)
2930 return 1;
2931
2932 return 0;
2933 }
2934
2935 /* Select one LWP out of those that have events pending. */
2936
2937 static void
2938 select_event_lwp (struct lwp_info **orig_lp)
2939 {
2940 int num_events = 0;
2941 int random_selector;
2942 struct thread_info *event_thread = NULL;
2943
2944 /* In all-stop, give preference to the LWP that is being
2945 single-stepped. There will be at most one, and it's the LWP that
2946 the core is most interested in. If we didn't do this, then we'd
2947 have to handle pending step SIGTRAPs somehow in case the core
2948 later continues the previously-stepped thread, otherwise we'd
2949 report the pending SIGTRAP, and the core, not having stepped the
2950 thread, wouldn't understand what the trap was for, and therefore
2951 would report it to the user as a random signal. */
2952 if (!non_stop)
2953 {
2954 event_thread
2955 = (struct thread_info *) find_inferior (&all_threads,
2956 select_singlestep_lwp_callback,
2957 NULL);
2958 if (event_thread != NULL)
2959 {
2960 if (debug_threads)
2961 debug_printf ("SEL: Select single-step %s\n",
2962 target_pid_to_str (ptid_of (event_thread)));
2963 }
2964 }
2965 if (event_thread == NULL)
2966 {
2967 /* No single-stepping LWP. Select one at random, out of those
2968 which have had events. */
2969
2970 /* First see how many events we have. */
2971 find_inferior (&all_threads, count_events_callback, &num_events);
2972 gdb_assert (num_events > 0);
2973
2974 /* Now randomly pick a LWP out of those that have had
2975 events. */
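/* This scales rand () into [0, num_events): e.g. with
   num_events == 3 it picks uniformly from {0, 1, 2}. */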
2976 random_selector = (int)
2977 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2978
2979 if (debug_threads && num_events > 1)
2980 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2981 num_events, random_selector);
2982
2983 event_thread
2984 = (struct thread_info *) find_inferior (&all_threads,
2985 select_event_lwp_callback,
2986 &random_selector);
2987 }
2988
2989 if (event_thread != NULL)
2990 {
2991 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2992
2993 /* Switch the event LWP. */
2994 *orig_lp = event_lp;
2995 }
2996 }
2997
2998 /* Decrement the suspend count of an LWP. */
2999
3000 static int
3001 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
3002 {
3003 struct thread_info *thread = (struct thread_info *) entry;
3004 struct lwp_info *lwp = get_thread_lwp (thread);
3005
3006 /* Ignore EXCEPT. */
3007 if (lwp == except)
3008 return 0;
3009
3010 lwp_suspended_decr (lwp);
3011 return 0;
3012 }
3013
3014 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3015 non-NULL. */
3016
3017 static void
3018 unsuspend_all_lwps (struct lwp_info *except)
3019 {
3020 find_inferior (&all_threads, unsuspend_one_lwp, except);
3021 }
3022
3023 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3024 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3025 void *data);
3026 static int lwp_running (struct inferior_list_entry *entry, void *data);
3027 static ptid_t linux_wait_1 (ptid_t ptid,
3028 struct target_waitstatus *ourstatus,
3029 int target_options);
3030
3031 /* Stabilize threads (move out of jump pads).
3032
3033 If a thread is midway collecting a fast tracepoint, we need to
3034 finish the collection and move it out of the jump pad before
3035 reporting the signal.
3036
3037 This avoids recursion while collecting (when a signal arrives
3038 midway, and the signal handler itself collects), which would trash
3039 the trace buffer. In case the user set a breakpoint in a signal
3040 handler, this avoids the backtrace showing the jump pad, etc.
3041 Most importantly, there are certain things we can't do safely if
3042 threads are stopped in a jump pad (or in its callee's). For
3043 example:
3044
3045 - starting a new trace run. A thread still collecting the
3046 previous run could trash the trace buffer when resumed. The trace
3047 buffer control structures would have been reset but the thread had
3048 no way to tell. The thread could even be midway through memcpy'ing
3049 to the buffer, which would mean that when resumed, it would clobber
3050 the trace buffer that had been set up for the new run.
3051
3052 - we can't rewrite/reuse the jump pads for new tracepoints
3053 safely. Say you do tstart while a thread is stopped midway through
3054 collecting. When the thread is later resumed, it finishes the
3055 collection, and returns to the jump pad, to execute the original
3056 instruction that was under the tracepoint jump at the time the
3057 older run had been started. If the jump pad had been rewritten
3058 since for something else in the new run, the thread would now
3059 execute the wrong / random instructions. */
3060
3061 static void
3062 linux_stabilize_threads (void)
3063 {
3064 struct thread_info *saved_thread;
3065 struct thread_info *thread_stuck;
3066
3067 thread_stuck
3068 = (struct thread_info *) find_inferior (&all_threads,
3069 stuck_in_jump_pad_callback,
3070 NULL);
3071 if (thread_stuck != NULL)
3072 {
3073 if (debug_threads)
3074 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3075 lwpid_of (thread_stuck));
3076 return;
3077 }
3078
3079 saved_thread = current_thread;
3080
3081 stabilizing_threads = 1;
3082
3083 /* Kick 'em all. */
3084 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3085
3086 /* Loop until all are stopped out of the jump pads. */
3087 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3088 {
3089 struct target_waitstatus ourstatus;
3090 struct lwp_info *lwp;
3091 int wstat;
3092
3093 /* Note that we go through the full wait event loop. While
3094 moving threads out of jump pad, we need to be able to step
3095 over internal breakpoints and such. */
3096 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3097
3098 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3099 {
3100 lwp = get_thread_lwp (current_thread);
3101
3102 /* Lock it. */
3103 lwp_suspended_inc (lwp);
3104
3105 if (ourstatus.value.sig != GDB_SIGNAL_0
3106 || current_thread->last_resume_kind == resume_stop)
3107 {
3108 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3109 enqueue_one_deferred_signal (lwp, &wstat);
3110 }
3111 }
3112 }
3113
3114 unsuspend_all_lwps (NULL);
3115
3116 stabilizing_threads = 0;
3117
3118 current_thread = saved_thread;
3119
3120 if (debug_threads)
3121 {
3122 thread_stuck
3123 = (struct thread_info *) find_inferior (&all_threads,
3124 stuck_in_jump_pad_callback,
3125 NULL);
3126 if (thread_stuck != NULL)
3127 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3128 lwpid_of (thread_stuck));
3129 }
3130 }
3131
3132 /* Convenience function that is called when the kernel reports an
3133 event that is not passed out to GDB. */
3134
3135 static ptid_t
3136 ignore_event (struct target_waitstatus *ourstatus)
3137 {
3138 /* If we got an event, there may still be others, as a single
3139 SIGCHLD can indicate more than one child stopped. This forces
3140 another target_wait call. */
3141 async_file_mark ();
3142
3143 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3144 return null_ptid;
3145 }
3146
3147 /* Convenience function that is called when the kernel reports an exit
3148 event. This decides whether to report the event to GDB as a
3149 process exit event, a thread exit event, or to suppress the
3150 event. */
3151
3152 static ptid_t
3153 filter_exit_event (struct lwp_info *event_child,
3154 struct target_waitstatus *ourstatus)
3155 {
3156 struct thread_info *thread = get_lwp_thread (event_child);
3157 ptid_t ptid = ptid_of (thread);
3158
3159 if (!last_thread_of_process_p (pid_of (thread)))
3160 {
3161 if (report_thread_events)
3162 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3163 else
3164 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3165
3166 delete_lwp (event_child);
3167 }
3168 return ptid;
3169 }
3170
3171 /* Returns 1 if GDB is interested in any event_child syscalls. */
3172
3173 static int
3174 gdb_catching_syscalls_p (struct lwp_info *event_child)
3175 {
3176 struct thread_info *thread = get_lwp_thread (event_child);
3177 struct process_info *proc = get_thread_process (thread);
3178
3179 return !VEC_empty (int, proc->syscalls_to_catch);
3180 }
3181
3182 /* Returns 1 if GDB is interested in the event_child syscall.
3183 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3184
3185 static int
3186 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3187 {
3188 int i, iter;
3189 int sysno;
3190 struct thread_info *thread = get_lwp_thread (event_child);
3191 struct process_info *proc = get_thread_process (thread);
3192
3193 if (VEC_empty (int, proc->syscalls_to_catch))
3194 return 0;
3195
3196 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3197 return 1;
3198
3199 get_syscall_trapinfo (event_child, &sysno);
3200 for (i = 0;
3201 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3202 i++)
3203 if (iter == sysno)
3204 return 1;
3205
3206 return 0;
3207 }
3208
3209 /* Wait for an event from PTID; store the status in OURSTATUS. */
3210
3211 static ptid_t
3212 linux_wait_1 (ptid_t ptid,
3213 struct target_waitstatus *ourstatus, int target_options)
3214 {
3215 int w;
3216 struct lwp_info *event_child;
3217 int options;
3218 int pid;
3219 int step_over_finished;
3220 int bp_explains_trap;
3221 int maybe_internal_trap;
3222 int report_to_gdb;
3223 int trace_event;
3224 int in_step_range;
3225 int any_resumed;
3226
3227 if (debug_threads)
3228 {
3229 debug_enter ();
3230 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3231 }
3232
3233 /* Translate generic target options into linux options. */
3234 options = __WALL;
3235 if (target_options & TARGET_WNOHANG)
3236 options |= WNOHANG;
3237
3238 bp_explains_trap = 0;
3239 trace_event = 0;
3240 in_step_range = 0;
3241 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3242
3243 /* Find a resumed LWP, if any. */
3244 if (find_inferior (&all_threads,
3245 status_pending_p_callback,
3246 &minus_one_ptid) != NULL)
3247 any_resumed = 1;
3248 else if ((find_inferior (&all_threads,
3249 not_stopped_callback,
3250 &minus_one_ptid) != NULL))
3251 any_resumed = 1;
3252 else
3253 any_resumed = 0;
3254
3255 if (ptid_equal (step_over_bkpt, null_ptid))
3256 pid = linux_wait_for_event (ptid, &w, options);
3257 else
3258 {
3259 if (debug_threads)
3260 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3261 target_pid_to_str (step_over_bkpt));
3262 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3263 }
3264
3265 if (pid == 0 || (pid == -1 && !any_resumed))
3266 {
3267 gdb_assert (target_options & TARGET_WNOHANG);
3268
3269 if (debug_threads)
3270 {
3271 debug_printf ("linux_wait_1 ret = null_ptid, "
3272 "TARGET_WAITKIND_IGNORE\n");
3273 debug_exit ();
3274 }
3275
3276 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3277 return null_ptid;
3278 }
3279 else if (pid == -1)
3280 {
3281 if (debug_threads)
3282 {
3283 debug_printf ("linux_wait_1 ret = null_ptid, "
3284 "TARGET_WAITKIND_NO_RESUMED\n");
3285 debug_exit ();
3286 }
3287
3288 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3289 return null_ptid;
3290 }
3291
3292 event_child = get_thread_lwp (current_thread);
3293
3294 /* linux_wait_for_event only returns an exit status for the last
3295 child of a process. Report it. */
3296 if (WIFEXITED (w) || WIFSIGNALED (w))
3297 {
3298 if (WIFEXITED (w))
3299 {
3300 ourstatus->kind = TARGET_WAITKIND_EXITED;
3301 ourstatus->value.integer = WEXITSTATUS (w);
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("linux_wait_1 ret = %s, exited with "
3306 "retcode %d\n",
3307 target_pid_to_str (ptid_of (current_thread)),
3308 WEXITSTATUS (w));
3309 debug_exit ();
3310 }
3311 }
3312 else
3313 {
3314 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3315 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3316
3317 if (debug_threads)
3318 {
3319 debug_printf ("linux_wait_1 ret = %s, terminated with "
3320 "signal %d\n",
3321 target_pid_to_str (ptid_of (current_thread)),
3322 WTERMSIG (w));
3323 debug_exit ();
3324 }
3325 }
3326
3327 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3328 return filter_exit_event (event_child, ourstatus);
3329
3330 return ptid_of (current_thread);
3331 }
3332
3333 /* If step-over executes a breakpoint instruction, then in the case of a
3334 hardware single step it means a gdb/gdbserver breakpoint had been
3335 planted on top of a permanent breakpoint; in the case of a software
3336 single step it may just mean that gdbserver hit the reinsert breakpoint.
3337 The PC has been adjusted by save_stop_reason to point at
3338 the breakpoint address.
3339 So, in the case of hardware single step, advance the PC manually
3340 past the breakpoint, and in the case of software single step, advance
3341 only if it's not the single_step_breakpoint we are hitting.
3342 This avoids having the program keep trapping the permanent breakpoint
3343 forever. */
3344 if (!ptid_equal (step_over_bkpt, null_ptid)
3345 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3346 && (event_child->stepping
3347 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3348 {
3349 int increment_pc = 0;
3350 int breakpoint_kind = 0;
3351 CORE_ADDR stop_pc = event_child->stop_pc;
3352
3353 breakpoint_kind =
3354 the_target->breakpoint_kind_from_current_state (&stop_pc);
3355 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3356
3357 if (debug_threads)
3358 {
3359 debug_printf ("step-over for %s executed software breakpoint\n",
3360 target_pid_to_str (ptid_of (current_thread)));
3361 }
3362
3363 if (increment_pc != 0)
3364 {
3365 struct regcache *regcache
3366 = get_thread_regcache (current_thread, 1);
3367
3368 event_child->stop_pc += increment_pc;
3369 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3370
3371 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3372 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3373 }
3374 }
3375
3376 /* If this event was not handled before, and is not a SIGTRAP, we
3377 report it. SIGILL and SIGSEGV are also treated as traps in case
3378 a breakpoint is inserted at the current PC. If this target does
3379 not support internal breakpoints at all, we also report the
3380 SIGTRAP without further processing; it's of no concern to us. */
3381 maybe_internal_trap
3382 = (supports_breakpoints ()
3383 && (WSTOPSIG (w) == SIGTRAP
3384 || ((WSTOPSIG (w) == SIGILL
3385 || WSTOPSIG (w) == SIGSEGV)
3386 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3387
3388 if (maybe_internal_trap)
3389 {
3390 /* Handle anything that requires bookkeeping before deciding to
3391 report the event or continue waiting. */
3392
3393 /* First check if we can explain the SIGTRAP with an internal
3394 breakpoint, or if we should possibly report the event to GDB.
3395 Do this before anything that may remove or insert a
3396 breakpoint. */
3397 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3398
3399 /* We have a SIGTRAP, possibly a step-over dance has just
3400 finished. If so, tweak the state machine accordingly,
3401 reinsert breakpoints and delete any single-step
3402 breakpoints. */
3403 step_over_finished = finish_step_over (event_child);
3404
3405 /* Now invoke the callbacks of any internal breakpoints there. */
3406 check_breakpoints (event_child->stop_pc);
3407
3408 /* Handle tracepoint data collecting. This may overflow the
3409 trace buffer, and cause a tracing stop, removing
3410 breakpoints. */
3411 trace_event = handle_tracepoints (event_child);
3412
3413 if (bp_explains_trap)
3414 {
3415 if (debug_threads)
3416 debug_printf ("Hit a gdbserver breakpoint.\n");
3417 }
3418 }
3419 else
3420 {
3421 /* We have some other signal, possibly a step-over dance was in
3422 progress, and it should be cancelled too. */
3423 step_over_finished = finish_step_over (event_child);
3424 }
3425
3426 /* We have all the data we need. Either report the event to GDB, or
3427 resume threads and keep waiting for more. */
3428
3429 /* If we're collecting a fast tracepoint, finish the collection and
3430 move out of the jump pad before delivering a signal. See
3431 linux_stabilize_threads. */
3432
3433 if (WIFSTOPPED (w)
3434 && WSTOPSIG (w) != SIGTRAP
3435 && supports_fast_tracepoints ()
3436 && agent_loaded_p ())
3437 {
3438 if (debug_threads)
3439 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3440 "to defer or adjust it.\n",
3441 WSTOPSIG (w), lwpid_of (current_thread));
3442
3443 /* Allow debugging the jump pad itself. */
3444 if (current_thread->last_resume_kind != resume_step
3445 && maybe_move_out_of_jump_pad (event_child, &w))
3446 {
3447 enqueue_one_deferred_signal (event_child, &w);
3448
3449 if (debug_threads)
3450 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3451 WSTOPSIG (w), lwpid_of (current_thread));
3452
3453 linux_resume_one_lwp (event_child, 0, 0, NULL);
3454
3455 if (debug_threads)
3456 debug_exit ();
3457 return ignore_event (ourstatus);
3458 }
3459 }
3460
3461 if (event_child->collecting_fast_tracepoint)
3462 {
3463 if (debug_threads)
3464 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3465 "Check if we're already there.\n",
3466 lwpid_of (current_thread),
3467 event_child->collecting_fast_tracepoint);
3468
3469 trace_event = 1;
3470
3471 event_child->collecting_fast_tracepoint
3472 = linux_fast_tracepoint_collecting (event_child, NULL);
3473
3474 if (event_child->collecting_fast_tracepoint != 1)
3475 {
3476 /* No longer need this breakpoint. */
3477 if (event_child->exit_jump_pad_bkpt != NULL)
3478 {
3479 if (debug_threads)
3480 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3481 "stopping all threads momentarily.\n");
3482
3483 /* Other running threads could hit this breakpoint.
3484 We don't handle moribund locations like GDB does,
3485 instead we always pause all threads when removing
3486 breakpoints, so that any step-over or
3487 decr_pc_after_break adjustment is always taken
3488 care of while the breakpoint is still
3489 inserted. */
3490 stop_all_lwps (1, event_child);
3491
3492 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3493 event_child->exit_jump_pad_bkpt = NULL;
3494
3495 unstop_all_lwps (1, event_child);
3496
3497 gdb_assert (event_child->suspended >= 0);
3498 }
3499 }
3500
3501 if (event_child->collecting_fast_tracepoint == 0)
3502 {
3503 if (debug_threads)
3504 debug_printf ("fast tracepoint finished "
3505 "collecting successfully.\n");
3506
3507 /* We may have a deferred signal to report. */
3508 if (dequeue_one_deferred_signal (event_child, &w))
3509 {
3510 if (debug_threads)
3511 debug_printf ("dequeued one signal.\n");
3512 }
3513 else
3514 {
3515 if (debug_threads)
3516 debug_printf ("no deferred signals.\n");
3517
3518 if (stabilizing_threads)
3519 {
3520 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3521 ourstatus->value.sig = GDB_SIGNAL_0;
3522
3523 if (debug_threads)
3524 {
3525 debug_printf ("linux_wait_1 ret = %s, stopped "
3526 "while stabilizing threads\n",
3527 target_pid_to_str (ptid_of (current_thread)));
3528 debug_exit ();
3529 }
3530
3531 return ptid_of (current_thread);
3532 }
3533 }
3534 }
3535 }
3536
3537 /* Check whether GDB would be interested in this event. */
3538
3539 /* Check if GDB is interested in this syscall. */
3540 if (WIFSTOPPED (w)
3541 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3542 && !gdb_catch_this_syscall_p (event_child))
3543 {
3544 if (debug_threads)
3545 {
3546 debug_printf ("Ignored syscall for LWP %ld.\n",
3547 lwpid_of (current_thread));
3548 }
3549
3550 linux_resume_one_lwp (event_child, event_child->stepping,
3551 0, NULL);
3552
3553 if (debug_threads)
3554 debug_exit ();
3555 return ignore_event (ourstatus);
3556 }
3557
3558 /* If GDB is not interested in this signal, don't stop other
3559 threads, and don't report it to GDB. Just resume the inferior
3560 right away. We do this for threading-related signals as well as
3561 any that GDB specifically requested we ignore. But never ignore
3562 SIGSTOP if we sent it ourselves, and do not ignore signals when
3563 stepping - they may require special handling to skip the signal
3564 handler. Also never ignore signals that could be caused by a
3565 breakpoint. */
3566 if (WIFSTOPPED (w)
3567 && current_thread->last_resume_kind != resume_step
3568 && (
3569 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3570 (current_process ()->priv->thread_db != NULL
3571 && (WSTOPSIG (w) == __SIGRTMIN
3572 || WSTOPSIG (w) == __SIGRTMIN + 1))
3573 ||
3574 #endif
3575 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3576 && !(WSTOPSIG (w) == SIGSTOP
3577 && current_thread->last_resume_kind == resume_stop)
3578 && !linux_wstatus_maybe_breakpoint (w))))
3579 {
3580 siginfo_t info, *info_p;
3581
3582 if (debug_threads)
3583 debug_printf ("Ignored signal %d for LWP %ld.\n",
3584 WSTOPSIG (w), lwpid_of (current_thread));
3585
3586 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3587 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3588 info_p = &info;
3589 else
3590 info_p = NULL;
3591
3592 if (step_over_finished)
3593 {
3594 /* We cancelled this thread's step-over above. We still
3595 need to unsuspend all other LWPs, and set them back
3596 running again while the signal handler runs. */
3597 unsuspend_all_lwps (event_child);
3598
3599 /* Enqueue the pending signal info so that proceed_all_lwps
3600 doesn't lose it. */
3601 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3602
3603 proceed_all_lwps ();
3604 }
3605 else
3606 {
3607 linux_resume_one_lwp (event_child, event_child->stepping,
3608 WSTOPSIG (w), info_p);
3609 }
3610
3611 if (debug_threads)
3612 debug_exit ();
3613
3614 return ignore_event (ourstatus);
3615 }
3616
3617 /* Note that all addresses are always "out of the step range" when
3618 there's no range to begin with. */
3619 in_step_range = lwp_in_step_range (event_child);
3620
3621 /* If GDB wanted this thread to single step, and the thread is out
3622 of the step range, we always want to report the SIGTRAP, and let
3623 GDB handle it. Watchpoints should always be reported. So should
3624 signals we can't explain. A SIGTRAP we can't explain could be a
3625 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3626 do, we'd be able to handle GDB breakpoints on top of internal
3627 breakpoints, by handling the internal breakpoint and still
3628 reporting the event to GDB. If we don't, we're out of luck; GDB
3629 won't see the breakpoint hit. If we see a single-step event but
3630 the thread should be continuing, don't pass the trap to gdb.
3631 That indicates that we had previously finished a single-step but
3632 left the single-step pending -- see
3633 complete_ongoing_step_over. */
3634 report_to_gdb = (!maybe_internal_trap
3635 || (current_thread->last_resume_kind == resume_step
3636 && !in_step_range)
3637 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3638 || (!in_step_range
3639 && !bp_explains_trap
3640 && !trace_event
3641 && !step_over_finished
3642 && !(current_thread->last_resume_kind == resume_continue
3643 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3644 || (gdb_breakpoint_here (event_child->stop_pc)
3645 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3646 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3647 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3648
3649 run_breakpoint_commands (event_child->stop_pc);
3650
3651 /* We found no reason GDB would want us to stop. We either hit one
3652 of our own breakpoints, or finished an internal step GDB
3653 shouldn't know about. */
3654 if (!report_to_gdb)
3655 {
3656 if (debug_threads)
3657 {
3658 if (bp_explains_trap)
3659 debug_printf ("Hit a gdbserver breakpoint.\n");
3660 if (step_over_finished)
3661 debug_printf ("Step-over finished.\n");
3662 if (trace_event)
3663 debug_printf ("Tracepoint event.\n");
3664 if (lwp_in_step_range (event_child))
3665 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3666 paddress (event_child->stop_pc),
3667 paddress (event_child->step_range_start),
3668 paddress (event_child->step_range_end));
3669 }
3670
3671 /* We're not reporting this breakpoint to GDB, so apply the
3672 decr_pc_after_break adjustment to the inferior's regcache
3673 ourselves. */
3674
3675 if (the_low_target.set_pc != NULL)
3676 {
3677 struct regcache *regcache
3678 = get_thread_regcache (current_thread, 1);
3679 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3680 }
3681
3682 /* We may have finished stepping over a breakpoint. If so,
3683 we've stopped and suspended all LWPs momentarily except the
3684 stepping one. This is where we resume them all again. We're
3685 going to keep waiting, so use proceed, which handles stepping
3686 over the next breakpoint. */
3687 if (debug_threads)
3688 debug_printf ("proceeding all threads.\n");
3689
3690 if (step_over_finished)
3691 unsuspend_all_lwps (event_child);
3692
3693 proceed_all_lwps ();
3694
3695 if (debug_threads)
3696 debug_exit ();
3697
3698 return ignore_event (ourstatus);
3699 }
3700
3701 if (debug_threads)
3702 {
3703 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3704 {
3705 char *str;
3706
3707 str = target_waitstatus_to_string (&event_child->waitstatus);
3708 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3709 lwpid_of (get_lwp_thread (event_child)), str);
3710 xfree (str);
3711 }
3712 if (current_thread->last_resume_kind == resume_step)
3713 {
3714 if (event_child->step_range_start == event_child->step_range_end)
3715 debug_printf ("GDB wanted to single-step, reporting event.\n");
3716 else if (!lwp_in_step_range (event_child))
3717 debug_printf ("Out of step range, reporting event.\n");
3718 }
3719 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3720 debug_printf ("Stopped by watchpoint.\n");
3721 else if (gdb_breakpoint_here (event_child->stop_pc))
3722 debug_printf ("Stopped by GDB breakpoint.\n");
3723       debug_printf ("Hit a non-gdbserver trap event.\n");
3725 }
3726
3727 /* Alright, we're going to report a stop. */
3728
3729 /* Remove single-step breakpoints. */
3730 if (can_software_single_step ())
3731 {
3732       /* Whether to remove single-step breakpoints.  If true, stop all
3733 	 lwps, so that other threads won't hit a breakpoint in stale
3734 	 memory.  */
3735 int remove_single_step_breakpoints_p = 0;
3736
3737 if (non_stop)
3738 {
3739 remove_single_step_breakpoints_p
3740 = has_single_step_breakpoints (current_thread);
3741 }
3742 else
3743 {
3744 /* In all-stop, a stop reply cancels all previous resume
3745 requests. Delete all single-step breakpoints. */
3746 struct inferior_list_entry *inf, *tmp;
3747
3748 ALL_INFERIORS (&all_threads, inf, tmp)
3749 {
3750 struct thread_info *thread = (struct thread_info *) inf;
3751
3752 if (has_single_step_breakpoints (thread))
3753 {
3754 remove_single_step_breakpoints_p = 1;
3755 break;
3756 }
3757 }
3758 }
3759
3760 if (remove_single_step_breakpoints_p)
3761 {
3762 	  /* If we remove single-step breakpoints from memory, stop all lwps,
3763 	     so that other threads won't hit a breakpoint in stale
3764 	     memory.  */
3765 stop_all_lwps (0, event_child);
3766
3767 if (non_stop)
3768 {
3769 gdb_assert (has_single_step_breakpoints (current_thread));
3770 delete_single_step_breakpoints (current_thread);
3771 }
3772 else
3773 {
3774 struct inferior_list_entry *inf, *tmp;
3775
3776 ALL_INFERIORS (&all_threads, inf, tmp)
3777 {
3778 struct thread_info *thread = (struct thread_info *) inf;
3779
3780 if (has_single_step_breakpoints (thread))
3781 delete_single_step_breakpoints (thread);
3782 }
3783 }
3784
3785 unstop_all_lwps (0, event_child);
3786 }
3787 }
3788
3789 if (!stabilizing_threads)
3790 {
3791 /* In all-stop, stop all threads. */
3792 if (!non_stop)
3793 stop_all_lwps (0, NULL);
3794
3795 if (step_over_finished)
3796 {
3797 if (!non_stop)
3798 {
3799 /* If we were doing a step-over, all other threads but
3800 the stepping one had been paused in start_step_over,
3801 with their suspend counts incremented. We don't want
3802 to do a full unstop/unpause, because we're in
3803 all-stop mode (so we want threads stopped), but we
3804 still need to unsuspend the other threads, to
3805 decrement their `suspended' count back. */
3806 unsuspend_all_lwps (event_child);
3807 }
3808 else
3809 {
3810 /* If we just finished a step-over, then all threads had
3811 been momentarily paused. In all-stop, that's fine,
3812 we want threads stopped by now anyway. In non-stop,
3813 we need to re-resume threads that GDB wanted to be
3814 running. */
3815 unstop_all_lwps (1, event_child);
3816 }
3817 }
3818
3819 /* If we're not waiting for a specific LWP, choose an event LWP
3820 from among those that have had events. Giving equal priority
3821 to all LWPs that have had events helps prevent
3822 starvation. */
3823 if (ptid_equal (ptid, minus_one_ptid))
3824 {
3825 event_child->status_pending_p = 1;
3826 event_child->status_pending = w;
3827
3828 select_event_lwp (&event_child);
3829
3830 /* current_thread and event_child must stay in sync. */
3831 current_thread = get_lwp_thread (event_child);
3832
3833 event_child->status_pending_p = 0;
3834 w = event_child->status_pending;
3835 }
3836
3837
3838 /* Stabilize threads (move out of jump pads). */
3839 if (!non_stop)
3840 stabilize_threads ();
3841 }
3842 else
3843 {
3844 /* If we just finished a step-over, then all threads had been
3845 momentarily paused. In all-stop, that's fine, we want
3846 threads stopped by now anyway. In non-stop, we need to
3847 re-resume threads that GDB wanted to be running. */
3848 if (step_over_finished)
3849 unstop_all_lwps (1, event_child);
3850 }
3851
3852 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3853 {
3854 /* If the reported event is an exit, fork, vfork or exec, let
3855 GDB know. */
3856 *ourstatus = event_child->waitstatus;
3857 /* Clear the event lwp's waitstatus since we handled it already. */
3858 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3859 }
3860 else
3861 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3862
3863 /* Now that we've selected our final event LWP, un-adjust its PC if
3864 it was a software breakpoint, and the client doesn't know we can
3865 adjust the breakpoint ourselves. */
3866 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3867 && !swbreak_feature)
3868 {
3869 int decr_pc = the_low_target.decr_pc_after_break;
3870
3871 if (decr_pc != 0)
3872 {
3873 struct regcache *regcache
3874 = get_thread_regcache (current_thread, 1);
3875 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3876 }
3877 }
3878
3879 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3880 {
3881 get_syscall_trapinfo (event_child,
3882 &ourstatus->value.syscall_number);
3883 ourstatus->kind = event_child->syscall_state;
3884 }
3885 else if (current_thread->last_resume_kind == resume_stop
3886 && WSTOPSIG (w) == SIGSTOP)
3887 {
3888       /* A thread that has been requested to stop by GDB with vCont;t
3889 	 and that stopped cleanly, so report it as SIG0.  The use of
3890 	 SIGSTOP is an implementation detail.  */
3891 ourstatus->value.sig = GDB_SIGNAL_0;
3892 }
3893 else if (current_thread->last_resume_kind == resume_stop
3894 && WSTOPSIG (w) != SIGSTOP)
3895 {
3896       /* A thread that has been requested to stop by GDB with vCont;t,
3897 	 but that stopped for some other reason.  */
3898 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3899 }
3900 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3901 {
3902 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3903 }
3904
3905 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3906
3907 if (debug_threads)
3908 {
3909 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3910 target_pid_to_str (ptid_of (current_thread)),
3911 ourstatus->kind, ourstatus->value.sig);
3912 debug_exit ();
3913 }
3914
3915 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3916 return filter_exit_event (event_child, ourstatus);
3917
3918 return ptid_of (current_thread);
3919 }
3920
3921 /* Get rid of any pending event in the pipe. */
3922 static void
3923 async_file_flush (void)
3924 {
3925 int ret;
3926 char buf;
3927
3928 do
3929 ret = read (linux_event_pipe[0], &buf, 1);
3930 while (ret >= 0 || (ret == -1 && errno == EINTR));
3931 }
3932
3933 /* Put something in the pipe, so the event loop wakes up. */
3934 static void
3935 async_file_mark (void)
3936 {
3937 int ret;
3938
3939 async_file_flush ();
3940
3941 do
3942 ret = write (linux_event_pipe[1], "+", 1);
3943 while (ret == 0 || (ret == -1 && errno == EINTR));
3944
3945 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3946 be awakened anyway. */
3947 }
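
/* The two helpers above implement the classic self-pipe pattern: the
   event loop selects on the read end, and async_file_mark writes a
   byte to wake it.  Below is a minimal standalone sketch of the setup
   (illustrative only, not gdbserver code; the names are hypothetical).
   Both ends are made non-blocking so that neither flushing an empty
   pipe nor marking a full one can stall us.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int event_pipe_sketch[2];

static int
make_event_pipe_sketch (void)
{
  if (pipe (event_pipe_sketch) != 0)
    return -1;
  /* O_NONBLOCK on both ends: reads return -1/EAGAIN when empty,
     writes return -1/EAGAIN when full, instead of blocking.  */
  if (fcntl (event_pipe_sketch[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (event_pipe_sketch[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif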
3948
3949 static ptid_t
3950 linux_wait (ptid_t ptid,
3951 struct target_waitstatus *ourstatus, int target_options)
3952 {
3953 ptid_t event_ptid;
3954
3955 /* Flush the async file first. */
3956 if (target_is_async_p ())
3957 async_file_flush ();
3958
3959 do
3960 {
3961 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3962 }
3963 while ((target_options & TARGET_WNOHANG) == 0
3964 && ptid_equal (event_ptid, null_ptid)
3965 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3966
3967 /* If at least one stop was reported, there may be more. A single
3968 SIGCHLD can signal more than one child stop. */
3969 if (target_is_async_p ()
3970 && (target_options & TARGET_WNOHANG) != 0
3971 && !ptid_equal (event_ptid, null_ptid))
3972 async_file_mark ();
3973
3974 return event_ptid;
3975 }
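
/* linux_wait_1 ultimately sits on waitpid.  The hypothetical
   standalone sketch below shows the two modes involved here: a
   blocking wait, and a TARGET_WNOHANG-style poll that returns
   immediately when no child has changed state yet.  __WALL asks for
   events from both clone and non-clone children.  */
#if 0
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
wait_sketch (pid_t pid, int *wstat, int nohang)
{
  /* With WNOHANG, returns 0 when nothing is ready yet.  */
  return waitpid (pid, wstat, __WALL | (nohang ? WNOHANG : 0));
}
#endif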
3976
3977 /* Send a signal to an LWP. */
3978
3979 static int
3980 kill_lwp (unsigned long lwpid, int signo)
3981 {
3982 int ret;
3983
3984 errno = 0;
3985 ret = syscall (__NR_tkill, lwpid, signo);
3986 if (errno == ENOSYS)
3987 {
3988 /* If tkill fails, then we are not using nptl threads, a
3989 configuration we no longer support. */
3990 perror_with_name (("tkill"));
3991 }
3992 return ret;
3993 }
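
/* kill(2) addresses a whole thread group; tkill addresses a single
   thread, which is what we need to stop one LWP.  A hypothetical
   standalone sketch (glibc provides no wrapper, so the raw syscall is
   used, as above):  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
tkill_sketch (pid_t tid, int signo)
{
  /* Delivers SIGNO to exactly one kernel thread (LWP).  */
  return syscall (__NR_tkill, tid, signo);
}
#endif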
3994
3995 void
3996 linux_stop_lwp (struct lwp_info *lwp)
3997 {
3998 send_sigstop (lwp);
3999 }
4000
4001 static void
4002 send_sigstop (struct lwp_info *lwp)
4003 {
4004 int pid;
4005
4006 pid = lwpid_of (get_lwp_thread (lwp));
4007
4008 /* If we already have a pending stop signal for this process, don't
4009 send another. */
4010 if (lwp->stop_expected)
4011 {
4012 if (debug_threads)
4013 debug_printf ("Have pending sigstop for lwp %d\n", pid);
4014
4015 return;
4016 }
4017
4018 if (debug_threads)
4019 debug_printf ("Sending sigstop to lwp %d\n", pid);
4020
4021 lwp->stop_expected = 1;
4022 kill_lwp (pid, SIGSTOP);
4023 }
4024
4025 static int
4026 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
4027 {
4028 struct thread_info *thread = (struct thread_info *) entry;
4029 struct lwp_info *lwp = get_thread_lwp (thread);
4030
4031 /* Ignore EXCEPT. */
4032 if (lwp == except)
4033 return 0;
4034
4035 if (lwp->stopped)
4036 return 0;
4037
4038 send_sigstop (lwp);
4039 return 0;
4040 }
4041
4042 /* Increment the suspend count of an LWP, and stop it, if not stopped
4043 yet. */
4044 static int
4045 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
4046 void *except)
4047 {
4048 struct thread_info *thread = (struct thread_info *) entry;
4049 struct lwp_info *lwp = get_thread_lwp (thread);
4050
4051 /* Ignore EXCEPT. */
4052 if (lwp == except)
4053 return 0;
4054
4055 lwp_suspended_inc (lwp);
4056
4057 return send_sigstop_callback (entry, except);
4058 }
4059
4060 static void
4061 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4062 {
4063 /* Store the exit status for later. */
4064 lwp->status_pending_p = 1;
4065 lwp->status_pending = wstat;
4066
4067 /* Store in waitstatus as well, as there's nothing else to process
4068 for this event. */
4069 if (WIFEXITED (wstat))
4070 {
4071 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4072 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4073 }
4074 else if (WIFSIGNALED (wstat))
4075 {
4076 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4077 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4078 }
4079
4080 /* Prevent trying to stop it. */
4081 lwp->stopped = 1;
4082
4083 /* No further stops are expected from a dead lwp. */
4084 lwp->stop_expected = 0;
4085 }
4086
4087 /* Return true if LWP has exited already, and has a pending exit event
4088 to report to GDB. */
4089
4090 static int
4091 lwp_is_marked_dead (struct lwp_info *lwp)
4092 {
4093 return (lwp->status_pending_p
4094 && (WIFEXITED (lwp->status_pending)
4095 || WIFSIGNALED (lwp->status_pending)));
4096 }
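
/* Both functions above classify a raw wait status.  A hypothetical
   standalone sketch of the W* macros they rely on:  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
decode_wstat_sketch (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif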
4097
4098 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4099
4100 static void
4101 wait_for_sigstop (void)
4102 {
4103 struct thread_info *saved_thread;
4104 ptid_t saved_tid;
4105 int wstat;
4106 int ret;
4107
4108 saved_thread = current_thread;
4109 if (saved_thread != NULL)
4110 saved_tid = saved_thread->entry.id;
4111 else
4112 saved_tid = null_ptid; /* avoid bogus unused warning */
4113
4114 if (debug_threads)
4115 debug_printf ("wait_for_sigstop: pulling events\n");
4116
4117 /* Passing NULL_PTID as filter indicates we want all events to be
4118 left pending. Eventually this returns when there are no
4119 unwaited-for children left. */
4120 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4121 &wstat, __WALL);
4122 gdb_assert (ret == -1);
4123
4124 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4125 current_thread = saved_thread;
4126 else
4127 {
4128 if (debug_threads)
4129 debug_printf ("Previously current thread died.\n");
4130
4131 /* We can't change the current inferior behind GDB's back,
4132 otherwise, a subsequent command may apply to the wrong
4133 process. */
4134 current_thread = NULL;
4135 }
4136 }
4137
4138 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4139 move it out, because we need to report the stop event to GDB. For
4140 example, if the user puts a breakpoint in the jump pad, it's
4141 because she wants to debug it. */
4142
4143 static int
4144 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4145 {
4146 struct thread_info *thread = (struct thread_info *) entry;
4147 struct lwp_info *lwp = get_thread_lwp (thread);
4148
4149 if (lwp->suspended != 0)
4150 {
4151 internal_error (__FILE__, __LINE__,
4152 "LWP %ld is suspended, suspended=%d\n",
4153 lwpid_of (thread), lwp->suspended);
4154 }
4155 gdb_assert (lwp->stopped);
4156
4157   /* Allow debugging the jump pad, gdb_collect, etc.  */
4158 return (supports_fast_tracepoints ()
4159 && agent_loaded_p ()
4160 && (gdb_breakpoint_here (lwp->stop_pc)
4161 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4162 || thread->last_resume_kind == resume_step)
4163 && linux_fast_tracepoint_collecting (lwp, NULL));
4164 }
4165
4166 static void
4167 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4168 {
4169 struct thread_info *thread = (struct thread_info *) entry;
4170 struct thread_info *saved_thread;
4171 struct lwp_info *lwp = get_thread_lwp (thread);
4172 int *wstat;
4173
4174 if (lwp->suspended != 0)
4175 {
4176 internal_error (__FILE__, __LINE__,
4177 "LWP %ld is suspended, suspended=%d\n",
4178 lwpid_of (thread), lwp->suspended);
4179 }
4180 gdb_assert (lwp->stopped);
4181
4182 /* For gdb_breakpoint_here. */
4183 saved_thread = current_thread;
4184 current_thread = thread;
4185
4186 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4187
4188 /* Allow debugging the jump pad, gdb_collect, etc. */
4189 if (!gdb_breakpoint_here (lwp->stop_pc)
4190 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4191 && thread->last_resume_kind != resume_step
4192 && maybe_move_out_of_jump_pad (lwp, wstat))
4193 {
4194 if (debug_threads)
4195 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4196 lwpid_of (thread));
4197
4198 if (wstat)
4199 {
4200 lwp->status_pending_p = 0;
4201 enqueue_one_deferred_signal (lwp, wstat);
4202
4203 if (debug_threads)
4204 debug_printf ("Signal %d for LWP %ld deferred "
4205 "(in jump pad)\n",
4206 WSTOPSIG (*wstat), lwpid_of (thread));
4207 }
4208
4209 linux_resume_one_lwp (lwp, 0, 0, NULL);
4210 }
4211 else
4212 lwp_suspended_inc (lwp);
4213
4214 current_thread = saved_thread;
4215 }
4216
4217 static int
4218 lwp_running (struct inferior_list_entry *entry, void *data)
4219 {
4220 struct thread_info *thread = (struct thread_info *) entry;
4221 struct lwp_info *lwp = get_thread_lwp (thread);
4222
4223 if (lwp_is_marked_dead (lwp))
4224 return 0;
4225 if (lwp->stopped)
4226 return 0;
4227 return 1;
4228 }
4229
4230 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4231 If SUSPEND, then also increase the suspend count of every LWP,
4232 except EXCEPT. */
4233
4234 static void
4235 stop_all_lwps (int suspend, struct lwp_info *except)
4236 {
4237 /* Should not be called recursively. */
4238 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4239
4240 if (debug_threads)
4241 {
4242 debug_enter ();
4243 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4244 suspend ? "stop-and-suspend" : "stop",
4245 except != NULL
4246 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4247 : "none");
4248 }
4249
4250 stopping_threads = (suspend
4251 ? STOPPING_AND_SUSPENDING_THREADS
4252 : STOPPING_THREADS);
4253
4254 if (suspend)
4255 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4256 else
4257 find_inferior (&all_threads, send_sigstop_callback, except);
4258 wait_for_sigstop ();
4259 stopping_threads = NOT_STOPPING_THREADS;
4260
4261 if (debug_threads)
4262 {
4263 debug_printf ("stop_all_lwps done, setting stopping_threads "
4264 "back to !stopping\n");
4265 debug_exit ();
4266 }
4267 }
4268
4269 /* Enqueue one signal in the chain of signals which need to be
4270 delivered to this process on next resume. */
4271
4272 static void
4273 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4274 {
4275 struct pending_signals *p_sig = XNEW (struct pending_signals);
4276
4277 p_sig->prev = lwp->pending_signals;
4278 p_sig->signal = signal;
4279 if (info == NULL)
4280 memset (&p_sig->info, 0, sizeof (siginfo_t));
4281 else
4282 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4283 lwp->pending_signals = p_sig;
4284 }
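
/* The pending_signals chain is pushed at the head via `prev', and
   consumers (see linux_resume_one_lwp_throw below) walk to the tail,
   so signals are delivered oldest-first.  A hypothetical standalone
   sketch of that dequeue pattern:  */
#if 0
#include <stdlib.h>

struct pending_sketch
{
  struct pending_sketch *prev;
  int signal;
};

/* Pop the oldest entry.  Assumes the list is non-empty.  */
static int
dequeue_oldest_sketch (struct pending_sketch **head)
{
  struct pending_sketch **p = head;
  struct pending_sketch *oldest;
  int sig;

  /* Walk to the end of the `prev' chain: the oldest entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  oldest = *p;
  sig = oldest->signal;
  *p = NULL;
  free (oldest);
  return sig;
}
#endif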
4285
4286 /* Install breakpoints for software single stepping. */
4287
4288 static void
4289 install_software_single_step_breakpoints (struct lwp_info *lwp)
4290 {
4291 int i;
4292 CORE_ADDR pc;
4293 struct thread_info *thread = get_lwp_thread (lwp);
4294 struct regcache *regcache = get_thread_regcache (thread, 1);
4295 VEC (CORE_ADDR) *next_pcs = NULL;
4296 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4297
4298 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4299
4300 current_thread = thread;
4301 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4302
4303 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4304 set_single_step_breakpoint (pc, current_ptid);
4305
4306 do_cleanups (old_chain);
4307 }
4308
4309 /* Single step via hardware or software single step.
4310    Return 1 if hardware single stepping, 0 if software single stepping
4311    or unable to single step.  */
4312
4313 static int
4314 single_step (struct lwp_info* lwp)
4315 {
4316 int step = 0;
4317
4318 if (can_hardware_single_step ())
4319 {
4320 step = 1;
4321 }
4322 else if (can_software_single_step ())
4323 {
4324 install_software_single_step_breakpoints (lwp);
4325 step = 0;
4326 }
4327 else
4328 {
4329 if (debug_threads)
4330 	debug_printf ("stepping is not implemented on this target\n");
4331 }
4332
4333 return step;
4334 }
4335
4336 /* The signal can be delivered to the inferior if we are not trying to
4337    finish a fast tracepoint collect.  Since a signal can be delivered
4338    during a step-over, the program may enter the signal handler and
4339    trap again after returning from it.  We can live with the spurious
4340    double traps.  */
4341
4342 static int
4343 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4344 {
4345 return !lwp->collecting_fast_tracepoint;
4346 }
4347
4348 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4349 SIGNAL is nonzero, give it that signal. */
4350
4351 static void
4352 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4353 int step, int signal, siginfo_t *info)
4354 {
4355 struct thread_info *thread = get_lwp_thread (lwp);
4356 struct thread_info *saved_thread;
4357 int fast_tp_collecting;
4358 int ptrace_request;
4359 struct process_info *proc = get_thread_process (thread);
4360
4361   /* Note that the target description may not be initialised
4362      (proc->tdesc == NULL) at this point, because the program hasn't
4363      stopped at its first instruction yet.  That happens while
4364      GDBserver is skipping the extra traps from the wrapper program
4365      (see option --wrapper).  Code in this function that requires
4366      register access should be guarded by a check on proc->tdesc.  */
4367
4368 if (lwp->stopped == 0)
4369 return;
4370
4371 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4372
4373 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4374
4375 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4376
4377 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4378 user used the "jump" command, or "set $pc = foo"). */
4379 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4380 {
4381 /* Collecting 'while-stepping' actions doesn't make sense
4382 anymore. */
4383 release_while_stepping_state_list (thread);
4384 }
4385
4386 /* If we have pending signals or status, and a new signal, enqueue the
4387 signal. Also enqueue the signal if it can't be delivered to the
4388 inferior right now. */
4389 if (signal != 0
4390 && (lwp->status_pending_p
4391 || lwp->pending_signals != NULL
4392 || !lwp_signal_can_be_delivered (lwp)))
4393 {
4394 enqueue_pending_signal (lwp, signal, info);
4395
4396 /* Postpone any pending signal. It was enqueued above. */
4397 signal = 0;
4398 }
4399
4400 if (lwp->status_pending_p)
4401 {
4402 if (debug_threads)
4403 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4404 " has pending status\n",
4405 lwpid_of (thread), step ? "step" : "continue",
4406 lwp->stop_expected ? "expected" : "not expected");
4407 return;
4408 }
4409
4410 saved_thread = current_thread;
4411 current_thread = thread;
4412
4413 /* This bit needs some thinking about. If we get a signal that
4414 we must report while a single-step reinsert is still pending,
4415 we often end up resuming the thread. It might be better to
4416 (ew) allow a stack of pending events; then we could be sure that
4417 the reinsert happened right away and not lose any signals.
4418
4419 Making this stack would also shrink the window in which breakpoints are
4420 uninserted (see comment in linux_wait_for_lwp) but not enough for
4421 complete correctness, so it won't solve that problem. It may be
4422 worthwhile just to solve this one, however. */
4423 if (lwp->bp_reinsert != 0)
4424 {
4425 if (debug_threads)
4426 debug_printf (" pending reinsert at 0x%s\n",
4427 paddress (lwp->bp_reinsert));
4428
4429 if (can_hardware_single_step ())
4430 {
4431 if (fast_tp_collecting == 0)
4432 {
4433 if (step == 0)
4434 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4435 if (lwp->suspended)
4436 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4437 lwp->suspended);
4438 }
4439 }
4440
4441 step = maybe_hw_step (thread);
4442 }
4443
4444 if (fast_tp_collecting == 1)
4445 {
4446 if (debug_threads)
4447 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4448 " (exit-jump-pad-bkpt)\n",
4449 lwpid_of (thread));
4450 }
4451 else if (fast_tp_collecting == 2)
4452 {
4453 if (debug_threads)
4454 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4455 " single-stepping\n",
4456 lwpid_of (thread));
4457
4458 if (can_hardware_single_step ())
4459 step = 1;
4460 else
4461 {
4462 internal_error (__FILE__, __LINE__,
4463 "moving out of jump pad single-stepping"
4464 " not implemented on this target");
4465 }
4466 }
4467
4468   /* If we have while-stepping actions in this thread, set it stepping.
4469 If we have a signal to deliver, it may or may not be set to
4470 SIG_IGN, we don't know. Assume so, and allow collecting
4471 while-stepping into a signal handler. A possible smart thing to
4472 do would be to set an internal breakpoint at the signal return
4473 address, continue, and carry on catching this while-stepping
4474 action only when that breakpoint is hit. A future
4475 enhancement. */
4476 if (thread->while_stepping != NULL)
4477 {
4478 if (debug_threads)
4479 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4480 lwpid_of (thread));
4481
4482 step = single_step (lwp);
4483 }
4484
4485 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4486 {
4487 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4488
4489 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4490
4491 if (debug_threads)
4492 {
4493 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4494 (long) lwp->stop_pc);
4495 }
4496 }
4497
4498 /* If we have pending signals, consume one if it can be delivered to
4499 the inferior. */
4500 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4501 {
4502 struct pending_signals **p_sig;
4503
4504 p_sig = &lwp->pending_signals;
4505 while ((*p_sig)->prev != NULL)
4506 p_sig = &(*p_sig)->prev;
4507
4508 signal = (*p_sig)->signal;
4509 if ((*p_sig)->info.si_signo != 0)
4510 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4511 &(*p_sig)->info);
4512
4513 free (*p_sig);
4514 *p_sig = NULL;
4515 }
4516
4517 if (debug_threads)
4518 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4519 lwpid_of (thread), step ? "step" : "continue", signal,
4520 lwp->stop_expected ? "expected" : "not expected");
4521
4522 if (the_low_target.prepare_to_resume != NULL)
4523 the_low_target.prepare_to_resume (lwp);
4524
4525 regcache_invalidate_thread (thread);
4526 errno = 0;
4527 lwp->stepping = step;
4528 if (step)
4529 ptrace_request = PTRACE_SINGLESTEP;
4530 else if (gdb_catching_syscalls_p (lwp))
4531 ptrace_request = PTRACE_SYSCALL;
4532 else
4533 ptrace_request = PTRACE_CONT;
4534 ptrace (ptrace_request,
4535 lwpid_of (thread),
4536 (PTRACE_TYPE_ARG3) 0,
4537 /* Coerce to a uintptr_t first to avoid potential gcc warning
4538 of coercing an 8 byte integer to a 4 byte pointer. */
4539 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4540
4541 current_thread = saved_thread;
4542 if (errno)
4543 perror_with_name ("resuming thread");
4544
4545 /* Successfully resumed. Clear state that no longer makes sense,
4546 and mark the LWP as running. Must not do this before resuming
4547 otherwise if that fails other code will be confused. E.g., we'd
4548 later try to stop the LWP and hang forever waiting for a stop
4549 status. Note that we must not throw after this is cleared,
4550 otherwise handle_zombie_lwp_error would get confused. */
4551 lwp->stopped = 0;
4552 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4553 }
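
/* Stripped of its bookkeeping, the resume above is a single ptrace
   call.  A hypothetical standalone sketch (PTRACE_SYSCALL, used above
   for syscall catchpoints, is selected the same way):  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_sketch (pid_t tid, int step, int signo)
{
  /* The data argument carries the signal to deliver; 0 means none.  */
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, tid,
		 (void *) 0, (void *) (long) signo);
}
#endif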
4554
4555 /* Called when we try to resume a stopped LWP and that errors out. If
4556 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4557 or about to become), discard the error, clear any pending status
4558 the LWP may have, and return true (we'll collect the exit status
4559 soon enough). Otherwise, return false. */
4560
4561 static int
4562 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4563 {
4564 struct thread_info *thread = get_lwp_thread (lp);
4565
4566 /* If we get an error after resuming the LWP successfully, we'd
4567 confuse !T state for the LWP being gone. */
4568 gdb_assert (lp->stopped);
4569
4570 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4571 because even if ptrace failed with ESRCH, the tracee may be "not
4572 yet fully dead", but already refusing ptrace requests. In that
4573 case the tracee has 'R (Running)' state for a little bit
4574 (observed in Linux 3.18). See also the note on ESRCH in the
4575 ptrace(2) man page. Instead, check whether the LWP has any state
4576 other than ptrace-stopped. */
4577
4578 /* Don't assume anything if /proc/PID/status can't be read. */
4579 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4580 {
4581 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4582 lp->status_pending_p = 0;
4583 return 1;
4584 }
4585 return 0;
4586 }
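
/* linux_proc_pid_is_trace_stopped_nowarn boils down to reading the
   "State:" line of /proc/PID/status and checking for 't' (tracing
   stop).  A hypothetical standalone sketch of that parse:  */
#if 0
#include <stdio.h>
#include <string.h>

/* Return the state letter of PID, or '\0' on error.  */
static char
proc_state_sketch (int pid)
{
  char path[64], line[128];
  char state = '\0';
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return '\0';
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	sscanf (line + 6, " %c", &state);
	break;
      }
  fclose (f);
  return state;
}
#endif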
4587
4588 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4589 disappears while we try to resume it. */
4590
4591 static void
4592 linux_resume_one_lwp (struct lwp_info *lwp,
4593 int step, int signal, siginfo_t *info)
4594 {
4595 TRY
4596 {
4597 linux_resume_one_lwp_throw (lwp, step, signal, info);
4598 }
4599 CATCH (ex, RETURN_MASK_ERROR)
4600 {
4601 if (!check_ptrace_stopped_lwp_gone (lwp))
4602 throw_exception (ex);
4603 }
4604 END_CATCH
4605 }
4606
4607 struct thread_resume_array
4608 {
4609 struct thread_resume *resume;
4610 size_t n;
4611 };
4612
4613 /* This function is called once per thread via find_inferior.
4614 ARG is a pointer to a thread_resume_array struct.
4615 We look up the thread specified by ENTRY in ARG, and mark the thread
4616 with a pointer to the appropriate resume request.
4617
4618 This algorithm is O(threads * resume elements), but resume elements
4619 is small (and will remain small at least until GDB supports thread
4620 suspension). */
4621
4622 static int
4623 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4624 {
4625 struct thread_info *thread = (struct thread_info *) entry;
4626 struct lwp_info *lwp = get_thread_lwp (thread);
4627 int ndx;
4628 struct thread_resume_array *r;
4629
4630 r = (struct thread_resume_array *) arg;
4631
4632 for (ndx = 0; ndx < r->n; ndx++)
4633 {
4634 ptid_t ptid = r->resume[ndx].thread;
4635 if (ptid_equal (ptid, minus_one_ptid)
4636 || ptid_equal (ptid, entry->id)
4637 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4638 of PID'. */
4639 || (ptid_get_pid (ptid) == pid_of (thread)
4640 && (ptid_is_pid (ptid)
4641 || ptid_get_lwp (ptid) == -1)))
4642 {
4643 if (r->resume[ndx].kind == resume_stop
4644 && thread->last_resume_kind == resume_stop)
4645 {
4646 if (debug_threads)
4647 debug_printf ("already %s LWP %ld at GDB's request\n",
4648 (thread->last_status.kind
4649 == TARGET_WAITKIND_STOPPED)
4650 ? "stopped"
4651 : "stopping",
4652 lwpid_of (thread));
4653
4654 continue;
4655 }
4656
4657 lwp->resume = &r->resume[ndx];
4658 thread->last_resume_kind = lwp->resume->kind;
4659
4660 lwp->step_range_start = lwp->resume->step_range_start;
4661 lwp->step_range_end = lwp->resume->step_range_end;
4662
4663 /* If we had a deferred signal to report, dequeue one now.
4664 This can happen if LWP gets more than one signal while
4665 trying to get out of a jump pad. */
4666 if (lwp->stopped
4667 && !lwp->status_pending_p
4668 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4669 {
4670 lwp->status_pending_p = 1;
4671
4672 if (debug_threads)
4673 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4674 "leaving status pending.\n",
4675 WSTOPSIG (lwp->status_pending),
4676 lwpid_of (thread));
4677 }
4678
4679 return 0;
4680 }
4681 }
4682
4683 /* No resume action for this thread. */
4684 lwp->resume = NULL;
4685
4686 return 0;
4687 }
4688
4689 /* find_inferior callback for linux_resume.
4690 Set *FLAG_P if this lwp has an interesting status pending. */
4691
4692 static int
4693 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4694 {
4695 struct thread_info *thread = (struct thread_info *) entry;
4696 struct lwp_info *lwp = get_thread_lwp (thread);
4697
4698 /* LWPs which will not be resumed are not interesting, because
4699 we might not wait for them next time through linux_wait. */
4700 if (lwp->resume == NULL)
4701 return 0;
4702
4703 if (thread_still_has_status_pending_p (thread))
4704 * (int *) flag_p = 1;
4705
4706 return 0;
4707 }
4708
4709 /* Return 1 if this lwp that GDB wants running is stopped at an
4710 internal breakpoint that we need to step over. It assumes that any
4711 required STOP_PC adjustment has already been propagated to the
4712 inferior's regcache. */
4713
4714 static int
4715 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4716 {
4717 struct thread_info *thread = (struct thread_info *) entry;
4718 struct lwp_info *lwp = get_thread_lwp (thread);
4719 struct thread_info *saved_thread;
4720 CORE_ADDR pc;
4721 struct process_info *proc = get_thread_process (thread);
4722
4723   /* GDBserver is skipping the extra traps from the wrapper program,
4724      so there is no need to step over.  */
4725 if (proc->tdesc == NULL)
4726 return 0;
4727
4728 /* LWPs which will not be resumed are not interesting, because we
4729 might not wait for them next time through linux_wait. */
4730
4731 if (!lwp->stopped)
4732 {
4733 if (debug_threads)
4734 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4735 lwpid_of (thread));
4736 return 0;
4737 }
4738
4739 if (thread->last_resume_kind == resume_stop)
4740 {
4741 if (debug_threads)
4742 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4743 " stopped\n",
4744 lwpid_of (thread));
4745 return 0;
4746 }
4747
4748 gdb_assert (lwp->suspended >= 0);
4749
4750 if (lwp->suspended)
4751 {
4752 if (debug_threads)
4753 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4754 lwpid_of (thread));
4755 return 0;
4756 }
4757
4758 if (lwp->status_pending_p)
4759 {
4760 if (debug_threads)
4761 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4762 " status.\n",
4763 lwpid_of (thread));
4764 return 0;
4765 }
4766
4767 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4768 or we have. */
4769 pc = get_pc (lwp);
4770
4771 /* If the PC has changed since we stopped, then don't do anything,
4772 and let the breakpoint/tracepoint be hit. This happens if, for
4773 instance, GDB handled the decr_pc_after_break subtraction itself,
4774 GDB is OOL stepping this thread, or the user has issued a "jump"
4775 command, or poked thread's registers herself. */
4776 if (pc != lwp->stop_pc)
4777 {
4778 if (debug_threads)
4779 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4780 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4781 lwpid_of (thread),
4782 paddress (lwp->stop_pc), paddress (pc));
4783 return 0;
4784 }
4785
4786   /* On a software single-step target, resume the inferior with the
4787      pending signal rather than stepping over.  */
4788 if (can_software_single_step ()
4789 && lwp->pending_signals != NULL
4790 && lwp_signal_can_be_delivered (lwp))
4791 {
4792 if (debug_threads)
4793 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4794 " signals.\n",
4795 lwpid_of (thread));
4796
4797 return 0;
4798 }
4799
4800 saved_thread = current_thread;
4801 current_thread = thread;
4802
4803 /* We can only step over breakpoints we know about. */
4804 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4805 {
4806 /* Don't step over a breakpoint that GDB expects to hit
4807 	 though.  If the condition is being evaluated on the target's side
4808 	 and it evaluates to false, step over this breakpoint as well.  */
4809 if (gdb_breakpoint_here (pc)
4810 && gdb_condition_true_at_breakpoint (pc)
4811 && gdb_no_commands_at_breakpoint (pc))
4812 {
4813 if (debug_threads)
4814 debug_printf ("Need step over [LWP %ld]? yes, but found"
4815 " GDB breakpoint at 0x%s; skipping step over\n",
4816 lwpid_of (thread), paddress (pc));
4817
4818 current_thread = saved_thread;
4819 return 0;
4820 }
4821 else
4822 {
4823 if (debug_threads)
4824 debug_printf ("Need step over [LWP %ld]? yes, "
4825 "found breakpoint at 0x%s\n",
4826 lwpid_of (thread), paddress (pc));
4827
4828 /* We've found an lwp that needs stepping over --- return 1 so
4829 that find_inferior stops looking. */
4830 current_thread = saved_thread;
4831
4832 return 1;
4833 }
4834 }
4835
4836 current_thread = saved_thread;
4837
4838 if (debug_threads)
4839 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4840 " at 0x%s\n",
4841 lwpid_of (thread), paddress (pc));
4842
4843 return 0;
4844 }
4845
4846 /* Start a step-over operation on LWP.  When LWP is stopped at a
4847    breakpoint, to make progress we need to get the breakpoint out
4848    of the way.  If we let other threads run while we do that, they may
4849 pass by the breakpoint location and miss hitting it. To avoid
4850 that, a step-over momentarily stops all threads while LWP is
4851 single-stepped by either hardware or software while the breakpoint
4852 is temporarily uninserted from the inferior. When the single-step
4853 finishes, we reinsert the breakpoint, and let all threads that are
4854 supposed to be running, run again. */
4855
4856 static int
4857 start_step_over (struct lwp_info *lwp)
4858 {
4859 struct thread_info *thread = get_lwp_thread (lwp);
4860 struct thread_info *saved_thread;
4861 CORE_ADDR pc;
4862 int step;
4863
4864 if (debug_threads)
4865 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4866 lwpid_of (thread));
4867
4868 stop_all_lwps (1, lwp);
4869
4870 if (lwp->suspended != 0)
4871 {
4872 internal_error (__FILE__, __LINE__,
4873 "LWP %ld suspended=%d\n", lwpid_of (thread),
4874 lwp->suspended);
4875 }
4876
4877 if (debug_threads)
4878 debug_printf ("Done stopping all threads for step-over.\n");
4879
4880 /* Note, we should always reach here with an already adjusted PC,
4881 either by GDB (if we're resuming due to GDB's request), or by our
4882 caller, if we just finished handling an internal breakpoint GDB
4883 shouldn't care about. */
4884 pc = get_pc (lwp);
4885
4886 saved_thread = current_thread;
4887 current_thread = thread;
4888
4889 lwp->bp_reinsert = pc;
4890 uninsert_breakpoints_at (pc);
4891 uninsert_fast_tracepoint_jumps_at (pc);
4892
4893 step = single_step (lwp);
4894
4895 current_thread = saved_thread;
4896
4897 linux_resume_one_lwp (lwp, step, 0, NULL);
4898
4899 /* Require next event from this LWP. */
4900 step_over_bkpt = thread->entry.id;
4901 return 1;
4902 }
4903
4904 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4905 start_step_over, if still there, and delete any single-step
4906    breakpoints we've set, on non-hardware single-step targets.  */
4907
4908 static int
4909 finish_step_over (struct lwp_info *lwp)
4910 {
4911 if (lwp->bp_reinsert != 0)
4912 {
4913 struct thread_info *saved_thread = current_thread;
4914
4915 if (debug_threads)
4916 debug_printf ("Finished step over.\n");
4917
4918 current_thread = get_lwp_thread (lwp);
4919
4920 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4921 may be no breakpoint to reinsert there by now. */
4922 reinsert_breakpoints_at (lwp->bp_reinsert);
4923 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4924
4925 lwp->bp_reinsert = 0;
4926
4927 /* Delete any single-step breakpoints. No longer needed. We
4928 don't have to worry about other threads hitting this trap,
4929 and later not being able to explain it, because we were
4930 stepping over a breakpoint, and we hold all threads but
4931 LWP stopped while doing that. */
4932 if (!can_hardware_single_step ())
4933 {
4934 gdb_assert (has_single_step_breakpoints (current_thread));
4935 delete_single_step_breakpoints (current_thread);
4936 }
4937
4938 step_over_bkpt = null_ptid;
4939 current_thread = saved_thread;
4940 return 1;
4941 }
4942 else
4943 return 0;
4944 }
4945
4946 /* If there's a step over in progress, wait until all threads stop
4947 (that is, until the stepping thread finishes its step), and
4948 unsuspend all lwps. The stepping thread ends with its status
4949 pending, which is processed later when we get back to processing
4950 events. */
4951
4952 static void
4953 complete_ongoing_step_over (void)
4954 {
4955 if (!ptid_equal (step_over_bkpt, null_ptid))
4956 {
4957 struct lwp_info *lwp;
4958 int wstat;
4959 int ret;
4960
4961 if (debug_threads)
4962 debug_printf ("detach: step over in progress, finish it first\n");
4963
4964 /* Passing NULL_PTID as filter indicates we want all events to
4965 be left pending. Eventually this returns when there are no
4966 unwaited-for children left. */
4967 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4968 &wstat, __WALL);
4969 gdb_assert (ret == -1);
4970
4971 lwp = find_lwp_pid (step_over_bkpt);
4972 if (lwp != NULL)
4973 finish_step_over (lwp);
4974 step_over_bkpt = null_ptid;
4975 unsuspend_all_lwps (lwp);
4976 }
4977 }
4978
4979 /* This function is called once per thread. We check the thread's resume
4980 request, which will tell us whether to resume, step, or leave the thread
4981 stopped; and what signal, if any, it should be sent.
4982
4983 For threads which we aren't explicitly told otherwise, we preserve
4984 the stepping flag; this is used for stepping over gdbserver-placed
4985 breakpoints.
4986
4987 If pending_flags was set in any thread, we queue any needed
4988 signals, since we won't actually resume. We already have a pending
4989 event to report, so we don't need to preserve any step requests;
4990 they should be re-issued if necessary. */
4991
4992 static int
4993 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4994 {
4995 struct thread_info *thread = (struct thread_info *) entry;
4996 struct lwp_info *lwp = get_thread_lwp (thread);
4997 int leave_all_stopped = * (int *) arg;
4998 int leave_pending;
4999
5000 if (lwp->resume == NULL)
5001 return 0;
5002
5003 if (lwp->resume->kind == resume_stop)
5004 {
5005 if (debug_threads)
5006 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
5007
5008 if (!lwp->stopped)
5009 {
5010 if (debug_threads)
5011 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5012
5013 /* Stop the thread, and wait for the event asynchronously,
5014 through the event loop. */
5015 send_sigstop (lwp);
5016 }
5017 else
5018 {
5019 if (debug_threads)
5020 debug_printf ("already stopped LWP %ld\n",
5021 lwpid_of (thread));
5022
5023 /* The LWP may have been stopped in an internal event that
5024 was not meant to be notified back to GDB (e.g., gdbserver
5025 breakpoint), so we should be reporting a stop event in
5026 this case too. */
5027
5028 /* If the thread already has a pending SIGSTOP, this is a
5029 no-op. Otherwise, something later will presumably resume
5030 the thread and this will cause it to cancel any pending
5031 operation, due to last_resume_kind == resume_stop. If
5032 the thread already has a pending status to report, we
5033 will still report it the next time we wait - see
5034 status_pending_p_callback. */
5035
5036 /* If we already have a pending signal to report, then
5037 there's no need to queue a SIGSTOP, as this means we're
5038 midway through moving the LWP out of the jumppad, and we
5039 will report the pending signal as soon as that is
5040 finished. */
5041 if (lwp->pending_signals_to_report == NULL)
5042 send_sigstop (lwp);
5043 }
5044
5045 /* For stop requests, we're done. */
5046 lwp->resume = NULL;
5047 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5048 return 0;
5049 }
5050
5051 /* If this thread which is about to be resumed has a pending status,
5052 then don't resume it - we can just report the pending status.
5053 Likewise if it is suspended, because e.g., another thread is
5054 stepping past a breakpoint. Make sure to queue any signals that
5055      would otherwise be sent.  In all-stop mode, we make this decision
5056      based on whether *any* thread has a pending status.  If there's a
5057 thread that needs the step-over-breakpoint dance, then don't
5058 resume any other thread but that particular one. */
5059 leave_pending = (lwp->suspended
5060 || lwp->status_pending_p
5061 || leave_all_stopped);
5062
5063 /* If we have a new signal, enqueue the signal. */
5064 if (lwp->resume->sig != 0)
5065 {
5066 siginfo_t info, *info_p;
5067
5068 /* If this is the same signal we were previously stopped by,
5069 make sure to queue its siginfo. */
5070 if (WIFSTOPPED (lwp->last_status)
5071 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5072 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5073 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5074 info_p = &info;
5075 else
5076 info_p = NULL;
5077
5078 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5079 }
5080
5081 if (!leave_pending)
5082 {
5083 if (debug_threads)
5084 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5085
5086 proceed_one_lwp (entry, NULL);
5087 }
5088 else
5089 {
5090 if (debug_threads)
5091 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5092 }
5093
5094 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5095 lwp->resume = NULL;
5096 return 0;
5097 }
5098
5099 static void
5100 linux_resume (struct thread_resume *resume_info, size_t n)
5101 {
5102 struct thread_resume_array array = { resume_info, n };
5103 struct thread_info *need_step_over = NULL;
5104 int any_pending;
5105 int leave_all_stopped;
5106
5107 if (debug_threads)
5108 {
5109 debug_enter ();
5110 debug_printf ("linux_resume:\n");
5111 }
5112
5113 find_inferior (&all_threads, linux_set_resume_request, &array);
5114
5115 /* If there is a thread which would otherwise be resumed, which has
5116 a pending status, then don't resume any threads - we can just
5117 report the pending status. Make sure to queue any signals that
5118 would otherwise be sent. In non-stop mode, we'll apply this
5119 logic to each thread individually. We consume all pending events
5120      before considering whether to start a step-over (in all-stop).  */
5121 any_pending = 0;
5122 if (!non_stop)
5123 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5124
5125 /* If there is a thread which would otherwise be resumed, which is
5126 stopped at a breakpoint that needs stepping over, then don't
5127 resume any threads - have it step over the breakpoint with all
5128 other threads stopped, then resume all threads again. Make sure
5129 to queue any signals that would otherwise be delivered or
5130 queued. */
5131 if (!any_pending && supports_breakpoints ())
5132 need_step_over
5133 = (struct thread_info *) find_inferior (&all_threads,
5134 need_step_over_p, NULL);
5135
5136 leave_all_stopped = (need_step_over != NULL || any_pending);
5137
5138 if (debug_threads)
5139 {
5140 if (need_step_over != NULL)
5141 debug_printf ("Not resuming all, need step over\n");
5142 else if (any_pending)
5143 debug_printf ("Not resuming, all-stop and found "
5144 "an LWP with pending status\n");
5145 else
5146 debug_printf ("Resuming, no pending status or step over needed\n");
5147 }
5148
5149 /* Even if we're leaving threads stopped, queue all signals we'd
5150 otherwise deliver. */
5151 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5152
5153 if (need_step_over)
5154 start_step_over (get_thread_lwp (need_step_over));
5155
5156 if (debug_threads)
5157 {
5158 debug_printf ("linux_resume done\n");
5159 debug_exit ();
5160 }
5161
5162 /* We may have events that were pending that can/should be sent to
5163 the client now. Trigger a linux_wait call. */
5164 if (target_is_async_p ())
5165 async_file_mark ();
5166 }
5167
5168 /* This function is called once per thread. We check the thread's
5169 last resume request, which will tell us whether to resume, step, or
5170 leave the thread stopped. Any signal the client requested to be
5171 delivered has already been enqueued at this point.
5172
5173 If any thread that GDB wants running is stopped at an internal
5174 breakpoint that needs stepping over, we start a step-over operation
5175 on that particular thread, and leave all others stopped. */
5176
5177 static int
5178 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5179 {
5180 struct thread_info *thread = (struct thread_info *) entry;
5181 struct lwp_info *lwp = get_thread_lwp (thread);
5182 int step;
5183
5184 if (lwp == except)
5185 return 0;
5186
5187 if (debug_threads)
5188 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5189
5190 if (!lwp->stopped)
5191 {
5192 if (debug_threads)
5193 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5194 return 0;
5195 }
5196
5197 if (thread->last_resume_kind == resume_stop
5198 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5199 {
5200 if (debug_threads)
5201 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5202 lwpid_of (thread));
5203 return 0;
5204 }
5205
5206 if (lwp->status_pending_p)
5207 {
5208 if (debug_threads)
5209 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5210 lwpid_of (thread));
5211 return 0;
5212 }
5213
5214 gdb_assert (lwp->suspended >= 0);
5215
5216 if (lwp->suspended)
5217 {
5218 if (debug_threads)
5219 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5220 return 0;
5221 }
5222
5223 if (thread->last_resume_kind == resume_stop
5224 && lwp->pending_signals_to_report == NULL
5225 && lwp->collecting_fast_tracepoint == 0)
5226 {
5227       /* We haven't reported this LWP as stopped yet (otherwise, the
5228 	 last_status.kind check above would catch it, and we wouldn't
5229 	 reach here).  This LWP may have been momentarily paused by a
5230 	 stop_all_lwps call while handling, for example, another LWP's
5231 step-over. In that case, the pending expected SIGSTOP signal
5232 that was queued at vCont;t handling time will have already
5233 been consumed by wait_for_sigstop, and so we need to requeue
5234 another one here. Note that if the LWP already has a SIGSTOP
5235 pending, this is a no-op. */
5236
5237 if (debug_threads)
5238 debug_printf ("Client wants LWP %ld to stop. "
5239 "Making sure it has a SIGSTOP pending\n",
5240 lwpid_of (thread));
5241
5242 send_sigstop (lwp);
5243 }
5244
5245 if (thread->last_resume_kind == resume_step)
5246 {
5247 if (debug_threads)
5248 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5249 lwpid_of (thread));
5250
5251 /* If resume_step is requested by GDB, install single-step
5252 breakpoints when the thread is about to be actually resumed if
5253 the single-step breakpoints weren't removed. */
5254 if (can_software_single_step ()
5255 && !has_single_step_breakpoints (thread))
5256 install_software_single_step_breakpoints (lwp);
5257
5258 step = maybe_hw_step (thread);
5259 }
5260 else if (lwp->bp_reinsert != 0)
5261 {
5262 if (debug_threads)
5263 debug_printf (" stepping LWP %ld, reinsert set\n",
5264 lwpid_of (thread));
5265
5266 step = maybe_hw_step (thread);
5267 }
5268 else
5269 step = 0;
5270
5271 linux_resume_one_lwp (lwp, step, 0, NULL);
5272 return 0;
5273 }
5274
5275 static int
5276 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5277 {
5278 struct thread_info *thread = (struct thread_info *) entry;
5279 struct lwp_info *lwp = get_thread_lwp (thread);
5280
5281 if (lwp == except)
5282 return 0;
5283
5284 lwp_suspended_decr (lwp);
5285
5286 return proceed_one_lwp (entry, except);
5287 }
5288
5289 /* When we finish a step-over, set threads running again. If there's
5290 another thread that may need a step-over, now's the time to start
5291 it. Eventually, we'll move all threads past their breakpoints. */
5292
5293 static void
5294 proceed_all_lwps (void)
5295 {
5296 struct thread_info *need_step_over;
5297
5298 /* If there is a thread which would otherwise be resumed, which is
5299 stopped at a breakpoint that needs stepping over, then don't
5300 resume any threads - have it step over the breakpoint with all
5301 other threads stopped, then resume all threads again. */
5302
5303 if (supports_breakpoints ())
5304 {
5305 need_step_over
5306 = (struct thread_info *) find_inferior (&all_threads,
5307 need_step_over_p, NULL);
5308
5309 if (need_step_over != NULL)
5310 {
5311 if (debug_threads)
5312 debug_printf ("proceed_all_lwps: found "
5313 "thread %ld needing a step-over\n",
5314 lwpid_of (need_step_over));
5315
5316 start_step_over (get_thread_lwp (need_step_over));
5317 return;
5318 }
5319 }
5320
5321 if (debug_threads)
5322 debug_printf ("Proceeding, no step-over needed\n");
5323
5324 find_inferior (&all_threads, proceed_one_lwp, NULL);
5325 }
5326
5327 /* Stopped LWPs that the client wanted to be running, that don't have
5328 pending statuses, are set to run again, except for EXCEPT, if not
5329 NULL. This undoes a stop_all_lwps call. */
5330
5331 static void
5332 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5333 {
5334 if (debug_threads)
5335 {
5336 debug_enter ();
5337 if (except)
5338 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5339 lwpid_of (get_lwp_thread (except)));
5340 else
5341 debug_printf ("unstopping all lwps\n");
5342 }
5343
5344 if (unsuspend)
5345 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5346 else
5347 find_inferior (&all_threads, proceed_one_lwp, except);
5348
5349 if (debug_threads)
5350 {
5351 debug_printf ("unstop_all_lwps done\n");
5352 debug_exit ();
5353 }
5354 }
5355
5356
5357 #ifdef HAVE_LINUX_REGSETS
5358
5359 #define use_linux_regsets 1
5360
5361 /* Returns true if REGSET has been disabled. */
5362
5363 static int
5364 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5365 {
5366 return (info->disabled_regsets != NULL
5367 && info->disabled_regsets[regset - info->regsets]);
5368 }
5369
5370 /* Disable REGSET. */
5371
5372 static void
5373 disable_regset (struct regsets_info *info, struct regset_info *regset)
5374 {
5375 int dr_offset;
5376
5377 dr_offset = regset - info->regsets;
5378 if (info->disabled_regsets == NULL)
5379 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5380 info->disabled_regsets[dr_offset] = 1;
5381 }
5382
5383 static int
5384 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5385 struct regcache *regcache)
5386 {
5387 struct regset_info *regset;
5388 int saw_general_regs = 0;
5389 int pid;
5390 struct iovec iov;
5391
5392 pid = lwpid_of (current_thread);
5393 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5394 {
5395 void *buf, *data;
5396 int nt_type, res;
5397
5398 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5399 continue;
5400
5401 buf = xmalloc (regset->size);
5402
5403 nt_type = regset->nt_type;
5404 if (nt_type)
5405 {
5406 iov.iov_base = buf;
5407 iov.iov_len = regset->size;
5408 data = (void *) &iov;
5409 }
5410 else
5411 data = buf;
5412
5413 #ifndef __sparc__
5414 res = ptrace (regset->get_request, pid,
5415 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5416 #else
5417 res = ptrace (regset->get_request, pid, data, nt_type);
5418 #endif
5419 if (res < 0)
5420 {
5421 if (errno == EIO)
5422 {
5423 /* If we get EIO on a regset, do not try it again for
5424 this process mode. */
5425 disable_regset (regsets_info, regset);
5426 }
5427 else if (errno == ENODATA)
5428 {
5429 /* ENODATA may be returned if the regset is currently
5430 not "active". This can happen in normal operation,
5431 so suppress the warning in this case. */
5432 }
5433 else if (errno == ESRCH)
5434 {
5435 /* At this point, ESRCH should mean the process is
5436 already gone, in which case we simply ignore attempts
5437 to read its registers. */
5438 }
5439 else
5440 {
5441 char s[256];
5442 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5443 pid);
5444 perror (s);
5445 }
5446 }
5447 else
5448 {
5449 if (regset->type == GENERAL_REGS)
5450 saw_general_regs = 1;
5451 regset->store_function (regcache, buf);
5452 }
5453 free (buf);
5454 }
5455 if (saw_general_regs)
5456 return 0;
5457 else
5458 return 1;
5459 }
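
/* When a regset has an nt_type, the transfer above goes through the
   modern PTRACE_GETREGSET interface, where an iovec carries the
   buffer and the kernel trims iov_len to what it actually filled in.
   A hypothetical standalone sketch for the general-purpose regset
   (user_regs_struct as on x86-64):  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
getregset_sketch (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* NT_PRSTATUS selects the general-purpose register set.  */
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif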
5460
5461 static int
5462 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5463 struct regcache *regcache)
5464 {
5465 struct regset_info *regset;
5466 int saw_general_regs = 0;
5467 int pid;
5468 struct iovec iov;
5469
5470 pid = lwpid_of (current_thread);
5471 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5472 {
5473 void *buf, *data;
5474 int nt_type, res;
5475
5476 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5477 || regset->fill_function == NULL)
5478 continue;
5479
5480 buf = xmalloc (regset->size);
5481
5482 /* First fill the buffer with the current register set contents,
5483 in case there are any items in the kernel's regset that are
5484 not in gdbserver's regcache. */
5485
5486 nt_type = regset->nt_type;
5487 if (nt_type)
5488 {
5489 iov.iov_base = buf;
5490 iov.iov_len = regset->size;
5491 data = (void *) &iov;
5492 }
5493 else
5494 data = buf;
5495
5496 #ifndef __sparc__
5497 res = ptrace (regset->get_request, pid,
5498 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5499 #else
5500 res = ptrace (regset->get_request, pid, data, nt_type);
5501 #endif
5502
5503 if (res == 0)
5504 {
5505 /* Then overlay our cached registers on that. */
5506 regset->fill_function (regcache, buf);
5507
5508 /* Only now do we write the register set. */
5509 #ifndef __sparc__
5510 res = ptrace (regset->set_request, pid,
5511 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5512 #else
5513 res = ptrace (regset->set_request, pid, data, nt_type);
5514 #endif
5515 }
5516
5517 if (res < 0)
5518 {
5519 if (errno == EIO)
5520 {
5521 /* If we get EIO on a regset, do not try it again for
5522 this process mode. */
5523 disable_regset (regsets_info, regset);
5524 }
5525 else if (errno == ESRCH)
5526 {
5527 /* At this point, ESRCH should mean the process is
5528 already gone, in which case we simply ignore attempts
5529 to change its registers. See also the related
5530 comment in linux_resume_one_lwp. */
5531 free (buf);
5532 return 0;
5533 }
5534 else
5535 {
5536 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5537 }
5538 }
5539 else if (regset->type == GENERAL_REGS)
5540 saw_general_regs = 1;
5541 free (buf);
5542 }
5543 if (saw_general_regs)
5544 return 0;
5545 else
5546 return 1;
5547 }
5548
5549 #else /* !HAVE_LINUX_REGSETS */
5550
5551 #define use_linux_regsets 0
5552 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5553 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5554
5555 #endif
5556
5557 /* Return 1 if register REGNO is supported by one of the regset ptrace
5558 calls or 0 if it has to be transferred individually. */
5559
5560 static int
5561 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5562 {
5563 unsigned char mask = 1 << (regno % 8);
5564 size_t index = regno / 8;
5565
5566 return (use_linux_regsets
5567 && (regs_info->regset_bitmap == NULL
5568 || (regs_info->regset_bitmap[index] & mask) != 0));
5569 }
5570
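/* Worked example (illustrative) for linux_register_in_regsets above:
   for REGNO == 10, INDEX is 10 / 8 == 1 and MASK is
   1 << (10 % 8) == 0x04, so the test checks bit 2 of
   regset_bitmap[1].  A NULL bitmap means every register is covered by
   some regset.  */
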
5571 #ifdef HAVE_LINUX_USRREGS
5572
5573 static int
5574 register_addr (const struct usrregs_info *usrregs, int regnum)
5575 {
5576 int addr;
5577
5578 if (regnum < 0 || regnum >= usrregs->num_regs)
5579 error ("Invalid register number %d.", regnum);
5580
5581 addr = usrregs->regmap[regnum];
5582
5583 return addr;
5584 }
5585
5586 /* Fetch one register. */
5587 static void
5588 fetch_register (const struct usrregs_info *usrregs,
5589 struct regcache *regcache, int regno)
5590 {
5591 CORE_ADDR regaddr;
5592 int i, size;
5593 char *buf;
5594 int pid;
5595
5596 if (regno >= usrregs->num_regs)
5597 return;
5598 if ((*the_low_target.cannot_fetch_register) (regno))
5599 return;
5600
5601 regaddr = register_addr (usrregs, regno);
5602 if (regaddr == -1)
5603 return;
5604
5605 size = ((register_size (regcache->tdesc, regno)
5606 + sizeof (PTRACE_XFER_TYPE) - 1)
5607 & -sizeof (PTRACE_XFER_TYPE));
5608 buf = (char *) alloca (size);
5609
5610 pid = lwpid_of (current_thread);
5611 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5612 {
5613 errno = 0;
5614 *(PTRACE_XFER_TYPE *) (buf + i) =
5615 ptrace (PTRACE_PEEKUSER, pid,
5616 /* Coerce to a uintptr_t first to avoid potential gcc warning
5617 of coercing an 8 byte integer to a 4 byte pointer. */
5618 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5619 regaddr += sizeof (PTRACE_XFER_TYPE);
5620 if (errno != 0)
5621 error ("reading register %d: %s", regno, strerror (errno));
5622 }
5623
5624 if (the_low_target.supply_ptrace_register)
5625 the_low_target.supply_ptrace_register (regcache, regno, buf);
5626 else
5627 supply_register (regcache, regno, buf);
5628 }
5629
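/* Both fetch_register above and store_register below round the
   register size up to a whole number of ptrace transfer words using
   (N + sizeof (PTRACE_XFER_TYPE) - 1) & -sizeof (PTRACE_XFER_TYPE).
   For example (illustrative), with a 10-byte register and 8-byte
   transfer words, (10 + 7) & -8 == 16, so two PTRACE_PEEKUSER or
   PTRACE_POKEUSER round trips are needed.  */
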
5630 /* Store one register. */
5631 static void
5632 store_register (const struct usrregs_info *usrregs,
5633 struct regcache *regcache, int regno)
5634 {
5635 CORE_ADDR regaddr;
5636 int i, size;
5637 char *buf;
5638 int pid;
5639
5640 if (regno >= usrregs->num_regs)
5641 return;
5642 if ((*the_low_target.cannot_store_register) (regno))
5643 return;
5644
5645 regaddr = register_addr (usrregs, regno);
5646 if (regaddr == -1)
5647 return;
5648
5649 size = ((register_size (regcache->tdesc, regno)
5650 + sizeof (PTRACE_XFER_TYPE) - 1)
5651 & -sizeof (PTRACE_XFER_TYPE));
5652 buf = (char *) alloca (size);
5653 memset (buf, 0, size);
5654
5655 if (the_low_target.collect_ptrace_register)
5656 the_low_target.collect_ptrace_register (regcache, regno, buf);
5657 else
5658 collect_register (regcache, regno, buf);
5659
5660 pid = lwpid_of (current_thread);
5661 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5662 {
5663 errno = 0;
5664 ptrace (PTRACE_POKEUSER, pid,
5665 /* Coerce to a uintptr_t first to avoid potential gcc warning
5666 about coercing an 8 byte integer to a 4 byte pointer. */
5667 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5668 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5669 if (errno != 0)
5670 {
5671 /* At this point, ESRCH should mean the process is
5672 already gone, in which case we simply ignore attempts
5673 to change its registers. See also the related
5674 comment in linux_resume_one_lwp. */
5675 if (errno == ESRCH)
5676 return;
5677
5678 if ((*the_low_target.cannot_store_register) (regno) == 0)
5679 error ("writing register %d: %s", regno, strerror (errno));
5680 }
5681 regaddr += sizeof (PTRACE_XFER_TYPE);
5682 }
5683 }
5684
5685 /* Fetch all registers, or just one, from the child process.
5686 If REGNO is -1, do this for all registers, skipping any that are
5687 assumed to have been retrieved by regsets_fetch_inferior_registers,
5688 unless ALL is non-zero.
5689 Otherwise, REGNO specifies which register (so we can save time). */
5690 static void
5691 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5692 struct regcache *regcache, int regno, int all)
5693 {
5694 struct usrregs_info *usr = regs_info->usrregs;
5695
5696 if (regno == -1)
5697 {
5698 for (regno = 0; regno < usr->num_regs; regno++)
5699 if (all || !linux_register_in_regsets (regs_info, regno))
5700 fetch_register (usr, regcache, regno);
5701 }
5702 else
5703 fetch_register (usr, regcache, regno);
5704 }
5705
5706 /* Store our register values back into the inferior.
5707 If REGNO is -1, do this for all registers, skipping any that are
5708 assumed to have been saved by regsets_store_inferior_registers,
5709 unless ALL is non-zero.
5710 Otherwise, REGNO specifies which register (so we can save time). */
5711 static void
5712 usr_store_inferior_registers (const struct regs_info *regs_info,
5713 struct regcache *regcache, int regno, int all)
5714 {
5715 struct usrregs_info *usr = regs_info->usrregs;
5716
5717 if (regno == -1)
5718 {
5719 for (regno = 0; regno < usr->num_regs; regno++)
5720 if (all || !linux_register_in_regsets (regs_info, regno))
5721 store_register (usr, regcache, regno);
5722 }
5723 else
5724 store_register (usr, regcache, regno);
5725 }
5726
5727 #else /* !HAVE_LINUX_USRREGS */
5728
5729 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5730 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5731
5732 #endif
5733
5734
5735 static void
5736 linux_fetch_registers (struct regcache *regcache, int regno)
5737 {
5738 int use_regsets;
5739 int all = 0;
5740 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5741
5742 if (regno == -1)
5743 {
5744 if (the_low_target.fetch_register != NULL
5745 && regs_info->usrregs != NULL)
5746 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5747 (*the_low_target.fetch_register) (regcache, regno);
5748
5749 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5750 if (regs_info->usrregs != NULL)
5751 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5752 }
5753 else
5754 {
5755 if (the_low_target.fetch_register != NULL
5756 && (*the_low_target.fetch_register) (regcache, regno))
5757 return;
5758
5759 use_regsets = linux_register_in_regsets (regs_info, regno);
5760 if (use_regsets)
5761 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5762 regcache);
5763 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5764 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5765 }
5766 }
5767
5768 static void
5769 linux_store_registers (struct regcache *regcache, int regno)
5770 {
5771 int use_regsets;
5772 int all = 0;
5773 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5774
5775 if (regno == -1)
5776 {
5777 all = regsets_store_inferior_registers (regs_info->regsets_info,
5778 regcache);
5779 if (regs_info->usrregs != NULL)
5780 usr_store_inferior_registers (regs_info, regcache, regno, all);
5781 }
5782 else
5783 {
5784 use_regsets = linux_register_in_regsets (regs_info, regno);
5785 if (use_regsets)
5786 all = regsets_store_inferior_registers (regs_info->regsets_info,
5787 regcache);
5788 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5789 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5790 }
5791 }
5792
5793
5794 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5795 to debugger memory starting at MYADDR. */
5796
5797 static int
5798 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5799 {
5800 int pid = lwpid_of (current_thread);
5801 register PTRACE_XFER_TYPE *buffer;
5802 register CORE_ADDR addr;
5803 register int count;
5804 char filename[64];
5805 register int i;
5806 int ret;
5807 int fd;
5808
5809 /* Try using /proc. Don't bother for one word. */
5810 if (len >= 3 * sizeof (long))
5811 {
5812 int bytes;
5813
5814 /* We could keep this file open and cache it - possibly one per
5815 thread. That requires some juggling, but is even faster. */
5816 sprintf (filename, "/proc/%d/mem", pid);
5817 fd = open (filename, O_RDONLY | O_LARGEFILE);
5818 if (fd == -1)
5819 goto no_proc;
5820
5821 /* If pread64 is available, use it. It's faster if the kernel
5822 supports it (only one syscall), and it's 64-bit safe even on
5823 32-bit platforms (for instance, SPARC debugging a SPARC64
5824 application). */
5825 #ifdef HAVE_PREAD64
5826 bytes = pread64 (fd, myaddr, len, memaddr);
5827 #else
5828 bytes = -1;
5829 if (lseek (fd, memaddr, SEEK_SET) != -1)
5830 bytes = read (fd, myaddr, len);
5831 #endif
5832
5833 close (fd);
5834 if (bytes == len)
5835 return 0;
5836
5837 /* Some data was read; try to get the rest with ptrace. */
5838 if (bytes > 0)
5839 {
5840 memaddr += bytes;
5841 myaddr += bytes;
5842 len -= bytes;
5843 }
5844 }
5845
5846 no_proc:
5847 /* Round starting address down to longword boundary. */
5848 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5849 /* Round ending address up; get number of longwords that makes. */
5850 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5851 / sizeof (PTRACE_XFER_TYPE));
5852 /* Allocate buffer of that many longwords. */
5853 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5854
5855 /* Read all the longwords. */
5856 errno = 0;
5857 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5858 {
5859 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5860 about coercing an 8 byte integer to a 4 byte pointer. */
5861 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5862 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5863 (PTRACE_TYPE_ARG4) 0);
5864 if (errno)
5865 break;
5866 }
5867 ret = errno;
5868
5869 /* Copy appropriate bytes out of the buffer. */
5870 if (i > 0)
5871 {
5872 i *= sizeof (PTRACE_XFER_TYPE);
5873 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5874 memcpy (myaddr,
5875 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5876 i < len ? i : len);
5877 }
5878
5879 return ret;
5880 }
5881
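/* Worked example for the alignment math in linux_read_memory above
   (illustrative numbers): with 4-byte transfer words, MEMADDR == 0x1003
   and LEN == 6, ADDR is rounded down to 0x1000 and COUNT is
   ((0x1009 - 0x1000) + 3) / 4 == 3, so three words covering
   0x1000..0x100b are peeked; the final memcpy then skips the first
   MEMADDR & 3 == 3 bytes of the buffer.  */
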
5882 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5883 memory at MEMADDR. On failure (cannot write to the inferior)
5884 returns the value of errno. Always succeeds if LEN is zero. */
5885
5886 static int
5887 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5888 {
5889 register int i;
5890 /* Round starting address down to longword boundary. */
5891 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5892 /* Round ending address up; get number of longwords that makes. */
5893 register int count
5894 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5895 / sizeof (PTRACE_XFER_TYPE);
5896
5897 /* Allocate buffer of that many longwords. */
5898 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5899
5900 int pid = lwpid_of (current_thread);
5901
5902 if (len == 0)
5903 {
5904 /* Zero length write always succeeds. */
5905 return 0;
5906 }
5907
5908 if (debug_threads)
5909 {
5910 /* Dump up to four bytes. */
5911 char str[4 * 2 + 1];
5912 char *p = str;
5913 int dump = len < 4 ? len : 4;
5914
5915 for (i = 0; i < dump; i++)
5916 {
5917 sprintf (p, "%02x", myaddr[i]);
5918 p += 2;
5919 }
5920 *p = '\0';
5921
5922 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5923 str, (long) memaddr, pid);
5924 }
5925
5926 /* Fill start and end extra bytes of buffer with existing memory data. */
5927
5928 errno = 0;
5929 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5930 about coercing an 8 byte integer to a 4 byte pointer. */
5931 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5932 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5933 (PTRACE_TYPE_ARG4) 0);
5934 if (errno)
5935 return errno;
5936
5937 if (count > 1)
5938 {
5939 errno = 0;
5940 buffer[count - 1]
5941 = ptrace (PTRACE_PEEKTEXT, pid,
5942 /* Coerce to a uintptr_t first to avoid potential gcc warning
5943 about coercing an 8 byte integer to a 4 byte pointer. */
5944 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5945 * sizeof (PTRACE_XFER_TYPE)),
5946 (PTRACE_TYPE_ARG4) 0);
5947 if (errno)
5948 return errno;
5949 }
5950
5951 /* Copy data to be written over corresponding part of buffer. */
5952
5953 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5954 myaddr, len);
5955
5956 /* Write the entire buffer. */
5957
5958 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5959 {
5960 errno = 0;
5961 ptrace (PTRACE_POKETEXT, pid,
5962 /* Coerce to a uintptr_t first to avoid potential gcc warning
5963 about coercing an 8 byte integer to a 4 byte pointer. */
5964 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5965 (PTRACE_TYPE_ARG4) buffer[i]);
5966 if (errno)
5967 return errno;
5968 }
5969
5970 return 0;
5971 }
5972
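/* Note on the two PTRACE_PEEKTEXT calls in linux_write_memory above:
   because ptrace can only poke whole aligned words, the first and last
   words of the range are read first, so that any bytes outside
   [MEMADDR, MEMADDR + LEN) are written back unchanged (a
   read-modify-write of the edge words).  */
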
5973 static void
5974 linux_look_up_symbols (void)
5975 {
5976 #ifdef USE_THREAD_DB
5977 struct process_info *proc = current_process ();
5978
5979 if (proc->priv->thread_db != NULL)
5980 return;
5981
5982 thread_db_init ();
5983 #endif
5984 }
5985
5986 static void
5987 linux_request_interrupt (void)
5988 {
5989 extern unsigned long signal_pid;
5990
5991 /* Send a SIGINT to the process group. This acts just like the user
5992 typed a ^C on the controlling terminal. */
5993 kill (-signal_pid, SIGINT);
5994 }
5995
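/* Per POSIX, kill with a negative PID signals every member of the
   process group whose ID is -PID, so the call above reaches all of the
   inferior's processes, not just the group leader (assuming, as set up
   at inferior creation time, that SIGNAL_PID leads its own process
   group).  */
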
5996 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5997 to debugger memory starting at MYADDR. */
5998
5999 static int
6000 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
6001 {
6002 char filename[PATH_MAX];
6003 int fd, n;
6004 int pid = lwpid_of (current_thread);
6005
6006 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6007
6008 fd = open (filename, O_RDONLY);
6009 if (fd < 0)
6010 return -1;
6011
6012 if (offset != (CORE_ADDR) 0
6013 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6014 n = -1;
6015 else
6016 n = read (fd, myaddr, len);
6017
6018 close (fd);
6019
6020 return n;
6021 }
6022
6023 /* These breakpoint- and watchpoint-related wrapper functions simply
6024 pass on the function call if the target has registered a
6025 corresponding function. */
6026
6027 static int
6028 linux_supports_z_point_type (char z_type)
6029 {
6030 return (the_low_target.supports_z_point_type != NULL
6031 && the_low_target.supports_z_point_type (z_type));
6032 }
6033
6034 static int
6035 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6036 int size, struct raw_breakpoint *bp)
6037 {
6038 if (type == raw_bkpt_type_sw)
6039 return insert_memory_breakpoint (bp);
6040 else if (the_low_target.insert_point != NULL)
6041 return the_low_target.insert_point (type, addr, size, bp);
6042 else
6043 /* Unsupported (see target.h). */
6044 return 1;
6045 }
6046
6047 static int
6048 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6049 int size, struct raw_breakpoint *bp)
6050 {
6051 if (type == raw_bkpt_type_sw)
6052 return remove_memory_breakpoint (bp);
6053 else if (the_low_target.remove_point != NULL)
6054 return the_low_target.remove_point (type, addr, size, bp);
6055 else
6056 /* Unsupported (see target.h). */
6057 return 1;
6058 }
6059
6060 /* Implement the to_stopped_by_sw_breakpoint target_ops
6061 method. */
6062
6063 static int
6064 linux_stopped_by_sw_breakpoint (void)
6065 {
6066 struct lwp_info *lwp = get_thread_lwp (current_thread);
6067
6068 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6069 }
6070
6071 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6072 method. */
6073
6074 static int
6075 linux_supports_stopped_by_sw_breakpoint (void)
6076 {
6077 return USE_SIGTRAP_SIGINFO;
6078 }
6079
6080 /* Implement the to_stopped_by_hw_breakpoint target_ops
6081 method. */
6082
6083 static int
6084 linux_stopped_by_hw_breakpoint (void)
6085 {
6086 struct lwp_info *lwp = get_thread_lwp (current_thread);
6087
6088 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6089 }
6090
6091 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6092 method. */
6093
6094 static int
6095 linux_supports_stopped_by_hw_breakpoint (void)
6096 {
6097 return USE_SIGTRAP_SIGINFO;
6098 }
6099
6100 /* Implement the supports_hardware_single_step target_ops method. */
6101
6102 static int
6103 linux_supports_hardware_single_step (void)
6104 {
6105 return can_hardware_single_step ();
6106 }
6107
6108 static int
6109 linux_supports_software_single_step (void)
6110 {
6111 return can_software_single_step ();
6112 }
6113
6114 static int
6115 linux_stopped_by_watchpoint (void)
6116 {
6117 struct lwp_info *lwp = get_thread_lwp (current_thread);
6118
6119 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6120 }
6121
6122 static CORE_ADDR
6123 linux_stopped_data_address (void)
6124 {
6125 struct lwp_info *lwp = get_thread_lwp (current_thread);
6126
6127 return lwp->stopped_data_address;
6128 }
6129
6130 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6131 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6132 && defined(PT_TEXT_END_ADDR)
6133
6134 /* This is only used for targets that define PT_TEXT_ADDR,
6135 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6136 target presumably has other ways of acquiring this information,
6137 such as loadmaps. */
6138
6139 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6140 to tell gdb about. */
6141
6142 static int
6143 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6144 {
6145 unsigned long text, text_end, data;
6146 int pid = lwpid_of (current_thread);
6147
6148 errno = 0;
6149
6150 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6151 (PTRACE_TYPE_ARG4) 0);
6152 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6153 (PTRACE_TYPE_ARG4) 0);
6154 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6155 (PTRACE_TYPE_ARG4) 0);
6156
6157 if (errno == 0)
6158 {
6159 /* Both text and data offsets produced at compile-time (and so
6160 used by gdb) are relative to the beginning of the program,
6161 with the data segment immediately following the text segment.
6162 However, the actual runtime layout in memory may put the data
6163 somewhere else, so when we send gdb a data base-address, we
6164 use the real data base address and subtract the compile-time
6165 data base-address from it (which is just the length of the
6166 text segment). BSS immediately follows data in both
6167 cases. */
6168 *text_p = text;
6169 *data_p = data - (text_end - text);
6170
6171 return 1;
6172 }
6173 return 0;
6174 }
6175 #endif
6176
6177 static int
6178 linux_qxfer_osdata (const char *annex,
6179 unsigned char *readbuf, unsigned const char *writebuf,
6180 CORE_ADDR offset, int len)
6181 {
6182 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6183 }
6184
6185 /* Convert a native/host siginfo object into/from the siginfo in the
6186 layout of the inferior's architecture. */
6187
6188 static void
6189 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6190 {
6191 int done = 0;
6192
6193 if (the_low_target.siginfo_fixup != NULL)
6194 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6195
6196 /* If there was no callback, or the callback didn't do anything,
6197 then just do a straight memcpy. */
6198 if (!done)
6199 {
6200 if (direction == 1)
6201 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6202 else
6203 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6204 }
6205 }
6206
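/* Direction convention used above (and by the callers below):
   DIRECTION == 0 converts from the native layout in SIGINFO to the
   inferior layout in INF_SIGINFO; DIRECTION == 1 converts the other
   way.  The memcpy fallback is only byte-for-byte, which is correct
   whenever the host and inferior layouts already match.  */
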
6207 static int
6208 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6209 unsigned const char *writebuf, CORE_ADDR offset, int len)
6210 {
6211 int pid;
6212 siginfo_t siginfo;
6213 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6214
6215 if (current_thread == NULL)
6216 return -1;
6217
6218 pid = lwpid_of (current_thread);
6219
6220 if (debug_threads)
6221 debug_printf ("%s siginfo for lwp %d.\n",
6222 readbuf != NULL ? "Reading" : "Writing",
6223 pid);
6224
6225 if (offset >= sizeof (siginfo))
6226 return -1;
6227
6228 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6229 return -1;
6230
6231 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6232 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6233 inferior with a 64-bit GDBSERVER should look the same as debugging it
6234 with a 32-bit GDBSERVER, we need to convert it. */
6235 siginfo_fixup (&siginfo, inf_siginfo, 0);
6236
6237 if (offset + len > sizeof (siginfo))
6238 len = sizeof (siginfo) - offset;
6239
6240 if (readbuf != NULL)
6241 memcpy (readbuf, inf_siginfo + offset, len);
6242 else
6243 {
6244 memcpy (inf_siginfo + offset, writebuf, len);
6245
6246 /* Convert back to ptrace layout before flushing it out. */
6247 siginfo_fixup (&siginfo, inf_siginfo, 1);
6248
6249 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6250 return -1;
6251 }
6252
6253 return len;
6254 }
6255
6256 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6257 it lets us notice when children change state; and it acts as the
6258 handler for the sigsuspend in my_waitpid. */
6259
6260 static void
6261 sigchld_handler (int signo)
6262 {
6263 int old_errno = errno;
6264
6265 if (debug_threads)
6266 {
6267 do
6268 {
6269 /* fprintf is not async-signal-safe, so call write
6270 directly. */
6271 if (write (2, "sigchld_handler\n",
6272 sizeof ("sigchld_handler\n") - 1) < 0)
6273 break; /* just ignore */
6274 } while (0);
6275 }
6276
6277 if (target_is_async_p ())
6278 async_file_mark (); /* trigger a linux_wait */
6279
6280 errno = old_errno;
6281 }
6282
6283 static int
6284 linux_supports_non_stop (void)
6285 {
6286 return 1;
6287 }
6288
6289 static int
6290 linux_async (int enable)
6291 {
6292 int previous = target_is_async_p ();
6293
6294 if (debug_threads)
6295 debug_printf ("linux_async (%d), previous=%d\n",
6296 enable, previous);
6297
6298 if (previous != enable)
6299 {
6300 sigset_t mask;
6301 sigemptyset (&mask);
6302 sigaddset (&mask, SIGCHLD);
6303
6304 sigprocmask (SIG_BLOCK, &mask, NULL);
6305
6306 if (enable)
6307 {
6308 if (pipe (linux_event_pipe) == -1)
6309 {
6310 linux_event_pipe[0] = -1;
6311 linux_event_pipe[1] = -1;
6312 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6313
6314 warning ("creating event pipe failed.");
6315 return previous;
6316 }
6317
6318 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6319 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6320
6321 /* Register the event loop handler. */
6322 add_file_handler (linux_event_pipe[0],
6323 handle_target_event, NULL);
6324
6325 /* Always trigger a linux_wait. */
6326 async_file_mark ();
6327 }
6328 else
6329 {
6330 delete_file_handler (linux_event_pipe[0]);
6331
6332 close (linux_event_pipe[0]);
6333 close (linux_event_pipe[1]);
6334 linux_event_pipe[0] = -1;
6335 linux_event_pipe[1] = -1;
6336 }
6337
6338 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6339 }
6340
6341 return previous;
6342 }
6343
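/* The event pipe above is the classic self-pipe trick: the SIGCHLD
   handler marks the pipe (async_file_mark writes a byte to
   linux_event_pipe[1]), and the event loop watches linux_event_pipe[0]
   via add_file_handler.  Both ends are O_NONBLOCK so marking can never
   block inside the signal handler, and SIGCHLD is blocked around the
   setup/teardown above to avoid racing with the handler.  */
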
6344 static int
6345 linux_start_non_stop (int nonstop)
6346 {
6347 /* Register or unregister from event-loop accordingly. */
6348 linux_async (nonstop);
6349
6350 if (target_is_async_p () != (nonstop != 0))
6351 return -1;
6352
6353 return 0;
6354 }
6355
6356 static int
6357 linux_supports_multi_process (void)
6358 {
6359 return 1;
6360 }
6361
6362 /* Check if fork events are supported. */
6363
6364 static int
6365 linux_supports_fork_events (void)
6366 {
6367 return linux_supports_tracefork ();
6368 }
6369
6370 /* Check if vfork events are supported. */
6371
6372 static int
6373 linux_supports_vfork_events (void)
6374 {
6375 return linux_supports_tracefork ();
6376 }
6377
6378 /* Check if exec events are supported. */
6379
6380 static int
6381 linux_supports_exec_events (void)
6382 {
6383 return linux_supports_traceexec ();
6384 }
6385
6386 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6387 options for the specified lwp. */
6388
6389 static int
6390 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6391 void *args)
6392 {
6393 struct thread_info *thread = (struct thread_info *) entry;
6394 struct lwp_info *lwp = get_thread_lwp (thread);
6395
6396 if (!lwp->stopped)
6397 {
6398 /* Stop the lwp so we can modify its ptrace options. */
6399 lwp->must_set_ptrace_flags = 1;
6400 linux_stop_lwp (lwp);
6401 }
6402 else
6403 {
6404 /* Already stopped; go ahead and set the ptrace options. */
6405 struct process_info *proc = find_process_pid (pid_of (thread));
6406 int options = linux_low_ptrace_options (proc->attached);
6407
6408 linux_enable_event_reporting (lwpid_of (thread), options);
6409 lwp->must_set_ptrace_flags = 0;
6410 }
6411
6412 return 0;
6413 }
6414
6415 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6416 ptrace flags for all inferiors. This is in case the new GDB connection
6417 doesn't support the same set of events that the previous one did. */
6418
6419 static void
6420 linux_handle_new_gdb_connection (void)
6421 {
6422 pid_t pid;
6423
6424 /* Request that all the lwps reset their ptrace options. */
6425 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6426 }
6427
6428 static int
6429 linux_supports_disable_randomization (void)
6430 {
6431 #ifdef HAVE_PERSONALITY
6432 return 1;
6433 #else
6434 return 0;
6435 #endif
6436 }
6437
6438 static int
6439 linux_supports_agent (void)
6440 {
6441 return 1;
6442 }
6443
6444 static int
6445 linux_supports_range_stepping (void)
6446 {
6447 if (*the_low_target.supports_range_stepping == NULL)
6448 return 0;
6449
6450 return (*the_low_target.supports_range_stepping) ();
6451 }
6452
6453 /* Enumerate spufs IDs for process PID. */
6454 static int
6455 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6456 {
6457 int pos = 0;
6458 int written = 0;
6459 char path[128];
6460 DIR *dir;
6461 struct dirent *entry;
6462
6463 sprintf (path, "/proc/%ld/fd", pid);
6464 dir = opendir (path);
6465 if (!dir)
6466 return -1;
6467
6468 rewinddir (dir);
6469 while ((entry = readdir (dir)) != NULL)
6470 {
6471 struct stat st;
6472 struct statfs stfs;
6473 int fd;
6474
6475 fd = atoi (entry->d_name);
6476 if (!fd)
6477 continue;
6478
6479 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6480 if (stat (path, &st) != 0)
6481 continue;
6482 if (!S_ISDIR (st.st_mode))
6483 continue;
6484
6485 if (statfs (path, &stfs) != 0)
6486 continue;
6487 if (stfs.f_type != SPUFS_MAGIC)
6488 continue;
6489
6490 if (pos >= offset && pos + 4 <= offset + len)
6491 {
6492 *(unsigned int *)(buf + pos - offset) = fd;
6493 written += 4;
6494 }
6495 pos += 4;
6496 }
6497
6498 closedir (dir);
6499 return written;
6500 }
6501
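/* Windowing example for the loop above (illustrative): each spufs
   context contributes 4 bytes (its fd as an unsigned int) at POS 0, 4,
   8, ...  With OFFSET == 4 and LEN == 8, only the second and third IDs
   land fully inside the window and are written, and the function
   returns 8.  IDs straddling the window edge are skipped entirely.  */
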
6502 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6503 object type, using the /proc file system. */
6504 static int
6505 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6506 unsigned const char *writebuf,
6507 CORE_ADDR offset, int len)
6508 {
6509 long pid = lwpid_of (current_thread);
6510 char buf[128];
6511 int fd = 0;
6512 int ret = 0;
6513
6514 if (!writebuf && !readbuf)
6515 return -1;
6516
6517 if (!*annex)
6518 {
6519 if (!readbuf)
6520 return -1;
6521 else
6522 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6523 }
6524
6525 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6526 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6527 if (fd <= 0)
6528 return -1;
6529
6530 if (offset != 0
6531 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6532 {
6533 close (fd);
6534 return 0;
6535 }
6536
6537 if (writebuf)
6538 ret = write (fd, writebuf, (size_t) len);
6539 else
6540 ret = read (fd, readbuf, (size_t) len);
6541
6542 close (fd);
6543 return ret;
6544 }
6545
6546 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6547 struct target_loadseg
6548 {
6549 /* Core address to which the segment is mapped. */
6550 Elf32_Addr addr;
6551 /* VMA recorded in the program header. */
6552 Elf32_Addr p_vaddr;
6553 /* Size of this segment in memory. */
6554 Elf32_Word p_memsz;
6555 };
6556
6557 # if defined PT_GETDSBT
6558 struct target_loadmap
6559 {
6560 /* Protocol version number, must be zero. */
6561 Elf32_Word version;
6562 /* Pointer to the DSBT table, its size, and the DSBT index. */
6563 unsigned *dsbt_table;
6564 unsigned dsbt_size, dsbt_index;
6565 /* Number of segments in this map. */
6566 Elf32_Word nsegs;
6567 /* The actual memory map. */
6568 struct target_loadseg segs[/*nsegs*/];
6569 };
6570 # define LINUX_LOADMAP PT_GETDSBT
6571 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6572 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6573 # else
6574 struct target_loadmap
6575 {
6576 /* Protocol version number, must be zero. */
6577 Elf32_Half version;
6578 /* Number of segments in this map. */
6579 Elf32_Half nsegs;
6580 /* The actual memory map. */
6581 struct target_loadseg segs[/*nsegs*/];
6582 };
6583 # define LINUX_LOADMAP PTRACE_GETFDPIC
6584 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6585 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6586 # endif
6587
6588 static int
6589 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6590 unsigned char *myaddr, unsigned int len)
6591 {
6592 int pid = lwpid_of (current_thread);
6593 int addr = -1;
6594 struct target_loadmap *data = NULL;
6595 unsigned int actual_length, copy_length;
6596
6597 if (strcmp (annex, "exec") == 0)
6598 addr = (int) LINUX_LOADMAP_EXEC;
6599 else if (strcmp (annex, "interp") == 0)
6600 addr = (int) LINUX_LOADMAP_INTERP;
6601 else
6602 return -1;
6603
6604 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6605 return -1;
6606
6607 if (data == NULL)
6608 return -1;
6609
6610 actual_length = sizeof (struct target_loadmap)
6611 + sizeof (struct target_loadseg) * data->nsegs;
6612
6613 if (offset < 0 || offset > actual_length)
6614 return -1;
6615
6616 copy_length = actual_length - offset < len ? actual_length - offset : len;
6617 memcpy (myaddr, (char *) data + offset, copy_length);
6618 return copy_length;
6619 }
6620 #else
6621 # define linux_read_loadmap NULL
6622 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6623
6624 static void
6625 linux_process_qsupported (char **features, int count)
6626 {
6627 if (the_low_target.process_qsupported != NULL)
6628 the_low_target.process_qsupported (features, count);
6629 }
6630
6631 static int
6632 linux_supports_catch_syscall (void)
6633 {
6634 return (the_low_target.get_syscall_trapinfo != NULL
6635 && linux_supports_tracesysgood ());
6636 }
6637
6638 static int
6639 linux_get_ipa_tdesc_idx (void)
6640 {
6641 if (the_low_target.get_ipa_tdesc_idx == NULL)
6642 return 0;
6643
6644 return (*the_low_target.get_ipa_tdesc_idx) ();
6645 }
6646
6647 static int
6648 linux_supports_tracepoints (void)
6649 {
6650 if (*the_low_target.supports_tracepoints == NULL)
6651 return 0;
6652
6653 return (*the_low_target.supports_tracepoints) ();
6654 }
6655
6656 static CORE_ADDR
6657 linux_read_pc (struct regcache *regcache)
6658 {
6659 if (the_low_target.get_pc == NULL)
6660 return 0;
6661
6662 return (*the_low_target.get_pc) (regcache);
6663 }
6664
6665 static void
6666 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6667 {
6668 gdb_assert (the_low_target.set_pc != NULL);
6669
6670 (*the_low_target.set_pc) (regcache, pc);
6671 }
6672
6673 static int
6674 linux_thread_stopped (struct thread_info *thread)
6675 {
6676 return get_thread_lwp (thread)->stopped;
6677 }
6678
6679 /* This exposes stop-all-threads functionality to other modules. */
6680
6681 static void
6682 linux_pause_all (int freeze)
6683 {
6684 stop_all_lwps (freeze, NULL);
6685 }
6686
6687 /* This exposes unstop-all-threads functionality to other gdbserver
6688 modules. */
6689
6690 static void
6691 linux_unpause_all (int unfreeze)
6692 {
6693 unstop_all_lwps (unfreeze, NULL);
6694 }
6695
6696 static int
6697 linux_prepare_to_access_memory (void)
6698 {
6699 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6700 running LWP. */
6701 if (non_stop)
6702 linux_pause_all (1);
6703 return 0;
6704 }
6705
6706 static void
6707 linux_done_accessing_memory (void)
6708 {
6709 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6710 running LWP. */
6711 if (non_stop)
6712 linux_unpause_all (1);
6713 }
6714
6715 static int
6716 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6717 CORE_ADDR collector,
6718 CORE_ADDR lockaddr,
6719 ULONGEST orig_size,
6720 CORE_ADDR *jump_entry,
6721 CORE_ADDR *trampoline,
6722 ULONGEST *trampoline_size,
6723 unsigned char *jjump_pad_insn,
6724 ULONGEST *jjump_pad_insn_size,
6725 CORE_ADDR *adjusted_insn_addr,
6726 CORE_ADDR *adjusted_insn_addr_end,
6727 char *err)
6728 {
6729 return (*the_low_target.install_fast_tracepoint_jump_pad)
6730 (tpoint, tpaddr, collector, lockaddr, orig_size,
6731 jump_entry, trampoline, trampoline_size,
6732 jjump_pad_insn, jjump_pad_insn_size,
6733 adjusted_insn_addr, adjusted_insn_addr_end,
6734 err);
6735 }
6736
6737 static struct emit_ops *
6738 linux_emit_ops (void)
6739 {
6740 if (the_low_target.emit_ops != NULL)
6741 return (*the_low_target.emit_ops) ();
6742 else
6743 return NULL;
6744 }
6745
6746 static int
6747 linux_get_min_fast_tracepoint_insn_len (void)
6748 {
6749 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6750 }
6751
6752 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6753
6754 static int
6755 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6756 CORE_ADDR *phdr_memaddr, int *num_phdr)
6757 {
6758 char filename[PATH_MAX];
6759 int fd;
6760 const int auxv_size = is_elf64
6761 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6762 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6763
6764 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6765
6766 fd = open (filename, O_RDONLY);
6767 if (fd < 0)
6768 return 1;
6769
6770 *phdr_memaddr = 0;
6771 *num_phdr = 0;
6772 while (read (fd, buf, auxv_size) == auxv_size
6773 && (*phdr_memaddr == 0 || *num_phdr == 0))
6774 {
6775 if (is_elf64)
6776 {
6777 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6778
6779 switch (aux->a_type)
6780 {
6781 case AT_PHDR:
6782 *phdr_memaddr = aux->a_un.a_val;
6783 break;
6784 case AT_PHNUM:
6785 *num_phdr = aux->a_un.a_val;
6786 break;
6787 }
6788 }
6789 else
6790 {
6791 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6792
6793 switch (aux->a_type)
6794 {
6795 case AT_PHDR:
6796 *phdr_memaddr = aux->a_un.a_val;
6797 break;
6798 case AT_PHNUM:
6799 *num_phdr = aux->a_un.a_val;
6800 break;
6801 }
6802 }
6803 }
6804
6805 close (fd);
6806
6807 if (*phdr_memaddr == 0 || *num_phdr == 0)
6808 {
6809 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6810 "phdr_memaddr = %ld, phdr_num = %d",
6811 (long) *phdr_memaddr, *num_phdr);
6812 return 2;
6813 }
6814
6815 return 0;
6816 }
6817
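/* The auxiliary vector parsed above is a flat array of (a_type,
   a_un.a_val) pairs terminated by AT_NULL.  A minimal reader looks
   like this (sketch, 64-bit case only):

     Elf64_auxv_t aux;

     while (read (fd, &aux, sizeof aux) == sizeof aux
            && aux.a_type != AT_NULL)
       if (aux.a_type == AT_PHDR)
         *phdr_memaddr = aux.a_un.a_val;

   AT_PHDR gives the runtime address of the program headers and
   AT_PHNUM their count.  */
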
6818 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6819
6820 static CORE_ADDR
6821 get_dynamic (const int pid, const int is_elf64)
6822 {
6823 CORE_ADDR phdr_memaddr, relocation;
6824 int num_phdr, i;
6825 unsigned char *phdr_buf;
6826 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6827
6828 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6829 return 0;
6830
6831 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6832 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6833
6834 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6835 return 0;
6836
6837 /* Compute relocation: it is expected to be 0 for "regular" executables,
6838 non-zero for PIE ones. */
6839 relocation = -1;
6840 for (i = 0; relocation == -1 && i < num_phdr; i++)
6841 if (is_elf64)
6842 {
6843 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6844
6845 if (p->p_type == PT_PHDR)
6846 relocation = phdr_memaddr - p->p_vaddr;
6847 }
6848 else
6849 {
6850 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6851
6852 if (p->p_type == PT_PHDR)
6853 relocation = phdr_memaddr - p->p_vaddr;
6854 }
6855
6856 if (relocation == -1)
6857 {
6858 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6859 all real-world executables, including PIE executables, always have
6860 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6861 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6862 provides DT_DEBUG anyway (fpc binaries are statically linked).
6863
6864 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6865
6866 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6867
6868 return 0;
6869 }
6870
6871 for (i = 0; i < num_phdr; i++)
6872 {
6873 if (is_elf64)
6874 {
6875 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6876
6877 if (p->p_type == PT_DYNAMIC)
6878 return p->p_vaddr + relocation;
6879 }
6880 else
6881 {
6882 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6883
6884 if (p->p_type == PT_DYNAMIC)
6885 return p->p_vaddr + relocation;
6886 }
6887 }
6888
6889 return 0;
6890 }
6891
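/* Relocation example for get_dynamic above (illustrative addresses):
   if the PT_PHDR segment records p_vaddr 0x40 but the headers are
   observed in memory at 0x555555554040, the load bias is
   0x555555554000, and that bias is added to the PT_DYNAMIC p_vaddr to
   find _DYNAMIC at run time.  For a non-PIE executable the two values
   match and the bias is zero.  */
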
6892 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6893 can be 0 if the inferior does not yet have the library list initialized.
6894 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6895 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6896
6897 static CORE_ADDR
6898 get_r_debug (const int pid, const int is_elf64)
6899 {
6900 CORE_ADDR dynamic_memaddr;
6901 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6902 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6903 CORE_ADDR map = -1;
6904
6905 dynamic_memaddr = get_dynamic (pid, is_elf64);
6906 if (dynamic_memaddr == 0)
6907 return map;
6908
6909 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6910 {
6911 if (is_elf64)
6912 {
6913 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6914 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6915 union
6916 {
6917 Elf64_Xword map;
6918 unsigned char buf[sizeof (Elf64_Xword)];
6919 }
6920 rld_map;
6921 #endif
6922 #ifdef DT_MIPS_RLD_MAP
6923 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6924 {
6925 if (linux_read_memory (dyn->d_un.d_val,
6926 rld_map.buf, sizeof (rld_map.buf)) == 0)
6927 return rld_map.map;
6928 else
6929 break;
6930 }
6931 #endif /* DT_MIPS_RLD_MAP */
6932 #ifdef DT_MIPS_RLD_MAP_REL
6933 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6934 {
6935 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6936 rld_map.buf, sizeof (rld_map.buf)) == 0)
6937 return rld_map.map;
6938 else
6939 break;
6940 }
6941 #endif /* DT_MIPS_RLD_MAP_REL */
6942
6943 if (dyn->d_tag == DT_DEBUG && map == -1)
6944 map = dyn->d_un.d_val;
6945
6946 if (dyn->d_tag == DT_NULL)
6947 break;
6948 }
6949 else
6950 {
6951 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6952 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6953 union
6954 {
6955 Elf32_Word map;
6956 unsigned char buf[sizeof (Elf32_Word)];
6957 }
6958 rld_map;
6959 #endif
6960 #ifdef DT_MIPS_RLD_MAP
6961 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6962 {
6963 if (linux_read_memory (dyn->d_un.d_val,
6964 rld_map.buf, sizeof (rld_map.buf)) == 0)
6965 return rld_map.map;
6966 else
6967 break;
6968 }
6969 #endif /* DT_MIPS_RLD_MAP */
6970 #ifdef DT_MIPS_RLD_MAP_REL
6971 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6972 {
6973 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6974 rld_map.buf, sizeof (rld_map.buf)) == 0)
6975 return rld_map.map;
6976 else
6977 break;
6978 }
6979 #endif /* DT_MIPS_RLD_MAP_REL */
6980
6981 if (dyn->d_tag == DT_DEBUG && map == -1)
6982 map = dyn->d_un.d_val;
6983
6984 if (dyn->d_tag == DT_NULL)
6985 break;
6986 }
6987
6988 dynamic_memaddr += dyn_size;
6989 }
6990
6991 return map;
6992 }
6993
6994 /* Read one pointer from MEMADDR in the inferior. */
6995
6996 static int
6997 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6998 {
6999 int ret;
7000
7001 /* Go through a union so this works on either big or little endian
7002 hosts, when the inferior's pointer size is smaller than the size
7003 of CORE_ADDR. It is assumed that the inferior's endianness is the
7004 same as the superior's. */
7005 union
7006 {
7007 CORE_ADDR core_addr;
7008 unsigned int ui;
7009 unsigned char uc;
7010 } addr;
7011
7012 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
7013 if (ret == 0)
7014 {
7015 if (ptr_size == sizeof (CORE_ADDR))
7016 *ptr = addr.core_addr;
7017 else if (ptr_size == sizeof (unsigned int))
7018 *ptr = addr.ui;
7019 else
7020 gdb_assert_not_reached ("unhandled pointer size");
7021 }
7022 return ret;
7023 }
7024
7025 struct link_map_offsets
7026 {
7027 /* Offset and size of r_debug.r_version. */
7028 int r_version_offset;
7029
7030 /* Offset and size of r_debug.r_map. */
7031 int r_map_offset;
7032
7033 /* Offset to l_addr field in struct link_map. */
7034 int l_addr_offset;
7035
7036 /* Offset to l_name field in struct link_map. */
7037 int l_name_offset;
7038
7039 /* Offset to l_ld field in struct link_map. */
7040 int l_ld_offset;
7041
7042 /* Offset to l_next field in struct link_map. */
7043 int l_next_offset;
7044
7045 /* Offset to l_prev field in struct link_map. */
7046 int l_prev_offset;
7047 };
7048
7049 /* Construct qXfer:libraries-svr4:read reply. */
7050
7051 static int
7052 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7053 unsigned const char *writebuf,
7054 CORE_ADDR offset, int len)
7055 {
7056 char *document;
7057 unsigned document_len;
7058 struct process_info_private *const priv = current_process ()->priv;
7059 char filename[PATH_MAX];
7060 int pid, is_elf64;
7061
7062 static const struct link_map_offsets lmo_32bit_offsets =
7063 {
7064 0, /* r_version offset. */
7065 4, /* r_debug.r_map offset. */
7066 0, /* l_addr offset in link_map. */
7067 4, /* l_name offset in link_map. */
7068 8, /* l_ld offset in link_map. */
7069 12, /* l_next offset in link_map. */
7070 16 /* l_prev offset in link_map. */
7071 };
7072
7073 static const struct link_map_offsets lmo_64bit_offsets =
7074 {
7075 0, /* r_version offset. */
7076 8, /* r_debug.r_map offset. */
7077 0, /* l_addr offset in link_map. */
7078 8, /* l_name offset in link_map. */
7079 16, /* l_ld offset in link_map. */
7080 24, /* l_next offset in link_map. */
7081 32 /* l_prev offset in link_map. */
7082 };
7083 const struct link_map_offsets *lmo;
7084 unsigned int machine;
7085 int ptr_size;
7086 CORE_ADDR lm_addr = 0, lm_prev = 0;
7087 int allocated = 1024;
7088 char *p;
7089 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7090 int header_done = 0;
7091
7092 if (writebuf != NULL)
7093 return -2;
7094 if (readbuf == NULL)
7095 return -1;
7096
7097 pid = lwpid_of (current_thread);
7098 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7099 is_elf64 = elf_64_file_p (filename, &machine);
7100 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7101 ptr_size = is_elf64 ? 8 : 4;
7102
7103 while (annex[0] != '\0')
7104 {
7105 const char *sep;
7106 CORE_ADDR *addrp;
7107 int len;
7108
7109 sep = strchr (annex, '=');
7110 if (sep == NULL)
7111 break;
7112
7113 len = sep - annex;
7114 if (len == 5 && startswith (annex, "start"))
7115 addrp = &lm_addr;
7116 else if (len == 4 && startswith (annex, "prev"))
7117 addrp = &lm_prev;
7118 else
7119 {
7120 annex = strchr (sep, ';');
7121 if (annex == NULL)
7122 break;
7123 annex++;
7124 continue;
7125 }
7126
7127 annex = decode_address_to_semicolon (addrp, sep + 1);
7128 }
7129
7130 if (lm_addr == 0)
7131 {
7132 int r_version = 0;
7133
7134 if (priv->r_debug == 0)
7135 priv->r_debug = get_r_debug (pid, is_elf64);
7136
7137 /* We failed to find DT_DEBUG. This situation will not change
7138 for this inferior, so do not retry it. Report it to GDB as
7139 E01; see the GDB solib-svr4.c side for the reasons. */
7140 if (priv->r_debug == (CORE_ADDR) -1)
7141 return -1;
7142
7143 if (priv->r_debug != 0)
7144 {
7145 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7146 (unsigned char *) &r_version,
7147 sizeof (r_version)) != 0
7148 || r_version != 1)
7149 {
7150 warning ("unexpected r_debug version %d", r_version);
7151 }
7152 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7153 &lm_addr, ptr_size) != 0)
7154 {
7155 warning ("unable to read r_map from 0x%lx",
7156 (long) priv->r_debug + lmo->r_map_offset);
7157 }
7158 }
7159 }
7160
7161 document = (char *) xmalloc (allocated);
7162 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7163 p = document + strlen (document);
7164
7165 while (lm_addr
7166 && read_one_ptr (lm_addr + lmo->l_name_offset,
7167 &l_name, ptr_size) == 0
7168 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7169 &l_addr, ptr_size) == 0
7170 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7171 &l_ld, ptr_size) == 0
7172 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7173 &l_prev, ptr_size) == 0
7174 && read_one_ptr (lm_addr + lmo->l_next_offset,
7175 &l_next, ptr_size) == 0)
7176 {
7177 unsigned char libname[PATH_MAX];
7178
7179 if (lm_prev != l_prev)
7180 {
7181 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7182 (long) lm_prev, (long) l_prev);
7183 break;
7184 }
7185
7186 /* Ignore the first entry even if it has a valid name, as the first
7187 entry corresponds to the main executable. The first entry should not
7188 be skipped if the dynamic loader was loaded late by a static executable
7189 (see the solib-svr4.c parameter ignore_first). But in that case the
7190 main executable does not have PT_DYNAMIC present, and this function
7191 has already exited above due to a failed get_r_debug. */
7192 if (lm_prev == 0)
7193 {
7194 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7195 p = p + strlen (p);
7196 }
7197 else
7198 {
7199 /* Not checking for error because reading may stop before
7200 we've got PATH_MAX worth of characters. */
7201 libname[0] = '\0';
7202 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7203 libname[sizeof (libname) - 1] = '\0';
7204 if (libname[0] != '\0')
7205 {
7206 /* 6x the size for xml_escape_text below. */
7207 size_t len = 6 * strlen ((char *) libname);
7208 char *name;
7209
7210 if (!header_done)
7211 {
7212 /* Terminate `<library-list-svr4'. */
7213 *p++ = '>';
7214 header_done = 1;
7215 }
7216
7217 while (allocated < p - document + len + 200)
7218 {
7219 /* Expand to guarantee sufficient storage. */
7220 uintptr_t document_len = p - document;
7221
7222 document = (char *) xrealloc (document, 2 * allocated);
7223 allocated *= 2;
7224 p = document + document_len;
7225 }
7226
7227 name = xml_escape_text ((char *) libname);
7228 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7229 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7230 name, (unsigned long) lm_addr,
7231 (unsigned long) l_addr, (unsigned long) l_ld);
7232 free (name);
7233 }
7234 }
7235
7236 lm_prev = lm_addr;
7237 lm_addr = l_next;
7238 }
7239
7240 if (!header_done)
7241 {
7242 /* Empty list; terminate `<library-list-svr4'. */
7243 strcpy (p, "/>");
7244 }
7245 else
7246 strcpy (p, "</library-list-svr4>");
7247
7248 document_len = strlen (document);
7249 if (offset < document_len)
7250 document_len -= offset;
7251 else
7252 document_len = 0;
7253 if (len > document_len)
7254 len = document_len;
7255
7256 memcpy (readbuf, document + offset, len);
7257 xfree (document);
7258
7259 return len;
7260 }
7261
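/* A reply produced by the function above looks roughly like this
   (hypothetical addresses; the real document has no pretty-printing):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fba000"
                l_addr="0x7ffff7a0d000" l_ld="0x7ffff7dcfba0"/>
     </library-list-svr4>

   GDB pages through it with the OFFSET/LEN arguments, so the document
   is rebuilt on every call and sliced at the end.  */
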
7262 #ifdef HAVE_LINUX_BTRACE
7263
7264 /* See to_disable_btrace target method. */
7265
7266 static int
7267 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7268 {
7269 enum btrace_error err;
7270
7271 err = linux_disable_btrace (tinfo);
7272 return (err == BTRACE_ERR_NONE ? 0 : -1);
7273 }
7274
7275 /* Encode an Intel Processor Trace configuration. */
7276
7277 static void
7278 linux_low_encode_pt_config (struct buffer *buffer,
7279 const struct btrace_data_pt_config *config)
7280 {
7281 buffer_grow_str (buffer, "<pt-config>\n");
7282
7283 switch (config->cpu.vendor)
7284 {
7285 case CV_INTEL:
7286 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7287 "model=\"%u\" stepping=\"%u\"/>\n",
7288 config->cpu.family, config->cpu.model,
7289 config->cpu.stepping);
7290 break;
7291
7292 default:
7293 break;
7294 }
7295
7296 buffer_grow_str (buffer, "</pt-config>\n");
7297 }
7298
7299 /* Encode a raw buffer. */
7300
7301 static void
7302 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7303 unsigned int size)
7304 {
7305 if (size == 0)
7306 return;
7307
7308 /* We use hex encoding - see common/rsp-low.h. */
7309 buffer_grow_str (buffer, "<raw>\n");
7310
7311 while (size-- > 0)
7312 {
7313 char elem[2];
7314
7315 elem[0] = tohex ((*data >> 4) & 0xf);
7316 elem[1] = tohex (*data++ & 0xf);
7317
7318 buffer_grow (buffer, elem, 2);
7319 }
7320
7321 buffer_grow_str (buffer, "</raw>\n");
7322 }
7323
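/* Encoding example (illustrative): the bytes { 0xde, 0xad, 0x01 }
   become the text "dead01"; tohex maps each nibble, high nibble
   first, to its lowercase hex digit.  */
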
7324 /* See to_read_btrace target method. */
7325
7326 static int
7327 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7328 enum btrace_read_type type)
7329 {
7330 struct btrace_data btrace;
7331 struct btrace_block *block;
7332 enum btrace_error err;
7333 int i;
7334
7335 btrace_data_init (&btrace);
7336
7337 err = linux_read_btrace (&btrace, tinfo, type);
7338 if (err != BTRACE_ERR_NONE)
7339 {
7340 if (err == BTRACE_ERR_OVERFLOW)
7341 buffer_grow_str0 (buffer, "E.Overflow.");
7342 else
7343 buffer_grow_str0 (buffer, "E.Generic Error.");
7344
7345 goto err;
7346 }
7347
7348 switch (btrace.format)
7349 {
7350 case BTRACE_FORMAT_NONE:
7351 buffer_grow_str0 (buffer, "E.No Trace.");
7352 goto err;
7353
7354 case BTRACE_FORMAT_BTS:
7355 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7356 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7357
7358 for (i = 0;
7359 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7360 i++)
7361 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7362 paddress (block->begin), paddress (block->end));
7363
7364 buffer_grow_str0 (buffer, "</btrace>\n");
7365 break;
7366
7367 case BTRACE_FORMAT_PT:
7368 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7369 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7370 buffer_grow_str (buffer, "<pt>\n");
7371
7372 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7373
7374 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7375 btrace.variant.pt.size);
7376
7377 buffer_grow_str (buffer, "</pt>\n");
7378 buffer_grow_str0 (buffer, "</btrace>\n");
7379 break;
7380
7381 default:
7382 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7383 goto err;
7384 }
7385
7386 btrace_data_fini (&btrace);
7387 return 0;
7388
7389 err:
7390 btrace_data_fini (&btrace);
7391 return -1;
7392 }
7393
7394 /* See to_btrace_conf target method. */
7395
7396 static int
7397 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7398 struct buffer *buffer)
7399 {
7400 const struct btrace_config *conf;
7401
7402 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7403 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7404
7405 conf = linux_btrace_conf (tinfo);
7406 if (conf != NULL)
7407 {
7408 switch (conf->format)
7409 {
7410 case BTRACE_FORMAT_NONE:
7411 break;
7412
7413 case BTRACE_FORMAT_BTS:
7414 buffer_xml_printf (buffer, "<bts");
7415 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7416 buffer_xml_printf (buffer, " />\n");
7417 break;
7418
7419 case BTRACE_FORMAT_PT:
7420 buffer_xml_printf (buffer, "<pt");
7421 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7422 buffer_xml_printf (buffer, "/>\n");
7423 break;
7424 }
7425 }
7426
7427 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7428 return 0;
7429 }
7430 #endif /* HAVE_LINUX_BTRACE */
7431
7432 /* See nat/linux-nat.h. */
7433
7434 ptid_t
7435 current_lwp_ptid (void)
7436 {
7437 return ptid_of (current_thread);
7438 }
7439
7440 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7441
7442 static int
7443 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7444 {
7445 if (the_low_target.breakpoint_kind_from_pc != NULL)
7446 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7447 else
7448 return default_breakpoint_kind_from_pc (pcptr);
7449 }
7450
7451 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7452
7453 static const gdb_byte *
7454 linux_sw_breakpoint_from_kind (int kind, int *size)
7455 {
7456 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7457
7458 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7459 }
7460
7461 /* Implementation of the target_ops method
7462 "breakpoint_kind_from_current_state". */
7463
7464 static int
7465 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7466 {
7467 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7468 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7469 else
7470 return linux_breakpoint_kind_from_pc (pcptr);
7471 }
7472
7473 /* Default implementation of linux_target_ops method "set_pc" for
7474 32-bit pc register which is literally named "pc". */
7475
7476 void
7477 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7478 {
7479 uint32_t newpc = pc;
7480
7481 supply_register_by_name (regcache, "pc", &newpc);
7482 }
7483
7484 /* Default implementation of linux_target_ops method "get_pc" for
7485 32-bit pc register which is literally named "pc". */
7486
7487 CORE_ADDR
7488 linux_get_pc_32bit (struct regcache *regcache)
7489 {
7490 uint32_t pc;
7491
7492 collect_register_by_name (regcache, "pc", &pc);
7493 if (debug_threads)
7494 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7495 return pc;
7496 }
7497
7498 /* Default implementation of linux_target_ops method "set_pc" for
7499 64-bit pc register which is literally named "pc". */
7500
7501 void
7502 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7503 {
7504 uint64_t newpc = pc;
7505
7506 supply_register_by_name (regcache, "pc", &newpc);
7507 }
7508
7509 /* Default implementation of linux_target_ops method "get_pc" for
7510 64-bit pc register which is literally named "pc". */
7511
7512 CORE_ADDR
7513 linux_get_pc_64bit (struct regcache *regcache)
7514 {
7515 uint64_t pc;
7516
7517 collect_register_by_name (regcache, "pc", &pc);
7518 if (debug_threads)
7519 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7520 return pc;
7521 }
7522
7523
7524 static struct target_ops linux_target_ops = {
7525 linux_create_inferior,
7526 linux_post_create_inferior,
7527 linux_attach,
7528 linux_kill,
7529 linux_detach,
7530 linux_mourn,
7531 linux_join,
7532 linux_thread_alive,
7533 linux_resume,
7534 linux_wait,
7535 linux_fetch_registers,
7536 linux_store_registers,
7537 linux_prepare_to_access_memory,
7538 linux_done_accessing_memory,
7539 linux_read_memory,
7540 linux_write_memory,
7541 linux_look_up_symbols,
7542 linux_request_interrupt,
7543 linux_read_auxv,
7544 linux_supports_z_point_type,
7545 linux_insert_point,
7546 linux_remove_point,
7547 linux_stopped_by_sw_breakpoint,
7548 linux_supports_stopped_by_sw_breakpoint,
7549 linux_stopped_by_hw_breakpoint,
7550 linux_supports_stopped_by_hw_breakpoint,
7551 linux_supports_hardware_single_step,
7552 linux_stopped_by_watchpoint,
7553 linux_stopped_data_address,
7554 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7555 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7556 && defined(PT_TEXT_END_ADDR)
7557 linux_read_offsets,
7558 #else
7559 NULL,
7560 #endif
7561 #ifdef USE_THREAD_DB
7562 thread_db_get_tls_address,
7563 #else
7564 NULL,
7565 #endif
7566 linux_qxfer_spu,
7567 hostio_last_error_from_errno,
7568 linux_qxfer_osdata,
7569 linux_xfer_siginfo,
7570 linux_supports_non_stop,
7571 linux_async,
7572 linux_start_non_stop,
7573 linux_supports_multi_process,
7574 linux_supports_fork_events,
7575 linux_supports_vfork_events,
7576 linux_supports_exec_events,
7577 linux_handle_new_gdb_connection,
7578 #ifdef USE_THREAD_DB
7579 thread_db_handle_monitor_command,
7580 #else
7581 NULL,
7582 #endif
7583 linux_common_core_of_thread,
7584 linux_read_loadmap,
7585 linux_process_qsupported,
7586 linux_supports_tracepoints,
7587 linux_read_pc,
7588 linux_write_pc,
7589 linux_thread_stopped,
7590 NULL,
7591 linux_pause_all,
7592 linux_unpause_all,
7593 linux_stabilize_threads,
7594 linux_install_fast_tracepoint_jump_pad,
7595 linux_emit_ops,
7596 linux_supports_disable_randomization,
7597 linux_get_min_fast_tracepoint_insn_len,
7598 linux_qxfer_libraries_svr4,
7599 linux_supports_agent,
7600 #ifdef HAVE_LINUX_BTRACE
7601 linux_supports_btrace,
7602 linux_enable_btrace,
7603 linux_low_disable_btrace,
7604 linux_low_read_btrace,
7605 linux_low_btrace_conf,
7606 #else
7607 NULL,
7608 NULL,
7609 NULL,
7610 NULL,
7611 NULL,
7612 #endif
7613 linux_supports_range_stepping,
7614 linux_proc_pid_to_exec_file,
7615 linux_mntns_open_cloexec,
7616 linux_mntns_unlink,
7617 linux_mntns_readlink,
7618 linux_breakpoint_kind_from_pc,
7619 linux_sw_breakpoint_from_kind,
7620 linux_proc_tid_get_name,
7621 linux_breakpoint_kind_from_current_state,
7622 linux_supports_software_single_step,
7623 linux_supports_catch_syscall,
7624 linux_get_ipa_tdesc_idx,
7625 };
7626
7627 #ifdef HAVE_LINUX_REGSETS
7628 void
7629 initialize_regsets_info (struct regsets_info *info)
7630 {
7631 for (info->num_regsets = 0;
7632 info->regsets[info->num_regsets].size >= 0;
7633 info->num_regsets++)
7634 ;
7635 }
7636 #endif
7637
7638 void
7639 initialize_low (void)
7640 {
7641 struct sigaction sigchld_action;
7642
7643 memset (&sigchld_action, 0, sizeof (sigchld_action));
7644 set_target_ops (&linux_target_ops);
7645
7646 linux_ptrace_init_warnings ();
7647
7648 sigchld_action.sa_handler = sigchld_handler;
7649 sigemptyset (&sigchld_action.sa_mask);
7650 sigchld_action.sa_flags = SA_RESTART;
7651 sigaction (SIGCHLD, &sigchld_action, NULL);
7652
7653 initialize_low_arch ();
7654
7655 linux_check_ptrace_features ();
7656 }