1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2017 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25 #include "signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #include "common-inferior.h"
51 #include "nat/fork-inferior.h"
52 #include "environ.h"
53 #ifndef ELFMAG0
54 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
55 then ELFMAG0 will have been defined. If it didn't get included by
56 gdb_proc_service.h then including it will likely introduce a duplicate
57 definition of elf_fpregset_t. */
58 #include <elf.h>
59 #endif
60 #include "nat/linux-namespaces.h"
61
62 #ifndef SPUFS_MAGIC
63 #define SPUFS_MAGIC 0x23c9b64e
64 #endif
65
66 #ifdef HAVE_PERSONALITY
67 # include <sys/personality.h>
68 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
69 # define ADDR_NO_RANDOMIZE 0x0040000
70 # endif
71 #endif
72
73 #ifndef O_LARGEFILE
74 #define O_LARGEFILE 0
75 #endif
76
77 /* Some targets did not define these ptrace constants from the start,
78 so gdbserver defines them locally here. In the future, these may
79 be removed after they are added to asm/ptrace.h. */
80 #if !(defined(PT_TEXT_ADDR) \
81 || defined(PT_DATA_ADDR) \
82 || defined(PT_TEXT_END_ADDR))
83 #if defined(__mcoldfire__)
84 /* These are still undefined in 3.10 kernels. */
85 #define PT_TEXT_ADDR 49*4
86 #define PT_DATA_ADDR 50*4
87 #define PT_TEXT_END_ADDR 51*4
88 /* BFIN has defined these since at least kernel 2.6.32. */
89 #elif defined(BFIN)
90 #define PT_TEXT_ADDR 220
91 #define PT_TEXT_END_ADDR 224
92 #define PT_DATA_ADDR 228
93 /* These are still undefined in 3.10 kernels. */
94 #elif defined(__TMS320C6X__)
95 #define PT_TEXT_ADDR (0x10000*4)
96 #define PT_DATA_ADDR (0x10004*4)
97 #define PT_TEXT_END_ADDR (0x10008*4)
98 #endif
99 #endif
100
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "btrace-common.h"
104 #endif
105
106 #ifndef HAVE_ELF32_AUXV_T
107 /* Copied from glibc's elf.h. */
108 typedef struct
109 {
110 uint32_t a_type; /* Entry type */
111 union
112 {
113 uint32_t a_val; /* Integer value */
114 /* We used to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118 } Elf32_auxv_t;
119 #endif
120
121 #ifndef HAVE_ELF64_AUXV_T
122 /* Copied from glibc's elf.h. */
123 typedef struct
124 {
125 uint64_t a_type; /* Entry type */
126 union
127 {
128 uint64_t a_val; /* Integer value */
129 /* We used to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133 } Elf64_auxv_t;
134 #endif
135
136 /* Does the current host support PTRACE_GETREGSET? */
137 int have_ptrace_getregset = -1;
138
139 /* LWP accessors. */
140
141 /* See nat/linux-nat.h. */
142
143 ptid_t
144 ptid_of_lwp (struct lwp_info *lwp)
145 {
146 return ptid_of (get_lwp_thread (lwp));
147 }
148
149 /* See nat/linux-nat.h. */
150
151 void
152 lwp_set_arch_private_info (struct lwp_info *lwp,
153 struct arch_lwp_info *info)
154 {
155 lwp->arch_private = info;
156 }
157
158 /* See nat/linux-nat.h. */
159
160 struct arch_lwp_info *
161 lwp_arch_private_info (struct lwp_info *lwp)
162 {
163 return lwp->arch_private;
164 }
165
166 /* See nat/linux-nat.h. */
167
168 int
169 lwp_is_stopped (struct lwp_info *lwp)
170 {
171 return lwp->stopped;
172 }
173
174 /* See nat/linux-nat.h. */
175
176 enum target_stop_reason
177 lwp_stop_reason (struct lwp_info *lwp)
178 {
179 return lwp->stop_reason;
180 }
181
182 /* See nat/linux-nat.h. */
183
184 int
185 lwp_is_stepping (struct lwp_info *lwp)
186 {
187 return lwp->stepping;
188 }
189
190 /* A list of all unknown processes which receive stop signals. Some
191 other process will presumably claim each of these as forked
192 children momentarily. */
193
194 struct simple_pid_list
195 {
196 /* The process ID. */
197 int pid;
198
199 /* The status as reported by waitpid. */
200 int status;
201
202 /* Next in chain. */
203 struct simple_pid_list *next;
204 };
205 struct simple_pid_list *stopped_pids;
206
207 /* Trivial list manipulation functions to keep track of a list of new
208 stopped processes. */
209
210 static void
211 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
212 {
213 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
214
215 new_pid->pid = pid;
216 new_pid->status = status;
217 new_pid->next = *listp;
218 *listp = new_pid;
219 }
220
221 static int
222 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
223 {
224 struct simple_pid_list **p;
225
226 for (p = listp; *p != NULL; p = &(*p)->next)
227 if ((*p)->pid == pid)
228 {
229 struct simple_pid_list *next = (*p)->next;
230
231 *statusp = (*p)->status;
232 xfree (*p);
233 *p = next;
234 return 1;
235 }
236 return 0;
237 }
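
/* Editorial sketch (not in the original file): these helpers cover a
   race in extended-event handling.  waitpid can report a new child's
   initial stop before the parent's PTRACE_EVENT_FORK/VFORK/CLONE is
   seen, in which case the wait loop parks the status, roughly:

     if (find_lwp_pid (pid_to_ptid (lwpid)) == NULL && WIFSTOPPED (wstat))
       add_to_pid_list (&stopped_pids, lwpid, wstat);

   handle_extended_wait later claims it with pull_pid_from_list
   (&stopped_pids, new_pid, &status), and only falls back to a
   blocking waitpid if the stop hasn't arrived yet.  */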
238
239 enum stopping_threads_kind
240 {
241 /* Not stopping threads presently. */
242 NOT_STOPPING_THREADS,
243
244 /* Stopping threads. */
245 STOPPING_THREADS,
246
247 /* Stopping and suspending threads. */
248 STOPPING_AND_SUSPENDING_THREADS
249 };
250
251 /* This is set while stop_all_lwps is in effect. */
252 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
253
254 /* FIXME make into a target method? */
255 int using_threads = 1;
256
257 /* True if we're presently stabilizing threads (moving them out of
258 jump pads). */
259 static int stabilizing_threads;
260
261 static void linux_resume_one_lwp (struct lwp_info *lwp,
262 int step, int signal, siginfo_t *info);
263 static void linux_resume (struct thread_resume *resume_info, size_t n);
264 static void stop_all_lwps (int suspend, struct lwp_info *except);
265 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
266 static void unsuspend_all_lwps (struct lwp_info *except);
267 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
268 int *wstat, int options);
269 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
270 static struct lwp_info *add_lwp (ptid_t ptid);
271 static void linux_mourn (struct process_info *process);
272 static int linux_stopped_by_watchpoint (void);
273 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
274 static int lwp_is_marked_dead (struct lwp_info *lwp);
275 static void proceed_all_lwps (void);
276 static int finish_step_over (struct lwp_info *lwp);
277 static int kill_lwp (unsigned long lwpid, int signo);
278 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
279 static void complete_ongoing_step_over (void);
280 static int linux_low_ptrace_options (int attached);
281 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
282 static int proceed_one_lwp (thread_info *thread, void *except);
283
284 /* When the event-loop is doing a step-over, this points at the thread
285 being stepped. */
286 ptid_t step_over_bkpt;
287
288 /* True if the low target can hardware single-step. */
289
290 static int
291 can_hardware_single_step (void)
292 {
293 if (the_low_target.supports_hardware_single_step != NULL)
294 return the_low_target.supports_hardware_single_step ();
295 else
296 return 0;
297 }
298
299 /* True if the low target can software single-step. Such targets
300 implement the GET_NEXT_PCS callback. */
301
302 static int
303 can_software_single_step (void)
304 {
305 return (the_low_target.get_next_pcs != NULL);
306 }
307
308 /* True if the low target supports memory breakpoints. If so, we'll
309 have a GET_PC implementation. */
310
311 static int
312 supports_breakpoints (void)
313 {
314 return (the_low_target.get_pc != NULL);
315 }
316
317 /* Returns true if this target can support fast tracepoints. This
318 does not mean that the in-process agent has been loaded in the
319 inferior. */
320
321 static int
322 supports_fast_tracepoints (void)
323 {
324 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
325 }
326
327 /* True if LWP is stopped in its stepping range. */
328
329 static int
330 lwp_in_step_range (struct lwp_info *lwp)
331 {
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 }
336
337 struct pending_signals
338 {
339 int signal;
340 siginfo_t info;
341 struct pending_signals *prev;
342 };
343
344 /* The read/write ends of the pipe registered as a waitable file in
345 the event loop. */
346 static int linux_event_pipe[2] = { -1, -1 };
347
348 /* True if we're currently in async mode. */
349 #define target_is_async_p() (linux_event_pipe[0] != -1)
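
/* Editorial note (not in the original file): this is the classic
   self-pipe pattern.  When an event becomes ready, async_file_mark
   (declared below, defined later in this file) writes a byte to
   linux_event_pipe[1]; the event loop watches the read end, so the
   pending byte wakes it up to call the wait machinery.  A minimal
   sketch of the marking side, under that assumption:

     static void
     async_file_mark (void)
     {
       int ret;

       do
         ret = write (linux_event_pipe[1], "+", 1);  // wake the loop
       while (ret == -1 && errno == EINTR);
     }
 */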
350
351 static void send_sigstop (struct lwp_info *lwp);
352 static void wait_for_sigstop (void);
353
354 /* Return 1 if HEADER is a 64-bit ELF header, 0 if it is ELF of another class, and -1 if it is not ELF at all; *MACHINE is set to e_machine, or EM_NONE if not ELF. */
355
356 static int
357 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
358 {
359 if (header->e_ident[EI_MAG0] == ELFMAG0
360 && header->e_ident[EI_MAG1] == ELFMAG1
361 && header->e_ident[EI_MAG2] == ELFMAG2
362 && header->e_ident[EI_MAG3] == ELFMAG3)
363 {
364 *machine = header->e_machine;
365 return header->e_ident[EI_CLASS] == ELFCLASS64;
366
367 }
368 *machine = EM_NONE;
369 return -1;
370 }
371
372 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is ELF of another
373 class or cannot be fully read, and -1 if it cannot be opened or
374 is not an ELF file at all. */
375
376 static int
377 elf_64_file_p (const char *file, unsigned int *machine)
378 {
379 Elf64_Ehdr header;
380 int fd;
381
382 fd = open (file, O_RDONLY);
383 if (fd < 0)
384 return -1;
385
386 if (read (fd, &header, sizeof (header)) != sizeof (header))
387 {
388 close (fd);
389 return 0;
390 }
391 close (fd);
392
393 return elf_64_header_p (&header, machine);
394 }
395
396 /* Accepts an integer PID; returns true if the executable that PID
397 is running is a 64-bit ELF file. */
398
399 int
400 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
401 {
402 char file[PATH_MAX];
403
404 sprintf (file, "/proc/%d/exe", pid);
405 return elf_64_file_p (file, machine);
406 }
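
/* Illustrative usage (hypothetical caller, not in the original file):
   arch backends use this to choose between 32-bit and 64-bit register
   layouts for a just-attached process, along the lines of:

     unsigned int machine;
     int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

   Note that the -1 result from elf_64_file_p (unreadable or non-ELF
   executable) reaches callers too, so callers that only test the
   result for truth should keep that case in mind.  */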
407
408 static void
409 delete_lwp (struct lwp_info *lwp)
410 {
411 struct thread_info *thr = get_lwp_thread (lwp);
412
413 if (debug_threads)
414 debug_printf ("deleting %ld\n", lwpid_of (thr));
415
416 remove_thread (thr);
417
418 if (the_low_target.delete_thread != NULL)
419 the_low_target.delete_thread (lwp->arch_private);
420 else
421 gdb_assert (lwp->arch_private == NULL);
422
423 free (lwp);
424 }
425
426 /* Add a process to the common process list, and set its private
427 data. */
428
429 static struct process_info *
430 linux_add_process (int pid, int attached)
431 {
432 struct process_info *proc;
433
434 proc = add_process (pid, attached);
435 proc->priv = XCNEW (struct process_info_private);
436
437 if (the_low_target.new_process != NULL)
438 proc->priv->arch_private = the_low_target.new_process ();
439
440 return proc;
441 }
442
443 static CORE_ADDR get_pc (struct lwp_info *lwp);
444
445 /* Call the target arch_setup function on the current thread. */
446
447 static void
448 linux_arch_setup (void)
449 {
450 the_low_target.arch_setup ();
451 }
452
453 /* Call the target arch_setup function on THREAD. */
454
455 static void
456 linux_arch_setup_thread (struct thread_info *thread)
457 {
458 struct thread_info *saved_thread;
459
460 saved_thread = current_thread;
461 current_thread = thread;
462
463 linux_arch_setup ();
464
465 current_thread = saved_thread;
466 }
467
468 /* Handle a GNU/Linux extended wait response. If we see a clone
469 event, we add the new LWP to our list and return 1 so the trap
470 is not reported to higher layers; fork/vfork events are reported
471 (return 0). If we see an exec event, we modify ORIG_EVENT_LWP
472 to point to a new LWP representing the new program. */
473
474 static int
475 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
476 {
477 struct lwp_info *event_lwp = *orig_event_lwp;
478 int event = linux_ptrace_get_extended_event (wstat);
479 struct thread_info *event_thr = get_lwp_thread (event_lwp);
480 struct lwp_info *new_lwp;
481
482 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
483
484 /* All extended events we currently use are mid-syscall. Only
485 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
486 you have to be using PTRACE_SEIZE to get that. */
487 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
488
489 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
490 || (event == PTRACE_EVENT_CLONE))
491 {
492 ptid_t ptid;
493 unsigned long new_pid;
494 int ret, status;
495
496 /* Get the pid of the new lwp. */
497 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
498 &new_pid);
499
500 /* If we haven't already seen the new PID stop, wait for it now. */
501 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
502 {
503 /* The new child has a pending SIGSTOP. We can't affect it until it
504 hits the SIGSTOP, but we're already attached. */
505
506 ret = my_waitpid (new_pid, &status, __WALL);
507
508 if (ret == -1)
509 perror_with_name ("waiting for new child");
510 else if (ret != new_pid)
511 warning ("wait returned unexpected PID %d", ret);
512 else if (!WIFSTOPPED (status))
513 warning ("wait returned unexpected status 0x%x", status);
514 }
515
516 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
517 {
518 struct process_info *parent_proc;
519 struct process_info *child_proc;
520 struct lwp_info *child_lwp;
521 struct thread_info *child_thr;
522 struct target_desc *tdesc;
523
524 ptid = ptid_build (new_pid, new_pid, 0);
525
526 if (debug_threads)
527 {
528 debug_printf ("HEW: Got fork event from LWP %ld, "
529 "new child is %d\n",
530 ptid_get_lwp (ptid_of (event_thr)),
531 ptid_get_pid (ptid));
532 }
533
534 /* Add the new process to the tables and clone the breakpoint
535 lists of the parent. We need to do this even if the new process
536 will be detached, since we will need the process object and the
537 breakpoints to remove any breakpoints from memory when we
538 detach, and the client side will access registers. */
539 child_proc = linux_add_process (new_pid, 0);
540 gdb_assert (child_proc != NULL);
541 child_lwp = add_lwp (ptid);
542 gdb_assert (child_lwp != NULL);
543 child_lwp->stopped = 1;
544 child_lwp->must_set_ptrace_flags = 1;
545 child_lwp->status_pending_p = 0;
546 child_thr = get_lwp_thread (child_lwp);
547 child_thr->last_resume_kind = resume_stop;
548 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
549
550 /* If we're suspending all threads, leave this one suspended
551 too. If the fork/clone parent is stepping over a breakpoint,
552 all other threads have been suspended already. Leave the
553 child suspended too. */
554 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
555 || event_lwp->bp_reinsert != 0)
556 {
557 if (debug_threads)
558 debug_printf ("HEW: leaving child suspended\n");
559 child_lwp->suspended = 1;
560 }
561
562 parent_proc = get_thread_process (event_thr);
563 child_proc->attached = parent_proc->attached;
564
565 if (event_lwp->bp_reinsert != 0
566 && can_software_single_step ()
567 && event == PTRACE_EVENT_VFORK)
568 {
569 /* If we leave single-step breakpoints there, the child will
570 hit them, so uninsert single-step breakpoints from the parent
571 (and child). Once the vfork child is done, reinsert
572 them back in the parent. */
573 uninsert_single_step_breakpoints (event_thr);
574 }
575
576 clone_all_breakpoints (child_thr, event_thr);
577
578 tdesc = allocate_target_description ();
579 copy_target_description (tdesc, parent_proc->tdesc);
580 child_proc->tdesc = tdesc;
581
582 /* Clone arch-specific process data. */
583 if (the_low_target.new_fork != NULL)
584 the_low_target.new_fork (parent_proc, child_proc);
585
586 /* Save fork info in the parent thread. */
587 if (event == PTRACE_EVENT_FORK)
588 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
589 else if (event == PTRACE_EVENT_VFORK)
590 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
591
592 event_lwp->waitstatus.value.related_pid = ptid;
593
594 /* The status_pending field contains bits denoting the
595 extended event, so when the pending event is handled,
596 the handler will look at lwp->waitstatus. */
597 event_lwp->status_pending_p = 1;
598 event_lwp->status_pending = wstat;
599
600 /* Link the threads until the parent event is passed on to
601 higher layers. */
602 event_lwp->fork_relative = child_lwp;
603 child_lwp->fork_relative = event_lwp;
604
605 /* If the parent thread is doing a step-over with single-step
606 breakpoints, the list of single-step breakpoints is cloned
607 from the parent's. Remove them from the child process.
608 In case of vfork, we'll reinsert them once the vforked
609 child is done. */
610 if (event_lwp->bp_reinsert != 0
611 && can_software_single_step ())
612 {
613 /* The child process is forked and stopped, so it is safe
614 to access its memory without stopping all other threads
615 from other processes. */
616 delete_single_step_breakpoints (child_thr);
617
618 gdb_assert (has_single_step_breakpoints (event_thr));
619 gdb_assert (!has_single_step_breakpoints (child_thr));
620 }
621
622 /* Report the event. */
623 return 0;
624 }
625
626 if (debug_threads)
627 debug_printf ("HEW: Got clone event "
628 "from LWP %ld, new child is LWP %ld\n",
629 lwpid_of (event_thr), new_pid);
630
631 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
632 new_lwp = add_lwp (ptid);
633
634 /* Either we're going to immediately resume the new thread
635 or leave it stopped. linux_resume_one_lwp is a nop if it
636 thinks the thread is currently running, so set this first
637 before calling linux_resume_one_lwp. */
638 new_lwp->stopped = 1;
639
640 /* If we're suspending all threads, leave this one suspended
641 too. If the fork/clone parent is stepping over a breakpoint,
642 all other threads have been suspended already. Leave the
643 child suspended too. */
644 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
645 || event_lwp->bp_reinsert != 0)
646 new_lwp->suspended = 1;
647
648 /* Normally we will get the pending SIGSTOP. But in some cases
649 we might get another signal delivered to the group first.
650 If we do get another signal, be sure not to lose it. */
651 if (WSTOPSIG (status) != SIGSTOP)
652 {
653 new_lwp->stop_expected = 1;
654 new_lwp->status_pending_p = 1;
655 new_lwp->status_pending = status;
656 }
657 else if (report_thread_events)
658 {
659 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
660 new_lwp->status_pending_p = 1;
661 new_lwp->status_pending = status;
662 }
663
664 thread_db_notice_clone (event_thr, ptid);
665
666 /* Don't report the event. */
667 return 1;
668 }
669 else if (event == PTRACE_EVENT_VFORK_DONE)
670 {
671 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
672
673 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
674 {
675 reinsert_single_step_breakpoints (event_thr);
676
677 gdb_assert (has_single_step_breakpoints (event_thr));
678 }
679
680 /* Report the event. */
681 return 0;
682 }
683 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
684 {
685 struct process_info *proc;
686 std::vector<int> syscalls_to_catch;
687 ptid_t event_ptid;
688 pid_t event_pid;
689
690 if (debug_threads)
691 {
692 debug_printf ("HEW: Got exec event from LWP %ld\n",
693 lwpid_of (event_thr));
694 }
695
696 /* Get the event ptid. */
697 event_ptid = ptid_of (event_thr);
698 event_pid = ptid_get_pid (event_ptid);
699
700 /* Save the syscall list from the execing process. */
701 proc = get_thread_process (event_thr);
702 syscalls_to_catch = std::move (proc->syscalls_to_catch);
703
704 /* Delete the execing process and all its threads. */
705 linux_mourn (proc);
706 current_thread = NULL;
707
708 /* Create a new process/lwp/thread. */
709 proc = linux_add_process (event_pid, 0);
710 event_lwp = add_lwp (event_ptid);
711 event_thr = get_lwp_thread (event_lwp);
712 gdb_assert (current_thread == event_thr);
713 linux_arch_setup_thread (event_thr);
714
715 /* Set the event status. */
716 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
717 event_lwp->waitstatus.value.execd_pathname
718 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
719
720 /* Mark the exec status as pending. */
721 event_lwp->stopped = 1;
722 event_lwp->status_pending_p = 1;
723 event_lwp->status_pending = wstat;
724 event_thr->last_resume_kind = resume_continue;
725 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
726
727 /* Update syscall state in the new lwp, effectively mid-syscall too. */
728 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
729
730 /* Restore the list to catch. Don't rely on the client, which is free
731 to avoid sending a new list when the architecture doesn't change.
732 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
733 proc->syscalls_to_catch = std::move (syscalls_to_catch);
734
735 /* Report the event. */
736 *orig_event_lwp = event_lwp;
737 return 0;
738 }
739
740 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
741 }
742
743 /* Return the PC as read from the regcache of LWP, without any
744 adjustment. */
745
746 static CORE_ADDR
747 get_pc (struct lwp_info *lwp)
748 {
749 struct thread_info *saved_thread;
750 struct regcache *regcache;
751 CORE_ADDR pc;
752
753 if (the_low_target.get_pc == NULL)
754 return 0;
755
756 saved_thread = current_thread;
757 current_thread = get_lwp_thread (lwp);
758
759 regcache = get_thread_regcache (current_thread, 1);
760 pc = (*the_low_target.get_pc) (regcache);
761
762 if (debug_threads)
763 debug_printf ("pc is 0x%lx\n", (long) pc);
764
765 current_thread = saved_thread;
766 return pc;
767 }
768
769 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
770 Fill *SYSNO with the number of the syscall that trapped. */
771
772 static void
773 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
774 {
775 struct thread_info *saved_thread;
776 struct regcache *regcache;
777
778 if (the_low_target.get_syscall_trapinfo == NULL)
779 {
780 /* If we cannot get the syscall trapinfo, report an unknown
781 system call number. */
782 *sysno = UNKNOWN_SYSCALL;
783 return;
784 }
785
786 saved_thread = current_thread;
787 current_thread = get_lwp_thread (lwp);
788
789 regcache = get_thread_regcache (current_thread, 1);
790 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
791
792 if (debug_threads)
793 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
794
795 current_thread = saved_thread;
796 }
797
798 static int check_stopped_by_watchpoint (struct lwp_info *child);
799
800 /* Called when the LWP stopped for a signal/trap. If it stopped for a
801 trap check what caused it (breakpoint, watchpoint, trace, etc.),
802 and save the result in the LWP's stop_reason field. If it stopped
803 for a breakpoint, decrement the PC if necessary on the lwp's
804 architecture. Returns true if we now have the LWP's stop PC. */
805
806 static int
807 save_stop_reason (struct lwp_info *lwp)
808 {
809 CORE_ADDR pc;
810 CORE_ADDR sw_breakpoint_pc;
811 struct thread_info *saved_thread;
812 #if USE_SIGTRAP_SIGINFO
813 siginfo_t siginfo;
814 #endif
815
816 if (the_low_target.get_pc == NULL)
817 return 0;
818
819 pc = get_pc (lwp);
820 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
821
822 /* breakpoint_at reads from the current thread. */
823 saved_thread = current_thread;
824 current_thread = get_lwp_thread (lwp);
825
826 #if USE_SIGTRAP_SIGINFO
827 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
828 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
829 {
830 if (siginfo.si_signo == SIGTRAP)
831 {
832 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
833 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
834 {
835 /* The si_code is ambiguous on this arch -- check debug
836 registers. */
837 if (!check_stopped_by_watchpoint (lwp))
838 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
839 }
840 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
841 {
842 /* If we determine the LWP stopped for a SW breakpoint,
843 trust it. Particularly don't check watchpoint
844 registers, because at least on s390, we'd find
845 stopped-by-watchpoint as long as there's a watchpoint
846 set. */
847 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
848 }
849 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
850 {
851 /* This can indicate either a hardware breakpoint or
852 hardware watchpoint. Check debug registers. */
853 if (!check_stopped_by_watchpoint (lwp))
854 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
855 }
856 else if (siginfo.si_code == TRAP_TRACE)
857 {
858 /* We may have single stepped an instruction that
859 triggered a watchpoint. In that case, on some
860 architectures (such as x86), instead of TRAP_HWBKPT,
861 si_code indicates TRAP_TRACE, and we need to check
862 the debug registers separately. */
863 if (!check_stopped_by_watchpoint (lwp))
864 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
865 }
866 }
867 }
868 #else
869 /* We may have just stepped a breakpoint instruction. E.g., in
870 non-stop mode, GDB first tells the thread A to step a range, and
871 then the user inserts a breakpoint inside the range. In that
872 case we need to report the breakpoint PC. */
873 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
874 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
875 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
876
877 if (hardware_breakpoint_inserted_here (pc))
878 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
879
880 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
881 check_stopped_by_watchpoint (lwp);
882 #endif
883
884 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
885 {
886 if (debug_threads)
887 {
888 struct thread_info *thr = get_lwp_thread (lwp);
889
890 debug_printf ("CSBB: %s stopped by software breakpoint\n",
891 target_pid_to_str (ptid_of (thr)));
892 }
893
894 /* Back up the PC if necessary. */
895 if (pc != sw_breakpoint_pc)
896 {
897 struct regcache *regcache
898 = get_thread_regcache (current_thread, 1);
899 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
900 }
901
902 /* Update this so we record the correct stop PC below. */
903 pc = sw_breakpoint_pc;
904 }
905 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
906 {
907 if (debug_threads)
908 {
909 struct thread_info *thr = get_lwp_thread (lwp);
910
911 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
912 target_pid_to_str (ptid_of (thr)));
913 }
914 }
915 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
916 {
917 if (debug_threads)
918 {
919 struct thread_info *thr = get_lwp_thread (lwp);
920
921 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
922 target_pid_to_str (ptid_of (thr)));
923 }
924 }
925 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
926 {
927 if (debug_threads)
928 {
929 struct thread_info *thr = get_lwp_thread (lwp);
930
931 debug_printf ("CSBB: %s stopped by trace\n",
932 target_pid_to_str (ptid_of (thr)));
933 }
934 }
935
936 lwp->stop_pc = pc;
937 current_thread = saved_thread;
938 return 1;
939 }
940
941 static struct lwp_info *
942 add_lwp (ptid_t ptid)
943 {
944 struct lwp_info *lwp;
945
946 lwp = XCNEW (struct lwp_info);
947
948 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
949
950 if (the_low_target.new_thread != NULL)
951 the_low_target.new_thread (lwp);
952
953 lwp->thread = add_thread (ptid, lwp);
954
955 return lwp;
956 }
957
958 /* Callback to be used when calling fork_inferior, responsible for
959 actually initiating the tracing of the inferior. */
960
961 static void
962 linux_ptrace_fun ()
963 {
964 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
965 (PTRACE_TYPE_ARG4) 0) < 0)
966 trace_start_error_with_name ("ptrace");
967
968 if (setpgid (0, 0) < 0)
969 trace_start_error_with_name ("setpgid");
970
971 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
972 stdout to stderr so that inferior i/o doesn't corrupt the connection.
973 Also, redirect stdin to /dev/null. */
974 if (remote_connection_is_stdio ())
975 {
976 if (close (0) < 0)
977 trace_start_error_with_name ("close");
978 if (open ("/dev/null", O_RDONLY) < 0)
979 trace_start_error_with_name ("open");
980 if (dup2 (2, 1) < 0)
981 trace_start_error_with_name ("dup2");
982 if (write (2, "stdin/stdout redirected\n",
983 sizeof ("stdin/stdout redirected\n") - 1) < 0)
984 {
985 /* Errors ignored. */;
986 }
987 }
988 }
989
990 /* Start an inferior process and return its pid.
991 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
992 are its arguments. */
993
994 static int
995 linux_create_inferior (const char *program,
996 const std::vector<char *> &program_args)
997 {
998 struct lwp_info *new_lwp;
999 int pid;
1000 ptid_t ptid;
1001 struct cleanup *restore_personality
1002 = maybe_disable_address_space_randomization (disable_randomization);
1003 std::string str_program_args = stringify_argv (program_args);
1004
1005 pid = fork_inferior (program,
1006 str_program_args.c_str (),
1007 get_environ ()->envp (), linux_ptrace_fun,
1008 NULL, NULL, NULL, NULL);
1009
1010 do_cleanups (restore_personality);
1011
1012 linux_add_process (pid, 0);
1013
1014 ptid = ptid_build (pid, pid, 0);
1015 new_lwp = add_lwp (ptid);
1016 new_lwp->must_set_ptrace_flags = 1;
1017
1018 post_fork_inferior (pid, program);
1019
1020 return pid;
1021 }
1022
1023 /* Implement the post_create_inferior target_ops method. */
1024
1025 static void
1026 linux_post_create_inferior (void)
1027 {
1028 struct lwp_info *lwp = get_thread_lwp (current_thread);
1029
1030 linux_arch_setup ();
1031
1032 if (lwp->must_set_ptrace_flags)
1033 {
1034 struct process_info *proc = current_process ();
1035 int options = linux_low_ptrace_options (proc->attached);
1036
1037 linux_enable_event_reporting (lwpid_of (current_thread), options);
1038 lwp->must_set_ptrace_flags = 0;
1039 }
1040 }
1041
1042 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1043 error. */
1044
1045 int
1046 linux_attach_lwp (ptid_t ptid)
1047 {
1048 struct lwp_info *new_lwp;
1049 int lwpid = ptid_get_lwp (ptid);
1050
1051 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1052 != 0)
1053 return errno;
1054
1055 new_lwp = add_lwp (ptid);
1056
1057 /* We need to wait for SIGSTOP before being able to make the next
1058 ptrace call on this LWP. */
1059 new_lwp->must_set_ptrace_flags = 1;
1060
1061 if (linux_proc_pid_is_stopped (lwpid))
1062 {
1063 if (debug_threads)
1064 debug_printf ("Attached to a stopped process\n");
1065
1066 /* The process is definitely stopped. It is in a job control
1067 stop, unless the kernel predates the TASK_STOPPED /
1068 TASK_TRACED distinction, in which case it might be in a
1069 ptrace stop. Make sure it is in a ptrace stop; from there we
1070 can kill it, signal it, et cetera.
1071
1072 First make sure there is a pending SIGSTOP. Since we are
1073 already attached, the process can not transition from stopped
1074 to running without a PTRACE_CONT; so we know this signal will
1075 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1076 probably already in the queue (unless this kernel is old
1077 enough to use TASK_STOPPED for ptrace stops); but since
1078 SIGSTOP is not an RT signal, it can only be queued once. */
1079 kill_lwp (lwpid, SIGSTOP);
1080
1081 /* Finally, resume the stopped process. This will deliver the
1082 SIGSTOP (or a higher priority signal, just like normal
1083 PTRACE_ATTACH), which we'll catch later on. */
1084 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1085 }
1086
1087 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1088 brings it to a halt.
1089
1090 There are several cases to consider here:
1091
1092 1) gdbserver has already attached to the process and is being notified
1093 of a new thread that is being created.
1094 In this case we should ignore that SIGSTOP and resume the
1095 process. This is handled below by setting stop_expected = 1,
1096 and the fact that add_thread sets last_resume_kind ==
1097 resume_continue.
1098
1099 2) This is the first thread (the process thread), and we're attaching
1100 to it via attach_inferior.
1101 In this case we want the process thread to stop.
1102 This is handled by having linux_attach set last_resume_kind ==
1103 resume_stop after we return.
1104
1105 If the pid we are attaching to is also the tgid, we attach to and
1106 stop all the existing threads. Otherwise, we attach to pid and
1107 ignore any other threads in the same group as this pid.
1108
1109 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1110 existing threads.
1111 In this case we want the thread to stop.
1112 FIXME: This case is currently not properly handled.
1113 We should wait for the SIGSTOP but don't. Things work apparently
1114 because enough time passes between when we ptrace (ATTACH) and when
1115 gdb makes the next ptrace call on the thread.
1116
1117 On the other hand, if we are currently trying to stop all threads, we
1118 should treat the new thread as if we had sent it a SIGSTOP. This works
1119 because we are guaranteed that the add_lwp call above added us to the
1120 end of the list, and so the new thread has not yet reached
1121 wait_for_sigstop (but will). */
1122 new_lwp->stop_expected = 1;
1123
1124 return 0;
1125 }
1126
1127 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1128 already attached. Returns true if a new LWP is found, false
1129 otherwise. */
1130
1131 static int
1132 attach_proc_task_lwp_callback (ptid_t ptid)
1133 {
1134 /* Is this a new thread? */
1135 if (find_thread_ptid (ptid) == NULL)
1136 {
1137 int lwpid = ptid_get_lwp (ptid);
1138 int err;
1139
1140 if (debug_threads)
1141 debug_printf ("Found new lwp %d\n", lwpid);
1142
1143 err = linux_attach_lwp (ptid);
1144
1145 /* Be quiet if we simply raced with the thread exiting. EPERM
1146 is returned if the thread's task still exists, and is marked
1147 as exited or zombie, as well as other conditions, so in that
1148 case, confirm the status in /proc/PID/status. */
1149 if (err == ESRCH
1150 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1151 {
1152 if (debug_threads)
1153 {
1154 debug_printf ("Cannot attach to lwp %d: "
1155 "thread is gone (%d: %s)\n",
1156 lwpid, err, strerror (err));
1157 }
1158 }
1159 else if (err != 0)
1160 {
1161 warning (_("Cannot attach to lwp %d: %s"),
1162 lwpid,
1163 linux_ptrace_attach_fail_reason_string (ptid, err));
1164 }
1165
1166 return 1;
1167 }
1168 return 0;
1169 }
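
/* Editorial note (not in the original file): the driver for this
   callback, linux_proc_attach_tgid_threads (nat/linux-procfs.c),
   rescans /proc/PID/task/ until two consecutive passes find no new
   LWP, roughly:

     int new_threads_found;
     do
       {
         new_threads_found = 0;
         // for each tid in /proc/PID/task/:
         //   new_threads_found |= attach_lwp (ptid_build (pid, tid, 0));
       }
     while (new_threads_found);

   The rescan is what makes attach robust against threads being
   spawned while we are in the middle of attaching.  */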
1170
1171 static void async_file_mark (void);
1172
1173 /* Attach to PID. If PID is the tgid, attach to it and all
1174 of its threads. */
1175
1176 static int
1177 linux_attach (unsigned long pid)
1178 {
1179 struct process_info *proc;
1180 struct thread_info *initial_thread;
1181 ptid_t ptid = ptid_build (pid, pid, 0);
1182 int err;
1183
1184 /* Attach to PID. We will check for other threads
1185 soon. */
1186 err = linux_attach_lwp (ptid);
1187 if (err != 0)
1188 error ("Cannot attach to process %ld: %s",
1189 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1190
1191 proc = linux_add_process (pid, 1);
1192
1193 /* Don't ignore the initial SIGSTOP if we just attached to this
1194 process. It will be collected by wait shortly. */
1195 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1196 initial_thread->last_resume_kind = resume_stop;
1197
1198 /* We must attach to every LWP. If /proc is mounted, use that to
1199 find them now. On the one hand, the inferior may be using raw
1200 clone instead of using pthreads. On the other hand, even if it
1201 is using pthreads, GDB may not be connected yet (thread_db needs
1202 to do symbol lookups, through qSymbol). Also, thread_db walks
1203 structures in the inferior's address space to find the list of
1204 threads/LWPs, and those structures may well be corrupted. Note
1205 that once thread_db is loaded, we'll still use it to list threads
1206 and associate pthread info with each LWP. */
1207 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1208
1209 /* GDB will shortly read the xml target description for this
1210 process, to figure out the process' architecture. But the target
1211 description is only filled in when the first process/thread in
1212 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1213 that now, otherwise, if GDB is fast enough, it could read the
1214 target description _before_ that initial stop. */
1215 if (non_stop)
1216 {
1217 struct lwp_info *lwp;
1218 int wstat, lwpid;
1219 ptid_t pid_ptid = pid_to_ptid (pid);
1220
1221 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1222 &wstat, __WALL);
1223 gdb_assert (lwpid > 0);
1224
1225 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1226
1227 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1228 {
1229 lwp->status_pending_p = 1;
1230 lwp->status_pending = wstat;
1231 }
1232
1233 initial_thread->last_resume_kind = resume_continue;
1234
1235 async_file_mark ();
1236
1237 gdb_assert (proc->tdesc != NULL);
1238 }
1239
1240 return 0;
1241 }
1242
1243 struct counter
1244 {
1245 int pid;
1246 int count;
1247 };
1248
1249 static int
1250 second_thread_of_pid_p (thread_info *thread, void *args)
1251 {
1252 struct counter *counter = (struct counter *) args;
1253
1254 if (thread->id.pid () == counter->pid)
1255 {
1256 if (++counter->count > 1)
1257 return 1;
1258 }
1259
1260 return 0;
1261 }
1262
1263 static int
1264 last_thread_of_process_p (int pid)
1265 {
1266 struct counter counter = { pid , 0 };
1267
1268 return (find_inferior (&all_threads,
1269 second_thread_of_pid_p, &counter) == NULL);
1270 }
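
/* Editorial note (not in the original file): this is the usual
   find_inferior contract, relied on throughout this file -- the
   callback is invoked for each thread, a non-zero return stops the
   walk and find_inferior returns that thread, and a NULL result means
   no callback invocation returned non-zero.  Here, then, NULL means
   no second thread of PID was found.  */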
1271
1272 /* Kill LWP. */
1273
1274 static void
1275 linux_kill_one_lwp (struct lwp_info *lwp)
1276 {
1277 struct thread_info *thr = get_lwp_thread (lwp);
1278 int pid = lwpid_of (thr);
1279
1280 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1281 there is no signal context, and ptrace(PTRACE_KILL) (or
1282 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1283 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1284 alternative is to kill with SIGKILL. We only need one SIGKILL
1285 per process, not one for each thread. But since we still
1286 support debugging programs using raw clone without CLONE_THREAD,
1287 we send one for each thread. For years, we used PTRACE_KILL
1288 only, so we're being a bit paranoid about some old kernels where
1289 PTRACE_KILL might work better (dubious if there are any such, but
1290 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1291 second, and so we're fine everywhere. */
1292
1293 errno = 0;
1294 kill_lwp (pid, SIGKILL);
1295 if (debug_threads)
1296 {
1297 int save_errno = errno;
1298
1299 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1300 target_pid_to_str (ptid_of (thr)),
1301 save_errno ? strerror (save_errno) : "OK");
1302 }
1303
1304 errno = 0;
1305 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1306 if (debug_threads)
1307 {
1308 int save_errno = errno;
1309
1310 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1311 target_pid_to_str (ptid_of (thr)),
1312 save_errno ? strerror (save_errno) : "OK");
1313 }
1314 }
1315
1316 /* Kill LWP and wait for it to die. */
1317
1318 static void
1319 kill_wait_lwp (struct lwp_info *lwp)
1320 {
1321 struct thread_info *thr = get_lwp_thread (lwp);
1322 int pid = ptid_get_pid (ptid_of (thr));
1323 int lwpid = ptid_get_lwp (ptid_of (thr));
1324 int wstat;
1325 int res;
1326
1327 if (debug_threads)
1328 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1329
1330 do
1331 {
1332 linux_kill_one_lwp (lwp);
1333
1334 /* Make sure it died. Notes:
1335
1336 - The loop is most likely unnecessary.
1337
1338 - We don't use linux_wait_for_event as that could delete lwps
1339 while we're iterating over them. We're not interested in
1340 any pending status at this point, only in making sure all
1341 wait status on the kernel side are collected until the
1342 process is reaped.
1343
1344 - We don't use __WALL here as the __WALL emulation relies on
1345 SIGCHLD, and killing a stopped process doesn't generate
1346 one, nor an exit status.
1347 */
1348 res = my_waitpid (lwpid, &wstat, 0);
1349 if (res == -1 && errno == ECHILD)
1350 res = my_waitpid (lwpid, &wstat, __WCLONE);
1351 } while (res > 0 && WIFSTOPPED (wstat));
1352
1353 /* Even if it was stopped, the child may have already disappeared.
1354 E.g., if it was killed by SIGKILL. */
1355 if (res < 0 && errno != ECHILD)
1356 perror_with_name ("kill_wait_lwp");
1357 }
1358
1359 /* Callback for `find_inferior'. Kills an lwp of a given process,
1360 except the leader. */
1361
1362 static int
1363 kill_one_lwp_callback (thread_info *thread, void *args)
1364 {
1365 struct lwp_info *lwp = get_thread_lwp (thread);
1366 int pid = * (int *) args;
1367
1368 if (thread->id.pid () != pid)
1369 return 0;
1370
1371 /* We avoid killing the first thread here, because of a Linux kernel (at
1372 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1373 the children get a chance to be reaped, it will remain a zombie
1374 forever. */
1375
1376 if (lwpid_of (thread) == pid)
1377 {
1378 if (debug_threads)
1379 debug_printf ("lkop: is last of process %s\n",
1380 target_pid_to_str (thread->id));
1381 return 0;
1382 }
1383
1384 kill_wait_lwp (lwp);
1385 return 0;
1386 }
1387
1388 static int
1389 linux_kill (int pid)
1390 {
1391 struct process_info *process;
1392 struct lwp_info *lwp;
1393
1394 process = find_process_pid (pid);
1395 if (process == NULL)
1396 return -1;
1397
1398 /* If we're killing a running inferior, make sure it is stopped
1399 first, as PTRACE_KILL will not work otherwise. */
1400 stop_all_lwps (0, NULL);
1401
1402 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1403
1404 /* See the comment in linux_kill_one_lwp. We did not kill the first
1405 thread in the list, so do so now. */
1406 lwp = find_lwp_pid (pid_to_ptid (pid));
1407
1408 if (lwp == NULL)
1409 {
1410 if (debug_threads)
1411 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1412 pid);
1413 }
1414 else
1415 kill_wait_lwp (lwp);
1416
1417 the_target->mourn (process);
1418
1419 /* Since we presently can only stop all lwps of all processes, we
1420 need to unstop lwps of other processes. */
1421 unstop_all_lwps (0, NULL);
1422 return 0;
1423 }
1424
1425 /* Get pending signal of THREAD, for detaching purposes. This is the
1426 signal the thread last stopped for, which we need to deliver to the
1427 thread when detaching, otherwise, it'd be suppressed/lost. */
1428
1429 static int
1430 get_detach_signal (struct thread_info *thread)
1431 {
1432 enum gdb_signal signo = GDB_SIGNAL_0;
1433 int status;
1434 struct lwp_info *lp = get_thread_lwp (thread);
1435
1436 if (lp->status_pending_p)
1437 status = lp->status_pending;
1438 else
1439 {
1440 /* If the thread had been suspended by gdbserver, and it stopped
1441 cleanly, then it'll have stopped with SIGSTOP. But we don't
1442 want to deliver that SIGSTOP. */
1443 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1444 || thread->last_status.value.sig == GDB_SIGNAL_0)
1445 return 0;
1446
1447 /* Otherwise, we may need to deliver the signal we
1448 intercepted. */
1449 status = lp->last_status;
1450 }
1451
1452 if (!WIFSTOPPED (status))
1453 {
1454 if (debug_threads)
1455 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1456 target_pid_to_str (ptid_of (thread)));
1457 return 0;
1458 }
1459
1460 /* Extended wait statuses aren't real SIGTRAPs. */
1461 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1462 {
1463 if (debug_threads)
1464 debug_printf ("GPS: lwp %s had stopped with extended "
1465 "status: no pending signal\n",
1466 target_pid_to_str (ptid_of (thread)));
1467 return 0;
1468 }
1469
1470 signo = gdb_signal_from_host (WSTOPSIG (status));
1471
1472 if (program_signals_p && !program_signals[signo])
1473 {
1474 if (debug_threads)
1475 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1476 target_pid_to_str (ptid_of (thread)),
1477 gdb_signal_to_string (signo));
1478 return 0;
1479 }
1480 else if (!program_signals_p
1481 /* If we have no way to know which signals GDB does not
1482 want to have passed to the program, assume
1483 SIGTRAP/SIGINT, which is GDB's default. */
1484 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1485 {
1486 if (debug_threads)
1487 debug_printf ("GPS: lwp %s had signal %s, "
1488 "but we don't know if we should pass it. "
1489 "Default to not.\n",
1490 target_pid_to_str (ptid_of (thread)),
1491 gdb_signal_to_string (signo));
1492 return 0;
1493 }
1494 else
1495 {
1496 if (debug_threads)
1497 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1498 target_pid_to_str (ptid_of (thread)),
1499 gdb_signal_to_string (signo));
1500
1501 return WSTOPSIG (status);
1502 }
1503 }
1504
1505 /* Detach from LWP. */
1506
1507 static void
1508 linux_detach_one_lwp (struct lwp_info *lwp)
1509 {
1510 struct thread_info *thread = get_lwp_thread (lwp);
1511 int sig;
1512 int lwpid;
1513
1514 /* If there is a pending SIGSTOP, get rid of it. */
1515 if (lwp->stop_expected)
1516 {
1517 if (debug_threads)
1518 debug_printf ("Sending SIGCONT to %s\n",
1519 target_pid_to_str (ptid_of (thread)));
1520
1521 kill_lwp (lwpid_of (thread), SIGCONT);
1522 lwp->stop_expected = 0;
1523 }
1524
1525 /* Pass on any pending signal for this thread. */
1526 sig = get_detach_signal (thread);
1527
1528 /* Preparing to resume may try to write registers, and fail if the
1529 lwp is zombie. If that happens, ignore the error. We'll handle
1530 it below, when detach fails with ESRCH. */
1531 TRY
1532 {
1533 /* Flush any pending changes to the process's registers. */
1534 regcache_invalidate_thread (thread);
1535
1536 /* Finally, let it resume. */
1537 if (the_low_target.prepare_to_resume != NULL)
1538 the_low_target.prepare_to_resume (lwp);
1539 }
1540 CATCH (ex, RETURN_MASK_ERROR)
1541 {
1542 if (!check_ptrace_stopped_lwp_gone (lwp))
1543 throw_exception (ex);
1544 }
1545 END_CATCH
1546
1547 lwpid = lwpid_of (thread);
1548 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1549 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1550 {
1551 int save_errno = errno;
1552
1553 /* We know the thread exists, so ESRCH must mean the lwp is
1554 zombie. This can happen if one of the already-detached
1555 threads exits the whole thread group. In that case we're
1556 still attached, and must reap the lwp. */
1557 if (save_errno == ESRCH)
1558 {
1559 int ret, status;
1560
1561 ret = my_waitpid (lwpid, &status, __WALL);
1562 if (ret == -1)
1563 {
1564 warning (_("Couldn't reap LWP %d while detaching: %s"),
1565 lwpid, strerror (errno));
1566 }
1567 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1568 {
1569 warning (_("Reaping LWP %d while detaching "
1570 "returned unexpected status 0x%x"),
1571 lwpid, status);
1572 }
1573 }
1574 else
1575 {
1576 error (_("Can't detach %s: %s"),
1577 target_pid_to_str (ptid_of (thread)),
1578 strerror (save_errno));
1579 }
1580 }
1581 else if (debug_threads)
1582 {
1583 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1584 target_pid_to_str (ptid_of (thread)),
1585 strsignal (sig));
1586 }
1587
1588 delete_lwp (lwp);
1589 }
1590
1591 /* Callback for find_inferior. Detaches from non-leader threads of a
1592 given process. */
1593
1594 static int
1595 linux_detach_lwp_callback (thread_info *thread, void *args)
1596 {
1597 struct lwp_info *lwp = get_thread_lwp (thread);
1598 int pid = *(int *) args;
1599 int lwpid = lwpid_of (thread);
1600
1601 /* Skip other processes. */
1602 if (thread->id.pid () != pid)
1603 return 0;
1604
1605 /* We don't actually detach from the thread group leader just yet.
1606 If the thread group exits, we must reap the zombie clone lwps
1607 before we're able to reap the leader. */
1608 if (thread->id.pid () == lwpid)
1609 return 0;
1610
1611 linux_detach_one_lwp (lwp);
1612 return 0;
1613 }
1614
1615 static int
1616 linux_detach (int pid)
1617 {
1618 struct process_info *process;
1619 struct lwp_info *main_lwp;
1620
1621 process = find_process_pid (pid);
1622 if (process == NULL)
1623 return -1;
1624
1625 /* If a step over is already in progress, let it finish first,
1626 otherwise nesting a stabilize_threads operation on top gets real
1627 messy. */
1628 complete_ongoing_step_over ();
1629
1630 /* Stop all threads before detaching. First, ptrace requires that
1631 the thread is stopped to successfully detach. Second, thread_db
1632 may need to uninstall thread event breakpoints from memory, which
1633 only works with a stopped process anyway. */
1634 stop_all_lwps (0, NULL);
1635
1636 #ifdef USE_THREAD_DB
1637 thread_db_detach (process);
1638 #endif
1639
1640 /* Stabilize threads (move out of jump pads). */
1641 stabilize_threads ();
1642
1643 /* Detach from the clone lwps first. If the thread group exits just
1644 while we're detaching, we must reap the clone lwps before we're
1645 able to reap the leader. */
1646 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1647
1648 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1649 linux_detach_one_lwp (main_lwp);
1650
1651 the_target->mourn (process);
1652
1653 /* Since we presently can only stop all lwps of all processes, we
1654 need to unstop lwps of other processes. */
1655 unstop_all_lwps (0, NULL);
1656 return 0;
1657 }
1658
1659 /* Remove all LWPs that belong to process PROC from the lwp list. */
1660
1661 static int
1662 delete_lwp_callback (thread_info *thread, void *proc)
1663 {
1664 struct lwp_info *lwp = get_thread_lwp (thread);
1665 struct process_info *process = (struct process_info *) proc;
1666
1667 if (pid_of (thread) == pid_of (process))
1668 delete_lwp (lwp);
1669
1670 return 0;
1671 }
1672
1673 static void
1674 linux_mourn (struct process_info *process)
1675 {
1676 struct process_info_private *priv;
1677
1678 #ifdef USE_THREAD_DB
1679 thread_db_mourn (process);
1680 #endif
1681
1682 find_inferior (&all_threads, delete_lwp_callback, process);
1683
1684 /* Free all private data. */
1685 priv = process->priv;
1686 if (the_low_target.delete_process != NULL)
1687 the_low_target.delete_process (priv->arch_private);
1688 else
1689 gdb_assert (priv->arch_private == NULL);
1690 free (priv);
1691 process->priv = NULL;
1692
1693 remove_process (process);
1694 }
1695
1696 static void
1697 linux_join (int pid)
1698 {
1699 int status, ret;
1700
1701 do {
1702 ret = my_waitpid (pid, &status, 0);
1703 if (WIFEXITED (status) || WIFSIGNALED (status))
1704 break;
1705 } while (ret != -1 || errno != ECHILD);
1706 }
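
/* Editorial note (not in the original file): the loop above reads as
   "keep reaping until waitpid fails with ECHILD", i.e. until the
   kernel has no children of PID left for us, with the WIFEXITED /
   WIFSIGNALED check breaking out early once a real exit status has
   been collected.  */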
1707
1708 /* Return nonzero if the given thread is still alive. */
1709 static int
1710 linux_thread_alive (ptid_t ptid)
1711 {
1712 struct lwp_info *lwp = find_lwp_pid (ptid);
1713
1714 /* We assume we always know if a thread exits. If a whole process
1715 exited but we still haven't been able to report it to GDB, we'll
1716 hold on to the last lwp of the dead process. */
1717 if (lwp != NULL)
1718 return !lwp_is_marked_dead (lwp);
1719 else
1720 return 0;
1721 }
1722
1723 /* Return 1 if this lwp still has an interesting status pending, and
1724 0 if not (e.g., it had stopped for a breakpoint that is since
1725 gone). */
1726
1727 static int
1728 thread_still_has_status_pending_p (struct thread_info *thread)
1729 {
1730 struct lwp_info *lp = get_thread_lwp (thread);
1731
1732 if (!lp->status_pending_p)
1733 return 0;
1734
1735 if (thread->last_resume_kind != resume_stop
1736 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1737 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1738 {
1739 struct thread_info *saved_thread;
1740 CORE_ADDR pc;
1741 int discard = 0;
1742
1743 gdb_assert (lp->last_status != 0);
1744
1745 pc = get_pc (lp);
1746
1747 saved_thread = current_thread;
1748 current_thread = thread;
1749
1750 if (pc != lp->stop_pc)
1751 {
1752 if (debug_threads)
1753 debug_printf ("PC of %ld changed\n",
1754 lwpid_of (thread));
1755 discard = 1;
1756 }
1757
1758 #if !USE_SIGTRAP_SIGINFO
1759 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1760 && !(*the_low_target.breakpoint_at) (pc))
1761 {
1762 if (debug_threads)
1763 debug_printf ("previous SW breakpoint of %ld gone\n",
1764 lwpid_of (thread));
1765 discard = 1;
1766 }
1767 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1768 && !hardware_breakpoint_inserted_here (pc))
1769 {
1770 if (debug_threads)
1771 debug_printf ("previous HW breakpoint of %ld gone\n",
1772 lwpid_of (thread));
1773 discard = 1;
1774 }
1775 #endif
1776
1777 current_thread = saved_thread;
1778
1779 if (discard)
1780 {
1781 if (debug_threads)
1782 debug_printf ("discarding pending breakpoint status\n");
1783 lp->status_pending_p = 0;
1784 return 0;
1785 }
1786 }
1787
1788 return 1;
1789 }
1790
1791 /* Returns true if LWP is resumed from the client's perspective. */
1792
1793 static int
1794 lwp_resumed (struct lwp_info *lwp)
1795 {
1796 struct thread_info *thread = get_lwp_thread (lwp);
1797
1798 if (thread->last_resume_kind != resume_stop)
1799 return 1;
1800
1801 /* Did gdb send us a `vCont;t', but we haven't reported the
1802 corresponding stop to gdb yet? If so, the thread is still
1803 resumed/running from gdb's perspective. */
1804 if (thread->last_resume_kind == resume_stop
1805 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1806 return 1;
1807
1808 return 0;
1809 }
1810
1811 /* Return 1 if this lwp has an interesting status pending. */
1812 static int
1813 status_pending_p_callback (thread_info *thread, void *arg)
1814 {
1815 struct lwp_info *lp = get_thread_lwp (thread);
1816 ptid_t ptid = * (ptid_t *) arg;
1817
1818 /* Check if we're only interested in events from a specific process
1819 or a specific LWP. */
1820 if (!ptid_match (ptid_of (thread), ptid))
1821 return 0;
1822
1823 if (!lwp_resumed (lp))
1824 return 0;
1825
1826 if (lp->status_pending_p
1827 && !thread_still_has_status_pending_p (thread))
1828 {
1829 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1830 return 0;
1831 }
1832
1833 return lp->status_pending_p;
1834 }
1835
1836 static int
1837 same_lwp (thread_info *thread, void *data)
1838 {
1839 ptid_t ptid = *(ptid_t *) data;
1840 int lwp;
1841
1842 if (ptid_get_lwp (ptid) != 0)
1843 lwp = ptid_get_lwp (ptid);
1844 else
1845 lwp = ptid_get_pid (ptid);
1846
1847 if (thread->id.lwp () == lwp)
1848 return 1;
1849
1850 return 0;
1851 }
1852
1853 struct lwp_info *
1854 find_lwp_pid (ptid_t ptid)
1855 {
1856 thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);
1857
1858 if (thread == NULL)
1859 return NULL;
1860
1861 return get_thread_lwp (thread);
1862 }
1863
1864 /* Return the number of known LWPs in the tgid given by PID. */
1865
1866 static int
1867 num_lwps (int pid)
1868 {
1869 int count = 0;
1870
1871 for_each_thread (pid, [&] (thread_info *thread)
1872 {
1873 count++;
1874 });
1875
1876 return count;
1877 }
1878
1879 /* See nat/linux-nat.h. */
1880
1881 struct lwp_info *
1882 iterate_over_lwps (ptid_t filter,
1883 iterate_over_lwps_ftype callback,
1884 void *data)
1885 {
1886 thread_info *thread = find_thread (filter, [&] (thread_info *thread)
1887 {
1888 lwp_info *lwp = get_thread_lwp (thread);
1889
1890 return callback (lwp, data);
1891 });
1892
1893 if (thread == NULL)
1894 return NULL;
1895
1896 return get_thread_lwp (thread);
1897 }
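
/* Illustrative sketch, not from the original source: an
   iterate_over_lwps_ftype callback returns nonzero to stop the
   iteration, and the matching LWP is returned to the caller.  The
   callback below is a hypothetical example.  */
#if 0
static int
is_stopped_lwp (struct lwp_info *lwp, void *data)
{
  return lwp->stopped;
}

/* Usage: iterate_over_lwps (minus_one_ptid, is_stopped_lwp, NULL)
   returns the first stopped LWP, or NULL if none.  */
#endif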
1898
1899 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1900 their exits until all other threads in the group have exited. */
1901
1902 static void
1903 check_zombie_leaders (void)
1904 {
1905 for_each_process ([] (process_info *proc) {
1906 pid_t leader_pid = pid_of (proc);
1907 struct lwp_info *leader_lp;
1908
1909 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1910
1911 if (debug_threads)
1912 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1913 "num_lwps=%d, zombie=%d\n",
1914 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1915 linux_proc_pid_is_zombie (leader_pid));
1916
1917 if (leader_lp != NULL && !leader_lp->stopped
1918 /* Check if there are other threads in the group, as we may
1919 have raced with the inferior simply exiting. */
1920 && !last_thread_of_process_p (leader_pid)
1921 && linux_proc_pid_is_zombie (leader_pid))
1922 {
1923 /* A leader zombie can mean one of two things:
1924
1925 - It exited, and there's an exit status pending
1926 available, or only the leader exited (not the whole
1927 program). In the latter case, we can't waitpid the
1928 leader's exit status until all other threads are gone.
1929
1930 - There are 3 or more threads in the group, and a thread
1931 other than the leader exec'd. On an exec, the Linux
1932 kernel destroys all other threads (except the execing
1933 one) in the thread group, and resets the execing thread's
1934 tid to the tgid. No exit notification is sent for the
1935 execing thread -- from the ptracer's perspective, it
1936 appears as though the execing thread just vanishes.
1937 Until we reap all other threads except the leader and the
1938 execing thread, the leader will be zombie, and the
1939 execing thread will be in `D (disc sleep)'. As soon as
1940 all other threads are reaped, the execing thread changes
1941 its tid to the tgid, and the previous (zombie) leader
1942 vanishes, giving place to the "new" leader. We could try
1943 distinguishing the exit and exec cases, by waiting once
1944 more, and seeing if something comes out, but it doesn't
1945 sound useful. The previous leader _does_ go away, and
1946 we'll re-add the new one once we see the exec event
1947 (which is just the same as what would happen if the
1948 previous leader did exit voluntarily before some other
1949 thread execs). */
1950
1951 if (debug_threads)
1952 debug_printf ("CZL: Thread group leader %d zombie "
1953 "(it exited, or another thread execd).\n",
1954 leader_pid);
1955
1956 delete_lwp (leader_lp);
1957 }
1958 });
1959 }
1960
1961 /* Callback for `find_inferior'. Returns the first LWP that is not
1962 stopped. ARG is a PTID filter. */
1963
1964 static int
1965 not_stopped_callback (thread_info *thread, void *arg)
1966 {
1967 struct lwp_info *lwp;
1968 ptid_t filter = *(ptid_t *) arg;
1969
1970 if (!ptid_match (ptid_of (thread), filter))
1971 return 0;
1972
1973 lwp = get_thread_lwp (thread);
1974 if (!lwp->stopped)
1975 return 1;
1976
1977 return 0;
1978 }
1979
1980 /* Increment LWP's suspend count. */
1981
1982 static void
1983 lwp_suspended_inc (struct lwp_info *lwp)
1984 {
1985 lwp->suspended++;
1986
1987 if (debug_threads && lwp->suspended > 4)
1988 {
1989 struct thread_info *thread = get_lwp_thread (lwp);
1990
1991 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1992 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1993 }
1994 }
1995
1996 /* Decrement LWP's suspend count. */
1997
1998 static void
1999 lwp_suspended_decr (struct lwp_info *lwp)
2000 {
2001 lwp->suspended--;
2002
2003 if (lwp->suspended < 0)
2004 {
2005 struct thread_info *thread = get_lwp_thread (lwp);
2006
2007 internal_error (__FILE__, __LINE__,
2008 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2009 lwp->suspended);
2010 }
2011 }
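
/* Editor's note, not from the original source: every
   lwp_suspended_inc must be paired with a later lwp_suspended_decr,
   or the internal error above will eventually fire.
   handle_tracepoints below shows the canonical pattern, asserting a
   zero count on both sides of the suspended region.  */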
2012
2013 /* This function should only be called if the LWP got a SIGTRAP.
2014
2015 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2016 event was handled, 0 otherwise. */
2017
2018 static int
2019 handle_tracepoints (struct lwp_info *lwp)
2020 {
2021 struct thread_info *tinfo = get_lwp_thread (lwp);
2022 int tpoint_related_event = 0;
2023
2024 gdb_assert (lwp->suspended == 0);
2025
2026 /* If this tracepoint hit causes a tracing stop, we'll immediately
2027 uninsert tracepoints. To do this, we temporarily pause all
2028 threads, unpatch away, and then unpause threads. We need to make
2029 sure the unpausing doesn't resume LWP too. */
2030 lwp_suspended_inc (lwp);
2031
2032 /* And we need to be sure that any all-threads-stopping doesn't try
2033 to move threads out of the jump pads, as it could deadlock the
2034 inferior (LWP could be in the jump pad, maybe even holding the
2035 lock). */
2036
2037 /* Do any necessary step collect actions. */
2038 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2039
2040 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2041
2042 /* See if we just hit a tracepoint and do its main collect
2043 actions. */
2044 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2045
2046 lwp_suspended_decr (lwp);
2047
2048 gdb_assert (lwp->suspended == 0);
2049 gdb_assert (!stabilizing_threads
2050 || (lwp->collecting_fast_tracepoint
2051 != fast_tpoint_collect_result::not_collecting));
2052
2053 if (tpoint_related_event)
2054 {
2055 if (debug_threads)
2056 debug_printf ("got a tracepoint event\n");
2057 return 1;
2058 }
2059
2060 return 0;
2061 }
2062
2063 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2064 collection status. */
2065
2066 static fast_tpoint_collect_result
2067 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2068 struct fast_tpoint_collect_status *status)
2069 {
2070 CORE_ADDR thread_area;
2071 struct thread_info *thread = get_lwp_thread (lwp);
2072
2073 if (the_low_target.get_thread_area == NULL)
2074 return fast_tpoint_collect_result::not_collecting;
2075
2076 /* Get the thread area address. This is used to recognize which
2077 thread is which when tracing with the in-process agent library.
2078 We don't read anything from the address, and treat it as opaque;
2079 it's the address itself that we assume is unique per-thread. */
2080 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2081 return fast_tpoint_collect_result::not_collecting;
2082
2083 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2084 }
2085
2086 /* The reason we resume in the caller is that we want to be able
2087 to pass lwp->status_pending as WSTAT, and we need to clear
2088 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2089 refuses to resume. */
2090
2091 static int
2092 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2093 {
2094 struct thread_info *saved_thread;
2095
2096 saved_thread = current_thread;
2097 current_thread = get_lwp_thread (lwp);
2098
2099 if ((wstat == NULL
2100 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2101 && supports_fast_tracepoints ()
2102 && agent_loaded_p ())
2103 {
2104 struct fast_tpoint_collect_status status;
2105
2106 if (debug_threads)
2107 debug_printf ("Checking whether LWP %ld needs to move out of the "
2108 "jump pad.\n",
2109 lwpid_of (current_thread));
2110
2111 fast_tpoint_collect_result r
2112 = linux_fast_tracepoint_collecting (lwp, &status);
2113
2114 if (wstat == NULL
2115 || (WSTOPSIG (*wstat) != SIGILL
2116 && WSTOPSIG (*wstat) != SIGFPE
2117 && WSTOPSIG (*wstat) != SIGSEGV
2118 && WSTOPSIG (*wstat) != SIGBUS))
2119 {
2120 lwp->collecting_fast_tracepoint = r;
2121
2122 if (r != fast_tpoint_collect_result::not_collecting)
2123 {
2124 if (r == fast_tpoint_collect_result::before_insn
2125 && lwp->exit_jump_pad_bkpt == NULL)
2126 {
2127 /* Haven't executed the original instruction yet.
2128 Set breakpoint there, and wait till it's hit,
2129 then single-step until exiting the jump pad. */
2130 lwp->exit_jump_pad_bkpt
2131 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2132 }
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of "
2136 "the jump pad...it does\n",
2137 lwpid_of (current_thread));
2138 current_thread = saved_thread;
2139
2140 return 1;
2141 }
2142 }
2143 else
2144 {
2145 /* If we get a synchronous signal while collecting, *and*
2146 while executing the (relocated) original instruction,
2147 reset the PC to point at the tpoint address, before
2148 reporting to GDB. Otherwise, it's an IPA lib bug: just
2149 report the signal to GDB, and pray for the best. */
2150
2151 lwp->collecting_fast_tracepoint
2152 = fast_tpoint_collect_result::not_collecting;
2153
2154 if (r != fast_tpoint_collect_result::not_collecting
2155 && (status.adjusted_insn_addr <= lwp->stop_pc
2156 && lwp->stop_pc < status.adjusted_insn_addr_end))
2157 {
2158 siginfo_t info;
2159 struct regcache *regcache;
2160
2161 /* The si_addr on a few signals references the address
2162 of the faulting instruction. Adjust that as
2163 well. */
2164 if ((WSTOPSIG (*wstat) == SIGILL
2165 || WSTOPSIG (*wstat) == SIGFPE
2166 || WSTOPSIG (*wstat) == SIGBUS
2167 || WSTOPSIG (*wstat) == SIGSEGV)
2168 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2169 (PTRACE_TYPE_ARG3) 0, &info) == 0
2170 /* Final check just to make sure we don't clobber
2171 the siginfo of non-kernel-sent signals. */
2172 && (uintptr_t) info.si_addr == lwp->stop_pc)
2173 {
2174 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2175 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2176 (PTRACE_TYPE_ARG3) 0, &info);
2177 }
2178
2179 regcache = get_thread_regcache (current_thread, 1);
2180 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2181 lwp->stop_pc = status.tpoint_addr;
2182
2183 /* Cancel any fast tracepoint lock this thread was
2184 holding. */
2185 force_unlock_trace_buffer ();
2186 }
2187
2188 if (lwp->exit_jump_pad_bkpt != NULL)
2189 {
2190 if (debug_threads)
2191 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2192 "stopping all threads momentarily.\n");
2193
2194 stop_all_lwps (1, lwp);
2195
2196 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2197 lwp->exit_jump_pad_bkpt = NULL;
2198
2199 unstop_all_lwps (1, lwp);
2200
2201 gdb_assert (lwp->suspended >= 0);
2202 }
2203 }
2204 }
2205
2206 if (debug_threads)
2207 debug_printf ("Checking whether LWP %ld needs to move out of the "
2208 "jump pad...no\n",
2209 lwpid_of (current_thread));
2210
2211 current_thread = saved_thread;
2212 return 0;
2213 }
2214
2215 /* Enqueue one signal in the "signals to report later when out of the
2216 jump pad" list. */
2217
2218 static void
2219 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2220 {
2221 struct pending_signals *p_sig;
2222 struct thread_info *thread = get_lwp_thread (lwp);
2223
2224 if (debug_threads)
2225 debug_printf ("Deferring signal %d for LWP %ld.\n",
2226 WSTOPSIG (*wstat), lwpid_of (thread));
2227
2228 if (debug_threads)
2229 {
2230 struct pending_signals *sig;
2231
2232 for (sig = lwp->pending_signals_to_report;
2233 sig != NULL;
2234 sig = sig->prev)
2235 debug_printf (" Already queued %d\n",
2236 sig->signal);
2237
2238 debug_printf (" (no more currently queued signals)\n");
2239 }
2240
2241 /* Don't enqueue non-RT signals if they are already in the deferred
2242 queue. (SIGSTOP being the easiest signal to see ending up here
2243 twice.) */
2244 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2245 {
2246 struct pending_signals *sig;
2247
2248 for (sig = lwp->pending_signals_to_report;
2249 sig != NULL;
2250 sig = sig->prev)
2251 {
2252 if (sig->signal == WSTOPSIG (*wstat))
2253 {
2254 if (debug_threads)
2255 debug_printf ("Not requeuing already queued non-RT signal %d"
2256 " for LWP %ld\n",
2257 sig->signal,
2258 lwpid_of (thread));
2259 return;
2260 }
2261 }
2262 }
2263
2264 p_sig = XCNEW (struct pending_signals);
2265 p_sig->prev = lwp->pending_signals_to_report;
2266 p_sig->signal = WSTOPSIG (*wstat);
2267
2268 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2269 &p_sig->info);
2270
2271 lwp->pending_signals_to_report = p_sig;
2272 }
2273
2274 /* Dequeue one signal from the "signals to report later when out of
2275 the jump pad" list. */
2276
2277 static int
2278 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2279 {
2280 struct thread_info *thread = get_lwp_thread (lwp);
2281
2282 if (lwp->pending_signals_to_report != NULL)
2283 {
2284 struct pending_signals **p_sig;
2285
2286 p_sig = &lwp->pending_signals_to_report;
2287 while ((*p_sig)->prev != NULL)
2288 p_sig = &(*p_sig)->prev;
2289
2290 *wstat = W_STOPCODE ((*p_sig)->signal);
2291 if ((*p_sig)->info.si_signo != 0)
2292 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2293 &(*p_sig)->info);
2294 free (*p_sig);
2295 *p_sig = NULL;
2296
2297 if (debug_threads)
2298 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2299 WSTOPSIG (*wstat), lwpid_of (thread));
2300
2301 if (debug_threads)
2302 {
2303 struct pending_signals *sig;
2304
2305 for (sig = lwp->pending_signals_to_report;
2306 sig != NULL;
2307 sig = sig->prev)
2308 debug_printf (" Still queued %d\n",
2309 sig->signal);
2310
2311 debug_printf (" (no more queued signals)\n");
2312 }
2313
2314 return 1;
2315 }
2316
2317 return 0;
2318 }
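
/* Illustrative sketch, not from the original source: the
   deferred-signal round trip.  The function name and signal number
   are hypothetical examples.  */
#if 0
static void
example_defer_and_replay (struct lwp_info *lwp)
{
  int wstat = W_STOPCODE (SIGUSR1);

  enqueue_one_deferred_signal (lwp, &wstat);
  /* ... later, once the LWP has left the jump pad ...  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    {
      /* WSTAT holds W_STOPCODE (SIGUSR1) again, and the cached
         siginfo has been written back with PTRACE_SETSIGINFO.  */
    }
}
#endif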
2319
2320 /* Fetch the possibly triggered data watchpoint info and store it in
2321 CHILD.
2322
2323 On some archs, like x86, that use debug registers to set
2324 watchpoints, the way to know which watched address trapped is
2325 to check the register that is used to select which address to
2326 watch. The problem is that, between setting the watchpoint and
2327 reading back which data address trapped, the user may change the
2328 set of watchpoints, and, as a consequence, GDB changes the debug
2329 registers in the inferior. To avoid reading back a stale
2330 stopped-data-address when that happens, we cache in CHILD the
2331 fact that a watchpoint trapped, and the corresponding data
2332 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
2333 changes the debug registers meanwhile, we still have the cached data to rely on. */
2334
2335 static int
2336 check_stopped_by_watchpoint (struct lwp_info *child)
2337 {
2338 if (the_low_target.stopped_by_watchpoint != NULL)
2339 {
2340 struct thread_info *saved_thread;
2341
2342 saved_thread = current_thread;
2343 current_thread = get_lwp_thread (child);
2344
2345 if (the_low_target.stopped_by_watchpoint ())
2346 {
2347 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2348
2349 if (the_low_target.stopped_data_address != NULL)
2350 child->stopped_data_address
2351 = the_low_target.stopped_data_address ();
2352 else
2353 child->stopped_data_address = 0;
2354 }
2355
2356 current_thread = saved_thread;
2357 }
2358
2359 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2360 }
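
/* Editor's note, not from the original source: callers test
   CHILD->stop_reason after this returns; CHILD->stopped_data_address
   is only meaningful when the reason is TARGET_STOPPED_BY_WATCHPOINT,
   and is 0 when the target cannot report the trapped address.  */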
2361
2362 /* Return the ptrace options that we want to try to enable. */
2363
2364 static int
2365 linux_low_ptrace_options (int attached)
2366 {
2367 int options = 0;
2368
2369 if (!attached)
2370 options |= PTRACE_O_EXITKILL;
2371
2372 if (report_fork_events)
2373 options |= PTRACE_O_TRACEFORK;
2374
2375 if (report_vfork_events)
2376 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2377
2378 if (report_exec_events)
2379 options |= PTRACE_O_TRACEEXEC;
2380
2381 options |= PTRACE_O_TRACESYSGOOD;
2382
2383 return options;
2384 }
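
/* Editor's note, not from the original source: for a freshly started
   (non-attached) inferior with fork, vfork and exec reporting all
   enabled, the mask computed above is PTRACE_O_EXITKILL
   | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE
   | PTRACE_O_TRACEEXEC | PTRACE_O_TRACESYSGOOD.  */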
2385
2386 /* Do low-level handling of the event, and check if we should go on
2387 and pass it to caller code. Return the affected lwp if we are, or
2388 NULL otherwise. */
2389
2390 static struct lwp_info *
2391 linux_low_filter_event (int lwpid, int wstat)
2392 {
2393 struct lwp_info *child;
2394 struct thread_info *thread;
2395 int have_stop_pc = 0;
2396
2397 child = find_lwp_pid (pid_to_ptid (lwpid));
2398
2399 /* Check for stop events reported by a process we didn't already
2400 know about - anything not already in our LWP list.
2401
2402 If we're expecting to receive stopped processes after
2403 fork, vfork, and clone events, then we'll just add the
2404 new one to our list and go back to waiting for the event
2405 to be reported - the stopped process might be returned
2406 from waitpid before or after the event is.
2407
2408 But note the case of a non-leader thread exec'ing after the
2409 leader has exited and gone from our lists (because
2410 check_zombie_leaders deleted it). The non-leader thread
2411 changes its tid to the tgid. */
2412
2413 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2414 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2415 {
2416 ptid_t child_ptid;
2417
2418 /* A multi-thread exec after we had seen the leader exiting. */
2419 if (debug_threads)
2420 {
2421 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2422 "after exec.\n", lwpid);
2423 }
2424
2425 child_ptid = ptid_build (lwpid, lwpid, 0);
2426 child = add_lwp (child_ptid);
2427 child->stopped = 1;
2428 current_thread = child->thread;
2429 }
2430
2431 /* If we didn't find a process, one of two things presumably happened:
2432 - A process we started and then detached from has exited. Ignore it.
2433 - A process we are controlling has forked and the new child's stop
2434 was reported to us by the kernel. Save its PID. */
2435 if (child == NULL && WIFSTOPPED (wstat))
2436 {
2437 add_to_pid_list (&stopped_pids, lwpid, wstat);
2438 return NULL;
2439 }
2440 else if (child == NULL)
2441 return NULL;
2442
2443 thread = get_lwp_thread (child);
2444
2445 child->stopped = 1;
2446
2447 child->last_status = wstat;
2448
2449 /* Check if the thread has exited. */
2450 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2451 {
2452 if (debug_threads)
2453 debug_printf ("LLFE: %d exited.\n", lwpid);
2454
2455 if (finish_step_over (child))
2456 {
2457 /* Unsuspend all other LWPs, and set them back running again. */
2458 unsuspend_all_lwps (child);
2459 }
2460
2461 /* If there is at least one more LWP, then the exit signal was
2462 not the end of the debugged application and should be
2463 ignored, unless GDB wants to hear about thread exits. */
2464 if (report_thread_events
2465 || last_thread_of_process_p (pid_of (thread)))
2466 {
2467 /* Since events are serialized to the GDB core, we can't
2468 report this one right now. Leave the status pending for
2469 the next time we're able to report it. */
2470 mark_lwp_dead (child, wstat);
2471 return child;
2472 }
2473 else
2474 {
2475 delete_lwp (child);
2476 return NULL;
2477 }
2478 }
2479
2480 gdb_assert (WIFSTOPPED (wstat));
2481
2482 if (WIFSTOPPED (wstat))
2483 {
2484 struct process_info *proc;
2485
2486 /* Architecture-specific setup after inferior is running. */
2487 proc = find_process_pid (pid_of (thread));
2488 if (proc->tdesc == NULL)
2489 {
2490 if (proc->attached)
2491 {
2492 /* This needs to happen after we have attached to the
2493 inferior and it is stopped for the first time, but
2494 before we access any inferior registers. */
2495 linux_arch_setup_thread (thread);
2496 }
2497 else
2498 {
2499 /* The process is started, but GDBserver will do
2500 architecture-specific setup after the program stops at
2501 the first instruction. */
2502 child->status_pending_p = 1;
2503 child->status_pending = wstat;
2504 return child;
2505 }
2506 }
2507 }
2508
2509 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2510 {
2511 struct process_info *proc = find_process_pid (pid_of (thread));
2512 int options = linux_low_ptrace_options (proc->attached);
2513
2514 linux_enable_event_reporting (lwpid, options);
2515 child->must_set_ptrace_flags = 0;
2516 }
2517
2518 /* Always update syscall_state, even if it will be filtered later. */
2519 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2520 {
2521 child->syscall_state
2522 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2523 ? TARGET_WAITKIND_SYSCALL_RETURN
2524 : TARGET_WAITKIND_SYSCALL_ENTRY);
2525 }
2526 else
2527 {
2528 /* Almost all other ptrace-stops are known to be outside of system
2529 calls, with further exceptions in handle_extended_wait. */
2530 child->syscall_state = TARGET_WAITKIND_IGNORE;
2531 }
2532
2533 /* Be careful to not overwrite stop_pc until save_stop_reason is
2534 called. */
2535 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2536 && linux_is_extended_waitstatus (wstat))
2537 {
2538 child->stop_pc = get_pc (child);
2539 if (handle_extended_wait (&child, wstat))
2540 {
2541 /* The event has been handled, so just return without
2542 reporting it. */
2543 return NULL;
2544 }
2545 }
2546
2547 if (linux_wstatus_maybe_breakpoint (wstat))
2548 {
2549 if (save_stop_reason (child))
2550 have_stop_pc = 1;
2551 }
2552
2553 if (!have_stop_pc)
2554 child->stop_pc = get_pc (child);
2555
2556 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2557 && child->stop_expected)
2558 {
2559 if (debug_threads)
2560 debug_printf ("Expected stop.\n");
2561 child->stop_expected = 0;
2562
2563 if (thread->last_resume_kind == resume_stop)
2564 {
2565 /* We want to report the stop to the core. Treat the
2566 SIGSTOP as a normal event. */
2567 if (debug_threads)
2568 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2569 target_pid_to_str (ptid_of (thread)));
2570 }
2571 else if (stopping_threads != NOT_STOPPING_THREADS)
2572 {
2573 /* Stopping threads. We don't want this SIGSTOP to end up
2574 pending. */
2575 if (debug_threads)
2576 debug_printf ("LLW: SIGSTOP caught for %s "
2577 "while stopping threads.\n",
2578 target_pid_to_str (ptid_of (thread)));
2579 return NULL;
2580 }
2581 else
2582 {
2583 /* This is a delayed SIGSTOP. Filter out the event. */
2584 if (debug_threads)
2585 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2586 child->stepping ? "step" : "continue",
2587 target_pid_to_str (ptid_of (thread)));
2588
2589 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2590 return NULL;
2591 }
2592 }
2593
2594 child->status_pending_p = 1;
2595 child->status_pending = wstat;
2596 return child;
2597 }
2598
2599 /* Return true if THREAD is doing hardware single step. */
2600
2601 static int
2602 maybe_hw_step (struct thread_info *thread)
2603 {
2604 if (can_hardware_single_step ())
2605 return 1;
2606 else
2607 {
2608 /* GDBserver must insert single-step breakpoint for software
2609 single step. */
2610 gdb_assert (has_single_step_breakpoints (thread));
2611 return 0;
2612 }
2613 }
2614
2615 /* Resume LWPs that are currently stopped without any pending status
2616 to report, but are resumed from the core's perspective. */
2617
2618 static void
2619 resume_stopped_resumed_lwps (thread_info *thread)
2620 {
2621 struct lwp_info *lp = get_thread_lwp (thread);
2622
2623 if (lp->stopped
2624 && !lp->suspended
2625 && !lp->status_pending_p
2626 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2627 {
2628 int step = 0;
2629
2630 if (thread->last_resume_kind == resume_step)
2631 step = maybe_hw_step (thread);
2632
2633 if (debug_threads)
2634 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2635 target_pid_to_str (ptid_of (thread)),
2636 paddress (lp->stop_pc),
2637 step);
2638
2639 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2640 }
2641 }
2642
2643 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2644 match FILTER_PTID (leaving others pending). The PTIDs can be:
2645 minus_one_ptid, to specify any child; a pid PTID, specifying all
2646 lwps of a thread group; or a PTID representing a single lwp. Store
2647 the stop status through the status pointer WSTAT. OPTIONS is
2648 passed to the waitpid call. Return 0 if no event was found and
2649 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2650 were found. Return the PID of the stopped child otherwise. */
2651
2652 static int
2653 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2654 int *wstatp, int options)
2655 {
2656 struct thread_info *event_thread;
2657 struct lwp_info *event_child, *requested_child;
2658 sigset_t block_mask, prev_mask;
2659
2660 retry:
2661 /* N.B. event_thread points to the thread_info struct that contains
2662 event_child. Keep them in sync. */
2663 event_thread = NULL;
2664 event_child = NULL;
2665 requested_child = NULL;
2666
2667 /* Check for a lwp with a pending status. */
2668
2669 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2670 {
2671 event_thread = (struct thread_info *)
2672 find_inferior_in_random (&all_threads, status_pending_p_callback,
2673 &filter_ptid);
2674 if (event_thread != NULL)
2675 event_child = get_thread_lwp (event_thread);
2676 if (debug_threads && event_thread)
2677 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2678 }
2679 else if (!ptid_equal (filter_ptid, null_ptid))
2680 {
2681 requested_child = find_lwp_pid (filter_ptid);
2682
2683 if (stopping_threads == NOT_STOPPING_THREADS
2684 && requested_child->status_pending_p
2685 && (requested_child->collecting_fast_tracepoint
2686 != fast_tpoint_collect_result::not_collecting))
2687 {
2688 enqueue_one_deferred_signal (requested_child,
2689 &requested_child->status_pending);
2690 requested_child->status_pending_p = 0;
2691 requested_child->status_pending = 0;
2692 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2693 }
2694
2695 if (requested_child->suspended
2696 && requested_child->status_pending_p)
2697 {
2698 internal_error (__FILE__, __LINE__,
2699 "requesting an event out of a"
2700 " suspended child?");
2701 }
2702
2703 if (requested_child->status_pending_p)
2704 {
2705 event_child = requested_child;
2706 event_thread = get_lwp_thread (event_child);
2707 }
2708 }
2709
2710 if (event_child != NULL)
2711 {
2712 if (debug_threads)
2713 debug_printf ("Got an event from pending child %ld (%04x)\n",
2714 lwpid_of (event_thread), event_child->status_pending);
2715 *wstatp = event_child->status_pending;
2716 event_child->status_pending_p = 0;
2717 event_child->status_pending = 0;
2718 current_thread = event_thread;
2719 return lwpid_of (event_thread);
2720 }
2721
2722 /* But if we don't find a pending event, we'll have to wait.
2723
2724 We only enter this loop if no process has a pending wait status.
2725 Thus any action taken in response to a wait status inside this
2726 loop is responding as soon as we detect the status, not after any
2727 pending events. */
2728
2729 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2730 all signals while here. */
2731 sigfillset (&block_mask);
2732 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2733
2734 /* Always pull all events out of the kernel. We'll randomly select
2735 an event LWP out of all that have events, to prevent
2736 starvation. */
2737 while (event_child == NULL)
2738 {
2739 pid_t ret = 0;
2740
2741 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2742 quirks:
2743
2744 - If the thread group leader exits while other threads in the
2745 thread group still exist, waitpid(TGID, ...) hangs. That
2746 waitpid won't return an exit status until the other threads
2747 in the group are reaped.
2748
2749 - When a non-leader thread execs, that thread just vanishes
2750 without reporting an exit (so we'd hang if we waited for it
2751 explicitly in that case). The exec event is reported to
2752 the TGID pid. */
2753 errno = 0;
2754 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2755
2756 if (debug_threads)
2757 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2758 ret, errno ? strerror (errno) : "ERRNO-OK");
2759
2760 if (ret > 0)
2761 {
2762 if (debug_threads)
2763 {
2764 debug_printf ("LLW: waitpid %ld received %s\n",
2765 (long) ret, status_to_str (*wstatp));
2766 }
2767
2768 /* Filter all events. IOW, leave all events pending. We'll
2769 randomly select an event LWP out of all that have events
2770 below. */
2771 linux_low_filter_event (ret, *wstatp);
2772 /* Retry until nothing comes out of waitpid. A single
2773 SIGCHLD can indicate more than one child stopped. */
2774 continue;
2775 }
2776
2777 /* Now that we've pulled all events out of the kernel, resume
2778 LWPs that don't have an interesting event to report. */
2779 if (stopping_threads == NOT_STOPPING_THREADS)
2780 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2781
2782 /* ... and find an LWP with a status to report to the core, if
2783 any. */
2784 event_thread = (struct thread_info *)
2785 find_inferior_in_random (&all_threads, status_pending_p_callback,
2786 &filter_ptid);
2787 if (event_thread != NULL)
2788 {
2789 event_child = get_thread_lwp (event_thread);
2790 *wstatp = event_child->status_pending;
2791 event_child->status_pending_p = 0;
2792 event_child->status_pending = 0;
2793 break;
2794 }
2795
2796 /* Check for zombie thread group leaders. Those can't be reaped
2797 until all other threads in the thread group are. */
2798 check_zombie_leaders ();
2799
2800 /* If there are no resumed children left in the set of LWPs we
2801 want to wait for, bail. We can't just block in
2802 waitpid/sigsuspend, because lwps might have been left stopped
2803 in trace-stop state, and we'd be stuck forever waiting for
2804 their status to change (which would only happen if we resumed
2805 them). Even if WNOHANG is set, this return code is preferred
2806 over 0 (below), as it is more detailed. */
2807 if ((find_inferior (&all_threads,
2808 not_stopped_callback,
2809 &wait_ptid) == NULL))
2810 {
2811 if (debug_threads)
2812 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2813 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2814 return -1;
2815 }
2816
2817 /* No interesting event to report to the caller. */
2818 if ((options & WNOHANG))
2819 {
2820 if (debug_threads)
2821 debug_printf ("WNOHANG set, no event found\n");
2822
2823 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2824 return 0;
2825 }
2826
2827 /* Block until we get an event reported with SIGCHLD. */
2828 if (debug_threads)
2829 debug_printf ("sigsuspend'ing\n");
2830
2831 sigsuspend (&prev_mask);
2832 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2833 goto retry;
2834 }
2835
2836 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2837
2838 current_thread = event_thread;
2839
2840 return lwpid_of (event_thread);
2841 }
2842
2843 /* Wait for an event from child(ren) PTID. PTIDs can be:
2844 minus_one_ptid, to specify any child; a pid PTID, specifying all
2845 lwps of a thread group; or a PTID representing a single lwp. Store
2846 the stop status through the status pointer WSTAT. OPTIONS is
2847 passed to the waitpid call. Return 0 if no event was found and
2848 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2849 were found. Return the PID of the stopped child otherwise. */
2850
2851 static int
2852 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2853 {
2854 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2855 }
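
/* Illustrative sketch, not from the original source: a non-blocking
   poll for any child, following the return-value contract documented
   above.  The function name is hypothetical.  */
#if 0
static void
example_poll_any_child (void)
{
  int wstat;
  int pid = linux_wait_for_event (minus_one_ptid, &wstat,
                                  __WALL | WNOHANG);

  if (pid == 0)
    ;  /* WNOHANG and no event ready yet.  */
  else if (pid == -1)
    ;  /* No unwaited-for children left.  */
  else
    ;  /* WSTAT holds PID's stop or exit status.  */
}
#endif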
2856
2857 /* Count the LWPs that have had events. */
2858
2859 static int
2860 count_events_callback (thread_info *thread, void *data)
2861 {
2862 struct lwp_info *lp = get_thread_lwp (thread);
2863 int *count = (int *) data;
2864
2865 gdb_assert (count != NULL);
2866
2867 /* Count only resumed LWPs that have an event pending. */
2868 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2869 && lp->status_pending_p)
2870 (*count)++;
2871
2872 return 0;
2873 }
2874
2875 /* Select the LWP (if any) that is currently being single-stepped. */
2876
2877 static int
2878 select_singlestep_lwp_callback (thread_info *thread, void *data)
2879 {
2880 struct lwp_info *lp = get_thread_lwp (thread);
2881
2882 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2883 && thread->last_resume_kind == resume_step
2884 && lp->status_pending_p)
2885 return 1;
2886 else
2887 return 0;
2888 }
2889
2890 /* Select the Nth LWP that has had an event. */
2891
2892 static int
2893 select_event_lwp_callback (thread_info *thread, void *data)
2894 {
2895 struct lwp_info *lp = get_thread_lwp (thread);
2896 int *selector = (int *) data;
2897
2898 gdb_assert (selector != NULL);
2899
2900 /* Select only resumed LWPs that have an event pending. */
2901 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2902 && lp->status_pending_p)
2903 if ((*selector)-- == 0)
2904 return 1;
2905
2906 return 0;
2907 }
2908
2909 /* Select one LWP out of those that have events pending. */
2910
2911 static void
2912 select_event_lwp (struct lwp_info **orig_lp)
2913 {
2914 int num_events = 0;
2915 int random_selector;
2916 struct thread_info *event_thread = NULL;
2917
2918 /* In all-stop, give preference to the LWP that is being
2919 single-stepped. There will be at most one, and it's the LWP that
2920 the core is most interested in. If we didn't do this, then we'd
2921 have to handle pending step SIGTRAPs somehow in case the core
2922 later continues the previously-stepped thread, otherwise we'd
2923 report the pending SIGTRAP, and the core, not having stepped the
2924 thread, wouldn't understand what the trap was for, and therefore
2925 would report it to the user as a random signal. */
2926 if (!non_stop)
2927 {
2928 event_thread
2929 = (struct thread_info *) find_inferior (&all_threads,
2930 select_singlestep_lwp_callback,
2931 NULL);
2932 if (event_thread != NULL)
2933 {
2934 if (debug_threads)
2935 debug_printf ("SEL: Select single-step %s\n",
2936 target_pid_to_str (ptid_of (event_thread)));
2937 }
2938 }
2939 if (event_thread == NULL)
2940 {
2941 /* No single-stepping LWP. Select one at random, out of those
2942 which have had events. */
2943
2944 /* First see how many events we have. */
2945 find_inferior (&all_threads, count_events_callback, &num_events);
2946 gdb_assert (num_events > 0);
2947
2948 /* Now randomly pick a LWP out of those that have had
2949 events. */
2950 random_selector = (int)
2951 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
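/* Editor's note: rand () / (RAND_MAX + 1.0) is uniform in [0, 1),
so with e.g. num_events == 3 this yields 0, 1 or 2 with equal
probability.  */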
2952
2953 if (debug_threads && num_events > 1)
2954 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2955 num_events, random_selector);
2956
2957 event_thread
2958 = (struct thread_info *) find_inferior (&all_threads,
2959 select_event_lwp_callback,
2960 &random_selector);
2961 }
2962
2963 if (event_thread != NULL)
2964 {
2965 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2966
2967 /* Switch the event LWP. */
2968 *orig_lp = event_lp;
2969 }
2970 }
2971
2972 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2973 non-NULL. */
2974
2975 static void
2976 unsuspend_all_lwps (struct lwp_info *except)
2977 {
2978 for_each_thread ([&] (thread_info *thread)
2979 {
2980 lwp_info *lwp = get_thread_lwp (thread);
2981
2982 if (lwp != except)
2983 lwp_suspended_decr (lwp);
2984 });
2985 }
2986
2987 static void move_out_of_jump_pad_callback (thread_info *thread);
2988 static int stuck_in_jump_pad_callback (thread_info *thread, void *data);
2989 static int lwp_running (thread_info *thread, void *data);
2990 static ptid_t linux_wait_1 (ptid_t ptid,
2991 struct target_waitstatus *ourstatus,
2992 int target_options);
2993
2994 /* Stabilize threads (move out of jump pads).
2995
2996 If a thread is midway collecting a fast tracepoint, we need to
2997 finish the collection and move it out of the jump pad before
2998 reporting the signal.
2999
3000 This avoids recursion while collecting (when a signal arrives
3001 midway, and the signal handler itself collects), which would trash
3002 the trace buffer. In case the user set a breakpoint in a signal
3003 handler, this avoids the backtrace showing the jump pad, etc..
3004 Most importantly, there are certain things we can't do safely if
3005 threads are stopped in a jump pad (or in one of its callees). For
3006 example:
3007
3008 - starting a new trace run. A thread still collecting the
3009 previous run, could trash the trace buffer when resumed. The trace
3010 buffer control structures would have been reset but the thread had
3011 no way to tell. The thread could even be partway through memcpy'ing to the
3012 buffer, which would mean that when resumed, it would clobber the
3013 trace buffer that had been set for a new run.
3014
3015 - we can't rewrite/reuse the jump pads for new tracepoints
3016 safely. Say you do tstart while a thread is stopped midway while
3017 collecting. When the thread is later resumed, it finishes the
3018 collection, and returns to the jump pad, to execute the original
3019 instruction that was under the tracepoint jump at the time the
3020 older run had been started. If the jump pad had been rewritten
3021 since for something else in the new run, the thread would now
3022 execute the wrong / random instructions. */
3023
3024 static void
3025 linux_stabilize_threads (void)
3026 {
3027 struct thread_info *saved_thread;
3028 struct thread_info *thread_stuck;
3029
3030 thread_stuck
3031 = (struct thread_info *) find_inferior (&all_threads,
3032 stuck_in_jump_pad_callback,
3033 NULL);
3034 if (thread_stuck != NULL)
3035 {
3036 if (debug_threads)
3037 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3038 lwpid_of (thread_stuck));
3039 return;
3040 }
3041
3042 saved_thread = current_thread;
3043
3044 stabilizing_threads = 1;
3045
3046 /* Kick 'em all. */
3047 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3048
3049 /* Loop until all are stopped out of the jump pads. */
3050 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3051 {
3052 struct target_waitstatus ourstatus;
3053 struct lwp_info *lwp;
3054 int wstat;
3055
3056 /* Note that we go through the full wait event loop. While
3057 moving threads out of the jump pad, we need to be able to step
3058 over internal breakpoints and such. */
3059 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3060
3061 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3062 {
3063 lwp = get_thread_lwp (current_thread);
3064
3065 /* Lock it. */
3066 lwp_suspended_inc (lwp);
3067
3068 if (ourstatus.value.sig != GDB_SIGNAL_0
3069 || current_thread->last_resume_kind == resume_stop)
3070 {
3071 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3072 enqueue_one_deferred_signal (lwp, &wstat);
3073 }
3074 }
3075 }
3076
3077 unsuspend_all_lwps (NULL);
3078
3079 stabilizing_threads = 0;
3080
3081 current_thread = saved_thread;
3082
3083 if (debug_threads)
3084 {
3085 thread_stuck
3086 = (struct thread_info *) find_inferior (&all_threads,
3087 stuck_in_jump_pad_callback,
3088 NULL);
3089 if (thread_stuck != NULL)
3090 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3091 lwpid_of (thread_stuck));
3092 }
3093 }
3094
3095 /* Convenience function that is called when the kernel reports an
3096 event that is not passed out to GDB. */
3097
3098 static ptid_t
3099 ignore_event (struct target_waitstatus *ourstatus)
3100 {
3101 /* If we got an event, there may still be others, as a single
3102 SIGCHLD can indicate more than one child stopped. This forces
3103 another target_wait call. */
3104 async_file_mark ();
3105
3106 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3107 return null_ptid;
3108 }
3109
3110 /* Convenience function that is called when the kernel reports an exit
3111 event. This decides whether to report the event to GDB as a
3112 process exit event, a thread exit event, or to suppress the
3113 event. */
3114
3115 static ptid_t
3116 filter_exit_event (struct lwp_info *event_child,
3117 struct target_waitstatus *ourstatus)
3118 {
3119 struct thread_info *thread = get_lwp_thread (event_child);
3120 ptid_t ptid = ptid_of (thread);
3121
3122 if (!last_thread_of_process_p (pid_of (thread)))
3123 {
3124 if (report_thread_events)
3125 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3126 else
3127 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3128
3129 delete_lwp (event_child);
3130 }
3131 return ptid;
3132 }
3133
3134 /* Returns 1 if GDB is interested in any event_child syscalls. */
3135
3136 static int
3137 gdb_catching_syscalls_p (struct lwp_info *event_child)
3138 {
3139 struct thread_info *thread = get_lwp_thread (event_child);
3140 struct process_info *proc = get_thread_process (thread);
3141
3142 return !proc->syscalls_to_catch.empty ();
3143 }
3144
3145 /* Returns 1 if GDB is interested in the event_child syscall.
3146 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3147
3148 static int
3149 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3150 {
3151 int sysno;
3152 struct thread_info *thread = get_lwp_thread (event_child);
3153 struct process_info *proc = get_thread_process (thread);
3154
3155 if (proc->syscalls_to_catch.empty ())
3156 return 0;
3157
3158 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3159 return 1;
3160
3161 get_syscall_trapinfo (event_child, &sysno);
3162
3163 for (int iter : proc->syscalls_to_catch)
3164 if (iter == sysno)
3165 return 1;
3166
3167 return 0;
3168 }
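
/* Editor's note, not from the original source: if the catch list is
   exactly { ANY_SYSCALL }, every syscall entry/return is interesting;
   otherwise only the listed syscall numbers are, as resolved through
   get_syscall_trapinfo above.  */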
3169
3170 /* Wait for an event from the inferior, and return its status in OURSTATUS. */
3171
3172 static ptid_t
3173 linux_wait_1 (ptid_t ptid,
3174 struct target_waitstatus *ourstatus, int target_options)
3175 {
3176 int w;
3177 struct lwp_info *event_child;
3178 int options;
3179 int pid;
3180 int step_over_finished;
3181 int bp_explains_trap;
3182 int maybe_internal_trap;
3183 int report_to_gdb;
3184 int trace_event;
3185 int in_step_range;
3186 int any_resumed;
3187
3188 if (debug_threads)
3189 {
3190 debug_enter ();
3191 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3192 }
3193
3194 /* Translate generic target options into linux options. */
3195 options = __WALL;
3196 if (target_options & TARGET_WNOHANG)
3197 options |= WNOHANG;
3198
3199 bp_explains_trap = 0;
3200 trace_event = 0;
3201 in_step_range = 0;
3202 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3203
3204 /* Find a resumed LWP, if any. */
3205 if (find_inferior (&all_threads,
3206 status_pending_p_callback,
3207 &minus_one_ptid) != NULL)
3208 any_resumed = 1;
3209 else if ((find_inferior (&all_threads,
3210 not_stopped_callback,
3211 &minus_one_ptid) != NULL))
3212 any_resumed = 1;
3213 else
3214 any_resumed = 0;
3215
3216 if (ptid_equal (step_over_bkpt, null_ptid))
3217 pid = linux_wait_for_event (ptid, &w, options);
3218 else
3219 {
3220 if (debug_threads)
3221 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3222 target_pid_to_str (step_over_bkpt));
3223 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3224 }
3225
3226 if (pid == 0 || (pid == -1 && !any_resumed))
3227 {
3228 gdb_assert (target_options & TARGET_WNOHANG);
3229
3230 if (debug_threads)
3231 {
3232 debug_printf ("linux_wait_1 ret = null_ptid, "
3233 "TARGET_WAITKIND_IGNORE\n");
3234 debug_exit ();
3235 }
3236
3237 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3238 return null_ptid;
3239 }
3240 else if (pid == -1)
3241 {
3242 if (debug_threads)
3243 {
3244 debug_printf ("linux_wait_1 ret = null_ptid, "
3245 "TARGET_WAITKIND_NO_RESUMED\n");
3246 debug_exit ();
3247 }
3248
3249 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3250 return null_ptid;
3251 }
3252
3253 event_child = get_thread_lwp (current_thread);
3254
3255 /* linux_wait_for_event only returns an exit status for the last
3256 child of a process. Report it. */
3257 if (WIFEXITED (w) || WIFSIGNALED (w))
3258 {
3259 if (WIFEXITED (w))
3260 {
3261 ourstatus->kind = TARGET_WAITKIND_EXITED;
3262 ourstatus->value.integer = WEXITSTATUS (w);
3263
3264 if (debug_threads)
3265 {
3266 debug_printf ("linux_wait_1 ret = %s, exited with "
3267 "retcode %d\n",
3268 target_pid_to_str (ptid_of (current_thread)),
3269 WEXITSTATUS (w));
3270 debug_exit ();
3271 }
3272 }
3273 else
3274 {
3275 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3276 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3277
3278 if (debug_threads)
3279 {
3280 debug_printf ("linux_wait_1 ret = %s, terminated with "
3281 "signal %d\n",
3282 target_pid_to_str (ptid_of (current_thread)),
3283 WTERMSIG (w));
3284 debug_exit ();
3285 }
3286 }
3287
3288 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3289 return filter_exit_event (event_child, ourstatus);
3290
3291 return ptid_of (current_thread);
3292 }
3293
3294 /* If a step-over executes a breakpoint instruction, then in the case
3295 of a hardware single step it means a gdb/gdbserver breakpoint had
3296 been planted on top of a permanent breakpoint, while in the case of
3297 a software single step it may just mean that gdbserver hit the
3298 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3299 to point at the breakpoint address.
3300 So in the case of a hardware single step, advance the PC manually
3301 past the breakpoint; in the case of a software single step, advance
3302 it only if it's not the single_step_breakpoint we are hitting.
3303 This avoids the program trapping on a permanent breakpoint
3304 forever. */
3305 if (!ptid_equal (step_over_bkpt, null_ptid)
3306 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3307 && (event_child->stepping
3308 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3309 {
3310 int increment_pc = 0;
3311 int breakpoint_kind = 0;
3312 CORE_ADDR stop_pc = event_child->stop_pc;
3313
3314 breakpoint_kind =
3315 the_target->breakpoint_kind_from_current_state (&stop_pc);
3316 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3317
3318 if (debug_threads)
3319 {
3320 debug_printf ("step-over for %s executed software breakpoint\n",
3321 target_pid_to_str (ptid_of (current_thread)));
3322 }
3323
3324 if (increment_pc != 0)
3325 {
3326 struct regcache *regcache
3327 = get_thread_regcache (current_thread, 1);
3328
3329 event_child->stop_pc += increment_pc;
3330 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3331
3332 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3333 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3334 }
3335 }
3336
3337 /* If this event was not handled before, and is not a SIGTRAP, we
3338 report it. SIGILL and SIGSEGV are also treated as traps in case
3339 a breakpoint is inserted at the current PC. If this target does
3340 not support internal breakpoints at all, we also report the
3341 SIGTRAP without further processing; it's of no concern to us. */
3342 maybe_internal_trap
3343 = (supports_breakpoints ()
3344 && (WSTOPSIG (w) == SIGTRAP
3345 || ((WSTOPSIG (w) == SIGILL
3346 || WSTOPSIG (w) == SIGSEGV)
3347 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3348
3349 if (maybe_internal_trap)
3350 {
3351 /* Handle anything that requires bookkeeping before deciding to
3352 report the event or continue waiting. */
3353
3354 /* First check if we can explain the SIGTRAP with an internal
3355 breakpoint, or if we should possibly report the event to GDB.
3356 Do this before anything that may remove or insert a
3357 breakpoint. */
3358 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3359
3360 /* We have a SIGTRAP, possibly a step-over dance has just
3361 finished. If so, tweak the state machine accordingly,
3362 reinsert breakpoints and delete any single-step
3363 breakpoints. */
3364 step_over_finished = finish_step_over (event_child);
3365
3366 /* Now invoke the callbacks of any internal breakpoints there. */
3367 check_breakpoints (event_child->stop_pc);
3368
3369 /* Handle tracepoint data collecting. This may overflow the
3370 trace buffer, and cause a tracing stop, removing
3371 breakpoints. */
3372 trace_event = handle_tracepoints (event_child);
3373
3374 if (bp_explains_trap)
3375 {
3376 if (debug_threads)
3377 debug_printf ("Hit a gdbserver breakpoint.\n");
3378 }
3379 }
3380 else
3381 {
3382 /* We have some other signal, possibly a step-over dance was in
3383 progress, and it should be cancelled too. */
3384 step_over_finished = finish_step_over (event_child);
3385 }
3386
3387 /* We have all the data we need. Either report the event to GDB, or
3388 resume threads and keep waiting for more. */
3389
3390 /* If we're collecting a fast tracepoint, finish the collection and
3391 move out of the jump pad before delivering a signal. See
3392 linux_stabilize_threads. */
3393
3394 if (WIFSTOPPED (w)
3395 && WSTOPSIG (w) != SIGTRAP
3396 && supports_fast_tracepoints ()
3397 && agent_loaded_p ())
3398 {
3399 if (debug_threads)
3400 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3401 "to defer or adjust it.\n",
3402 WSTOPSIG (w), lwpid_of (current_thread));
3403
3404 /* Allow debugging the jump pad itself. */
3405 if (current_thread->last_resume_kind != resume_step
3406 && maybe_move_out_of_jump_pad (event_child, &w))
3407 {
3408 enqueue_one_deferred_signal (event_child, &w);
3409
3410 if (debug_threads)
3411 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3412 WSTOPSIG (w), lwpid_of (current_thread));
3413
3414 linux_resume_one_lwp (event_child, 0, 0, NULL);
3415
3416 if (debug_threads)
3417 debug_exit ();
3418 return ignore_event (ourstatus);
3419 }
3420 }
3421
3422 if (event_child->collecting_fast_tracepoint
3423 != fast_tpoint_collect_result::not_collecting)
3424 {
3425 if (debug_threads)
3426 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3427 "Check if we're already there.\n",
3428 lwpid_of (current_thread),
3429 (int) event_child->collecting_fast_tracepoint);
3430
3431 trace_event = 1;
3432
3433 event_child->collecting_fast_tracepoint
3434 = linux_fast_tracepoint_collecting (event_child, NULL);
3435
3436 if (event_child->collecting_fast_tracepoint
3437 != fast_tpoint_collect_result::before_insn)
3438 {
3439 /* No longer need this breakpoint. */
3440 if (event_child->exit_jump_pad_bkpt != NULL)
3441 {
3442 if (debug_threads)
3443 debug_printf ("No longer need exit-jump-pad bkpt; removing it, "
3444 "stopping all threads momentarily.\n");
3445
3446 /* Other running threads could hit this breakpoint.
3447 We don't handle moribund locations like GDB does,
3448 instead we always pause all threads when removing
3449 breakpoints, so that any step-over or
3450 decr_pc_after_break adjustment is always taken
3451 care of while the breakpoint is still
3452 inserted. */
3453 stop_all_lwps (1, event_child);
3454
3455 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3456 event_child->exit_jump_pad_bkpt = NULL;
3457
3458 unstop_all_lwps (1, event_child);
3459
3460 gdb_assert (event_child->suspended >= 0);
3461 }
3462 }
3463
3464 if (event_child->collecting_fast_tracepoint
3465 == fast_tpoint_collect_result::not_collecting)
3466 {
3467 if (debug_threads)
3468 debug_printf ("fast tracepoint finished "
3469 "collecting successfully.\n");
3470
3471 /* We may have a deferred signal to report. */
3472 if (dequeue_one_deferred_signal (event_child, &w))
3473 {
3474 if (debug_threads)
3475 debug_printf ("dequeued one signal.\n");
3476 }
3477 else
3478 {
3479 if (debug_threads)
3480 debug_printf ("no deferred signals.\n");
3481
3482 if (stabilizing_threads)
3483 {
3484 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3485 ourstatus->value.sig = GDB_SIGNAL_0;
3486
3487 if (debug_threads)
3488 {
3489 debug_printf ("linux_wait_1 ret = %s, stopped "
3490 "while stabilizing threads\n",
3491 target_pid_to_str (ptid_of (current_thread)));
3492 debug_exit ();
3493 }
3494
3495 return ptid_of (current_thread);
3496 }
3497 }
3498 }
3499 }
3500
3501 /* Check whether GDB would be interested in this event. */
3502
3503 /* Check if GDB is interested in this syscall. */
3504 if (WIFSTOPPED (w)
3505 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3506 && !gdb_catch_this_syscall_p (event_child))
3507 {
3508 if (debug_threads)
3509 {
3510 debug_printf ("Ignored syscall for LWP %ld.\n",
3511 lwpid_of (current_thread));
3512 }
3513
3514 linux_resume_one_lwp (event_child, event_child->stepping,
3515 0, NULL);
3516
3517 if (debug_threads)
3518 debug_exit ();
3519 return ignore_event (ourstatus);
3520 }
3521
3522 /* If GDB is not interested in this signal, don't stop other
3523 threads, and don't report it to GDB. Just resume the inferior
3524 right away. We do this for threading-related signals as well as
3525 any that GDB specifically requested we ignore. But never ignore
3526 SIGSTOP if we sent it ourselves, and do not ignore signals when
3527 stepping - they may require special handling to skip the signal
3528 handler. Also never ignore signals that could be caused by a
3529 breakpoint. */
3530 if (WIFSTOPPED (w)
3531 && current_thread->last_resume_kind != resume_step
3532 && (
3533 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3534 (current_process ()->priv->thread_db != NULL
3535 && (WSTOPSIG (w) == __SIGRTMIN
3536 || WSTOPSIG (w) == __SIGRTMIN + 1))
3537 ||
3538 #endif
3539 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3540 && !(WSTOPSIG (w) == SIGSTOP
3541 && current_thread->last_resume_kind == resume_stop)
3542 && !linux_wstatus_maybe_breakpoint (w))))
3543 {
3544 siginfo_t info, *info_p;
3545
3546 if (debug_threads)
3547 debug_printf ("Ignored signal %d for LWP %ld.\n",
3548 WSTOPSIG (w), lwpid_of (current_thread));
3549
3550 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3551 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3552 info_p = &info;
3553 else
3554 info_p = NULL;
3555
3556 if (step_over_finished)
3557 {
3558 /* We cancelled this thread's step-over above. We still
3559 need to unsuspend all other LWPs, and set them back
3560 running again while the signal handler runs. */
3561 unsuspend_all_lwps (event_child);
3562
3563 /* Enqueue the pending signal info so that proceed_all_lwps
3564 doesn't lose it. */
3565 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3566
3567 proceed_all_lwps ();
3568 }
3569 else
3570 {
3571 linux_resume_one_lwp (event_child, event_child->stepping,
3572 WSTOPSIG (w), info_p);
3573 }
3574
3575 if (debug_threads)
3576 debug_exit ();
3577
3578 return ignore_event (ourstatus);
3579 }
3580
3581 /* Note that all addresses are always "out of the step range" when
3582 there's no range to begin with. */
3583 in_step_range = lwp_in_step_range (event_child);
3584
3585 /* If GDB wanted this thread to single step, and the thread is out
3586 of the step range, we always want to report the SIGTRAP, and let
3587 GDB handle it. Watchpoints should always be reported. So should
3588 signals we can't explain. A SIGTRAP we can't explain could be a
3589 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3590 do, we'll be able to handle GDB breakpoints on top of internal
3591 breakpoints, by handling the internal breakpoint and still
3592 reporting the event to GDB. If we don't, we're out of luck, GDB
3593 won't see the breakpoint hit. If we see a single-step event but
3594 the thread should be continuing, don't pass the trap to gdb.
3595 That indicates that we had previously finished a single-step but
3596 left the single-step pending -- see
3597 complete_ongoing_step_over. */
3598 report_to_gdb = (!maybe_internal_trap
3599 || (current_thread->last_resume_kind == resume_step
3600 && !in_step_range)
3601 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3602 || (!in_step_range
3603 && !bp_explains_trap
3604 && !trace_event
3605 && !step_over_finished
3606 && !(current_thread->last_resume_kind == resume_continue
3607 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3608 || (gdb_breakpoint_here (event_child->stop_pc)
3609 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3610 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3611 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3612
3613 run_breakpoint_commands (event_child->stop_pc);
3614
3615 /* We found no reason GDB would want us to stop. We either hit one
3616 of our own breakpoints, or finished an internal step GDB
3617 shouldn't know about. */
3618 if (!report_to_gdb)
3619 {
3620 if (debug_threads)
3621 {
3622 if (bp_explains_trap)
3623 debug_printf ("Hit a gdbserver breakpoint.\n");
3624 if (step_over_finished)
3625 debug_printf ("Step-over finished.\n");
3626 if (trace_event)
3627 debug_printf ("Tracepoint event.\n");
3628 if (lwp_in_step_range (event_child))
3629 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3630 paddress (event_child->stop_pc),
3631 paddress (event_child->step_range_start),
3632 paddress (event_child->step_range_end));
3633 }
3634
3635 /* We're not reporting this breakpoint to GDB, so apply the
3636 decr_pc_after_break adjustment to the inferior's regcache
3637 ourselves. */
3638
3639 if (the_low_target.set_pc != NULL)
3640 {
3641 struct regcache *regcache
3642 = get_thread_regcache (current_thread, 1);
3643 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3644 }
3645
3646 if (step_over_finished)
3647 {
3648 /* If we have finished stepping over a breakpoint, we've
3649 stopped and suspended all LWPs momentarily except the
3650 stepping one. This is where we resume them all again.
3651 We're going to keep waiting, so use proceed, which
3652 handles stepping over the next breakpoint. */
3653 unsuspend_all_lwps (event_child);
3654 }
3655 else
3656 {
3657 /* Remove the single-step breakpoints, if any. Note that
3658 there are no single-step breakpoints left if we finished
3659 stepping over. */
3660 if (can_software_single_step ()
3661 && has_single_step_breakpoints (current_thread))
3662 {
3663 stop_all_lwps (0, event_child);
3664 delete_single_step_breakpoints (current_thread);
3665 unstop_all_lwps (0, event_child);
3666 }
3667 }
3668
3669 if (debug_threads)
3670 debug_printf ("proceeding all threads.\n");
3671 proceed_all_lwps ();
3672
3673 if (debug_threads)
3674 debug_exit ();
3675
3676 return ignore_event (ourstatus);
3677 }
3678
3679 if (debug_threads)
3680 {
3681 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3682 {
3683 std::string str
3684 = target_waitstatus_to_string (&event_child->waitstatus);
3685
3686 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3687 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3688 }
3689 if (current_thread->last_resume_kind == resume_step)
3690 {
3691 if (event_child->step_range_start == event_child->step_range_end)
3692 debug_printf ("GDB wanted to single-step, reporting event.\n");
3693 else if (!lwp_in_step_range (event_child))
3694 debug_printf ("Out of step range, reporting event.\n");
3695 }
3696 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3697 debug_printf ("Stopped by watchpoint.\n");
3698 else if (gdb_breakpoint_here (event_child->stop_pc))
3699 debug_printf ("Stopped by GDB breakpoint.\n");
3700 debug_printf ("Hit a non-gdbserver trap event.\n");
3702 }
3703
3704 /* Alright, we're going to report a stop. */
3705
3706 /* Remove single-step breakpoints. */
3707 if (can_software_single_step ())
3708 {
3709 /* Decide whether to remove the single-step breakpoints. If we do
3710 remove them, stop all lwps first, so that other threads won't hit
3711 a breakpoint left behind in stale memory. */
3712 int remove_single_step_breakpoints_p = 0;
3713
3714 if (non_stop)
3715 {
3716 remove_single_step_breakpoints_p
3717 = has_single_step_breakpoints (current_thread);
3718 }
3719 else
3720 {
3721 /* In all-stop, a stop reply cancels all previous resume
3722 requests. Delete all single-step breakpoints. */
3723
3724 find_thread ([&] (thread_info *thread) {
3725 if (has_single_step_breakpoints (thread))
3726 {
3727 remove_single_step_breakpoints_p = 1;
3728 return true;
3729 }
3730
3731 return false;
3732 });
3733 }
3734
3735 if (remove_single_step_breakpoints_p)
3736 {
3737 /* If we remove single-step breakpoints from memory, stop all lwps,
3738 so that other threads won't hit a breakpoint in the stale
3739 memory. */
3740 stop_all_lwps (0, event_child);
3741
3742 if (non_stop)
3743 {
3744 gdb_assert (has_single_step_breakpoints (current_thread));
3745 delete_single_step_breakpoints (current_thread);
3746 }
3747 else
3748 {
3749 for_each_thread ([] (thread_info *thread) {
3750 if (has_single_step_breakpoints (thread))
3751 delete_single_step_breakpoints (thread);
3752 });
3753 }
3754
3755 unstop_all_lwps (0, event_child);
3756 }
3757 }
3758
3759 if (!stabilizing_threads)
3760 {
3761 /* In all-stop, stop all threads. */
3762 if (!non_stop)
3763 stop_all_lwps (0, NULL);
3764
3765 if (step_over_finished)
3766 {
3767 if (!non_stop)
3768 {
3769 /* If we were doing a step-over, all other threads but
3770 the stepping one had been paused in start_step_over,
3771 with their suspend counts incremented. We don't want
3772 to do a full unstop/unpause, because we're in
3773 all-stop mode (so we want threads stopped), but we
3774 still need to unsuspend the other threads, to
3775 decrement their `suspended' count back. */
3776 unsuspend_all_lwps (event_child);
3777 }
3778 else
3779 {
3780 /* If we just finished a step-over, then all threads had
3781 been momentarily paused. In all-stop, that's fine,
3782 we want threads stopped by now anyway. In non-stop,
3783 we need to re-resume threads that GDB wanted to be
3784 running. */
3785 unstop_all_lwps (1, event_child);
3786 }
3787 }
3788
3789 /* If we're not waiting for a specific LWP, choose an event LWP
3790 from among those that have had events. Giving equal priority
3791 to all LWPs that have had events helps prevent
3792 starvation. */
3793 if (ptid_equal (ptid, minus_one_ptid))
3794 {
3795 event_child->status_pending_p = 1;
3796 event_child->status_pending = w;
3797
3798 select_event_lwp (&event_child);
3799
3800 /* current_thread and event_child must stay in sync. */
3801 current_thread = get_lwp_thread (event_child);
3802
3803 event_child->status_pending_p = 0;
3804 w = event_child->status_pending;
3805 }
3806
3808 /* Stabilize threads (move out of jump pads). */
3809 if (!non_stop)
3810 stabilize_threads ();
3811 }
3812 else
3813 {
3814 /* If we just finished a step-over, then all threads had been
3815 momentarily paused. In all-stop, that's fine, we want
3816 threads stopped by now anyway. In non-stop, we need to
3817 re-resume threads that GDB wanted to be running. */
3818 if (step_over_finished)
3819 unstop_all_lwps (1, event_child);
3820 }
3821
3822 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3823 {
3824 /* If the reported event is an exit, fork, vfork or exec, let
3825 GDB know. */
3826
3827 /* Break the unreported fork relationship chain. */
3828 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3829 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3830 {
3831 event_child->fork_relative->fork_relative = NULL;
3832 event_child->fork_relative = NULL;
3833 }
3834
3835 *ourstatus = event_child->waitstatus;
3836 /* Clear the event lwp's waitstatus since we handled it already. */
3837 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3838 }
3839 else
3840 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3841
3842 /* Now that we've selected our final event LWP, un-adjust its PC if
3843 it was a software breakpoint, and the client doesn't know we can
3844 adjust the breakpoint ourselves. */
3845 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3846 && !swbreak_feature)
3847 {
3848 int decr_pc = the_low_target.decr_pc_after_break;
3849
3850 if (decr_pc != 0)
3851 {
3852 struct regcache *regcache
3853 = get_thread_regcache (current_thread, 1);
3854 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3855 }
3856 }
3857
3858 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3859 {
3860 get_syscall_trapinfo (event_child,
3861 &ourstatus->value.syscall_number);
3862 ourstatus->kind = event_child->syscall_state;
3863 }
3864 else if (current_thread->last_resume_kind == resume_stop
3865 && WSTOPSIG (w) == SIGSTOP)
3866 {
3867 /* A thread that was requested to stop by GDB with vCont;t
3868 stopped cleanly, so report it as stopped with SIG0. The use
3869 of SIGSTOP is an implementation detail. */
3870 ourstatus->value.sig = GDB_SIGNAL_0;
3871 }
3872 else if (current_thread->last_resume_kind == resume_stop
3873 && WSTOPSIG (w) != SIGSTOP)
3874 {
3875 /* A thread that was requested to stop by GDB with vCont;t,
3876 but stopped for some other reason. */
3877 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3878 }
3879 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3880 {
3881 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3882 }
3883
3884 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3885
3886 if (debug_threads)
3887 {
3888 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3889 target_pid_to_str (ptid_of (current_thread)),
3890 ourstatus->kind, ourstatus->value.sig);
3891 debug_exit ();
3892 }
3893
3894 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3895 return filter_exit_event (event_child, ourstatus);
3896
3897 return ptid_of (current_thread);
3898 }
3899
3900 /* Get rid of any pending event in the pipe. */
3901 static void
3902 async_file_flush (void)
3903 {
3904 int ret;
3905 char buf;
3906
3907 do
3908 ret = read (linux_event_pipe[0], &buf, 1);
3909 while (ret >= 0 || (ret == -1 && errno == EINTR));
3910 }
3911
3912 /* Put something in the pipe, so the event loop wakes up. */
3913 static void
3914 async_file_mark (void)
3915 {
3916 int ret;
3917
3918 async_file_flush ();
3919
3920 do
3921 ret = write (linux_event_pipe[1], "+", 1);
3922 while (ret == 0 || (ret == -1 && errno == EINTR));
3923
3924 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3925 be awakened anyway. */
3926 }
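
/* A minimal sketch (illustration only, not built) of the self-pipe
   pattern that async_file_flush/async_file_mark implement above. The
   event loop selects on the pipe's read end; writing one byte wakes
   it up, and flushing drains stale wakeups. The example_* names are
   hypothetical, not gdbserver API. */
#if 0
#include <unistd.h>
#include <fcntl.h>
#include <sys/select.h>

static int example_pipe[2];

static int
example_make_event_pipe (void)
{
  if (pipe (example_pipe) != 0)
    return -1;
  /* Non-blocking, so flushing stops at EAGAIN instead of hanging,
     and marking a full pipe doesn't stall. */
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

static void
example_wait_for_wakeup (void)
{
  fd_set rset;

  FD_ZERO (&rset);
  FD_SET (example_pipe[0], &rset);
  select (example_pipe[0] + 1, &rset, NULL, NULL, NULL);
}
#endif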
3927
3928 static ptid_t
3929 linux_wait (ptid_t ptid,
3930 struct target_waitstatus *ourstatus, int target_options)
3931 {
3932 ptid_t event_ptid;
3933
3934 /* Flush the async file first. */
3935 if (target_is_async_p ())
3936 async_file_flush ();
3937
3938 do
3939 {
3940 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3941 }
3942 while ((target_options & TARGET_WNOHANG) == 0
3943 && ptid_equal (event_ptid, null_ptid)
3944 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3945
3946 /* If at least one stop was reported, there may be more. A single
3947 SIGCHLD can signal more than one child stop. */
3948 if (target_is_async_p ()
3949 && (target_options & TARGET_WNOHANG) != 0
3950 && !ptid_equal (event_ptid, null_ptid))
3951 async_file_mark ();
3952
3953 return event_ptid;
3954 }
3955
3956 /* Send a signal to an LWP. */
3957
3958 static int
3959 kill_lwp (unsigned long lwpid, int signo)
3960 {
3961 int ret;
3962
3963 errno = 0;
3964 ret = syscall (__NR_tkill, lwpid, signo);
3965 if (errno == ENOSYS)
3966 {
3967 /* If tkill fails, then we are not using nptl threads, a
3968 configuration we no longer support. */
3969 perror_with_name (("tkill"));
3970 }
3971 return ret;
3972 }
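
/* Illustration only (not built): kill () delivers a signal to the
   process as a whole, where any thread may dequeue it; tkill ()
   targets one specific thread, which is what stopping a single LWP
   requires. tgkill () additionally guards against thread-id reuse by
   also checking the thread-group id; a sketch of the latter: */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int
example_tgkill (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif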
3973
3974 void
3975 linux_stop_lwp (struct lwp_info *lwp)
3976 {
3977 send_sigstop (lwp);
3978 }
3979
3980 static void
3981 send_sigstop (struct lwp_info *lwp)
3982 {
3983 int pid;
3984
3985 pid = lwpid_of (get_lwp_thread (lwp));
3986
3987 /* If we already have a pending stop signal for this process, don't
3988 send another. */
3989 if (lwp->stop_expected)
3990 {
3991 if (debug_threads)
3992 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3993
3994 return;
3995 }
3996
3997 if (debug_threads)
3998 debug_printf ("Sending sigstop to lwp %d\n", pid);
3999
4000 lwp->stop_expected = 1;
4001 kill_lwp (pid, SIGSTOP);
4002 }
4003
4004 static int
4005 send_sigstop_callback (thread_info *thread, void *except)
4006 {
4007 struct lwp_info *lwp = get_thread_lwp (thread);
4008
4009 /* Ignore EXCEPT. */
4010 if (lwp == except)
4011 return 0;
4012
4013 if (lwp->stopped)
4014 return 0;
4015
4016 send_sigstop (lwp);
4017 return 0;
4018 }
4019
4020 /* Increment the suspend count of an LWP, and stop it, if not stopped
4021 yet. */
4022 static int
4023 suspend_and_send_sigstop_callback (thread_info *thread, void *except)
4024 {
4025 struct lwp_info *lwp = get_thread_lwp (thread);
4026
4027 /* Ignore EXCEPT. */
4028 if (lwp == except)
4029 return 0;
4030
4031 lwp_suspended_inc (lwp);
4032
4033 return send_sigstop_callback (thread, except);
4034 }
4035
4036 static void
4037 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4038 {
4039 /* Store the exit status for later. */
4040 lwp->status_pending_p = 1;
4041 lwp->status_pending = wstat;
4042
4043 /* Store in waitstatus as well, as there's nothing else to process
4044 for this event. */
4045 if (WIFEXITED (wstat))
4046 {
4047 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4048 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4049 }
4050 else if (WIFSIGNALED (wstat))
4051 {
4052 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4053 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4054 }
4055
4056 /* Prevent trying to stop it. */
4057 lwp->stopped = 1;
4058
4059 /* No further stops are expected from a dead lwp. */
4060 lwp->stop_expected = 0;
4061 }
4062
4063 /* Return true if LWP has exited already, and has a pending exit event
4064 to report to GDB. */
4065
4066 static int
4067 lwp_is_marked_dead (struct lwp_info *lwp)
4068 {
4069 return (lwp->status_pending_p
4070 && (WIFEXITED (lwp->status_pending)
4071 || WIFSIGNALED (lwp->status_pending)));
4072 }
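
/* Illustration only (not built): how a raw wait status such as WSTAT
   above decodes, using the standard <sys/wait.h> macros. */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
example_print_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif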
4073
4074 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4075
4076 static void
4077 wait_for_sigstop (void)
4078 {
4079 struct thread_info *saved_thread;
4080 ptid_t saved_tid;
4081 int wstat;
4082 int ret;
4083
4084 saved_thread = current_thread;
4085 if (saved_thread != NULL)
4086 saved_tid = saved_thread->id;
4087 else
4088 saved_tid = null_ptid; /* avoid bogus unused warning */
4089
4090 if (debug_threads)
4091 debug_printf ("wait_for_sigstop: pulling events\n");
4092
4093 /* Passing NULL_PTID as filter indicates we want all events to be
4094 left pending. Eventually this returns when there are no
4095 unwaited-for children left. */
4096 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4097 &wstat, __WALL);
4098 gdb_assert (ret == -1);
4099
4100 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4101 current_thread = saved_thread;
4102 else
4103 {
4104 if (debug_threads)
4105 debug_printf ("Previously current thread died.\n");
4106
4107 /* We can't change the current inferior behind GDB's back,
4108 otherwise, a subsequent command may apply to the wrong
4109 process. */
4110 current_thread = NULL;
4111 }
4112 }
4113
4114 /* Returns true if THREAD's LWP is stopped in a jump pad, and we can't
4115 move it out, because we need to report the stop event to GDB. For
4116 example, if the user puts a breakpoint in the jump pad, it's
4117 because she wants to debug it. */
4118
4119 static int
4120 stuck_in_jump_pad_callback (thread_info *thread, void *data)
4121 {
4122 struct lwp_info *lwp = get_thread_lwp (thread);
4123
4124 if (lwp->suspended != 0)
4125 {
4126 internal_error (__FILE__, __LINE__,
4127 "LWP %ld is suspended, suspended=%d\n",
4128 lwpid_of (thread), lwp->suspended);
4129 }
4130 gdb_assert (lwp->stopped);
4131
4132 /* Allow debugging the jump pad, gdb_collect, etc. */
4133 return (supports_fast_tracepoints ()
4134 && agent_loaded_p ()
4135 && (gdb_breakpoint_here (lwp->stop_pc)
4136 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4137 || thread->last_resume_kind == resume_step)
4138 && (linux_fast_tracepoint_collecting (lwp, NULL)
4139 != fast_tpoint_collect_result::not_collecting));
4140 }
4141
4142 static void
4143 move_out_of_jump_pad_callback (thread_info *thread)
4144 {
4145 struct thread_info *saved_thread;
4146 struct lwp_info *lwp = get_thread_lwp (thread);
4147 int *wstat;
4148
4149 if (lwp->suspended != 0)
4150 {
4151 internal_error (__FILE__, __LINE__,
4152 "LWP %ld is suspended, suspended=%d\n",
4153 lwpid_of (thread), lwp->suspended);
4154 }
4155 gdb_assert (lwp->stopped);
4156
4157 /* For gdb_breakpoint_here. */
4158 saved_thread = current_thread;
4159 current_thread = thread;
4160
4161 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4162
4163 /* Allow debugging the jump pad, gdb_collect, etc. */
4164 if (!gdb_breakpoint_here (lwp->stop_pc)
4165 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4166 && thread->last_resume_kind != resume_step
4167 && maybe_move_out_of_jump_pad (lwp, wstat))
4168 {
4169 if (debug_threads)
4170 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4171 lwpid_of (thread));
4172
4173 if (wstat)
4174 {
4175 lwp->status_pending_p = 0;
4176 enqueue_one_deferred_signal (lwp, wstat);
4177
4178 if (debug_threads)
4179 debug_printf ("Signal %d for LWP %ld deferred "
4180 "(in jump pad)\n",
4181 WSTOPSIG (*wstat), lwpid_of (thread));
4182 }
4183
4184 linux_resume_one_lwp (lwp, 0, 0, NULL);
4185 }
4186 else
4187 lwp_suspended_inc (lwp);
4188
4189 current_thread = saved_thread;
4190 }
4191
4192 static int
4193 lwp_running (thread_info *thread, void *data)
4194 {
4195 struct lwp_info *lwp = get_thread_lwp (thread);
4196
4197 if (lwp_is_marked_dead (lwp))
4198 return 0;
4199 if (lwp->stopped)
4200 return 0;
4201 return 1;
4202 }
4203
4204 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4205 If SUSPEND, then also increase the suspend count of every LWP,
4206 except EXCEPT. */
4207
4208 static void
4209 stop_all_lwps (int suspend, struct lwp_info *except)
4210 {
4211 /* Should not be called recursively. */
4212 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4213
4214 if (debug_threads)
4215 {
4216 debug_enter ();
4217 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4218 suspend ? "stop-and-suspend" : "stop",
4219 except != NULL
4220 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4221 : "none");
4222 }
4223
4224 stopping_threads = (suspend
4225 ? STOPPING_AND_SUSPENDING_THREADS
4226 : STOPPING_THREADS);
4227
4228 if (suspend)
4229 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4230 else
4231 find_inferior (&all_threads, send_sigstop_callback, except);
4232 wait_for_sigstop ();
4233 stopping_threads = NOT_STOPPING_THREADS;
4234
4235 if (debug_threads)
4236 {
4237 debug_printf ("stop_all_lwps done, setting stopping_threads "
4238 "back to !stopping\n");
4239 debug_exit ();
4240 }
4241 }
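
/* Illustration only (not built): the typical pairing used elsewhere
   in this file. Stop every LWP, mutate state that other threads must
   not race with (e.g. removing breakpoints from the inferior's
   memory), then set them running again. */
#if 0
static void
example_critical_section (struct lwp_info *event_child)
{
  stop_all_lwps (0, event_child);	/* stop, but don't suspend */
  /* ... modify inferior memory, insert/remove breakpoints ... */
  unstop_all_lwps (0, event_child);
}
#endif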
4242
4243 /* Enqueue one signal in the chain of signals which need to be
4244 delivered to this process on next resume. */
4245
4246 static void
4247 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4248 {
4249 struct pending_signals *p_sig = XNEW (struct pending_signals);
4250
4251 p_sig->prev = lwp->pending_signals;
4252 p_sig->signal = signal;
4253 if (info == NULL)
4254 memset (&p_sig->info, 0, sizeof (siginfo_t));
4255 else
4256 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4257 lwp->pending_signals = p_sig;
4258 }
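
/* Illustration only (not built): the list above is prepended to, so
   the newest signal sits at the head; the consumer in
   linux_resume_one_lwp_throw walks the 'prev' links to the tail, so
   signals are delivered oldest-first. A minimal dequeue of the
   oldest entry (assuming a non-empty list) looks like: */
#if 0
static int
example_dequeue_oldest (struct lwp_info *lwp)
{
  struct pending_signals **p = &lwp->pending_signals;
  int signo;

  /* Walk to the tail, i.e. the oldest enqueued signal. */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  signo = (*p)->signal;
  free (*p);
  *p = NULL;
  return signo;
}
#endif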
4259
4260 /* Install breakpoints for software single stepping. */
4261
4262 static void
4263 install_software_single_step_breakpoints (struct lwp_info *lwp)
4264 {
4265 struct thread_info *thread = get_lwp_thread (lwp);
4266 struct regcache *regcache = get_thread_regcache (thread, 1);
4267 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4268
4269 current_thread = thread;
4270 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4271
4272 for (CORE_ADDR pc : next_pcs)
4273 set_single_step_breakpoint (pc, current_ptid);
4274
4275 do_cleanups (old_chain);
4276 }
4277
4278 /* Single step via hardware or software single step.
4279 Return 1 if hardware single stepping, 0 if software single stepping
4280 or can't single step. */
4281
4282 static int
4283 single_step (struct lwp_info* lwp)
4284 {
4285 int step = 0;
4286
4287 if (can_hardware_single_step ())
4288 {
4289 step = 1;
4290 }
4291 else if (can_software_single_step ())
4292 {
4293 install_software_single_step_breakpoints (lwp);
4294 step = 0;
4295 }
4296 else
4297 {
4298 if (debug_threads)
4299 debug_printf ("stepping is not implemented on this target\n");
4300 }
4301
4302 return step;
4303 }
4304
4305 /* The signal can be delivered to the inferior if we are not trying to
4306 finish a fast tracepoint collect. Since a signal can be delivered
4307 during a step-over, the program may enter the signal handler and trap
4308 again after returning from it. We can live with the spurious
4309 double traps. */
4310
4311 static int
4312 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4313 {
4314 return (lwp->collecting_fast_tracepoint
4315 == fast_tpoint_collect_result::not_collecting);
4316 }
4317
4318 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4319 SIGNAL is nonzero, give it that signal. */
4320
4321 static void
4322 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4323 int step, int signal, siginfo_t *info)
4324 {
4325 struct thread_info *thread = get_lwp_thread (lwp);
4326 struct thread_info *saved_thread;
4327 int ptrace_request;
4328 struct process_info *proc = get_thread_process (thread);
4329
4330 /* Note that the target description may not be initialized yet
4331 (proc->tdesc == NULL) at this point, because the program hasn't
4332 stopped at its first instruction yet. This happens while GDBserver
4333 skips the extra traps from the wrapper program (see option
4334 --wrapper). Code in this function that requires register access
4335 should be guarded by a check on proc->tdesc. */
4336
4337 if (lwp->stopped == 0)
4338 return;
4339
4340 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4341
4342 fast_tpoint_collect_result fast_tp_collecting
4343 = lwp->collecting_fast_tracepoint;
4344
4345 gdb_assert (!stabilizing_threads
4346 || (fast_tp_collecting
4347 != fast_tpoint_collect_result::not_collecting));
4348
4349 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4350 user used the "jump" command, or "set $pc = foo"). */
4351 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4352 {
4353 /* Collecting 'while-stepping' actions doesn't make sense
4354 anymore. */
4355 release_while_stepping_state_list (thread);
4356 }
4357
4358 /* If we have pending signals or status, and a new signal, enqueue the
4359 signal. Also enqueue the signal if it can't be delivered to the
4360 inferior right now. */
4361 if (signal != 0
4362 && (lwp->status_pending_p
4363 || lwp->pending_signals != NULL
4364 || !lwp_signal_can_be_delivered (lwp)))
4365 {
4366 enqueue_pending_signal (lwp, signal, info);
4367
4368 /* Postpone any pending signal. It was enqueued above. */
4369 signal = 0;
4370 }
4371
4372 if (lwp->status_pending_p)
4373 {
4374 if (debug_threads)
4375 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4376 " has pending status\n",
4377 lwpid_of (thread), step ? "step" : "continue",
4378 lwp->stop_expected ? "expected" : "not expected");
4379 return;
4380 }
4381
4382 saved_thread = current_thread;
4383 current_thread = thread;
4384
4385 /* This bit needs some thinking about. If we get a signal that
4386 we must report while a single-step reinsert is still pending,
4387 we often end up resuming the thread. It might be better to
4388 (ew) allow a stack of pending events; then we could be sure that
4389 the reinsert happened right away and not lose any signals.
4390
4391 Making this stack would also shrink the window in which breakpoints are
4392 uninserted (see comment in linux_wait_for_lwp) but not enough for
4393 complete correctness, so it won't solve that problem. It may be
4394 worthwhile just to solve this one, however. */
4395 if (lwp->bp_reinsert != 0)
4396 {
4397 if (debug_threads)
4398 debug_printf (" pending reinsert at 0x%s\n",
4399 paddress (lwp->bp_reinsert));
4400
4401 if (can_hardware_single_step ())
4402 {
4403 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4404 {
4405 if (step == 0)
4406 warning ("BAD - reinserting but not stepping.");
4407 if (lwp->suspended)
4408 warning ("BAD - reinserting and suspended(%d).",
4409 lwp->suspended);
4410 }
4411 }
4412
4413 step = maybe_hw_step (thread);
4414 }
4415
4416 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4417 {
4418 if (debug_threads)
4419 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4420 " (exit-jump-pad-bkpt)\n",
4421 lwpid_of (thread));
4422 }
4423 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4424 {
4425 if (debug_threads)
4426 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4427 " single-stepping\n",
4428 lwpid_of (thread));
4429
4430 if (can_hardware_single_step ())
4431 step = 1;
4432 else
4433 {
4434 internal_error (__FILE__, __LINE__,
4435 "moving out of jump pad single-stepping"
4436 " not implemented on this target");
4437 }
4438 }
4439
4440 /* If we have while-stepping actions in this thread, set it stepping.
4441 If we have a signal to deliver, it may or may not be set to
4442 SIG_IGN, we don't know. Assume so, and allow collecting
4443 while-stepping into a signal handler. A possible smart thing to
4444 do would be to set an internal breakpoint at the signal return
4445 address, continue, and carry on catching this while-stepping
4446 action only when that breakpoint is hit. A future
4447 enhancement. */
4448 if (thread->while_stepping != NULL)
4449 {
4450 if (debug_threads)
4451 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4452 lwpid_of (thread));
4453
4454 step = single_step (lwp);
4455 }
4456
4457 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4458 {
4459 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4460
4461 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4462
4463 if (debug_threads)
4464 {
4465 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4466 (long) lwp->stop_pc);
4467 }
4468 }
4469
4470 /* If we have pending signals, consume one if it can be delivered to
4471 the inferior. */
4472 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4473 {
4474 struct pending_signals **p_sig;
4475
4476 p_sig = &lwp->pending_signals;
4477 while ((*p_sig)->prev != NULL)
4478 p_sig = &(*p_sig)->prev;
4479
4480 signal = (*p_sig)->signal;
4481 if ((*p_sig)->info.si_signo != 0)
4482 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4483 &(*p_sig)->info);
4484
4485 free (*p_sig);
4486 *p_sig = NULL;
4487 }
4488
4489 if (debug_threads)
4490 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4491 lwpid_of (thread), step ? "step" : "continue", signal,
4492 lwp->stop_expected ? "expected" : "not expected");
4493
4494 if (the_low_target.prepare_to_resume != NULL)
4495 the_low_target.prepare_to_resume (lwp);
4496
4497 regcache_invalidate_thread (thread);
4498 errno = 0;
4499 lwp->stepping = step;
4500 if (step)
4501 ptrace_request = PTRACE_SINGLESTEP;
4502 else if (gdb_catching_syscalls_p (lwp))
4503 ptrace_request = PTRACE_SYSCALL;
4504 else
4505 ptrace_request = PTRACE_CONT;
4506 ptrace (ptrace_request,
4507 lwpid_of (thread),
4508 (PTRACE_TYPE_ARG3) 0,
4509 /* Coerce to a uintptr_t first to avoid potential gcc warning
4510 of coercing an 8 byte integer to a 4 byte pointer. */
4511 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4512
4513 current_thread = saved_thread;
4514 if (errno)
4515 perror_with_name ("resuming thread");
4516
4517 /* Successfully resumed. Clear state that no longer makes sense,
4518 and mark the LWP as running. Must not do this before resuming
4519 otherwise if that fails other code will be confused. E.g., we'd
4520 later try to stop the LWP and hang forever waiting for a stop
4521 status. Note that we must not throw after this is cleared,
4522 otherwise handle_zombie_lwp_error would get confused. */
4523 lwp->stopped = 0;
4524 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4525 }
4526
4527 /* Called when we try to resume a stopped LWP and that errors out. If
4528 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4529 or about to become), discard the error, clear any pending status
4530 the LWP may have, and return true (we'll collect the exit status
4531 soon enough). Otherwise, return false. */
4532
4533 static int
4534 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4535 {
4536 struct thread_info *thread = get_lwp_thread (lp);
4537
4538 /* If we get an error after resuming the LWP successfully, we'd
4539 confuse !T state for the LWP being gone. */
4540 gdb_assert (lp->stopped);
4541
4542 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4543 because even if ptrace failed with ESRCH, the tracee may be "not
4544 yet fully dead", but already refusing ptrace requests. In that
4545 case the tracee has 'R (Running)' state for a little bit
4546 (observed in Linux 3.18). See also the note on ESRCH in the
4547 ptrace(2) man page. Instead, check whether the LWP has any state
4548 other than ptrace-stopped. */
4549
4550 /* Don't assume anything if /proc/PID/status can't be read. */
4551 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4552 {
4553 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4554 lp->status_pending_p = 0;
4555 return 1;
4556 }
4557 return 0;
4558 }
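
/* Illustration only (not built): a bare-bones version of the /proc
   check that the helper above relies on. The real implementation
   lives in nat/linux-procfs.c; this sketch only shows the shape of
   the data being parsed. The exact state string varies by kernel
   version ("t (tracing stop)" on recent kernels, uppercase "T" on
   older ones). */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_pid_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int result = -1;	/* -1: can't tell, don't assume anything */

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	result = (strstr (line, "(tracing stop)") != NULL);
	break;
      }
  fclose (f);
  return result;
}
#endif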
4559
4560 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4561 disappears while we try to resume it. */
4562
4563 static void
4564 linux_resume_one_lwp (struct lwp_info *lwp,
4565 int step, int signal, siginfo_t *info)
4566 {
4567 TRY
4568 {
4569 linux_resume_one_lwp_throw (lwp, step, signal, info);
4570 }
4571 CATCH (ex, RETURN_MASK_ERROR)
4572 {
4573 if (!check_ptrace_stopped_lwp_gone (lwp))
4574 throw_exception (ex);
4575 }
4576 END_CATCH
4577 }
4578
4579 struct thread_resume_array
4580 {
4581 struct thread_resume *resume;
4582 size_t n;
4583 };
4584
4585 /* This function is called once per thread via find_inferior.
4586 ARG is a pointer to a thread_resume_array struct.
4587 We look up the resume request in ARG that applies to THREAD, and
4588 mark the thread with a pointer to it.
4589
4590 This algorithm is O(threads * resume elements), but resume elements
4591 is small (and will remain small at least until GDB supports thread
4592 suspension). */
4593
4594 static int
4595 linux_set_resume_request (thread_info *thread, void *arg)
4596 {
4597 struct lwp_info *lwp = get_thread_lwp (thread);
4598 int ndx;
4599 struct thread_resume_array *r;
4600
4601 r = (struct thread_resume_array *) arg;
4602
4603 for (ndx = 0; ndx < r->n; ndx++)
4604 {
4605 ptid_t ptid = r->resume[ndx].thread;
4606 if (ptid_equal (ptid, minus_one_ptid)
4607 || ptid == thread->id
4608 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4609 of PID'. */
4610 || (ptid_get_pid (ptid) == pid_of (thread)
4611 && (ptid_is_pid (ptid)
4612 || ptid_get_lwp (ptid) == -1)))
4613 {
4614 if (r->resume[ndx].kind == resume_stop
4615 && thread->last_resume_kind == resume_stop)
4616 {
4617 if (debug_threads)
4618 debug_printf ("already %s LWP %ld at GDB's request\n",
4619 (thread->last_status.kind
4620 == TARGET_WAITKIND_STOPPED)
4621 ? "stopped"
4622 : "stopping",
4623 lwpid_of (thread));
4624
4625 continue;
4626 }
4627
4628 /* Ignore (wildcard) resume requests for already-resumed
4629 threads. */
4630 if (r->resume[ndx].kind != resume_stop
4631 && thread->last_resume_kind != resume_stop)
4632 {
4633 if (debug_threads)
4634 debug_printf ("already %s LWP %ld at GDB's request\n",
4635 (thread->last_resume_kind
4636 == resume_step)
4637 ? "stepping"
4638 : "continuing",
4639 lwpid_of (thread));
4640 continue;
4641 }
4642
4643 /* Don't let wildcard resumes resume fork children that GDB
4644 does not yet know are new fork children. */
4645 if (lwp->fork_relative != NULL)
4646 {
4647 struct lwp_info *rel = lwp->fork_relative;
4648
4649 if (rel->status_pending_p
4650 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4651 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4652 {
4653 if (debug_threads)
4654 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4655 lwpid_of (thread));
4656 continue;
4657 }
4658 }
4659
4660 /* If the thread has a pending event that has already been
4661 reported to GDBserver core, but GDB has not pulled the
4662 event out of the vStopped queue yet, likewise, ignore the
4663 (wildcard) resume request. */
4664 if (in_queued_stop_replies (thread->id))
4665 {
4666 if (debug_threads)
4667 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4668 lwpid_of (thread));
4669 continue;
4670 }
4671
4672 lwp->resume = &r->resume[ndx];
4673 thread->last_resume_kind = lwp->resume->kind;
4674
4675 lwp->step_range_start = lwp->resume->step_range_start;
4676 lwp->step_range_end = lwp->resume->step_range_end;
4677
4678 /* If we had a deferred signal to report, dequeue one now.
4679 This can happen if LWP gets more than one signal while
4680 trying to get out of a jump pad. */
4681 if (lwp->stopped
4682 && !lwp->status_pending_p
4683 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4684 {
4685 lwp->status_pending_p = 1;
4686
4687 if (debug_threads)
4688 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4689 "leaving status pending.\n",
4690 WSTOPSIG (lwp->status_pending),
4691 lwpid_of (thread));
4692 }
4693
4694 return 0;
4695 }
4696 }
4697
4698 /* No resume action for this thread. */
4699 lwp->resume = NULL;
4700
4701 return 0;
4702 }
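
/* For reference, the three ptid shapes matched above. A ptid is the
   triple (pid, lwp, tid); a resume request from GDB may carry:

     (-1, 0, 0)      minus_one_ptid: all threads of all processes
     (PID, 0, 0)     'pPID': all threads of process PID (ptid_is_pid)
     (PID, LWP, 0)   one specific thread

   plus (PID, -1, 0), i.e. 'pPID.-1', which is treated like 'pPID'. */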
4703
4704 /* find_inferior callback for linux_resume.
4705 Set *FLAG_P if this lwp has an interesting status pending. */
4706
4707 static int
4708 resume_status_pending_p (thread_info *thread, void *flag_p)
4709 {
4710 struct lwp_info *lwp = get_thread_lwp (thread);
4711
4712 /* LWPs which will not be resumed are not interesting, because
4713 we might not wait for them next time through linux_wait. */
4714 if (lwp->resume == NULL)
4715 return 0;
4716
4717 if (thread_still_has_status_pending_p (thread))
4718 * (int *) flag_p = 1;
4719
4720 return 0;
4721 }
4722
4723 /* Return 1 if this lwp that GDB wants running is stopped at an
4724 internal breakpoint that we need to step over. It assumes that any
4725 required STOP_PC adjustment has already been propagated to the
4726 inferior's regcache. */
4727
4728 static int
4729 need_step_over_p (thread_info *thread, void *dummy)
4730 {
4731 struct lwp_info *lwp = get_thread_lwp (thread);
4732 struct thread_info *saved_thread;
4733 CORE_ADDR pc;
4734 struct process_info *proc = get_thread_process (thread);
4735
4736 /* GDBserver is skipping the extra traps from the wrapper program;
4737 there's no need to do a step-over. */
4738 if (proc->tdesc == NULL)
4739 return 0;
4740
4741 /* LWPs which will not be resumed are not interesting, because we
4742 might not wait for them next time through linux_wait. */
4743
4744 if (!lwp->stopped)
4745 {
4746 if (debug_threads)
4747 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4748 lwpid_of (thread));
4749 return 0;
4750 }
4751
4752 if (thread->last_resume_kind == resume_stop)
4753 {
4754 if (debug_threads)
4755 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4756 " stopped\n",
4757 lwpid_of (thread));
4758 return 0;
4759 }
4760
4761 gdb_assert (lwp->suspended >= 0);
4762
4763 if (lwp->suspended)
4764 {
4765 if (debug_threads)
4766 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4767 lwpid_of (thread));
4768 return 0;
4769 }
4770
4771 if (lwp->status_pending_p)
4772 {
4773 if (debug_threads)
4774 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4775 " status.\n",
4776 lwpid_of (thread));
4777 return 0;
4778 }
4779
4780 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4781 or we have. */
4782 pc = get_pc (lwp);
4783
4784 /* If the PC has changed since we stopped, then don't do anything,
4785 and let the breakpoint/tracepoint be hit. This happens if, for
4786 instance, GDB handled the decr_pc_after_break subtraction itself,
4787 GDB is OOL stepping this thread, or the user has issued a "jump"
4788 command, or poked the thread's registers herself. */
4789 if (pc != lwp->stop_pc)
4790 {
4791 if (debug_threads)
4792 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4793 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4794 lwpid_of (thread),
4795 paddress (lwp->stop_pc), paddress (pc));
4796 return 0;
4797 }
4798
4799 /* On software single-step targets, resume the inferior with the
4800 signal rather than stepping over. */
4801 if (can_software_single_step ()
4802 && lwp->pending_signals != NULL
4803 && lwp_signal_can_be_delivered (lwp))
4804 {
4805 if (debug_threads)
4806 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4807 " signals.\n",
4808 lwpid_of (thread));
4809
4810 return 0;
4811 }
4812
4813 saved_thread = current_thread;
4814 current_thread = thread;
4815
4816 /* We can only step over breakpoints we know about. */
4817 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4818 {
4819 /* Don't step over a breakpoint that GDB expects to hit,
4820 though. If the condition is being evaluated on the target's side
4821 and it evaluates to false, step over this breakpoint as well. */
4822 if (gdb_breakpoint_here (pc)
4823 && gdb_condition_true_at_breakpoint (pc)
4824 && gdb_no_commands_at_breakpoint (pc))
4825 {
4826 if (debug_threads)
4827 debug_printf ("Need step over [LWP %ld]? yes, but found"
4828 " GDB breakpoint at 0x%s; skipping step over\n",
4829 lwpid_of (thread), paddress (pc));
4830
4831 current_thread = saved_thread;
4832 return 0;
4833 }
4834 else
4835 {
4836 if (debug_threads)
4837 debug_printf ("Need step over [LWP %ld]? yes, "
4838 "found breakpoint at 0x%s\n",
4839 lwpid_of (thread), paddress (pc));
4840
4841 /* We've found an lwp that needs stepping over --- return 1 so
4842 that find_inferior stops looking. */
4843 current_thread = saved_thread;
4844
4845 return 1;
4846 }
4847 }
4848
4849 current_thread = saved_thread;
4850
4851 if (debug_threads)
4852 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4853 " at 0x%s\n",
4854 lwpid_of (thread), paddress (pc));
4855
4856 return 0;
4857 }
4858
4859 /* Start a step-over operation on LWP. When LWP is stopped at a
4860 breakpoint, to make progress, we need to move the breakpoint out
4861 of the way. If we let other threads run while we do that, they may
4862 pass by the breakpoint location and miss hitting it. To avoid
4863 that, a step-over momentarily stops all threads while LWP is
4864 single-stepped by either hardware or software while the breakpoint
4865 is temporarily uninserted from the inferior. When the single-step
4866 finishes, we reinsert the breakpoint, and let all threads that are
4867 supposed to be running, run again. */
4868
4869 static int
4870 start_step_over (struct lwp_info *lwp)
4871 {
4872 struct thread_info *thread = get_lwp_thread (lwp);
4873 struct thread_info *saved_thread;
4874 CORE_ADDR pc;
4875 int step;
4876
4877 if (debug_threads)
4878 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4879 lwpid_of (thread));
4880
4881 stop_all_lwps (1, lwp);
4882
4883 if (lwp->suspended != 0)
4884 {
4885 internal_error (__FILE__, __LINE__,
4886 "LWP %ld suspended=%d\n", lwpid_of (thread),
4887 lwp->suspended);
4888 }
4889
4890 if (debug_threads)
4891 debug_printf ("Done stopping all threads for step-over.\n");
4892
4893 /* Note, we should always reach here with an already adjusted PC,
4894 either by GDB (if we're resuming due to GDB's request), or by our
4895 caller, if we just finished handling an internal breakpoint GDB
4896 shouldn't care about. */
4897 pc = get_pc (lwp);
4898
4899 saved_thread = current_thread;
4900 current_thread = thread;
4901
4902 lwp->bp_reinsert = pc;
4903 uninsert_breakpoints_at (pc);
4904 uninsert_fast_tracepoint_jumps_at (pc);
4905
4906 step = single_step (lwp);
4907
4908 current_thread = saved_thread;
4909
4910 linux_resume_one_lwp (lwp, step, 0, NULL);
4911
4912 /* Require next event from this LWP. */
4913 step_over_bkpt = thread->id;
4914 return 1;
4915 }
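
/* For reference, the step-over life cycle driven by the functions
   above and below:

     start_step_over:      stop all LWPs, uninsert the breakpoint,
                           single-step, set step_over_bkpt to the
                           stepping thread's ptid;
     (step completes)      linux_wait_1 notices step_over_finished;
     finish_step_over:     reinsert the breakpoint, delete single-step
                           breakpoints, clear step_over_bkpt;
     unsuspend/proceed:    set the other threads going again. */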
4916
4917 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4918 start_step_over, if still there, and delete any single-step
4919 breakpoints we've set, on non-hardware single-step targets. */
4920
4921 static int
4922 finish_step_over (struct lwp_info *lwp)
4923 {
4924 if (lwp->bp_reinsert != 0)
4925 {
4926 struct thread_info *saved_thread = current_thread;
4927
4928 if (debug_threads)
4929 debug_printf ("Finished step over.\n");
4930
4931 current_thread = get_lwp_thread (lwp);
4932
4933 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4934 may be no breakpoint to reinsert there by now. */
4935 reinsert_breakpoints_at (lwp->bp_reinsert);
4936 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4937
4938 lwp->bp_reinsert = 0;
4939
4940 /* Delete any single-step breakpoints. No longer needed. We
4941 don't have to worry about other threads hitting this trap,
4942 and later not being able to explain it, because we were
4943 stepping over a breakpoint, and we hold all threads but
4944 LWP stopped while doing that. */
4945 if (!can_hardware_single_step ())
4946 {
4947 gdb_assert (has_single_step_breakpoints (current_thread));
4948 delete_single_step_breakpoints (current_thread);
4949 }
4950
4951 step_over_bkpt = null_ptid;
4952 current_thread = saved_thread;
4953 return 1;
4954 }
4955 else
4956 return 0;
4957 }
4958
4959 /* If there's a step over in progress, wait until all threads stop
4960 (that is, until the stepping thread finishes its step), and
4961 unsuspend all lwps. The stepping thread ends with its status
4962 pending, which is processed later when we get back to processing
4963 events. */
4964
4965 static void
4966 complete_ongoing_step_over (void)
4967 {
4968 if (!ptid_equal (step_over_bkpt, null_ptid))
4969 {
4970 struct lwp_info *lwp;
4971 int wstat;
4972 int ret;
4973
4974 if (debug_threads)
4975 debug_printf ("detach: step over in progress, finish it first\n");
4976
4977 /* Passing NULL_PTID as filter indicates we want all events to
4978 be left pending. Eventually this returns when there are no
4979 unwaited-for children left. */
4980 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4981 &wstat, __WALL);
4982 gdb_assert (ret == -1);
4983
4984 lwp = find_lwp_pid (step_over_bkpt);
4985 if (lwp != NULL)
4986 finish_step_over (lwp);
4987 step_over_bkpt = null_ptid;
4988 unsuspend_all_lwps (lwp);
4989 }
4990 }
4991
4992 /* This function is called once per thread. We check the thread's resume
4993 request, which will tell us whether to resume, step, or leave the thread
4994 stopped; and what signal, if any, it should be sent.
4995
4996 For threads which we aren't explicitly told otherwise, we preserve
4997 the stepping flag; this is used for stepping over gdbserver-placed
4998 breakpoints.
4999
5000 If pending_flags was set in any thread, we queue any needed
5001 signals, since we won't actually resume. We already have a pending
5002 event to report, so we don't need to preserve any step requests;
5003 they should be re-issued if necessary. */
5004
5005 static int
5006 linux_resume_one_thread (thread_info *thread, void *arg)
5007 {
5008 struct lwp_info *lwp = get_thread_lwp (thread);
5009 int leave_all_stopped = * (int *) arg;
5010 int leave_pending;
5011
5012 if (lwp->resume == NULL)
5013 return 0;
5014
5015 if (lwp->resume->kind == resume_stop)
5016 {
5017 if (debug_threads)
5018 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
5019
5020 if (!lwp->stopped)
5021 {
5022 if (debug_threads)
5023 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5024
5025 /* Stop the thread, and wait for the event asynchronously,
5026 through the event loop. */
5027 send_sigstop (lwp);
5028 }
5029 else
5030 {
5031 if (debug_threads)
5032 debug_printf ("already stopped LWP %ld\n",
5033 lwpid_of (thread));
5034
5035 /* The LWP may have been stopped in an internal event that
5036 was not meant to be notified back to GDB (e.g., gdbserver
5037 breakpoint), so we should be reporting a stop event in
5038 this case too. */
5039
5040 /* If the thread already has a pending SIGSTOP, this is a
5041 no-op. Otherwise, something later will presumably resume
5042 the thread and this will cause it to cancel any pending
5043 operation, due to last_resume_kind == resume_stop. If
5044 the thread already has a pending status to report, we
5045 will still report it the next time we wait - see
5046 status_pending_p_callback. */
5047
5048 /* If we already have a pending signal to report, then
5049 there's no need to queue a SIGSTOP, as this means we're
5050 midway through moving the LWP out of the jumppad, and we
5051 will report the pending signal as soon as that is
5052 finished. */
5053 if (lwp->pending_signals_to_report == NULL)
5054 send_sigstop (lwp);
5055 }
5056
5057 /* For stop requests, we're done. */
5058 lwp->resume = NULL;
5059 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5060 return 0;
5061 }
5062
5063 /* If this thread which is about to be resumed has a pending status,
5064 then don't resume it - we can just report the pending status.
5065 Likewise if it is suspended, because e.g., another thread is
5066 stepping past a breakpoint. Make sure to queue any signals that
5067 would otherwise be sent. In all-stop mode, we make this decision
5068 based on whether *any* thread has a pending status. If there's a
5069 thread that needs the step-over-breakpoint dance, then don't
5070 resume any other thread but that particular one. */
5071 leave_pending = (lwp->suspended
5072 || lwp->status_pending_p
5073 || leave_all_stopped);
5074
5075 /* If we have a new signal, enqueue the signal. */
5076 if (lwp->resume->sig != 0)
5077 {
5078 siginfo_t info, *info_p;
5079
5080 /* If this is the same signal we were previously stopped by,
5081 make sure to queue its siginfo. */
5082 if (WIFSTOPPED (lwp->last_status)
5083 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5084 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5085 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5086 info_p = &info;
5087 else
5088 info_p = NULL;
5089
5090 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5091 }
5092
5093 if (!leave_pending)
5094 {
5095 if (debug_threads)
5096 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5097
5098 proceed_one_lwp (thread, NULL);
5099 }
5100 else
5101 {
5102 if (debug_threads)
5103 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5104 }
5105
5106 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5107 lwp->resume = NULL;
5108 return 0;
5109 }
5110
5111 static void
5112 linux_resume (struct thread_resume *resume_info, size_t n)
5113 {
5114 struct thread_resume_array array = { resume_info, n };
5115 struct thread_info *need_step_over = NULL;
5116 int any_pending;
5117 int leave_all_stopped;
5118
5119 if (debug_threads)
5120 {
5121 debug_enter ();
5122 debug_printf ("linux_resume:\n");
5123 }
5124
5125 find_inferior (&all_threads, linux_set_resume_request, &array);
5126
5127 /* If there is a thread which would otherwise be resumed, which has
5128 a pending status, then don't resume any threads - we can just
5129 report the pending status. Make sure to queue any signals that
5130 would otherwise be sent. In non-stop mode, we'll apply this
5131 logic to each thread individually. We consume all pending events
5132 before considering to start a step-over (in all-stop). */
5133 any_pending = 0;
5134 if (!non_stop)
5135 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5136
5137 /* If there is a thread which would otherwise be resumed, which is
5138 stopped at a breakpoint that needs stepping over, then don't
5139 resume any threads - have it step over the breakpoint with all
5140 other threads stopped, then resume all threads again. Make sure
5141 to queue any signals that would otherwise be delivered or
5142 queued. */
5143 if (!any_pending && supports_breakpoints ())
5144 need_step_over
5145 = (struct thread_info *) find_inferior (&all_threads,
5146 need_step_over_p, NULL);
5147
5148 leave_all_stopped = (need_step_over != NULL || any_pending);
5149
5150 if (debug_threads)
5151 {
5152 if (need_step_over != NULL)
5153 debug_printf ("Not resuming all, need step over\n");
5154 else if (any_pending)
5155 debug_printf ("Not resuming, all-stop and found "
5156 "an LWP with pending status\n");
5157 else
5158 debug_printf ("Resuming, no pending status or step over needed\n");
5159 }
5160
5161 /* Even if we're leaving threads stopped, queue all signals we'd
5162 otherwise deliver. */
5163 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5164
5165 if (need_step_over)
5166 start_step_over (get_thread_lwp (need_step_over));
5167
5168 if (debug_threads)
5169 {
5170 debug_printf ("linux_resume done\n");
5171 debug_exit ();
5172 }
5173
5174 /* We may have events that were pending that can/should be sent to
5175 the client now. Trigger a linux_wait call. */
5176 if (target_is_async_p ())
5177 async_file_mark ();
5178 }
5179
5180 /* This function is called once per thread. We check the thread's
5181 last resume request, which will tell us whether to resume, step, or
5182 leave the thread stopped. Any signal the client requested to be
5183 delivered has already been enqueued at this point.
5184
5185 If any thread that GDB wants running is stopped at an internal
5186 breakpoint that needs stepping over, we start a step-over operation
5187 on that particular thread, and leave all others stopped. */
5188
5189 static int
5190 proceed_one_lwp (thread_info *thread, void *except)
5191 {
5192 struct lwp_info *lwp = get_thread_lwp (thread);
5193 int step;
5194
5195 if (lwp == except)
5196 return 0;
5197
5198 if (debug_threads)
5199 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5200
5201 if (!lwp->stopped)
5202 {
5203 if (debug_threads)
5204 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5205 return 0;
5206 }
5207
5208 if (thread->last_resume_kind == resume_stop
5209 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5210 {
5211 if (debug_threads)
5212 debug_printf (" client wants LWP %ld to remain stopped\n",
5213 lwpid_of (thread));
5214 return 0;
5215 }
5216
5217 if (lwp->status_pending_p)
5218 {
5219 if (debug_threads)
5220 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5221 lwpid_of (thread));
5222 return 0;
5223 }
5224
5225 gdb_assert (lwp->suspended >= 0);
5226
5227 if (lwp->suspended)
5228 {
5229 if (debug_threads)
5230 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5231 return 0;
5232 }
5233
5234 if (thread->last_resume_kind == resume_stop
5235 && lwp->pending_signals_to_report == NULL
5236 && (lwp->collecting_fast_tracepoint
5237 == fast_tpoint_collect_result::not_collecting))
5238 {
5239 /* We haven't reported this LWP as stopped yet (otherwise, the
5240 last_status.kind check above would catch it, and we wouldn't
5241 reach here). This LWP may have been momentarily paused by a
5242 stop_all_lwps call while handling for example, another LWP's
5243 step-over. In that case, the pending expected SIGSTOP signal
5244 that was queued at vCont;t handling time will have already
5245 been consumed by wait_for_sigstop, and so we need to requeue
5246 another one here. Note that if the LWP already has a SIGSTOP
5247 pending, this is a no-op. */
5248
5249 if (debug_threads)
5250 debug_printf ("Client wants LWP %ld to stop. "
5251 "Making sure it has a SIGSTOP pending\n",
5252 lwpid_of (thread));
5253
5254 send_sigstop (lwp);
5255 }
5256
5257 if (thread->last_resume_kind == resume_step)
5258 {
5259 if (debug_threads)
5260 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5261 lwpid_of (thread));
5262
5263 /* If resume_step is requested by GDB, install single-step
5264 breakpoints when the thread is about to be actually resumed if
5265 the single-step breakpoints weren't removed. */
5266 if (can_software_single_step ()
5267 && !has_single_step_breakpoints (thread))
5268 install_software_single_step_breakpoints (lwp);
5269
5270 step = maybe_hw_step (thread);
5271 }
5272 else if (lwp->bp_reinsert != 0)
5273 {
5274 if (debug_threads)
5275 debug_printf (" stepping LWP %ld, reinsert set\n",
5276 lwpid_of (thread));
5277
5278 step = maybe_hw_step (thread);
5279 }
5280 else
5281 step = 0;
5282
5283 linux_resume_one_lwp (lwp, step, 0, NULL);
5284 return 0;
5285 }
5286
5287 static int
5288 unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
5289 {
5290 struct lwp_info *lwp = get_thread_lwp (thread);
5291
5292 if (lwp == except)
5293 return 0;
5294
5295 lwp_suspended_decr (lwp);
5296
5297 return proceed_one_lwp (thread, except);
5298 }
5299
5300 /* When we finish a step-over, set threads running again. If there's
5301 another thread that may need a step-over, now's the time to start
5302 it. Eventually, we'll move all threads past their breakpoints. */
5303
5304 static void
5305 proceed_all_lwps (void)
5306 {
5307 struct thread_info *need_step_over;
5308
5309 /* If there is a thread which would otherwise be resumed, which is
5310 stopped at a breakpoint that needs stepping over, then don't
5311 resume any threads - have it step over the breakpoint with all
5312 other threads stopped, then resume all threads again. */
5313
5314 if (supports_breakpoints ())
5315 {
5316 need_step_over
5317 = (struct thread_info *) find_inferior (&all_threads,
5318 need_step_over_p, NULL);
5319
5320 if (need_step_over != NULL)
5321 {
5322 if (debug_threads)
5323 debug_printf ("proceed_all_lwps: found "
5324 "thread %ld needing a step-over\n",
5325 lwpid_of (need_step_over));
5326
5327 start_step_over (get_thread_lwp (need_step_over));
5328 return;
5329 }
5330 }
5331
5332 if (debug_threads)
5333 debug_printf ("Proceeding, no step-over needed\n");
5334
5335 find_inferior (&all_threads, proceed_one_lwp, NULL);
5336 }
5337
5338 /* Stopped LWPs that the client wanted to be running, that don't have
5339 pending statuses, are set to run again, except for EXCEPT, if not
5340 NULL. This undoes a stop_all_lwps call. */
5341
5342 static void
5343 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5344 {
5345 if (debug_threads)
5346 {
5347 debug_enter ();
5348 if (except)
5349 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5350 lwpid_of (get_lwp_thread (except)));
5351 else
5352 debug_printf ("unstopping all lwps\n");
5353 }
5354
5355 if (unsuspend)
5356 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5357 else
5358 find_inferior (&all_threads, proceed_one_lwp, except);
5359
5360 if (debug_threads)
5361 {
5362 debug_printf ("unstop_all_lwps done\n");
5363 debug_exit ();
5364 }
5365 }
5366
5367
5368 #ifdef HAVE_LINUX_REGSETS
5369
5370 #define use_linux_regsets 1
5371
5372 /* Returns true if REGSET has been disabled. */
5373
5374 static int
5375 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5376 {
5377 return (info->disabled_regsets != NULL
5378 && info->disabled_regsets[regset - info->regsets]);
5379 }
5380
5381 /* Disable REGSET. */
5382
5383 static void
5384 disable_regset (struct regsets_info *info, struct regset_info *regset)
5385 {
5386 int dr_offset;
5387
5388 dr_offset = regset - info->regsets;
5389 if (info->disabled_regsets == NULL)
5390 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5391 info->disabled_regsets[dr_offset] = 1;
5392 }
5393
5394 static int
5395 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5396 struct regcache *regcache)
5397 {
5398 struct regset_info *regset;
5399 int saw_general_regs = 0;
5400 int pid;
5401 struct iovec iov;
5402
5403 pid = lwpid_of (current_thread);
5404 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5405 {
5406 void *buf, *data;
5407 int nt_type, res;
5408
5409 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5410 continue;
5411
5412 buf = xmalloc (regset->size);
5413
5414 nt_type = regset->nt_type;
5415 if (nt_type)
5416 {
5417 iov.iov_base = buf;
5418 iov.iov_len = regset->size;
5419 data = (void *) &iov;
5420 }
5421 else
5422 data = buf;
5423
5424 #ifndef __sparc__
5425 res = ptrace (regset->get_request, pid,
5426 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5427 #else
5428 res = ptrace (regset->get_request, pid, data, nt_type);
5429 #endif
5430 if (res < 0)
5431 {
5432 if (errno == EIO)
5433 {
5434 /* If we get EIO on a regset, do not try it again for
5435 this process mode. */
5436 disable_regset (regsets_info, regset);
5437 }
5438 else if (errno == ENODATA)
5439 {
5440 /* ENODATA may be returned if the regset is currently
5441 not "active". This can happen in normal operation,
5442 so suppress the warning in this case. */
5443 }
5444 else if (errno == ESRCH)
5445 {
5446 /* At this point, ESRCH should mean the process is
5447 already gone, in which case we simply ignore attempts
5448 to read its registers. */
5449 }
5450 else
5451 {
5452 char s[256];
5453 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5454 pid);
5455 perror (s);
5456 }
5457 }
5458 else
5459 {
5460 if (regset->type == GENERAL_REGS)
5461 saw_general_regs = 1;
5462 regset->store_function (regcache, buf);
5463 }
5464 free (buf);
5465 }
5466 if (saw_general_regs)
5467 return 0;
5468 else
5469 return 1;
5470 }
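
/* Illustration only (not built): the modern regset fetch made above,
   in isolation. With a non-zero note type the kernel expects
   PTRACE_GETREGSET with a struct iovec; on success it updates
   iov_len to the number of bytes actually written. */
#if 0
#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/uio.h>

static long
example_fetch_gregs (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif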
5471
5472 static int
5473 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5474 struct regcache *regcache)
5475 {
5476 struct regset_info *regset;
5477 int saw_general_regs = 0;
5478 int pid;
5479 struct iovec iov;
5480
5481 pid = lwpid_of (current_thread);
5482 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5483 {
5484 void *buf, *data;
5485 int nt_type, res;
5486
5487 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5488 || regset->fill_function == NULL)
5489 continue;
5490
5491 buf = xmalloc (regset->size);
5492
5493 /* First fill the buffer with the current register set contents,
5494 in case there are any items in the kernel's regset that are
5495 not in gdbserver's regcache. */
5496
5497 nt_type = regset->nt_type;
5498 if (nt_type)
5499 {
5500 iov.iov_base = buf;
5501 iov.iov_len = regset->size;
5502 data = (void *) &iov;
5503 }
5504 else
5505 data = buf;
5506
5507 #ifndef __sparc__
5508 res = ptrace (regset->get_request, pid,
5509 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5510 #else
5511 res = ptrace (regset->get_request, pid, data, nt_type);
5512 #endif
5513
5514 if (res == 0)
5515 {
5516 /* Then overlay our cached registers on that. */
5517 regset->fill_function (regcache, buf);
5518
5519 /* Only now do we write the register set. */
5520 #ifndef __sparc__
5521 res = ptrace (regset->set_request, pid,
5522 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5523 #else
5524 res = ptrace (regset->set_request, pid, data, nt_type);
5525 #endif
5526 }
5527
5528 if (res < 0)
5529 {
5530 if (errno == EIO)
5531 {
5532 /* If we get EIO on a regset, do not try it again for
5533 this process mode. */
5534 disable_regset (regsets_info, regset);
5535 }
5536 else if (errno == ESRCH)
5537 {
5538 /* At this point, ESRCH should mean the process is
5539 already gone, in which case we simply ignore attempts
5540 to change its registers. See also the related
5541 comment in linux_resume_one_lwp. */
5542 free (buf);
5543 return 0;
5544 }
5545 else
5546 {
5547 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5548 }
5549 }
5550 else if (regset->type == GENERAL_REGS)
5551 saw_general_regs = 1;
5552 free (buf);
5553 }
5554 if (saw_general_regs)
5555 return 0;
5556 else
5557 return 1;
5558 }
5559
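/* Editor's note (not in the original source): the store path above is
deliberately read-modify-write: get_request first fills BUF so that any
kernel-side regset fields absent from the regcache survive,
fill_function then overlays the cached register values, and only then
is set_request issued. */
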
5560 #else /* !HAVE_LINUX_REGSETS */
5561
5562 #define use_linux_regsets 0
5563 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5564 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5565
5566 #endif
5567
5568 /* Return 1 if register REGNO is supported by one of the regset ptrace
5569 calls or 0 if it has to be transferred individually. */
5570
5571 static int
5572 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5573 {
5574 unsigned char mask = 1 << (regno % 8);
5575 size_t index = regno / 8;
5576
5577 return (use_linux_regsets
5578 && (regs_info->regset_bitmap == NULL
5579 || (regs_info->regset_bitmap[index] & mask) != 0));
5580 }
5581
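/* Illustrative example (editor's note, not in the original source):
the regset bitmap packs one bit per register, eight per byte. REGNO 10
maps to index 10 / 8 = 1 and mask 1 << (10 % 8) = 0x04, so register 10
is handled by the regsets when (regset_bitmap[1] & 0x04) != 0, or
unconditionally when no bitmap is supplied. */
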
5582 #ifdef HAVE_LINUX_USRREGS
5583
5584 static int
5585 register_addr (const struct usrregs_info *usrregs, int regnum)
5586 {
5587 int addr;
5588
5589 if (regnum < 0 || regnum >= usrregs->num_regs)
5590 error ("Invalid register number %d.", regnum);
5591
5592 addr = usrregs->regmap[regnum];
5593
5594 return addr;
5595 }
5596
5597 /* Fetch one register. */
5598 static void
5599 fetch_register (const struct usrregs_info *usrregs,
5600 struct regcache *regcache, int regno)
5601 {
5602 CORE_ADDR regaddr;
5603 int i, size;
5604 char *buf;
5605 int pid;
5606
5607 if (regno >= usrregs->num_regs)
5608 return;
5609 if ((*the_low_target.cannot_fetch_register) (regno))
5610 return;
5611
5612 regaddr = register_addr (usrregs, regno);
5613 if (regaddr == -1)
5614 return;
5615
5616 size = ((register_size (regcache->tdesc, regno)
5617 + sizeof (PTRACE_XFER_TYPE) - 1)
5618 & -sizeof (PTRACE_XFER_TYPE));
5619 buf = (char *) alloca (size);
5620
5621 pid = lwpid_of (current_thread);
5622 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5623 {
5624 errno = 0;
5625 *(PTRACE_XFER_TYPE *) (buf + i) =
5626 ptrace (PTRACE_PEEKUSER, pid,
5627 /* Coerce to a uintptr_t first to avoid potential gcc warning
5628 about coercing an 8 byte integer to a 4 byte pointer. */
5629 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5630 regaddr += sizeof (PTRACE_XFER_TYPE);
5631 if (errno != 0)
5632 error ("reading register %d: %s", regno, strerror (errno));
5633 }
5634
5635 if (the_low_target.supply_ptrace_register)
5636 the_low_target.supply_ptrace_register (regcache, regno, buf);
5637 else
5638 supply_register (regcache, regno, buf);
5639 }
5640
5641 /* Store one register. */
5642 static void
5643 store_register (const struct usrregs_info *usrregs,
5644 struct regcache *regcache, int regno)
5645 {
5646 CORE_ADDR regaddr;
5647 int i, size;
5648 char *buf;
5649 int pid;
5650
5651 if (regno >= usrregs->num_regs)
5652 return;
5653 if ((*the_low_target.cannot_store_register) (regno))
5654 return;
5655
5656 regaddr = register_addr (usrregs, regno);
5657 if (regaddr == -1)
5658 return;
5659
5660 size = ((register_size (regcache->tdesc, regno)
5661 + sizeof (PTRACE_XFER_TYPE) - 1)
5662 & -sizeof (PTRACE_XFER_TYPE));
5663 buf = (char *) alloca (size);
5664 memset (buf, 0, size);
5665
5666 if (the_low_target.collect_ptrace_register)
5667 the_low_target.collect_ptrace_register (regcache, regno, buf);
5668 else
5669 collect_register (regcache, regno, buf);
5670
5671 pid = lwpid_of (current_thread);
5672 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5673 {
5674 errno = 0;
5675 ptrace (PTRACE_POKEUSER, pid,
5676 /* Coerce to a uintptr_t first to avoid potential gcc warning
5677 about coercing an 8 byte integer to a 4 byte pointer. */
5678 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5679 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5680 if (errno != 0)
5681 {
5682 /* At this point, ESRCH should mean the process is
5683 already gone, in which case we simply ignore attempts
5684 to change its registers. See also the related
5685 comment in linux_resume_one_lwp. */
5686 if (errno == ESRCH)
5687 return;
5688
5689 if ((*the_low_target.cannot_store_register) (regno) == 0)
5690 error ("writing register %d: %s", regno, strerror (errno));
5691 }
5692 regaddr += sizeof (PTRACE_XFER_TYPE);
5693 }
5694 }
5695
5696 /* Fetch all registers, or just one, from the child process.
5697 If REGNO is -1, do this for all registers, skipping any that are
5698 assumed to have been retrieved by regsets_fetch_inferior_registers,
5699 unless ALL is non-zero.
5700 Otherwise, REGNO specifies which register (so we can save time). */
5701 static void
5702 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5703 struct regcache *regcache, int regno, int all)
5704 {
5705 struct usrregs_info *usr = regs_info->usrregs;
5706
5707 if (regno == -1)
5708 {
5709 for (regno = 0; regno < usr->num_regs; regno++)
5710 if (all || !linux_register_in_regsets (regs_info, regno))
5711 fetch_register (usr, regcache, regno);
5712 }
5713 else
5714 fetch_register (usr, regcache, regno);
5715 }
5716
5717 /* Store our register values back into the inferior.
5718 If REGNO is -1, do this for all registers, skipping any that are
5719 assumed to have been saved by regsets_store_inferior_registers,
5720 unless ALL is non-zero.
5721 Otherwise, REGNO specifies which register (so we can save time). */
5722 static void
5723 usr_store_inferior_registers (const struct regs_info *regs_info,
5724 struct regcache *regcache, int regno, int all)
5725 {
5726 struct usrregs_info *usr = regs_info->usrregs;
5727
5728 if (regno == -1)
5729 {
5730 for (regno = 0; regno < usr->num_regs; regno++)
5731 if (all || !linux_register_in_regsets (regs_info, regno))
5732 store_register (usr, regcache, regno);
5733 }
5734 else
5735 store_register (usr, regcache, regno);
5736 }
5737
5738 #else /* !HAVE_LINUX_USRREGS */
5739
5740 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5741 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5742
5743 #endif
5744
5745
5746 static void
5747 linux_fetch_registers (struct regcache *regcache, int regno)
5748 {
5749 int use_regsets;
5750 int all = 0;
5751 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5752
5753 if (regno == -1)
5754 {
5755 if (the_low_target.fetch_register != NULL
5756 && regs_info->usrregs != NULL)
5757 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5758 (*the_low_target.fetch_register) (regcache, regno);
5759
5760 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5761 if (regs_info->usrregs != NULL)
5762 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5763 }
5764 else
5765 {
5766 if (the_low_target.fetch_register != NULL
5767 && (*the_low_target.fetch_register) (regcache, regno))
5768 return;
5769
5770 use_regsets = linux_register_in_regsets (regs_info, regno);
5771 if (use_regsets)
5772 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5773 regcache);
5774 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5775 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5776 }
5777 }
5778
5779 static void
5780 linux_store_registers (struct regcache *regcache, int regno)
5781 {
5782 int use_regsets;
5783 int all = 0;
5784 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5785
5786 if (regno == -1)
5787 {
5788 all = regsets_store_inferior_registers (regs_info->regsets_info,
5789 regcache);
5790 if (regs_info->usrregs != NULL)
5791 usr_store_inferior_registers (regs_info, regcache, regno, all);
5792 }
5793 else
5794 {
5795 use_regsets = linux_register_in_regsets (regs_info, regno);
5796 if (use_regsets)
5797 all = regsets_store_inferior_registers (regs_info->regsets_info,
5798 regcache);
5799 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5800 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5801 }
5802 }
5803
5804
5805 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5806 to debugger memory starting at MYADDR. */
5807
5808 static int
5809 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5810 {
5811 int pid = lwpid_of (current_thread);
5812 PTRACE_XFER_TYPE *buffer;
5813 CORE_ADDR addr;
5814 int count;
5815 char filename[64];
5816 int i;
5817 int ret;
5818 int fd;
5819
5820 /* Try using /proc. Don't bother for one word. */
5821 if (len >= 3 * sizeof (long))
5822 {
5823 int bytes;
5824
5825 /* We could keep this file open and cache it - possibly one per
5826 thread. That requires some juggling, but is even faster. */
5827 sprintf (filename, "/proc/%d/mem", pid);
5828 fd = open (filename, O_RDONLY | O_LARGEFILE);
5829 if (fd == -1)
5830 goto no_proc;
5831
5832 /* If pread64 is available, use it. It's faster if the kernel
5833 supports it (only one syscall), and it's 64-bit safe even on
5834 32-bit platforms (for instance, SPARC debugging a SPARC64
5835 application). */
5836 #ifdef HAVE_PREAD64
5837 bytes = pread64 (fd, myaddr, len, memaddr);
5838 #else
5839 bytes = -1;
5840 if (lseek (fd, memaddr, SEEK_SET) != -1)
5841 bytes = read (fd, myaddr, len);
5842 #endif
5843
5844 close (fd);
5845 if (bytes == len)
5846 return 0;
5847
5848 /* Some data was read; we'll try to get the rest with ptrace. */
5849 if (bytes > 0)
5850 {
5851 memaddr += bytes;
5852 myaddr += bytes;
5853 len -= bytes;
5854 }
5855 }
5856
5857 no_proc:
5858 /* Round starting address down to longword boundary. */
5859 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5860 /* Round ending address up; get number of longwords that makes. */
5861 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5862 / sizeof (PTRACE_XFER_TYPE));
5863 /* Allocate buffer of that many longwords. */
5864 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5865
5866 /* Read all the longwords. */
5867 errno = 0;
5868 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5869 {
5870 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5871 about coercing an 8 byte integer to a 4 byte pointer. */
5872 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5873 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5874 (PTRACE_TYPE_ARG4) 0);
5875 if (errno)
5876 break;
5877 }
5878 ret = errno;
5879
5880 /* Copy appropriate bytes out of the buffer. */
5881 if (i > 0)
5882 {
5883 i *= sizeof (PTRACE_XFER_TYPE);
5884 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5885 memcpy (myaddr,
5886 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5887 i < len ? i : len);
5888 }
5889
5890 return ret;
5891 }
5892
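/* Editor's sketch (not part of the original source): a minimal,
self-contained illustration of the /proc/PID/mem fast path used above,
assuming only POSIX open/pread/close. A short or failed read simply
falls back to the ptrace path, mirroring "goto no_proc". */
#if 0
static ssize_t
example_proc_mem_read (int pid, unsigned long memaddr,
unsigned char *myaddr, size_t len)
{
char filename[64];
ssize_t bytes;
int fd;

snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
fd = open (filename, O_RDONLY);
if (fd == -1)
return -1; /* Caller falls back to PTRACE_PEEKTEXT. */
bytes = pread (fd, myaddr, len, (off_t) memaddr);
close (fd);
return bytes; /* May be a partial read; the caller handles that. */
}
#endif
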
5893 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5894 memory at MEMADDR. On failure (cannot write to the inferior)
5895 returns the value of errno. Always succeeds if LEN is zero. */
5896
5897 static int
5898 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5899 {
5900 int i;
5901 /* Round starting address down to longword boundary. */
5902 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5903 /* Round ending address up; get number of longwords that makes. */
5904 int count
5905 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5906 / sizeof (PTRACE_XFER_TYPE);
5907
5908 /* Allocate buffer of that many longwords. */
5909 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5910
5911 int pid = lwpid_of (current_thread);
5912
5913 if (len == 0)
5914 {
5915 /* Zero length write always succeeds. */
5916 return 0;
5917 }
5918
5919 if (debug_threads)
5920 {
5921 /* Dump up to four bytes. */
5922 char str[4 * 2 + 1];
5923 char *p = str;
5924 int dump = len < 4 ? len : 4;
5925
5926 for (i = 0; i < dump; i++)
5927 {
5928 sprintf (p, "%02x", myaddr[i]);
5929 p += 2;
5930 }
5931 *p = '\0';
5932
5933 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5934 str, (long) memaddr, pid);
5935 }
5936
5937 /* Fill start and end extra bytes of buffer with existing memory data. */
5938
5939 errno = 0;
5940 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5941 about coercing an 8 byte integer to a 4 byte pointer. */
5942 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5943 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5944 (PTRACE_TYPE_ARG4) 0);
5945 if (errno)
5946 return errno;
5947
5948 if (count > 1)
5949 {
5950 errno = 0;
5951 buffer[count - 1]
5952 = ptrace (PTRACE_PEEKTEXT, pid,
5953 /* Coerce to a uintptr_t first to avoid potential gcc warning
5954 about coercing an 8 byte integer to a 4 byte pointer. */
5955 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5956 * sizeof (PTRACE_XFER_TYPE)),
5957 (PTRACE_TYPE_ARG4) 0);
5958 if (errno)
5959 return errno;
5960 }
5961
5962 /* Copy data to be written over corresponding part of buffer. */
5963
5964 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5965 myaddr, len);
5966
5967 /* Write the entire buffer. */
5968
5969 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5970 {
5971 errno = 0;
5972 ptrace (PTRACE_POKETEXT, pid,
5973 /* Coerce to a uintptr_t first to avoid potential gcc warning
5974 about coercing an 8 byte integer to a 4 byte pointer. */
5975 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5976 (PTRACE_TYPE_ARG4) buffer[i]);
5977 if (errno)
5978 return errno;
5979 }
5980
5981 return 0;
5982 }
5983
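/* Worked example (editor's note, not in the original source): with an
8-byte PTRACE_XFER_TYPE, writing LEN = 2 bytes at MEMADDR = 0x1003
rounds ADDR down to 0x1000 and computes COUNT = ((0x1005 - 0x1000) + 7)
/ 8 = 1, so a single longword is peeked, patched at byte offset 3, and
poked back. */
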
5984 static void
5985 linux_look_up_symbols (void)
5986 {
5987 #ifdef USE_THREAD_DB
5988 struct process_info *proc = current_process ();
5989
5990 if (proc->priv->thread_db != NULL)
5991 return;
5992
5993 thread_db_init ();
5994 #endif
5995 }
5996
5997 static void
5998 linux_request_interrupt (void)
5999 {
6000 /* Send a SIGINT to the process group. This acts just as if the
6001 user had typed a ^C on the controlling terminal. */
6002 kill (-signal_pid, SIGINT);
6003 }
6004
6005 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
6006 to debugger memory starting at MYADDR. */
6007
6008 static int
6009 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
6010 {
6011 char filename[PATH_MAX];
6012 int fd, n;
6013 int pid = lwpid_of (current_thread);
6014
6015 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6016
6017 fd = open (filename, O_RDONLY);
6018 if (fd < 0)
6019 return -1;
6020
6021 if (offset != (CORE_ADDR) 0
6022 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6023 n = -1;
6024 else
6025 n = read (fd, myaddr, len);
6026
6027 close (fd);
6028
6029 return n;
6030 }
6031
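/* Editor's note (not in the original source): /proc/PID/auxv is a raw
array of Elf{32,64}_auxv_t entries, each an (a_type, a_un.a_val) pair,
terminated by an AT_NULL entry; OFFSET and LEN therefore address plain
bytes of that array. */
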
6032 /* These breakpoint- and watchpoint-related wrapper functions simply
6033 pass on the function call if the target has registered a
6034 corresponding function. */
6035
6036 static int
6037 linux_supports_z_point_type (char z_type)
6038 {
6039 return (the_low_target.supports_z_point_type != NULL
6040 && the_low_target.supports_z_point_type (z_type));
6041 }
6042
6043 static int
6044 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6045 int size, struct raw_breakpoint *bp)
6046 {
6047 if (type == raw_bkpt_type_sw)
6048 return insert_memory_breakpoint (bp);
6049 else if (the_low_target.insert_point != NULL)
6050 return the_low_target.insert_point (type, addr, size, bp);
6051 else
6052 /* Unsupported (see target.h). */
6053 return 1;
6054 }
6055
6056 static int
6057 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6058 int size, struct raw_breakpoint *bp)
6059 {
6060 if (type == raw_bkpt_type_sw)
6061 return remove_memory_breakpoint (bp);
6062 else if (the_low_target.remove_point != NULL)
6063 return the_low_target.remove_point (type, addr, size, bp);
6064 else
6065 /* Unsupported (see target.h). */
6066 return 1;
6067 }
6068
6069 /* Implement the to_stopped_by_sw_breakpoint target_ops
6070 method. */
6071
6072 static int
6073 linux_stopped_by_sw_breakpoint (void)
6074 {
6075 struct lwp_info *lwp = get_thread_lwp (current_thread);
6076
6077 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6078 }
6079
6080 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6081 method. */
6082
6083 static int
6084 linux_supports_stopped_by_sw_breakpoint (void)
6085 {
6086 return USE_SIGTRAP_SIGINFO;
6087 }
6088
6089 /* Implement the to_stopped_by_hw_breakpoint target_ops
6090 method. */
6091
6092 static int
6093 linux_stopped_by_hw_breakpoint (void)
6094 {
6095 struct lwp_info *lwp = get_thread_lwp (current_thread);
6096
6097 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6098 }
6099
6100 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6101 method. */
6102
6103 static int
6104 linux_supports_stopped_by_hw_breakpoint (void)
6105 {
6106 return USE_SIGTRAP_SIGINFO;
6107 }
6108
6109 /* Implement the supports_hardware_single_step target_ops method. */
6110
6111 static int
6112 linux_supports_hardware_single_step (void)
6113 {
6114 return can_hardware_single_step ();
6115 }
6116
6117 static int
6118 linux_supports_software_single_step (void)
6119 {
6120 return can_software_single_step ();
6121 }
6122
6123 static int
6124 linux_stopped_by_watchpoint (void)
6125 {
6126 struct lwp_info *lwp = get_thread_lwp (current_thread);
6127
6128 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6129 }
6130
6131 static CORE_ADDR
6132 linux_stopped_data_address (void)
6133 {
6134 struct lwp_info *lwp = get_thread_lwp (current_thread);
6135
6136 return lwp->stopped_data_address;
6137 }
6138
6139 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6140 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6141 && defined(PT_TEXT_END_ADDR)
6142
6143 /* This is only used for targets that define PT_TEXT_ADDR,
6144 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6145 target presumably has other ways of acquiring this information,
6146 such as loadmaps. */
6147
6148 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6149 to tell gdb about. */
6150
6151 static int
6152 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6153 {
6154 unsigned long text, text_end, data;
6155 int pid = lwpid_of (current_thread);
6156
6157 errno = 0;
6158
6159 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6160 (PTRACE_TYPE_ARG4) 0);
6161 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6162 (PTRACE_TYPE_ARG4) 0);
6163 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6164 (PTRACE_TYPE_ARG4) 0);
6165
6166 if (errno == 0)
6167 {
6168 /* Both text and data offsets produced at compile-time (and so
6169 used by gdb) are relative to the beginning of the program,
6170 with the data segment immediately following the text segment.
6171 However, the actual runtime layout in memory may put the data
6172 somewhere else, so when we send gdb a data base-address, we
6173 use the real data base address and subtract the compile-time
6174 data base-address from it (which is just the length of the
6175 text segment). BSS immediately follows data in both
6176 cases. */
6177 *text_p = text;
6178 *data_p = data - (text_end - text);
6179
6180 return 1;
6181 }
6182 return 0;
6183 }
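
/* Worked example (editor's sketch, hypothetical addresses): with TEXT
= 0x10000000, TEXT_END = 0x10008000 and DATA = 0x20000000, gdb is told
*TEXT_P = 0x10000000 and *DATA_P = 0x20000000 - 0x8000 = 0x1fff8000,
so compile-time offsets (which assume data follows text) land on the
real runtime layout. */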
6184 #endif
6185
6186 static int
6187 linux_qxfer_osdata (const char *annex,
6188 unsigned char *readbuf, unsigned const char *writebuf,
6189 CORE_ADDR offset, int len)
6190 {
6191 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6192 }
6193
6194 /* Convert a native/host siginfo object, into/from the siginfo in the
6195 layout of the inferiors' architecture. */
6196
6197 static void
6198 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6199 {
6200 int done = 0;
6201
6202 if (the_low_target.siginfo_fixup != NULL)
6203 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6204
6205 /* If there was no callback, or the callback didn't do anything,
6206 then just do a straight memcpy. */
6207 if (!done)
6208 {
6209 if (direction == 1)
6210 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6211 else
6212 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6213 }
6214 }
6215
6216 static int
6217 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6218 unsigned const char *writebuf, CORE_ADDR offset, int len)
6219 {
6220 int pid;
6221 siginfo_t siginfo;
6222 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6223
6224 if (current_thread == NULL)
6225 return -1;
6226
6227 pid = lwpid_of (current_thread);
6228
6229 if (debug_threads)
6230 debug_printf ("%s siginfo for lwp %d.\n",
6231 readbuf != NULL ? "Reading" : "Writing",
6232 pid);
6233
6234 if (offset >= sizeof (siginfo))
6235 return -1;
6236
6237 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6238 return -1;
6239
6240 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6241 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6242 inferior with a 64-bit GDBSERVER should look the same as debugging it
6243 with a 32-bit GDBSERVER, we need to convert it. */
6244 siginfo_fixup (&siginfo, inf_siginfo, 0);
6245
6246 if (offset + len > sizeof (siginfo))
6247 len = sizeof (siginfo) - offset;
6248
6249 if (readbuf != NULL)
6250 memcpy (readbuf, inf_siginfo + offset, len);
6251 else
6252 {
6253 memcpy (inf_siginfo + offset, writebuf, len);
6254
6255 /* Convert back to ptrace layout before flushing it out. */
6256 siginfo_fixup (&siginfo, inf_siginfo, 1);
6257
6258 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6259 return -1;
6260 }
6261
6262 return len;
6263 }
6264
6265 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6266 it lets us notice when children change state, and it serves as the
6267 handler for the sigsuspend in my_waitpid. */
6268
6269 static void
6270 sigchld_handler (int signo)
6271 {
6272 int old_errno = errno;
6273
6274 if (debug_threads)
6275 {
6276 do
6277 {
6278 /* fprintf is not async-signal-safe, so call write
6279 directly. */
6280 if (write (2, "sigchld_handler\n",
6281 sizeof ("sigchld_handler\n") - 1) < 0)
6282 break; /* just ignore */
6283 } while (0);
6284 }
6285
6286 if (target_is_async_p ())
6287 async_file_mark (); /* trigger a linux_wait */
6288
6289 errno = old_errno;
6290 }
6291
6292 static int
6293 linux_supports_non_stop (void)
6294 {
6295 return 1;
6296 }
6297
6298 static int
6299 linux_async (int enable)
6300 {
6301 int previous = target_is_async_p ();
6302
6303 if (debug_threads)
6304 debug_printf ("linux_async (%d), previous=%d\n",
6305 enable, previous);
6306
6307 if (previous != enable)
6308 {
6309 sigset_t mask;
6310 sigemptyset (&mask);
6311 sigaddset (&mask, SIGCHLD);
6312
6313 sigprocmask (SIG_BLOCK, &mask, NULL);
6314
6315 if (enable)
6316 {
6317 if (pipe (linux_event_pipe) == -1)
6318 {
6319 linux_event_pipe[0] = -1;
6320 linux_event_pipe[1] = -1;
6321 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6322
6323 warning ("creating event pipe failed.");
6324 return previous;
6325 }
6326
6327 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6328 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6329
6330 /* Register the event loop handler. */
6331 add_file_handler (linux_event_pipe[0],
6332 handle_target_event, NULL);
6333
6334 /* Always trigger a linux_wait. */
6335 async_file_mark ();
6336 }
6337 else
6338 {
6339 delete_file_handler (linux_event_pipe[0]);
6340
6341 close (linux_event_pipe[0]);
6342 close (linux_event_pipe[1]);
6343 linux_event_pipe[0] = -1;
6344 linux_event_pipe[1] = -1;
6345 }
6346
6347 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6348 }
6349
6350 return previous;
6351 }
6352
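/* Editor's note (not in the original source): linux_event_pipe is the
classic self-pipe pattern; async_file_mark writes a byte into the pipe
so the registered file handler wakes the event loop, which is safe to
do from the SIGCHLD handler above. */
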
6353 static int
6354 linux_start_non_stop (int nonstop)
6355 {
6356 /* Register or unregister from event-loop accordingly. */
6357 linux_async (nonstop);
6358
6359 if (target_is_async_p () != (nonstop != 0))
6360 return -1;
6361
6362 return 0;
6363 }
6364
6365 static int
6366 linux_supports_multi_process (void)
6367 {
6368 return 1;
6369 }
6370
6371 /* Check if fork events are supported. */
6372
6373 static int
6374 linux_supports_fork_events (void)
6375 {
6376 return linux_supports_tracefork ();
6377 }
6378
6379 /* Check if vfork events are supported. */
6380
6381 static int
6382 linux_supports_vfork_events (void)
6383 {
6384 return linux_supports_tracefork ();
6385 }
6386
6387 /* Check if exec events are supported. */
6388
6389 static int
6390 linux_supports_exec_events (void)
6391 {
6392 return linux_supports_traceexec ();
6393 }
6394
6395 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6396 ptrace flags for all inferiors. This is in case the new GDB connection
6397 doesn't support the same set of events that the previous one did. */
6398
6399 static void
6400 linux_handle_new_gdb_connection (void)
6401 {
6402 /* Request that all the lwps reset their ptrace options. */
6403 for_each_thread ([] (thread_info *thread)
6404 {
6405 struct lwp_info *lwp = get_thread_lwp (thread);
6406
6407 if (!lwp->stopped)
6408 {
6409 /* Stop the lwp so we can modify its ptrace options. */
6410 lwp->must_set_ptrace_flags = 1;
6411 linux_stop_lwp (lwp);
6412 }
6413 else
6414 {
6415 /* Already stopped; go ahead and set the ptrace options. */
6416 struct process_info *proc = find_process_pid (pid_of (thread));
6417 int options = linux_low_ptrace_options (proc->attached);
6418
6419 linux_enable_event_reporting (lwpid_of (thread), options);
6420 lwp->must_set_ptrace_flags = 0;
6421 }
6422 });
6423 }
6424
6425 static int
6426 linux_supports_disable_randomization (void)
6427 {
6428 #ifdef HAVE_PERSONALITY
6429 return 1;
6430 #else
6431 return 0;
6432 #endif
6433 }
6434
6435 static int
6436 linux_supports_agent (void)
6437 {
6438 return 1;
6439 }
6440
6441 static int
6442 linux_supports_range_stepping (void)
6443 {
6444 if (can_software_single_step ())
6445 return 1;
6446 if (the_low_target.supports_range_stepping == NULL)
6447 return 0;
6448
6449 return (*the_low_target.supports_range_stepping) ();
6450 }
6451
6452 /* Enumerate spufs IDs for process PID. */
6453 static int
6454 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6455 {
6456 int pos = 0;
6457 int written = 0;
6458 char path[128];
6459 DIR *dir;
6460 struct dirent *entry;
6461
6462 sprintf (path, "/proc/%ld/fd", pid);
6463 dir = opendir (path);
6464 if (!dir)
6465 return -1;
6466
6467 rewinddir (dir);
6468 while ((entry = readdir (dir)) != NULL)
6469 {
6470 struct stat st;
6471 struct statfs stfs;
6472 int fd;
6473
6474 fd = atoi (entry->d_name);
6475 if (!fd)
6476 continue;
6477
6478 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6479 if (stat (path, &st) != 0)
6480 continue;
6481 if (!S_ISDIR (st.st_mode))
6482 continue;
6483
6484 if (statfs (path, &stfs) != 0)
6485 continue;
6486 if (stfs.f_type != SPUFS_MAGIC)
6487 continue;
6488
6489 if (pos >= offset && pos + 4 <= offset + len)
6490 {
6491 *(unsigned int *)(buf + pos - offset) = fd;
6492 written += 4;
6493 }
6494 pos += 4;
6495 }
6496
6497 closedir (dir);
6498 return written;
6499 }
6500
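/* Editor's note (not in the original source): each spufs context found
contributes 4 bytes to a virtual array of IDs, so for example OFFSET =
4 with LEN = 8 selects the second and third contexts, and WRITTEN
counts only bytes that actually landed in BUF. */
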
6501 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6502 object type, using the /proc file system. */
6503 static int
6504 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6505 unsigned const char *writebuf,
6506 CORE_ADDR offset, int len)
6507 {
6508 long pid = lwpid_of (current_thread);
6509 char buf[128];
6510 int fd = 0;
6511 int ret = 0;
6512
6513 if (!writebuf && !readbuf)
6514 return -1;
6515
6516 if (!*annex)
6517 {
6518 if (!readbuf)
6519 return -1;
6520 else
6521 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6522 }
6523
6524 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6525 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6526 if (fd <= 0)
6527 return -1;
6528
6529 if (offset != 0
6530 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6531 {
6532 close (fd);
6533 return 0;
6534 }
6535
6536 if (writebuf)
6537 ret = write (fd, writebuf, (size_t) len);
6538 else
6539 ret = read (fd, readbuf, (size_t) len);
6540
6541 close (fd);
6542 return ret;
6543 }
6544
6545 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6546 struct target_loadseg
6547 {
6548 /* Core address to which the segment is mapped. */
6549 Elf32_Addr addr;
6550 /* VMA recorded in the program header. */
6551 Elf32_Addr p_vaddr;
6552 /* Size of this segment in memory. */
6553 Elf32_Word p_memsz;
6554 };
6555
6556 # if defined PT_GETDSBT
6557 struct target_loadmap
6558 {
6559 /* Protocol version number, must be zero. */
6560 Elf32_Word version;
6561 /* Pointer to the DSBT table, its size, and the DSBT index. */
6562 unsigned *dsbt_table;
6563 unsigned dsbt_size, dsbt_index;
6564 /* Number of segments in this map. */
6565 Elf32_Word nsegs;
6566 /* The actual memory map. */
6567 struct target_loadseg segs[/*nsegs*/];
6568 };
6569 # define LINUX_LOADMAP PT_GETDSBT
6570 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6571 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6572 # else
6573 struct target_loadmap
6574 {
6575 /* Protocol version number, must be zero. */
6576 Elf32_Half version;
6577 /* Number of segments in this map. */
6578 Elf32_Half nsegs;
6579 /* The actual memory map. */
6580 struct target_loadseg segs[/*nsegs*/];
6581 };
6582 # define LINUX_LOADMAP PTRACE_GETFDPIC
6583 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6584 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6585 # endif
6586
6587 static int
6588 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6589 unsigned char *myaddr, unsigned int len)
6590 {
6591 int pid = lwpid_of (current_thread);
6592 int addr = -1;
6593 struct target_loadmap *data = NULL;
6594 unsigned int actual_length, copy_length;
6595
6596 if (strcmp (annex, "exec") == 0)
6597 addr = (int) LINUX_LOADMAP_EXEC;
6598 else if (strcmp (annex, "interp") == 0)
6599 addr = (int) LINUX_LOADMAP_INTERP;
6600 else
6601 return -1;
6602
6603 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6604 return -1;
6605
6606 if (data == NULL)
6607 return -1;
6608
6609 actual_length = sizeof (struct target_loadmap)
6610 + sizeof (struct target_loadseg) * data->nsegs;
6611
6612 if (offset < 0 || offset > actual_length)
6613 return -1;
6614
6615 copy_length = actual_length - offset < len ? actual_length - offset : len;
6616 memcpy (myaddr, (char *) data + offset, copy_length);
6617 return copy_length;
6618 }
6619 #else
6620 # define linux_read_loadmap NULL
6621 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6622
6623 static void
6624 linux_process_qsupported (char **features, int count)
6625 {
6626 if (the_low_target.process_qsupported != NULL)
6627 the_low_target.process_qsupported (features, count);
6628 }
6629
6630 static int
6631 linux_supports_catch_syscall (void)
6632 {
6633 return (the_low_target.get_syscall_trapinfo != NULL
6634 && linux_supports_tracesysgood ());
6635 }
6636
6637 static int
6638 linux_get_ipa_tdesc_idx (void)
6639 {
6640 if (the_low_target.get_ipa_tdesc_idx == NULL)
6641 return 0;
6642
6643 return (*the_low_target.get_ipa_tdesc_idx) ();
6644 }
6645
6646 static int
6647 linux_supports_tracepoints (void)
6648 {
6649 if (the_low_target.supports_tracepoints == NULL)
6650 return 0;
6651
6652 return (*the_low_target.supports_tracepoints) ();
6653 }
6654
6655 static CORE_ADDR
6656 linux_read_pc (struct regcache *regcache)
6657 {
6658 if (the_low_target.get_pc == NULL)
6659 return 0;
6660
6661 return (*the_low_target.get_pc) (regcache);
6662 }
6663
6664 static void
6665 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6666 {
6667 gdb_assert (the_low_target.set_pc != NULL);
6668
6669 (*the_low_target.set_pc) (regcache, pc);
6670 }
6671
6672 static int
6673 linux_thread_stopped (struct thread_info *thread)
6674 {
6675 return get_thread_lwp (thread)->stopped;
6676 }
6677
6678 /* This exposes stop-all-threads functionality to other modules. */
6679
6680 static void
6681 linux_pause_all (int freeze)
6682 {
6683 stop_all_lwps (freeze, NULL);
6684 }
6685
6686 /* This exposes unstop-all-threads functionality to other gdbserver
6687 modules. */
6688
6689 static void
6690 linux_unpause_all (int unfreeze)
6691 {
6692 unstop_all_lwps (unfreeze, NULL);
6693 }
6694
6695 static int
6696 linux_prepare_to_access_memory (void)
6697 {
6698 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6699 running LWP. */
6700 if (non_stop)
6701 linux_pause_all (1);
6702 return 0;
6703 }
6704
6705 static void
6706 linux_done_accessing_memory (void)
6707 {
6708 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6709 running LWP. */
6710 if (non_stop)
6711 linux_unpause_all (1);
6712 }
6713
6714 static int
6715 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6716 CORE_ADDR collector,
6717 CORE_ADDR lockaddr,
6718 ULONGEST orig_size,
6719 CORE_ADDR *jump_entry,
6720 CORE_ADDR *trampoline,
6721 ULONGEST *trampoline_size,
6722 unsigned char *jjump_pad_insn,
6723 ULONGEST *jjump_pad_insn_size,
6724 CORE_ADDR *adjusted_insn_addr,
6725 CORE_ADDR *adjusted_insn_addr_end,
6726 char *err)
6727 {
6728 return (*the_low_target.install_fast_tracepoint_jump_pad)
6729 (tpoint, tpaddr, collector, lockaddr, orig_size,
6730 jump_entry, trampoline, trampoline_size,
6731 jjump_pad_insn, jjump_pad_insn_size,
6732 adjusted_insn_addr, adjusted_insn_addr_end,
6733 err);
6734 }
6735
6736 static struct emit_ops *
6737 linux_emit_ops (void)
6738 {
6739 if (the_low_target.emit_ops != NULL)
6740 return (*the_low_target.emit_ops) ();
6741 else
6742 return NULL;
6743 }
6744
6745 static int
6746 linux_get_min_fast_tracepoint_insn_len (void)
6747 {
6748 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6749 }
6750
6751 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6752
6753 static int
6754 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6755 CORE_ADDR *phdr_memaddr, int *num_phdr)
6756 {
6757 char filename[PATH_MAX];
6758 int fd;
6759 const int auxv_size = is_elf64
6760 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6761 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6762
6763 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6764
6765 fd = open (filename, O_RDONLY);
6766 if (fd < 0)
6767 return 1;
6768
6769 *phdr_memaddr = 0;
6770 *num_phdr = 0;
6771 while (read (fd, buf, auxv_size) == auxv_size
6772 && (*phdr_memaddr == 0 || *num_phdr == 0))
6773 {
6774 if (is_elf64)
6775 {
6776 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6777
6778 switch (aux->a_type)
6779 {
6780 case AT_PHDR:
6781 *phdr_memaddr = aux->a_un.a_val;
6782 break;
6783 case AT_PHNUM:
6784 *num_phdr = aux->a_un.a_val;
6785 break;
6786 }
6787 }
6788 else
6789 {
6790 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6791
6792 switch (aux->a_type)
6793 {
6794 case AT_PHDR:
6795 *phdr_memaddr = aux->a_un.a_val;
6796 break;
6797 case AT_PHNUM:
6798 *num_phdr = aux->a_un.a_val;
6799 break;
6800 }
6801 }
6802 }
6803
6804 close (fd);
6805
6806 if (*phdr_memaddr == 0 || *num_phdr == 0)
6807 {
6808 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6809 "phdr_memaddr = %ld, phdr_num = %d",
6810 (long) *phdr_memaddr, *num_phdr);
6811 return 2;
6812 }
6813
6814 return 0;
6815 }
6816
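/* Usage sketch (editor's note, hypothetical values): for a 64-bit
inferior,

CORE_ADDR phdr_memaddr;
int num_phdr;
if (get_phdr_phnum_from_proc_auxv (pid, 1, &phdr_memaddr, &num_phdr) == 0)
... PHDR_MEMADDR now holds AT_PHDR and NUM_PHDR holds AT_PHNUM ...

a return of 1 means the auxv was unreadable, and 2 that the entries
were missing. */
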
6817 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6818
6819 static CORE_ADDR
6820 get_dynamic (const int pid, const int is_elf64)
6821 {
6822 CORE_ADDR phdr_memaddr, relocation;
6823 int num_phdr, i;
6824 unsigned char *phdr_buf;
6825 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6826
6827 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6828 return 0;
6829
6830 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6831 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6832
6833 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6834 return 0;
6835
6836 /* Compute relocation: it is expected to be 0 for "regular" executables,
6837 non-zero for PIE ones. */
6838 relocation = -1;
6839 for (i = 0; relocation == -1 && i < num_phdr; i++)
6840 if (is_elf64)
6841 {
6842 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6843
6844 if (p->p_type == PT_PHDR)
6845 relocation = phdr_memaddr - p->p_vaddr;
6846 }
6847 else
6848 {
6849 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6850
6851 if (p->p_type == PT_PHDR)
6852 relocation = phdr_memaddr - p->p_vaddr;
6853 }
6854
6855 if (relocation == -1)
6856 {
6857 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6858 real-world executables, including PIE executables, virtually always
6859 have PT_PHDR present. PT_PHDR is absent from some shared libraries
6860 and from fpc (Free Pascal 2.4) binaries, but neither of those needs
6861 or provides DT_DEBUG anyway (fpc binaries are statically linked).
6862
6863 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6864
6865 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6866
6867 return 0;
6868 }
6869
6870 for (i = 0; i < num_phdr; i++)
6871 {
6872 if (is_elf64)
6873 {
6874 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6875
6876 if (p->p_type == PT_DYNAMIC)
6877 return p->p_vaddr + relocation;
6878 }
6879 else
6880 {
6881 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6882
6883 if (p->p_type == PT_DYNAMIC)
6884 return p->p_vaddr + relocation;
6885 }
6886 }
6887
6888 return 0;
6889 }
6890
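/* Worked example (editor's sketch, hypothetical addresses): for a PIE
loaded at 0x555555554000 whose PT_PHDR records p_vaddr = 0x40 while the
auxv reports AT_PHDR = 0x555555554040, RELOCATION = 0x555555554040 -
0x40 = 0x555555554000; a PT_DYNAMIC with p_vaddr = 0x2e00 then yields
&_DYNAMIC = 0x555555556e00. */
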
6891 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6892 can be 0 if the inferior does not yet have the library list initialized.
6893 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6894 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6895
6896 static CORE_ADDR
6897 get_r_debug (const int pid, const int is_elf64)
6898 {
6899 CORE_ADDR dynamic_memaddr;
6900 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6901 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6902 CORE_ADDR map = -1;
6903
6904 dynamic_memaddr = get_dynamic (pid, is_elf64);
6905 if (dynamic_memaddr == 0)
6906 return map;
6907
6908 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6909 {
6910 if (is_elf64)
6911 {
6912 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6913 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6914 union
6915 {
6916 Elf64_Xword map;
6917 unsigned char buf[sizeof (Elf64_Xword)];
6918 }
6919 rld_map;
6920 #endif
6921 #ifdef DT_MIPS_RLD_MAP
6922 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6923 {
6924 if (linux_read_memory (dyn->d_un.d_val,
6925 rld_map.buf, sizeof (rld_map.buf)) == 0)
6926 return rld_map.map;
6927 else
6928 break;
6929 }
6930 #endif /* DT_MIPS_RLD_MAP */
6931 #ifdef DT_MIPS_RLD_MAP_REL
6932 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6933 {
6934 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6935 rld_map.buf, sizeof (rld_map.buf)) == 0)
6936 return rld_map.map;
6937 else
6938 break;
6939 }
6940 #endif /* DT_MIPS_RLD_MAP_REL */
6941
6942 if (dyn->d_tag == DT_DEBUG && map == -1)
6943 map = dyn->d_un.d_val;
6944
6945 if (dyn->d_tag == DT_NULL)
6946 break;
6947 }
6948 else
6949 {
6950 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6951 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6952 union
6953 {
6954 Elf32_Word map;
6955 unsigned char buf[sizeof (Elf32_Word)];
6956 }
6957 rld_map;
6958 #endif
6959 #ifdef DT_MIPS_RLD_MAP
6960 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6961 {
6962 if (linux_read_memory (dyn->d_un.d_val,
6963 rld_map.buf, sizeof (rld_map.buf)) == 0)
6964 return rld_map.map;
6965 else
6966 break;
6967 }
6968 #endif /* DT_MIPS_RLD_MAP */
6969 #ifdef DT_MIPS_RLD_MAP_REL
6970 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6971 {
6972 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6973 rld_map.buf, sizeof (rld_map.buf)) == 0)
6974 return rld_map.map;
6975 else
6976 break;
6977 }
6978 #endif /* DT_MIPS_RLD_MAP_REL */
6979
6980 if (dyn->d_tag == DT_DEBUG && map == -1)
6981 map = dyn->d_un.d_val;
6982
6983 if (dyn->d_tag == DT_NULL)
6984 break;
6985 }
6986
6987 dynamic_memaddr += dyn_size;
6988 }
6989
6990 return map;
6991 }
6992
6993 /* Read one pointer from MEMADDR in the inferior. */
6994
6995 static int
6996 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6997 {
6998 int ret;
6999
7000 /* Go through a union so this works on either big or little endian
7001 hosts, when the inferior's pointer size is smaller than the size
7002 of CORE_ADDR. It is assumed that the inferior's endianness is
7003 the same as gdbserver's. */
7004 union
7005 {
7006 CORE_ADDR core_addr;
7007 unsigned int ui;
7008 unsigned char uc;
7009 } addr;
7010
7011 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
7012 if (ret == 0)
7013 {
7014 if (ptr_size == sizeof (CORE_ADDR))
7015 *ptr = addr.core_addr;
7016 else if (ptr_size == sizeof (unsigned int))
7017 *ptr = addr.ui;
7018 else
7019 gdb_assert_not_reached ("unhandled pointer size");
7020 }
7021 return ret;
7022 }
7023
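/* Editor's note (not in the original source): because every member of
the union starts at offset 0, reading PTR_SIZE = 4 bytes into ADDR.UC
and then taking ADDR.UI yields the right value on both big- and
little-endian hosts, given the shared byte order assumed above. */
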
7024 struct link_map_offsets
7025 {
7026 /* Offset of r_debug.r_version. */
7027 int r_version_offset;
7028
7029 /* Offset of r_debug.r_map. */
7030 int r_map_offset;
7031
7032 /* Offset to l_addr field in struct link_map. */
7033 int l_addr_offset;
7034
7035 /* Offset to l_name field in struct link_map. */
7036 int l_name_offset;
7037
7038 /* Offset to l_ld field in struct link_map. */
7039 int l_ld_offset;
7040
7041 /* Offset to l_next field in struct link_map. */
7042 int l_next_offset;
7043
7044 /* Offset to l_prev field in struct link_map. */
7045 int l_prev_offset;
7046 };
7047
7048 /* Construct qXfer:libraries-svr4:read reply. */
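/* Editor's sketch of a typical reply (hypothetical values):

<library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
<library name="/lib64/libc.so.6" lm="0x7ffff7fc2700"
l_addr="0x7ffff7dc4000" l_ld="0x7ffff7f9fc40"/>
</library-list-svr4>

An empty list is reported as <library-list-svr4 version="1.0"/>. */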
7049
7050 static int
7051 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7052 unsigned const char *writebuf,
7053 CORE_ADDR offset, int len)
7054 {
7055 char *document;
7056 unsigned document_len;
7057 struct process_info_private *const priv = current_process ()->priv;
7058 char filename[PATH_MAX];
7059 int pid, is_elf64;
7060
7061 static const struct link_map_offsets lmo_32bit_offsets =
7062 {
7063 0, /* r_version offset. */
7064 4, /* r_debug.r_map offset. */
7065 0, /* l_addr offset in link_map. */
7066 4, /* l_name offset in link_map. */
7067 8, /* l_ld offset in link_map. */
7068 12, /* l_next offset in link_map. */
7069 16 /* l_prev offset in link_map. */
7070 };
7071
7072 static const struct link_map_offsets lmo_64bit_offsets =
7073 {
7074 0, /* r_version offset. */
7075 8, /* r_debug.r_map offset. */
7076 0, /* l_addr offset in link_map. */
7077 8, /* l_name offset in link_map. */
7078 16, /* l_ld offset in link_map. */
7079 24, /* l_next offset in link_map. */
7080 32 /* l_prev offset in link_map. */
7081 };
7082 const struct link_map_offsets *lmo;
7083 unsigned int machine;
7084 int ptr_size;
7085 CORE_ADDR lm_addr = 0, lm_prev = 0;
7086 int allocated = 1024;
7087 char *p;
7088 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7089 int header_done = 0;
7090
7091 if (writebuf != NULL)
7092 return -2;
7093 if (readbuf == NULL)
7094 return -1;
7095
7096 pid = lwpid_of (current_thread);
7097 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7098 is_elf64 = elf_64_file_p (filename, &machine);
7099 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7100 ptr_size = is_elf64 ? 8 : 4;
7101
7102 while (annex[0] != '\0')
7103 {
7104 const char *sep;
7105 CORE_ADDR *addrp;
7106 int len;
7107
7108 sep = strchr (annex, '=');
7109 if (sep == NULL)
7110 break;
7111
7112 len = sep - annex;
7113 if (len == 5 && startswith (annex, "start"))
7114 addrp = &lm_addr;
7115 else if (len == 4 && startswith (annex, "prev"))
7116 addrp = &lm_prev;
7117 else
7118 {
7119 annex = strchr (sep, ';');
7120 if (annex == NULL)
7121 break;
7122 annex++;
7123 continue;
7124 }
7125
7126 annex = decode_address_to_semicolon (addrp, sep + 1);
7127 }
7128
7129 if (lm_addr == 0)
7130 {
7131 int r_version = 0;
7132
7133 if (priv->r_debug == 0)
7134 priv->r_debug = get_r_debug (pid, is_elf64);
7135
7136 /* We failed to find DT_DEBUG. This situation will not change
7137 for this inferior, so do not retry. Report it to GDB as
7138 E01; see GDB's solib-svr4.c for the reasons. */
7139 if (priv->r_debug == (CORE_ADDR) -1)
7140 return -1;
7141
7142 if (priv->r_debug != 0)
7143 {
7144 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7145 (unsigned char *) &r_version,
7146 sizeof (r_version)) != 0
7147 || r_version != 1)
7148 {
7149 warning ("unexpected r_debug version %d", r_version);
7150 }
7151 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7152 &lm_addr, ptr_size) != 0)
7153 {
7154 warning ("unable to read r_map from 0x%lx",
7155 (long) priv->r_debug + lmo->r_map_offset);
7156 }
7157 }
7158 }
7159
7160 document = (char *) xmalloc (allocated);
7161 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7162 p = document + strlen (document);
7163
7164 while (lm_addr
7165 && read_one_ptr (lm_addr + lmo->l_name_offset,
7166 &l_name, ptr_size) == 0
7167 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7168 &l_addr, ptr_size) == 0
7169 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7170 &l_ld, ptr_size) == 0
7171 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7172 &l_prev, ptr_size) == 0
7173 && read_one_ptr (lm_addr + lmo->l_next_offset,
7174 &l_next, ptr_size) == 0)
7175 {
7176 unsigned char libname[PATH_MAX];
7177
7178 if (lm_prev != l_prev)
7179 {
7180 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7181 (long) lm_prev, (long) l_prev);
7182 break;
7183 }
7184
7185 /* Ignore the first entry even if it has a valid name, as it
7186 corresponds to the main executable. The first entry should not be
7187 skipped if the dynamic loader was loaded late by a static executable
7188 (see the solib-svr4.c parameter ignore_first), but in that case the
7189 main executable has no PT_DYNAMIC, and this function has already
7190 exited above because get_r_debug failed. */
7191 if (lm_prev == 0)
7192 {
7193 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7194 p = p + strlen (p);
7195 }
7196 else
7197 {
7198 /* Not checking for error because reading may stop before
7199 we've got PATH_MAX worth of characters. */
7200 libname[0] = '\0';
7201 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7202 libname[sizeof (libname) - 1] = '\0';
7203 if (libname[0] != '\0')
7204 {
7205 /* 6x the size for xml_escape_text below. */
7206 size_t len = 6 * strlen ((char *) libname);
7207
7208 if (!header_done)
7209 {
7210 /* Terminate `<library-list-svr4'. */
7211 *p++ = '>';
7212 header_done = 1;
7213 }
7214
7215 while (allocated < p - document + len + 200)
7216 {
7217 /* Expand to guarantee sufficient storage. */
7218 uintptr_t document_len = p - document;
7219
7220 document = (char *) xrealloc (document, 2 * allocated);
7221 allocated *= 2;
7222 p = document + document_len;
7223 }
7224
7225 std::string name = xml_escape_text ((char *) libname);
7226 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7227 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7228 name.c_str (), (unsigned long) lm_addr,
7229 (unsigned long) l_addr, (unsigned long) l_ld);
7230 }
7231 }
7232
7233 lm_prev = lm_addr;
7234 lm_addr = l_next;
7235 }
7236
7237 if (!header_done)
7238 {
7239 /* Empty list; terminate `<library-list-svr4'. */
7240 strcpy (p, "/>");
7241 }
7242 else
7243 strcpy (p, "</library-list-svr4>");
7244
7245 document_len = strlen (document);
7246 if (offset < document_len)
7247 document_len -= offset;
7248 else
7249 document_len = 0;
7250 if (len > document_len)
7251 len = document_len;
7252
7253 memcpy (readbuf, document + offset, len);
7254 xfree (document);
7255
7256 return len;
7257 }
7258
7259 #ifdef HAVE_LINUX_BTRACE
7260
7261 /* See to_disable_btrace target method. */
7262
7263 static int
7264 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7265 {
7266 enum btrace_error err;
7267
7268 err = linux_disable_btrace (tinfo);
7269 return (err == BTRACE_ERR_NONE ? 0 : -1);
7270 }
7271
7272 /* Encode an Intel Processor Trace configuration. */
7273
7274 static void
7275 linux_low_encode_pt_config (struct buffer *buffer,
7276 const struct btrace_data_pt_config *config)
7277 {
7278 buffer_grow_str (buffer, "<pt-config>\n");
7279
7280 switch (config->cpu.vendor)
7281 {
7282 case CV_INTEL:
7283 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7284 "model=\"%u\" stepping=\"%u\"/>\n",
7285 config->cpu.family, config->cpu.model,
7286 config->cpu.stepping);
7287 break;
7288
7289 default:
7290 break;
7291 }
7292
7293 buffer_grow_str (buffer, "</pt-config>\n");
7294 }
7295
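/* Editor's sketch of the generated fragment (hypothetical CPU):

<pt-config>
<cpu vendor="GenuineIntel" family="6" model="158" stepping="10"/>
</pt-config>
*/
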
7296 /* Encode a raw buffer. */
7297
7298 static void
7299 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7300 unsigned int size)
7301 {
7302 if (size == 0)
7303 return;
7304
7305 /* We use hex encoding - see common/rsp-low.h. */
7306 buffer_grow_str (buffer, "<raw>\n");
7307
7308 while (size-- > 0)
7309 {
7310 char elem[2];
7311
7312 elem[0] = tohex ((*data >> 4) & 0xf);
7313 elem[1] = tohex (*data++ & 0xf);
7314
7315 buffer_grow (buffer, elem, 2);
7316 }
7317
7318 buffer_grow_str (buffer, "</raw>\n");
7319 }
7320
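/* Editor's note (not in the original source): each byte becomes two
lowercase hex digits, high nibble first, so the bytes 0xA5 0x01 appear
inside <raw>...</raw> as "a501". */
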
7321 /* See to_read_btrace target method. */
7322
7323 static int
7324 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7325 enum btrace_read_type type)
7326 {
7327 struct btrace_data btrace;
7328 struct btrace_block *block;
7329 enum btrace_error err;
7330 int i;
7331
7332 btrace_data_init (&btrace);
7333
7334 err = linux_read_btrace (&btrace, tinfo, type);
7335 if (err != BTRACE_ERR_NONE)
7336 {
7337 if (err == BTRACE_ERR_OVERFLOW)
7338 buffer_grow_str0 (buffer, "E.Overflow.");
7339 else
7340 buffer_grow_str0 (buffer, "E.Generic Error.");
7341
7342 goto err;
7343 }
7344
7345 switch (btrace.format)
7346 {
7347 case BTRACE_FORMAT_NONE:
7348 buffer_grow_str0 (buffer, "E.No Trace.");
7349 goto err;
7350
7351 case BTRACE_FORMAT_BTS:
7352 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7353 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7354
7355 for (i = 0;
7356 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7357 i++)
7358 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7359 paddress (block->begin), paddress (block->end));
7360
7361 buffer_grow_str0 (buffer, "</btrace>\n");
7362 break;
7363
7364 case BTRACE_FORMAT_PT:
7365 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7366 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7367 buffer_grow_str (buffer, "<pt>\n");
7368
7369 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7370
7371 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7372 btrace.variant.pt.size);
7373
7374 buffer_grow_str (buffer, "</pt>\n");
7375 buffer_grow_str0 (buffer, "</btrace>\n");
7376 break;
7377
7378 default:
7379 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7380 goto err;
7381 }
7382
7383 btrace_data_fini (&btrace);
7384 return 0;
7385
7386 err:
7387 btrace_data_fini (&btrace);
7388 return -1;
7389 }
7390
7391 /* See to_btrace_conf target method. */
7392
7393 static int
7394 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7395 struct buffer *buffer)
7396 {
7397 const struct btrace_config *conf;
7398
7399 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7400 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7401
7402 conf = linux_btrace_conf (tinfo);
7403 if (conf != NULL)
7404 {
7405 switch (conf->format)
7406 {
7407 case BTRACE_FORMAT_NONE:
7408 break;
7409
7410 case BTRACE_FORMAT_BTS:
7411 buffer_xml_printf (buffer, "<bts");
7412 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7413 buffer_xml_printf (buffer, " />\n");
7414 break;
7415
7416 case BTRACE_FORMAT_PT:
7417 buffer_xml_printf (buffer, "<pt");
7418 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7419 buffer_xml_printf (buffer, "/>\n");
7420 break;
7421 }
7422 }
7423
7424 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7425 return 0;
7426 }
7427 #endif /* HAVE_LINUX_BTRACE */
7428
7429 /* See nat/linux-nat.h. */
7430
7431 ptid_t
7432 current_lwp_ptid (void)
7433 {
7434 return ptid_of (current_thread);
7435 }
7436
7437 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7438
7439 static int
7440 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7441 {
7442 if (the_low_target.breakpoint_kind_from_pc != NULL)
7443 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7444 else
7445 return default_breakpoint_kind_from_pc (pcptr);
7446 }
7447
7448 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7449
7450 static const gdb_byte *
7451 linux_sw_breakpoint_from_kind (int kind, int *size)
7452 {
7453 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7454
7455 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7456 }
7457
7458 /* Implementation of the target_ops method
7459 "breakpoint_kind_from_current_state". */
7460
7461 static int
7462 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7463 {
7464 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7465 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7466 else
7467 return linux_breakpoint_kind_from_pc (pcptr);
7468 }
7469
7470 /* Default implementation of linux_target_ops method "set_pc" for
7471 32-bit pc register which is literally named "pc". */
7472
7473 void
7474 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7475 {
7476 uint32_t newpc = pc;
7477
7478 supply_register_by_name (regcache, "pc", &newpc);
7479 }
7480
7481 /* Default implementation of linux_target_ops method "get_pc" for
7482 32-bit pc register which is literally named "pc". */
7483
7484 CORE_ADDR
7485 linux_get_pc_32bit (struct regcache *regcache)
7486 {
7487 uint32_t pc;
7488
7489 collect_register_by_name (regcache, "pc", &pc);
7490 if (debug_threads)
7491 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7492 return pc;
7493 }
7494
7495 /* Default implementation of linux_target_ops method "set_pc" for
7496 64-bit pc register which is literally named "pc". */
7497
7498 void
7499 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7500 {
7501 uint64_t newpc = pc;
7502
7503 supply_register_by_name (regcache, "pc", &newpc);
7504 }
7505
7506 /* Default implementation of linux_target_ops method "get_pc" for
7507 64-bit pc register which is literally named "pc". */
7508
7509 CORE_ADDR
7510 linux_get_pc_64bit (struct regcache *regcache)
7511 {
7512 uint64_t pc;
7513
7514 collect_register_by_name (regcache, "pc", &pc);
7515 if (debug_threads)
7516 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7517 return pc;
7518 }
7519
7520
7521 static struct target_ops linux_target_ops = {
7522 linux_create_inferior,
7523 linux_post_create_inferior,
7524 linux_attach,
7525 linux_kill,
7526 linux_detach,
7527 linux_mourn,
7528 linux_join,
7529 linux_thread_alive,
7530 linux_resume,
7531 linux_wait,
7532 linux_fetch_registers,
7533 linux_store_registers,
7534 linux_prepare_to_access_memory,
7535 linux_done_accessing_memory,
7536 linux_read_memory,
7537 linux_write_memory,
7538 linux_look_up_symbols,
7539 linux_request_interrupt,
7540 linux_read_auxv,
7541 linux_supports_z_point_type,
7542 linux_insert_point,
7543 linux_remove_point,
7544 linux_stopped_by_sw_breakpoint,
7545 linux_supports_stopped_by_sw_breakpoint,
7546 linux_stopped_by_hw_breakpoint,
7547 linux_supports_stopped_by_hw_breakpoint,
7548 linux_supports_hardware_single_step,
7549 linux_stopped_by_watchpoint,
7550 linux_stopped_data_address,
7551 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7552 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7553 && defined(PT_TEXT_END_ADDR)
7554 linux_read_offsets,
7555 #else
7556 NULL,
7557 #endif
7558 #ifdef USE_THREAD_DB
7559 thread_db_get_tls_address,
7560 #else
7561 NULL,
7562 #endif
7563 linux_qxfer_spu,
7564 hostio_last_error_from_errno,
7565 linux_qxfer_osdata,
7566 linux_xfer_siginfo,
7567 linux_supports_non_stop,
7568 linux_async,
7569 linux_start_non_stop,
7570 linux_supports_multi_process,
7571 linux_supports_fork_events,
7572 linux_supports_vfork_events,
7573 linux_supports_exec_events,
7574 linux_handle_new_gdb_connection,
7575 #ifdef USE_THREAD_DB
7576 thread_db_handle_monitor_command,
7577 #else
7578 NULL,
7579 #endif
7580 linux_common_core_of_thread,
7581 linux_read_loadmap,
7582 linux_process_qsupported,
7583 linux_supports_tracepoints,
7584 linux_read_pc,
7585 linux_write_pc,
7586 linux_thread_stopped,
7587 NULL,
7588 linux_pause_all,
7589 linux_unpause_all,
7590 linux_stabilize_threads,
7591 linux_install_fast_tracepoint_jump_pad,
7592 linux_emit_ops,
7593 linux_supports_disable_randomization,
7594 linux_get_min_fast_tracepoint_insn_len,
7595 linux_qxfer_libraries_svr4,
7596 linux_supports_agent,
7597 #ifdef HAVE_LINUX_BTRACE
7598 linux_supports_btrace,
7599 linux_enable_btrace,
7600 linux_low_disable_btrace,
7601 linux_low_read_btrace,
7602 linux_low_btrace_conf,
7603 #else
7604 NULL,
7605 NULL,
7606 NULL,
7607 NULL,
7608 NULL,
7609 #endif
7610 linux_supports_range_stepping,
7611 linux_proc_pid_to_exec_file,
7612 linux_mntns_open_cloexec,
7613 linux_mntns_unlink,
7614 linux_mntns_readlink,
7615 linux_breakpoint_kind_from_pc,
7616 linux_sw_breakpoint_from_kind,
7617 linux_proc_tid_get_name,
7618 linux_breakpoint_kind_from_current_state,
7619 linux_supports_software_single_step,
7620 linux_supports_catch_syscall,
7621 linux_get_ipa_tdesc_idx,
7622 #ifdef USE_THREAD_DB
7623 thread_db_thread_handle,
7624 #else
7625 NULL,
7626 #endif
7627 };
7628
7629 #ifdef HAVE_LINUX_REGSETS
7630 void
7631 initialize_regsets_info (struct regsets_info *info)
7632 {
7633 for (info->num_regsets = 0;
7634 info->regsets[info->num_regsets].size >= 0;
7635 info->num_regsets++)
7636 ;
7637 }
7638 #endif
7639
7640 void
7641 initialize_low (void)
7642 {
7643 struct sigaction sigchld_action;
7644
7645 memset (&sigchld_action, 0, sizeof (sigchld_action));
7646 set_target_ops (&linux_target_ops);
7647
7648 linux_ptrace_init_warnings ();
7649
7650 sigchld_action.sa_handler = sigchld_handler;
7651 sigemptyset (&sigchld_action.sa_mask);
7652 sigchld_action.sa_flags = SA_RESTART;
7653 sigaction (SIGCHLD, &sigchld_action, NULL);
7654
7655 initialize_low_arch ();
7656
7657 linux_check_ptrace_features ();
7658 }