/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common-inferior.h"
#include "nat/fork-inferior.h"
#include "environ.h"
#include "common/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
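/* Head of the list of new stopped processes described above.  */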
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp,
				    int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

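/* A signal queued for later delivery to an LWP, together with the
   siginfo to deliver with it, chained through the PREV field.  */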
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not ELF at all.  On success,
   store the ELF machine number in *MACHINE; otherwise store EM_NONE
   there.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

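/* Remove LWP's thread from the thread list, free any architecture
   private data attached to it, and free the lwp itself.  */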
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list.
   For a clone we return 1, so the trap is not reported to higher
   layers; fork and vfork events are reported (we return 0).  If we
   see an exec event, we will modify ORIG_EVENT_LWP to point to a new
   LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

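/* Allocate a new lwp_info for PTID, initialize its wait status to
   "ignore", let the low target attach its per-LWP data, and add a
   thread for it to the thread list.  */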
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
		       const std::vector<char *> &program_args)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);

      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

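/* Return true if at most one thread of the process with id PID is
   left in our thread list.  */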
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thread)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

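/* Kill process PID and all of its LWPs, then mourn it.  Return 0 on
   success, or -1 if PID is not a known process.  */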
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

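/* Detach from process PID and from all of its LWPs.  Return 0 on
   success, or -1 if PID is not a known process.  */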
static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

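/* Wait until process PID has exited or been killed, ignoring
   intermediate stops, and stopping early once it is no longer our
   child.  */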
static void
linux_join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

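/* Return the lwp whose lwpid matches PTID's lwp field, or, if PTID
   has no lwp field set, whose lwpid matches PTID's pid field.
   Return NULL if no such lwp is found.  */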
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thread)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thread->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      return callback (lwp, data);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
1892 /* A leader zombie can mean one of two things:
1893
1894 - It exited, and there's an exit status pending, or
1895 only the leader exited (not the whole
1896 program). In the latter case, we can't waitpid the
1897 leader's exit status until all other threads are gone.
1898
1899 - There are 3 or more threads in the group, and a thread
1900 other than the leader exec'd. On an exec, the Linux
1901 kernel destroys all other threads (except the execing
1902 one) in the thread group, and resets the execing thread's
1903 tid to the tgid. No exit notification is sent for the
1904 execing thread -- from the ptracer's perspective, it
1905 appears as though the execing thread just vanishes.
1906 Until we reap all other threads except the leader and the
1907 execing thread, the leader will be zombie, and the
1908 execing thread will be in `D (disc sleep)'. As soon as
1909 all other threads are reaped, the execing thread changes
1910 its tid to the tgid, and the previous (zombie) leader
1911 vanishes, giving place to the "new" leader. We could try
1912 distinguishing the exit and exec cases, by waiting once
1913 more, and seeing if something comes out, but it doesn't
1914 sound useful. The previous leader _does_ go away, and
1915 we'll re-add the new one once we see the exec event
1916 (which is just the same as what would happen if the
1917 previous leader did exit voluntarily before some other
1918 thread execs). */
1919
1920 if (debug_threads)
1921 debug_printf ("CZL: Thread group leader %d zombie "
1922 "(it exited, or another thread execd).\n",
1923 leader_pid);
1924
1925 delete_lwp (leader_lp);
1926 }
1927 });
1928 }
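/* Note: the linux_proc_pid_is_zombie check above lives in
   nat/linux-procfs.c; it works by reading the State: field of
   /proc/PID/status and testing for the zombie ('Z') state.  */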
1929
1930 /* Callback for `find_thread'. Returns the first LWP that is not
1931 stopped. */
1932
1933 static bool
1934 not_stopped_callback (thread_info *thread, ptid_t filter)
1935 {
1936 if (!thread->id.matches (filter))
1937 return false;
1938
1939 lwp_info *lwp = get_thread_lwp (thread);
1940
1941 return !lwp->stopped;
1942 }
1943
1944 /* Increment LWP's suspend count. */
1945
1946 static void
1947 lwp_suspended_inc (struct lwp_info *lwp)
1948 {
1949 lwp->suspended++;
1950
1951 if (debug_threads && lwp->suspended > 4)
1952 {
1953 struct thread_info *thread = get_lwp_thread (lwp);
1954
1955 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1956 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1957 }
1958 }
1959
1960 /* Decrement LWP's suspend count. */
1961
1962 static void
1963 lwp_suspended_decr (struct lwp_info *lwp)
1964 {
1965 lwp->suspended--;
1966
1967 if (lwp->suspended < 0)
1968 {
1969 struct thread_info *thread = get_lwp_thread (lwp);
1970
1971 internal_error (__FILE__, __LINE__,
1972 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1973 lwp->suspended);
1974 }
1975 }
1976
1977 /* This function should only be called if the LWP got a SIGTRAP.
1978
1979 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1980 event was handled, 0 otherwise. */
1981
1982 static int
1983 handle_tracepoints (struct lwp_info *lwp)
1984 {
1985 struct thread_info *tinfo = get_lwp_thread (lwp);
1986 int tpoint_related_event = 0;
1987
1988 gdb_assert (lwp->suspended == 0);
1989
1990 /* If this tracepoint hit causes a tracing stop, we'll immediately
1991 uninsert tracepoints. To do this, we temporarily pause all
1992 threads, unpatch away, and then unpause threads. We need to make
1993 sure the unpausing doesn't resume LWP too. */
1994 lwp_suspended_inc (lwp);
1995
1996 /* And we need to be sure that any all-threads-stopping doesn't try
1997 to move threads out of the jump pads, as it could deadlock the
1998 inferior (LWP could be in the jump pad, maybe even holding the
1999 lock). */
2000
2001 /* Do any necessary step collect actions. */
2002 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2003
2004 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2005
2006 /* See if we just hit a tracepoint and do its main collect
2007 actions. */
2008 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2009
2010 lwp_suspended_decr (lwp);
2011
2012 gdb_assert (lwp->suspended == 0);
2013 gdb_assert (!stabilizing_threads
2014 || (lwp->collecting_fast_tracepoint
2015 != fast_tpoint_collect_result::not_collecting));
2016
2017 if (tpoint_related_event)
2018 {
2019 if (debug_threads)
2020 debug_printf ("got a tracepoint event\n");
2021 return 1;
2022 }
2023
2024 return 0;
2025 }
2026
2027 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2028 collection status. */
2029
2030 static fast_tpoint_collect_result
2031 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2032 struct fast_tpoint_collect_status *status)
2033 {
2034 CORE_ADDR thread_area;
2035 struct thread_info *thread = get_lwp_thread (lwp);
2036
2037 if (the_low_target.get_thread_area == NULL)
2038 return fast_tpoint_collect_result::not_collecting;
2039
2040 /* Get the thread area address. This is used to recognize which
2041 thread is which when tracing with the in-process agent library.
2042 We don't read anything from the address, and treat it as opaque;
2043 it's the address itself that we assume is unique per-thread. */
2044 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2045 return fast_tpoint_collect_result::not_collecting;
2046
2047 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2048 }
2049
2050 /* The reason we resume in the caller is that we want to be able
2051 to pass lwp->status_pending as WSTAT, and we need to clear
2052 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2053 refuses to resume. */
2054
2055 static int
2056 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2057 {
2058 struct thread_info *saved_thread;
2059
2060 saved_thread = current_thread;
2061 current_thread = get_lwp_thread (lwp);
2062
2063 if ((wstat == NULL
2064 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2065 && supports_fast_tracepoints ()
2066 && agent_loaded_p ())
2067 {
2068 struct fast_tpoint_collect_status status;
2069
2070 if (debug_threads)
2071 debug_printf ("Checking whether LWP %ld needs to move out of the "
2072 "jump pad.\n",
2073 lwpid_of (current_thread));
2074
2075 fast_tpoint_collect_result r
2076 = linux_fast_tracepoint_collecting (lwp, &status);
2077
2078 if (wstat == NULL
2079 || (WSTOPSIG (*wstat) != SIGILL
2080 && WSTOPSIG (*wstat) != SIGFPE
2081 && WSTOPSIG (*wstat) != SIGSEGV
2082 && WSTOPSIG (*wstat) != SIGBUS))
2083 {
2084 lwp->collecting_fast_tracepoint = r;
2085
2086 if (r != fast_tpoint_collect_result::not_collecting)
2087 {
2088 if (r == fast_tpoint_collect_result::before_insn
2089 && lwp->exit_jump_pad_bkpt == NULL)
2090 {
2091 /* Haven't executed the original instruction yet.
2092 Set breakpoint there, and wait till it's hit,
2093 then single-step until exiting the jump pad. */
2094 lwp->exit_jump_pad_bkpt
2095 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2096 }
2097
2098 if (debug_threads)
2099 debug_printf ("Checking whether LWP %ld needs to move out of "
2100 "the jump pad...it does\n",
2101 lwpid_of (current_thread));
2102 current_thread = saved_thread;
2103
2104 return 1;
2105 }
2106 }
2107 else
2108 {
2109 /* If we get a synchronous signal while collecting, *and*
2110 while executing the (relocated) original instruction,
2111 reset the PC to point at the tpoint address, before
2112 reporting to GDB. Otherwise, it's an IPA lib bug: just
2113 report the signal to GDB, and pray for the best. */
2114
2115 lwp->collecting_fast_tracepoint
2116 = fast_tpoint_collect_result::not_collecting;
2117
2118 if (r != fast_tpoint_collect_result::not_collecting
2119 && (status.adjusted_insn_addr <= lwp->stop_pc
2120 && lwp->stop_pc < status.adjusted_insn_addr_end))
2121 {
2122 siginfo_t info;
2123 struct regcache *regcache;
2124
2125 /* The si_addr on a few signals references the address
2126 of the faulting instruction. Adjust that as
2127 well. */
2128 if ((WSTOPSIG (*wstat) == SIGILL
2129 || WSTOPSIG (*wstat) == SIGFPE
2130 || WSTOPSIG (*wstat) == SIGBUS
2131 || WSTOPSIG (*wstat) == SIGSEGV)
2132 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2133 (PTRACE_TYPE_ARG3) 0, &info) == 0
2134 /* Final check just to make sure we don't clobber
2135 the siginfo of non-kernel-sent signals. */
2136 && (uintptr_t) info.si_addr == lwp->stop_pc)
2137 {
2138 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2139 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2140 (PTRACE_TYPE_ARG3) 0, &info);
2141 }
2142
2143 regcache = get_thread_regcache (current_thread, 1);
2144 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2145 lwp->stop_pc = status.tpoint_addr;
2146
2147 /* Cancel any fast tracepoint lock this thread was
2148 holding. */
2149 force_unlock_trace_buffer ();
2150 }
2151
2152 if (lwp->exit_jump_pad_bkpt != NULL)
2153 {
2154 if (debug_threads)
2155 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2156 "stopping all threads momentarily.\n");
2157
2158 stop_all_lwps (1, lwp);
2159
2160 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2161 lwp->exit_jump_pad_bkpt = NULL;
2162
2163 unstop_all_lwps (1, lwp);
2164
2165 gdb_assert (lwp->suspended >= 0);
2166 }
2167 }
2168 }
2169
2170 if (debug_threads)
2171 debug_printf ("Checking whether LWP %ld needs to move out of the "
2172 "jump pad...no\n",
2173 lwpid_of (current_thread));
2174
2175 current_thread = saved_thread;
2176 return 0;
2177 }
2178
2179 /* Enqueue one signal in the "signals to report later when out of the
2180 jump pad" list. */
2181
2182 static void
2183 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2184 {
2185 struct pending_signals *p_sig;
2186 struct thread_info *thread = get_lwp_thread (lwp);
2187
2188 if (debug_threads)
2189 debug_printf ("Deferring signal %d for LWP %ld.\n",
2190 WSTOPSIG (*wstat), lwpid_of (thread));
2191
2192 if (debug_threads)
2193 {
2194 struct pending_signals *sig;
2195
2196 for (sig = lwp->pending_signals_to_report;
2197 sig != NULL;
2198 sig = sig->prev)
2199 debug_printf (" Already queued %d\n",
2200 sig->signal);
2201
2202 debug_printf (" (no more currently queued signals)\n");
2203 }
2204
2205 /* Don't enqueue non-RT signals if they are already in the deferred
2206 queue. (SIGSTOP being the easiest signal to see ending up here
2207 twice) */
2208 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2209 {
2210 struct pending_signals *sig;
2211
2212 for (sig = lwp->pending_signals_to_report;
2213 sig != NULL;
2214 sig = sig->prev)
2215 {
2216 if (sig->signal == WSTOPSIG (*wstat))
2217 {
2218 if (debug_threads)
2219 debug_printf ("Not requeuing already queued non-RT signal %d"
2220 " for LWP %ld\n",
2221 sig->signal,
2222 lwpid_of (thread));
2223 return;
2224 }
2225 }
2226 }
2227
2228 p_sig = XCNEW (struct pending_signals);
2229 p_sig->prev = lwp->pending_signals_to_report;
2230 p_sig->signal = WSTOPSIG (*wstat);
2231
2232 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2233 &p_sig->info);
2234
2235 lwp->pending_signals_to_report = p_sig;
2236 }
2237
2238 /* Dequeue one signal from the "signals to report later when out of
2239 the jump pad" list. */
2240
2241 static int
2242 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2243 {
2244 struct thread_info *thread = get_lwp_thread (lwp);
2245
2246 if (lwp->pending_signals_to_report != NULL)
2247 {
2248 struct pending_signals **p_sig;
2249
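/* Signals are enqueued at the head of the list, with each `prev'
   link pointing at the next-older entry.  Walk down to the oldest
   entry, so deferred signals are reported in FIFO order.  */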
2250 p_sig = &lwp->pending_signals_to_report;
2251 while ((*p_sig)->prev != NULL)
2252 p_sig = &(*p_sig)->prev;
2253
2254 *wstat = W_STOPCODE ((*p_sig)->signal);
2255 if ((*p_sig)->info.si_signo != 0)
2256 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2257 &(*p_sig)->info);
2258 free (*p_sig);
2259 *p_sig = NULL;
2260
2261 if (debug_threads)
2262 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2263 WSTOPSIG (*wstat), lwpid_of (thread));
2264
2265 if (debug_threads)
2266 {
2267 struct pending_signals *sig;
2268
2269 for (sig = lwp->pending_signals_to_report;
2270 sig != NULL;
2271 sig = sig->prev)
2272 debug_printf (" Still queued %d\n",
2273 sig->signal);
2274
2275 debug_printf (" (no more queued signals)\n");
2276 }
2277
2278 return 1;
2279 }
2280
2281 return 0;
2282 }
2283
2284 /* Fetch the possibly triggered data watchpoint info and store it in
2285 CHILD.
2286
2287 On some archs, like x86, that use debug registers to set
2288 watchpoints, it's possible that the way to know which watched
2289 address trapped, is to check the register that is used to select
2290 which address to watch. Problem is, between setting the watchpoint
2291 and reading back which data address trapped, the user may change
2292 the set of watchpoints, and, as a consequence, GDB changes the
2293 debug registers in the inferior. To avoid reading back a stale
2294 stopped-data-address when that happens, we cache in CHILD the fact
2295 that a watchpoint trapped, and the corresponding data address, as
2296 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2297 registers meanwhile, we have the cached data we can rely on. */
2298
2299 static int
2300 check_stopped_by_watchpoint (struct lwp_info *child)
2301 {
2302 if (the_low_target.stopped_by_watchpoint != NULL)
2303 {
2304 struct thread_info *saved_thread;
2305
2306 saved_thread = current_thread;
2307 current_thread = get_lwp_thread (child);
2308
2309 if (the_low_target.stopped_by_watchpoint ())
2310 {
2311 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2312
2313 if (the_low_target.stopped_data_address != NULL)
2314 child->stopped_data_address
2315 = the_low_target.stopped_data_address ();
2316 else
2317 child->stopped_data_address = 0;
2318 }
2319
2320 current_thread = saved_thread;
2321 }
2322
2323 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2324 }
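/* For illustration only -- the real implementations of these hooks
   live in the per-architecture low target (e.g. linux-x86-low.c).
   On x86, a minimal sketch could inspect the DR6 debug status
   register, whose low four bits record which of the DR0-DR3 debug
   registers triggered:

     static int
     example_stopped_by_watchpoint (void)
     {
       long dr6 = example_read_debug_reg (6);
       return (dr6 & 0xf) != 0;
     }

   `example_read_debug_reg' is a stand-in name for the architecture's
   debug-register accessor, not an actual gdbserver function.  */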
2325
2326 /* Return the ptrace options that we want to try to enable. */
2327
2328 static int
2329 linux_low_ptrace_options (int attached)
2330 {
2331 int options = 0;
2332
2333 if (!attached)
2334 options |= PTRACE_O_EXITKILL;
2335
2336 if (report_fork_events)
2337 options |= PTRACE_O_TRACEFORK;
2338
2339 if (report_vfork_events)
2340 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2341
2342 if (report_exec_events)
2343 options |= PTRACE_O_TRACEEXEC;
2344
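/* PTRACE_O_TRACESYSGOOD makes the kernel flag syscall stops with bit
   0x80 in the stop signal (i.e. SIGTRAP | 0x80), which is how the
   SYSCALL_SIGTRAP checks elsewhere in this file tell syscall stops
   apart from ordinary SIGTRAPs.  */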
2345 options |= PTRACE_O_TRACESYSGOOD;
2346
2347 return options;
2348 }
2349
2350 /* Do low-level handling of the event, and check if we should go on
2351 and pass it to caller code. Return the affected lwp if we should, or
2352 NULL otherwise. */
2353
2354 static struct lwp_info *
2355 linux_low_filter_event (int lwpid, int wstat)
2356 {
2357 struct lwp_info *child;
2358 struct thread_info *thread;
2359 int have_stop_pc = 0;
2360
2361 child = find_lwp_pid (pid_to_ptid (lwpid));
2362
2363 /* Check for stop events reported by a process we didn't already
2364 know about - anything not already in our LWP list.
2365
2366 If we're expecting to receive stopped processes after
2367 fork, vfork, and clone events, then we'll just add the
2368 new one to our list and go back to waiting for the event
2369 to be reported - the stopped process might be returned
2370 from waitpid before or after the event is.
2371
2372 But note the case of a non-leader thread exec'ing after the
2373 leader has exited and gone from our lists (because
2374 check_zombie_leaders deleted it). The non-leader thread
2375 changes its tid to the tgid. */
2376
2377 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2378 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2379 {
2380 ptid_t child_ptid;
2381
2382 /* A multi-thread exec after we had seen the leader exiting. */
2383 if (debug_threads)
2384 {
2385 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2386 "after exec.\n", lwpid);
2387 }
2388
2389 child_ptid = ptid_build (lwpid, lwpid, 0);
2390 child = add_lwp (child_ptid);
2391 child->stopped = 1;
2392 current_thread = child->thread;
2393 }
2394
2395 /* If we didn't find a process, one of two things presumably happened:
2396 - A process we started and then detached from has exited. Ignore it.
2397 - A process we are controlling has forked and the new child's stop
2398 was reported to us by the kernel. Save its PID. */
2399 if (child == NULL && WIFSTOPPED (wstat))
2400 {
2401 add_to_pid_list (&stopped_pids, lwpid, wstat);
2402 return NULL;
2403 }
2404 else if (child == NULL)
2405 return NULL;
2406
2407 thread = get_lwp_thread (child);
2408
2409 child->stopped = 1;
2410
2411 child->last_status = wstat;
2412
2413 /* Check if the thread has exited. */
2414 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2415 {
2416 if (debug_threads)
2417 debug_printf ("LLFE: %d exited.\n", lwpid);
2418
2419 if (finish_step_over (child))
2420 {
2421 /* Unsuspend all other LWPs, and set them back running again. */
2422 unsuspend_all_lwps (child);
2423 }
2424
2425 /* If there is at least one more LWP, then the exit signal was
2426 not the end of the debugged application and should be
2427 ignored, unless GDB wants to hear about thread exits. */
2428 if (report_thread_events
2429 || last_thread_of_process_p (pid_of (thread)))
2430 {
2431 /* Events are serialized to the GDB core, and we can't
2432 report this one right now. Leave the status pending for
2433 the next time we're able to report it. */
2434 mark_lwp_dead (child, wstat);
2435 return child;
2436 }
2437 else
2438 {
2439 delete_lwp (child);
2440 return NULL;
2441 }
2442 }
2443
2444 gdb_assert (WIFSTOPPED (wstat));
2445
2446 if (WIFSTOPPED (wstat))
2447 {
2448 struct process_info *proc;
2449
2450 /* Architecture-specific setup after inferior is running. */
2451 proc = find_process_pid (pid_of (thread));
2452 if (proc->tdesc == NULL)
2453 {
2454 if (proc->attached)
2455 {
2456 /* This needs to happen after we have attached to the
2457 inferior and it is stopped for the first time, but
2458 before we access any inferior registers. */
2459 linux_arch_setup_thread (thread);
2460 }
2461 else
2462 {
2463 /* The process is started, but GDBserver will do
2464 architecture-specific setup after the program stops at
2465 the first instruction. */
2466 child->status_pending_p = 1;
2467 child->status_pending = wstat;
2468 return child;
2469 }
2470 }
2471 }
2472
2473 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2474 {
2475 struct process_info *proc = find_process_pid (pid_of (thread));
2476 int options = linux_low_ptrace_options (proc->attached);
2477
2478 linux_enable_event_reporting (lwpid, options);
2479 child->must_set_ptrace_flags = 0;
2480 }
2481
2482 /* Always update syscall_state, even if it will be filtered later. */
2483 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2484 {
2485 child->syscall_state
2486 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2487 ? TARGET_WAITKIND_SYSCALL_RETURN
2488 : TARGET_WAITKIND_SYSCALL_ENTRY);
2489 }
2490 else
2491 {
2492 /* Almost all other ptrace-stops are known to be outside of system
2493 calls, with further exceptions in handle_extended_wait. */
2494 child->syscall_state = TARGET_WAITKIND_IGNORE;
2495 }
2496
2497 /* Be careful to not overwrite stop_pc until save_stop_reason is
2498 called. */
2499 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2500 && linux_is_extended_waitstatus (wstat))
2501 {
2502 child->stop_pc = get_pc (child);
2503 if (handle_extended_wait (&child, wstat))
2504 {
2505 /* The event has been handled, so just return without
2506 reporting it. */
2507 return NULL;
2508 }
2509 }
2510
2511 if (linux_wstatus_maybe_breakpoint (wstat))
2512 {
2513 if (save_stop_reason (child))
2514 have_stop_pc = 1;
2515 }
2516
2517 if (!have_stop_pc)
2518 child->stop_pc = get_pc (child);
2519
2520 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2521 && child->stop_expected)
2522 {
2523 if (debug_threads)
2524 debug_printf ("Expected stop.\n");
2525 child->stop_expected = 0;
2526
2527 if (thread->last_resume_kind == resume_stop)
2528 {
2529 /* We want to report the stop to the core. Treat the
2530 SIGSTOP as a normal event. */
2531 if (debug_threads)
2532 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2533 target_pid_to_str (ptid_of (thread)));
2534 }
2535 else if (stopping_threads != NOT_STOPPING_THREADS)
2536 {
2537 /* Stopping threads. We don't want this SIGSTOP to end up
2538 pending. */
2539 if (debug_threads)
2540 debug_printf ("LLW: SIGSTOP caught for %s "
2541 "while stopping threads.\n",
2542 target_pid_to_str (ptid_of (thread)));
2543 return NULL;
2544 }
2545 else
2546 {
2547 /* This is a delayed SIGSTOP. Filter out the event. */
2548 if (debug_threads)
2549 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2550 child->stepping ? "step" : "continue",
2551 target_pid_to_str (ptid_of (thread)));
2552
2553 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2554 return NULL;
2555 }
2556 }
2557
2558 child->status_pending_p = 1;
2559 child->status_pending = wstat;
2560 return child;
2561 }
2562
2563 /* Return true if THREAD is doing hardware single step. */
2564
2565 static int
2566 maybe_hw_step (struct thread_info *thread)
2567 {
2568 if (can_hardware_single_step ())
2569 return 1;
2570 else
2571 {
2572 /* GDBserver must insert single-step breakpoint for software
2573 single step. */
2574 gdb_assert (has_single_step_breakpoints (thread));
2575 return 0;
2576 }
2577 }
2578
2579 /* Resume LWPs that are currently stopped without any pending status
2580 to report, but are resumed from the core's perspective. */
2581
2582 static void
2583 resume_stopped_resumed_lwps (thread_info *thread)
2584 {
2585 struct lwp_info *lp = get_thread_lwp (thread);
2586
2587 if (lp->stopped
2588 && !lp->suspended
2589 && !lp->status_pending_p
2590 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2591 {
2592 int step = 0;
2593
2594 if (thread->last_resume_kind == resume_step)
2595 step = maybe_hw_step (thread);
2596
2597 if (debug_threads)
2598 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2599 target_pid_to_str (ptid_of (thread)),
2600 paddress (lp->stop_pc),
2601 step);
2602
2603 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2604 }
2605 }
2606
2607 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2608 match FILTER_PTID (leaving others pending). The PTIDs can be:
2609 minus_one_ptid, to specify any child; a pid PTID, specifying all
2610 lwps of a thread group; or a PTID representing a single lwp. Store
2611 the stop status through the status pointer WSTAT. OPTIONS is
2612 passed to the waitpid call. Return 0 if no event was found and
2613 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2614 were found. Return the PID of the stopped child otherwise. */
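/* For example, linux_wait_for_event_filtered (minus_one_ptid,
   minus_one_ptid, &w, WNOHANG) polls for an event from any child
   without blocking, while passing a specific lwp PTID as FILTER_PTID
   waits for an event from that lwp alone, leaving others pending.  */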
2615
2616 static int
2617 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2618 int *wstatp, int options)
2619 {
2620 struct thread_info *event_thread;
2621 struct lwp_info *event_child, *requested_child;
2622 sigset_t block_mask, prev_mask;
2623
2624 retry:
2625 /* N.B. event_thread points to the thread_info struct that contains
2626 event_child. Keep them in sync. */
2627 event_thread = NULL;
2628 event_child = NULL;
2629 requested_child = NULL;
2630
2631 /* Check for a lwp with a pending status. */
2632
2633 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2634 {
2635 event_thread = find_thread_in_random ([&] (thread_info *thread)
2636 {
2637 return status_pending_p_callback (thread, filter_ptid);
2638 });
2639
2640 if (event_thread != NULL)
2641 event_child = get_thread_lwp (event_thread);
2642 if (debug_threads && event_thread)
2643 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2644 }
2645 else if (!ptid_equal (filter_ptid, null_ptid))
2646 {
2647 requested_child = find_lwp_pid (filter_ptid);
2648
2649 if (stopping_threads == NOT_STOPPING_THREADS
2650 && requested_child->status_pending_p
2651 && (requested_child->collecting_fast_tracepoint
2652 != fast_tpoint_collect_result::not_collecting))
2653 {
2654 enqueue_one_deferred_signal (requested_child,
2655 &requested_child->status_pending);
2656 requested_child->status_pending_p = 0;
2657 requested_child->status_pending = 0;
2658 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2659 }
2660
2661 if (requested_child->suspended
2662 && requested_child->status_pending_p)
2663 {
2664 internal_error (__FILE__, __LINE__,
2665 "requesting an event out of a"
2666 " suspended child?");
2667 }
2668
2669 if (requested_child->status_pending_p)
2670 {
2671 event_child = requested_child;
2672 event_thread = get_lwp_thread (event_child);
2673 }
2674 }
2675
2676 if (event_child != NULL)
2677 {
2678 if (debug_threads)
2679 debug_printf ("Got an event from pending child %ld (%04x)\n",
2680 lwpid_of (event_thread), event_child->status_pending);
2681 *wstatp = event_child->status_pending;
2682 event_child->status_pending_p = 0;
2683 event_child->status_pending = 0;
2684 current_thread = event_thread;
2685 return lwpid_of (event_thread);
2686 }
2687
2688 /* But if we don't find a pending event, we'll have to wait.
2689
2690 We only enter this loop if no process has a pending wait status.
2691 Thus any action taken in response to a wait status inside this
2692 loop is responding as soon as we detect the status, not after any
2693 pending events. */
2694
2695 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2696 all signals while here. */
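/* Without the blocking, a SIGCHLD arriving between the waitpid call
   in the loop below and the later sigsuspend would be delivered
   early, and sigsuspend could then sleep forever waiting for a
   signal that has already come and gone.  sigsuspend atomically
   unblocks and waits, closing that race.  */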
2697 sigfillset (&block_mask);
2698 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2699
2700 /* Always pull all events out of the kernel. We'll randomly select
2701 an event LWP out of all that have events, to prevent
2702 starvation. */
2703 while (event_child == NULL)
2704 {
2705 pid_t ret = 0;
2706
2707 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2708 quirks:
2709
2710 - If the thread group leader exits while other threads in the
2711 thread group still exist, waitpid(TGID, ...) hangs. That
2712 waitpid won't return an exit status until the other threads
2713 in the group are reaped.
2714
2715 - When a non-leader thread execs, that thread just vanishes
2716 without reporting an exit (so we'd hang if we waited for it
2717 explicitly in that case). The exec event is reported to
2718 the TGID pid. */
2719 errno = 0;
2720 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2721
2722 if (debug_threads)
2723 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2724 ret, errno ? strerror (errno) : "ERRNO-OK");
2725
2726 if (ret > 0)
2727 {
2728 if (debug_threads)
2729 {
2730 debug_printf ("LLW: waitpid %ld received %s\n",
2731 (long) ret, status_to_str (*wstatp));
2732 }
2733
2734 /* Filter all events. IOW, leave all events pending. We'll
2735 randomly select an event LWP out of all that have events
2736 below. */
2737 linux_low_filter_event (ret, *wstatp);
2738 /* Retry until nothing comes out of waitpid. A single
2739 SIGCHLD can indicate more than one child stopped. */
2740 continue;
2741 }
2742
2743 /* Now that we've pulled all events out of the kernel, resume
2744 LWPs that don't have an interesting event to report. */
2745 if (stopping_threads == NOT_STOPPING_THREADS)
2746 for_each_thread (resume_stopped_resumed_lwps);
2747
2748 /* ... and find an LWP with a status to report to the core, if
2749 any. */
2750 event_thread = find_thread_in_random ([&] (thread_info *thread)
2751 {
2752 return status_pending_p_callback (thread, filter_ptid);
2753 });
2754
2755 if (event_thread != NULL)
2756 {
2757 event_child = get_thread_lwp (event_thread);
2758 *wstatp = event_child->status_pending;
2759 event_child->status_pending_p = 0;
2760 event_child->status_pending = 0;
2761 break;
2762 }
2763
2764 /* Check for zombie thread group leaders. Those can't be reaped
2765 until all other threads in the thread group are. */
2766 check_zombie_leaders ();
2767
2768 auto not_stopped = [&] (thread_info *thread)
2769 {
2770 return not_stopped_callback (thread, wait_ptid);
2771 };
2772
2773 /* If there are no resumed children left in the set of LWPs we
2774 want to wait for, bail. We can't just block in
2775 waitpid/sigsuspend, because lwps might have been left stopped
2776 in trace-stop state, and we'd be stuck forever waiting for
2777 their status to change (which would only happen if we resumed
2778 them). Even if WNOHANG is set, this return code is preferred
2779 over 0 (below), as it is more detailed. */
2780 if (find_thread (not_stopped) == NULL)
2781 {
2782 if (debug_threads)
2783 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2784 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2785 return -1;
2786 }
2787
2788 /* No interesting event to report to the caller. */
2789 if ((options & WNOHANG))
2790 {
2791 if (debug_threads)
2792 debug_printf ("WNOHANG set, no event found\n");
2793
2794 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2795 return 0;
2796 }
2797
2798 /* Block until we get an event reported with SIGCHLD. */
2799 if (debug_threads)
2800 debug_printf ("sigsuspend'ing\n");
2801
2802 sigsuspend (&prev_mask);
2803 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2804 goto retry;
2805 }
2806
2807 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2808
2809 current_thread = event_thread;
2810
2811 return lwpid_of (event_thread);
2812 }
2813
2814 /* Wait for an event from child(ren) PTID. PTIDs can be:
2815 minus_one_ptid, to specify any child; a pid PTID, specifying all
2816 lwps of a thread group; or a PTID representing a single lwp. Store
2817 the stop status through the status pointer WSTAT. OPTIONS is
2818 passed to the waitpid call. Return 0 if no event was found and
2819 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2820 were found. Return the PID of the stopped child otherwise. */
2821
2822 static int
2823 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2824 {
2825 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2826 }
2827
2828 /* Select one LWP out of those that have events pending. */
2829
2830 static void
2831 select_event_lwp (struct lwp_info **orig_lp)
2832 {
2833 int random_selector;
2834 struct thread_info *event_thread = NULL;
2835
2836 /* In all-stop, give preference to the LWP that is being
2837 single-stepped. There will be at most one, and it's the LWP that
2838 the core is most interested in. If we didn't do this, then we'd
2839 have to handle pending step SIGTRAPs somehow in case the core
2840 later continues the previously-stepped thread, otherwise we'd
2841 report the pending SIGTRAP, and the core, not having stepped the
2842 thread, wouldn't understand what the trap was for, and therefore
2843 would report it to the user as a random signal. */
2844 if (!non_stop)
2845 {
2846 event_thread = find_thread ([] (thread_info *thread)
2847 {
2848 lwp_info *lp = get_thread_lwp (thread);
2849
2850 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2851 && thread->last_resume_kind == resume_step
2852 && lp->status_pending_p);
2853 });
2854
2855 if (event_thread != NULL)
2856 {
2857 if (debug_threads)
2858 debug_printf ("SEL: Select single-step %s\n",
2859 target_pid_to_str (ptid_of (event_thread)));
2860 }
2861 }
2862 if (event_thread == NULL)
2863 {
2864 /* No single-stepping LWP. Select one at random, out of those
2865 which have had events. */
2866
2867 /* First see how many events we have. */
2868 int num_events = 0;
2869 for_each_thread ([&] (thread_info *thread)
2870 {
2871 lwp_info *lp = get_thread_lwp (thread);
2872
2873 /* Count only resumed LWPs that have an event pending. */
2874 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2875 && lp->status_pending_p)
2876 num_events++;
2877 });
2878 gdb_assert (num_events > 0);
2879
2880 /* Now randomly pick a LWP out of those that have had
2881 events. */
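/* rand () / (RAND_MAX + 1.0) is uniform over [0, 1), so scaling by
   NUM_EVENTS and truncating yields an index uniform over
   [0, NUM_EVENTS).  E.g. with three pending events the selector is
   0, 1 or 2, each (nearly) equally likely.  */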
2882 random_selector = (int)
2883 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2884
2885 if (debug_threads && num_events > 1)
2886 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2887 num_events, random_selector);
2888
2889 event_thread = find_thread ([&] (thread_info *thread)
2890 {
2891 lwp_info *lp = get_thread_lwp (thread);
2892
2893 /* Select only resumed LWPs that have an event pending. */
2894 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2895 && lp->status_pending_p)
2896 if (random_selector-- == 0)
2897 return true;
2898
2899 return false;
2900 });
2901 }
2902
2903 if (event_thread != NULL)
2904 {
2905 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2906
2907 /* Switch the event LWP. */
2908 *orig_lp = event_lp;
2909 }
2910 }
2911
2912 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2913 non-NULL. */
2914
2915 static void
2916 unsuspend_all_lwps (struct lwp_info *except)
2917 {
2918 for_each_thread ([&] (thread_info *thread)
2919 {
2920 lwp_info *lwp = get_thread_lwp (thread);
2921
2922 if (lwp != except)
2923 lwp_suspended_decr (lwp);
2924 });
2925 }
2926
2927 static void move_out_of_jump_pad_callback (thread_info *thread);
2928 static bool stuck_in_jump_pad_callback (thread_info *thread);
2929 static bool lwp_running (thread_info *thread);
2930 static ptid_t linux_wait_1 (ptid_t ptid,
2931 struct target_waitstatus *ourstatus,
2932 int target_options);
2933
2934 /* Stabilize threads (move out of jump pads).
2935
2936 If a thread is midway collecting a fast tracepoint, we need to
2937 finish the collection and move it out of the jump pad before
2938 reporting the signal.
2939
2940 This avoids recursion while collecting (when a signal arrives
2941 midway, and the signal handler itself collects), which would trash
2942 the trace buffer. In case the user set a breakpoint in a signal
2943 handler, this avoids the backtrace showing the jump pad, etc.
2944 Most importantly, there are certain things we can't do safely if
2945 threads are stopped in a jump pad (or in its callees). For
2946 example:
2947
2948 - starting a new trace run. A thread still collecting the
2949 previous run could trash the trace buffer when resumed. The trace
2950 buffer control structures would have been reset but the thread had
2951 no way to tell. The thread could even be midway through memcpy'ing to the
2952 buffer, which would mean that when resumed, it would clobber the
2953 trace buffer that had been set for a new run.
2954
2955 - we can't rewrite/reuse the jump pads for new tracepoints
2956 safely. Say you do tstart while a thread is stopped midway through
2957 collecting. When the thread is later resumed, it finishes the
2958 collection, and returns to the jump pad, to execute the original
2959 instruction that was under the tracepoint jump at the time the
2960 older run had been started. If the jump pad had been rewritten
2961 since for something else in the new run, the thread would now
2962 execute the wrong / random instructions. */
2963
2964 static void
2965 linux_stabilize_threads (void)
2966 {
2967 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2968
2969 if (thread_stuck != NULL)
2970 {
2971 if (debug_threads)
2972 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2973 lwpid_of (thread_stuck));
2974 return;
2975 }
2976
2977 thread_info *saved_thread = current_thread;
2978
2979 stabilizing_threads = 1;
2980
2981 /* Kick 'em all. */
2982 for_each_thread (move_out_of_jump_pad_callback);
2983
2984 /* Loop until all are stopped out of the jump pads. */
2985 while (find_thread (lwp_running) != NULL)
2986 {
2987 struct target_waitstatus ourstatus;
2988 struct lwp_info *lwp;
2989 int wstat;
2990
2991 /* Note that we go through the full wait event loop. While
2992 moving threads out of jump pad, we need to be able to step
2993 over internal breakpoints and such. */
2994 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2995
2996 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2997 {
2998 lwp = get_thread_lwp (current_thread);
2999
3000 /* Lock it. */
3001 lwp_suspended_inc (lwp);
3002
3003 if (ourstatus.value.sig != GDB_SIGNAL_0
3004 || current_thread->last_resume_kind == resume_stop)
3005 {
3006 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3007 enqueue_one_deferred_signal (lwp, &wstat);
3008 }
3009 }
3010 }
3011
3012 unsuspend_all_lwps (NULL);
3013
3014 stabilizing_threads = 0;
3015
3016 current_thread = saved_thread;
3017
3018 if (debug_threads)
3019 {
3020 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3021
3022 if (thread_stuck != NULL)
3023 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3024 lwpid_of (thread_stuck));
3025 }
3026 }
3027
3028 /* Convenience function that is called when the kernel reports an
3029 event that is not passed out to GDB. */
3030
3031 static ptid_t
3032 ignore_event (struct target_waitstatus *ourstatus)
3033 {
3034 /* If we got an event, there may still be others, as a single
3035 SIGCHLD can indicate more than one child stopped. This forces
3036 another target_wait call. */
3037 async_file_mark ();
3038
3039 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3040 return null_ptid;
3041 }
3042
3043 /* Convenience function that is called when the kernel reports an exit
3044 event. This decides whether to report the event to GDB as a
3045 process exit event, a thread exit event, or to suppress the
3046 event. */
3047
3048 static ptid_t
3049 filter_exit_event (struct lwp_info *event_child,
3050 struct target_waitstatus *ourstatus)
3051 {
3052 struct thread_info *thread = get_lwp_thread (event_child);
3053 ptid_t ptid = ptid_of (thread);
3054
3055 if (!last_thread_of_process_p (pid_of (thread)))
3056 {
3057 if (report_thread_events)
3058 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3059 else
3060 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3061
3062 delete_lwp (event_child);
3063 }
3064 return ptid;
3065 }
3066
3067 /* Returns 1 if GDB is interested in any event_child syscalls. */
3068
3069 static int
3070 gdb_catching_syscalls_p (struct lwp_info *event_child)
3071 {
3072 struct thread_info *thread = get_lwp_thread (event_child);
3073 struct process_info *proc = get_thread_process (thread);
3074
3075 return !proc->syscalls_to_catch.empty ();
3076 }
3077
3078 /* Returns 1 if GDB is interested in the event_child syscall.
3079 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3080
3081 static int
3082 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3083 {
3084 int sysno;
3085 struct thread_info *thread = get_lwp_thread (event_child);
3086 struct process_info *proc = get_thread_process (thread);
3087
3088 if (proc->syscalls_to_catch.empty ())
3089 return 0;
3090
3091 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3092 return 1;
3093
3094 get_syscall_trapinfo (event_child, &sysno);
3095
3096 for (int iter : proc->syscalls_to_catch)
3097 if (iter == sysno)
3098 return 1;
3099
3100 return 0;
3101 }
3102
3103 /* Wait for process, returns status. */
3104
3105 static ptid_t
3106 linux_wait_1 (ptid_t ptid,
3107 struct target_waitstatus *ourstatus, int target_options)
3108 {
3109 int w;
3110 struct lwp_info *event_child;
3111 int options;
3112 int pid;
3113 int step_over_finished;
3114 int bp_explains_trap;
3115 int maybe_internal_trap;
3116 int report_to_gdb;
3117 int trace_event;
3118 int in_step_range;
3119 int any_resumed;
3120
3121 if (debug_threads)
3122 {
3123 debug_enter ();
3124 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3125 }
3126
3127 /* Translate generic target options into linux options. */
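/* __WALL asks waitpid to report events from all children -- both
   clone children (threads) and regular children -- regardless of
   how they were created.  */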
3128 options = __WALL;
3129 if (target_options & TARGET_WNOHANG)
3130 options |= WNOHANG;
3131
3132 bp_explains_trap = 0;
3133 trace_event = 0;
3134 in_step_range = 0;
3135 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3136
3137 auto status_pending_p_any = [&] (thread_info *thread)
3138 {
3139 return status_pending_p_callback (thread, minus_one_ptid);
3140 };
3141
3142 auto not_stopped = [&] (thread_info *thread)
3143 {
3144 return not_stopped_callback (thread, minus_one_ptid);
3145 };
3146
3147 /* Find a resumed LWP, if any. */
3148 if (find_thread (status_pending_p_any) != NULL)
3149 any_resumed = 1;
3150 else if (find_thread (not_stopped) != NULL)
3151 any_resumed = 1;
3152 else
3153 any_resumed = 0;
3154
3155 if (ptid_equal (step_over_bkpt, null_ptid))
3156 pid = linux_wait_for_event (ptid, &w, options);
3157 else
3158 {
3159 if (debug_threads)
3160 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3161 target_pid_to_str (step_over_bkpt));
3162 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3163 }
3164
3165 if (pid == 0 || (pid == -1 && !any_resumed))
3166 {
3167 gdb_assert (target_options & TARGET_WNOHANG);
3168
3169 if (debug_threads)
3170 {
3171 debug_printf ("linux_wait_1 ret = null_ptid, "
3172 "TARGET_WAITKIND_IGNORE\n");
3173 debug_exit ();
3174 }
3175
3176 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3177 return null_ptid;
3178 }
3179 else if (pid == -1)
3180 {
3181 if (debug_threads)
3182 {
3183 debug_printf ("linux_wait_1 ret = null_ptid, "
3184 "TARGET_WAITKIND_NO_RESUMED\n");
3185 debug_exit ();
3186 }
3187
3188 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3189 return null_ptid;
3190 }
3191
3192 event_child = get_thread_lwp (current_thread);
3193
3194 /* linux_wait_for_event only returns an exit status for the last
3195 child of a process. Report it. */
3196 if (WIFEXITED (w) || WIFSIGNALED (w))
3197 {
3198 if (WIFEXITED (w))
3199 {
3200 ourstatus->kind = TARGET_WAITKIND_EXITED;
3201 ourstatus->value.integer = WEXITSTATUS (w);
3202
3203 if (debug_threads)
3204 {
3205 debug_printf ("linux_wait_1 ret = %s, exited with "
3206 "retcode %d\n",
3207 target_pid_to_str (ptid_of (current_thread)),
3208 WEXITSTATUS (w));
3209 debug_exit ();
3210 }
3211 }
3212 else
3213 {
3214 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3215 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3216
3217 if (debug_threads)
3218 {
3219 debug_printf ("linux_wait_1 ret = %s, terminated with "
3220 "signal %d\n",
3221 target_pid_to_str (ptid_of (current_thread)),
3222 WTERMSIG (w));
3223 debug_exit ();
3224 }
3225 }
3226
3227 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3228 return filter_exit_event (event_child, ourstatus);
3229
3230 return ptid_of (current_thread);
3231 }
3232
3233 /* If a step-over executes a breakpoint instruction, then in the case
3234 of hardware single step it means a gdb/gdbserver breakpoint had been
3235 planted on top of a permanent breakpoint; in the case of software
3236 single step it may just mean that gdbserver hit the reinsert breakpoint.
3237 The PC has been adjusted by save_stop_reason to point at
3238 the breakpoint address.
3239 So in the hardware single step case, advance the PC manually
3240 past the breakpoint, and in the software single step case, advance only
3241 if it's not the single_step_breakpoint we are hitting.
3242 This prevents a program from trapping a permanent breakpoint
3243 forever. */
3244 if (!ptid_equal (step_over_bkpt, null_ptid)
3245 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3246 && (event_child->stepping
3247 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3248 {
3249 int increment_pc = 0;
3250 int breakpoint_kind = 0;
3251 CORE_ADDR stop_pc = event_child->stop_pc;
3252
3253 breakpoint_kind =
3254 the_target->breakpoint_kind_from_current_state (&stop_pc);
3255 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3256
3257 if (debug_threads)
3258 {
3259 debug_printf ("step-over for %s executed software breakpoint\n",
3260 target_pid_to_str (ptid_of (current_thread)));
3261 }
3262
3263 if (increment_pc != 0)
3264 {
3265 struct regcache *regcache
3266 = get_thread_regcache (current_thread, 1);
3267
3268 event_child->stop_pc += increment_pc;
3269 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3270
3271 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3272 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3273 }
3274 }
3275
3276 /* If this event was not handled before, and is not a SIGTRAP, we
3277 report it. SIGILL and SIGSEGV are also treated as traps in case
3278 a breakpoint is inserted at the current PC. If this target does
3279 not support internal breakpoints at all, we also report the
3280 SIGTRAP without further processing; it's of no concern to us. */
3281 maybe_internal_trap
3282 = (supports_breakpoints ()
3283 && (WSTOPSIG (w) == SIGTRAP
3284 || ((WSTOPSIG (w) == SIGILL
3285 || WSTOPSIG (w) == SIGSEGV)
3286 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3287
3288 if (maybe_internal_trap)
3289 {
3290 /* Handle anything that requires bookkeeping before deciding to
3291 report the event or continue waiting. */
3292
3293 /* First check if we can explain the SIGTRAP with an internal
3294 breakpoint, or if we should possibly report the event to GDB.
3295 Do this before anything that may remove or insert a
3296 breakpoint. */
3297 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3298
3299 /* We have a SIGTRAP, possibly a step-over dance has just
3300 finished. If so, tweak the state machine accordingly,
3301 reinsert breakpoints and delete any single-step
3302 breakpoints. */
3303 step_over_finished = finish_step_over (event_child);
3304
3305 /* Now invoke the callbacks of any internal breakpoints there. */
3306 check_breakpoints (event_child->stop_pc);
3307
3308 /* Handle tracepoint data collecting. This may overflow the
3309 trace buffer, and cause a tracing stop, removing
3310 breakpoints. */
3311 trace_event = handle_tracepoints (event_child);
3312
3313 if (bp_explains_trap)
3314 {
3315 if (debug_threads)
3316 debug_printf ("Hit a gdbserver breakpoint.\n");
3317 }
3318 }
3319 else
3320 {
3321 /* We have some other signal, possibly a step-over dance was in
3322 progress, and it should be cancelled too. */
3323 step_over_finished = finish_step_over (event_child);
3324 }
3325
3326 /* We have all the data we need. Either report the event to GDB, or
3327 resume threads and keep waiting for more. */
3328
3329 /* If we're collecting a fast tracepoint, finish the collection and
3330 move out of the jump pad before delivering a signal. See
3331 linux_stabilize_threads. */
3332
3333 if (WIFSTOPPED (w)
3334 && WSTOPSIG (w) != SIGTRAP
3335 && supports_fast_tracepoints ()
3336 && agent_loaded_p ())
3337 {
3338 if (debug_threads)
3339 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3340 "to defer or adjust it.\n",
3341 WSTOPSIG (w), lwpid_of (current_thread));
3342
3343 /* Allow debugging the jump pad itself. */
3344 if (current_thread->last_resume_kind != resume_step
3345 && maybe_move_out_of_jump_pad (event_child, &w))
3346 {
3347 enqueue_one_deferred_signal (event_child, &w);
3348
3349 if (debug_threads)
3350 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3351 WSTOPSIG (w), lwpid_of (current_thread));
3352
3353 linux_resume_one_lwp (event_child, 0, 0, NULL);
3354
3355 if (debug_threads)
3356 debug_exit ();
3357 return ignore_event (ourstatus);
3358 }
3359 }
3360
3361 if (event_child->collecting_fast_tracepoint
3362 != fast_tpoint_collect_result::not_collecting)
3363 {
3364 if (debug_threads)
3365 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3366 "Check if we're already there.\n",
3367 lwpid_of (current_thread),
3368 (int) event_child->collecting_fast_tracepoint);
3369
3370 trace_event = 1;
3371
3372 event_child->collecting_fast_tracepoint
3373 = linux_fast_tracepoint_collecting (event_child, NULL);
3374
3375 if (event_child->collecting_fast_tracepoint
3376 != fast_tpoint_collect_result::before_insn)
3377 {
3378 /* No longer need this breakpoint. */
3379 if (event_child->exit_jump_pad_bkpt != NULL)
3380 {
3381 if (debug_threads)
3382 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3383 "stopping all threads momentarily.\n");
3384
3385 /* Other running threads could hit this breakpoint.
3386 We don't handle moribund locations like GDB does,
3387 instead we always pause all threads when removing
3388 breakpoints, so that any step-over or
3389 decr_pc_after_break adjustment is always taken
3390 care of while the breakpoint is still
3391 inserted. */
3392 stop_all_lwps (1, event_child);
3393
3394 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3395 event_child->exit_jump_pad_bkpt = NULL;
3396
3397 unstop_all_lwps (1, event_child);
3398
3399 gdb_assert (event_child->suspended >= 0);
3400 }
3401 }
3402
3403 if (event_child->collecting_fast_tracepoint
3404 == fast_tpoint_collect_result::not_collecting)
3405 {
3406 if (debug_threads)
3407 debug_printf ("fast tracepoint finished "
3408 "collecting successfully.\n");
3409
3410 /* We may have a deferred signal to report. */
3411 if (dequeue_one_deferred_signal (event_child, &w))
3412 {
3413 if (debug_threads)
3414 debug_printf ("dequeued one signal.\n");
3415 }
3416 else
3417 {
3418 if (debug_threads)
3419 debug_printf ("no deferred signals.\n");
3420
3421 if (stabilizing_threads)
3422 {
3423 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3424 ourstatus->value.sig = GDB_SIGNAL_0;
3425
3426 if (debug_threads)
3427 {
3428 debug_printf ("linux_wait_1 ret = %s, stopped "
3429 "while stabilizing threads\n",
3430 target_pid_to_str (ptid_of (current_thread)));
3431 debug_exit ();
3432 }
3433
3434 return ptid_of (current_thread);
3435 }
3436 }
3437 }
3438 }
3439
3440 /* Check whether GDB would be interested in this event. */
3441
3442 /* Check if GDB is interested in this syscall. */
3443 if (WIFSTOPPED (w)
3444 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3445 && !gdb_catch_this_syscall_p (event_child))
3446 {
3447 if (debug_threads)
3448 {
3449 debug_printf ("Ignored syscall for LWP %ld.\n",
3450 lwpid_of (current_thread));
3451 }
3452
3453 linux_resume_one_lwp (event_child, event_child->stepping,
3454 0, NULL);
3455
3456 if (debug_threads)
3457 debug_exit ();
3458 return ignore_event (ourstatus);
3459 }
3460
3461 /* If GDB is not interested in this signal, don't stop other
3462 threads, and don't report it to GDB. Just resume the inferior
3463 right away. We do this for threading-related signals as well as
3464 any that GDB specifically requested we ignore. But never ignore
3465 SIGSTOP if we sent it ourselves, and do not ignore signals when
3466 stepping - they may require special handling to skip the signal
3467 handler. Also never ignore signals that could be caused by a
3468 breakpoint. */
3469 if (WIFSTOPPED (w)
3470 && current_thread->last_resume_kind != resume_step
3471 && (
3472 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3473 (current_process ()->priv->thread_db != NULL
3474 && (WSTOPSIG (w) == __SIGRTMIN
3475 || WSTOPSIG (w) == __SIGRTMIN + 1))
3476 ||
3477 #endif
3478 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3479 && !(WSTOPSIG (w) == SIGSTOP
3480 && current_thread->last_resume_kind == resume_stop)
3481 && !linux_wstatus_maybe_breakpoint (w))))
3482 {
3483 siginfo_t info, *info_p;
3484
3485 if (debug_threads)
3486 debug_printf ("Ignored signal %d for LWP %ld.\n",
3487 WSTOPSIG (w), lwpid_of (current_thread));
3488
3489 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3490 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3491 info_p = &info;
3492 else
3493 info_p = NULL;
3494
3495 if (step_over_finished)
3496 {
3497 /* We cancelled this thread's step-over above. We still
3498 need to unsuspend all other LWPs, and set them back
3499 running again while the signal handler runs. */
3500 unsuspend_all_lwps (event_child);
3501
3502 /* Enqueue the pending signal info so that proceed_all_lwps
3503 doesn't lose it. */
3504 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3505
3506 proceed_all_lwps ();
3507 }
3508 else
3509 {
3510 linux_resume_one_lwp (event_child, event_child->stepping,
3511 WSTOPSIG (w), info_p);
3512 }
3513
3514 if (debug_threads)
3515 debug_exit ();
3516
3517 return ignore_event (ourstatus);
3518 }
3519
3520 /* Note that all addresses are always "out of the step range" when
3521 there's no range to begin with. */
3522 in_step_range = lwp_in_step_range (event_child);
3523
3524 /* If GDB wanted this thread to single step, and the thread is out
3525 of the step range, we always want to report the SIGTRAP, and let
3526 GDB handle it. Watchpoints should always be reported. So should
3527 signals we can't explain. A SIGTRAP we can't explain could be a
3528 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3529 do, we're able to handle GDB breakpoints on top of internal
3530 breakpoints, by handling the internal breakpoint and still
3531 reporting the event to GDB. If we don't, we're out of luck, GDB
3532 won't see the breakpoint hit. If we see a single-step event but
3533 the thread should be continuing, don't pass the trap to gdb.
3534 That indicates that we had previously finished a single-step but
3535 left the single-step pending -- see
3536 complete_ongoing_step_over. */
3537 report_to_gdb = (!maybe_internal_trap
3538 || (current_thread->last_resume_kind == resume_step
3539 && !in_step_range)
3540 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3541 || (!in_step_range
3542 && !bp_explains_trap
3543 && !trace_event
3544 && !step_over_finished
3545 && !(current_thread->last_resume_kind == resume_continue
3546 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3547 || (gdb_breakpoint_here (event_child->stop_pc)
3548 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3549 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3550 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3551
3552 run_breakpoint_commands (event_child->stop_pc);
3553
3554 /* We found no reason GDB would want us to stop. We either hit one
3555 of our own breakpoints, or finished an internal step GDB
3556 shouldn't know about. */
3557 if (!report_to_gdb)
3558 {
3559 if (debug_threads)
3560 {
3561 if (bp_explains_trap)
3562 debug_printf ("Hit a gdbserver breakpoint.\n");
3563 if (step_over_finished)
3564 debug_printf ("Step-over finished.\n");
3565 if (trace_event)
3566 debug_printf ("Tracepoint event.\n");
3567 if (lwp_in_step_range (event_child))
3568 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3569 paddress (event_child->stop_pc),
3570 paddress (event_child->step_range_start),
3571 paddress (event_child->step_range_end));
3572 }
3573
3574 /* We're not reporting this breakpoint to GDB, so apply the
3575 decr_pc_after_break adjustment to the inferior's regcache
3576 ourselves. */
3577
3578 if (the_low_target.set_pc != NULL)
3579 {
3580 struct regcache *regcache
3581 = get_thread_regcache (current_thread, 1);
3582 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3583 }
3584
3585 if (step_over_finished)
3586 {
3587 /* If we have finished stepping over a breakpoint, we've
3588 stopped and suspended all LWPs momentarily except the
3589 stepping one. This is where we resume them all again.
3590 We're going to keep waiting, so use proceed, which
3591 handles stepping over the next breakpoint. */
3592 unsuspend_all_lwps (event_child);
3593 }
3594 else
3595 {
3596 /* Remove the single-step breakpoints if any. Note that
3597 there isn't a single-step breakpoint if we finished stepping
3598 over. */
3599 if (can_software_single_step ()
3600 && has_single_step_breakpoints (current_thread))
3601 {
3602 stop_all_lwps (0, event_child);
3603 delete_single_step_breakpoints (current_thread);
3604 unstop_all_lwps (0, event_child);
3605 }
3606 }
3607
3608 if (debug_threads)
3609 debug_printf ("proceeding all threads.\n");
3610 proceed_all_lwps ();
3611
3612 if (debug_threads)
3613 debug_exit ();
3614
3615 return ignore_event (ourstatus);
3616 }
3617
3618 if (debug_threads)
3619 {
3620 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3621 {
3622 std::string str
3623 = target_waitstatus_to_string (&event_child->waitstatus);
3624
3625 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3626 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3627 }
3628 if (current_thread->last_resume_kind == resume_step)
3629 {
3630 if (event_child->step_range_start == event_child->step_range_end)
3631 debug_printf ("GDB wanted to single-step, reporting event.\n");
3632 else if (!lwp_in_step_range (event_child))
3633 debug_printf ("Out of step range, reporting event.\n");
3634 }
3635 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3636 debug_printf ("Stopped by watchpoint.\n");
3637 else if (gdb_breakpoint_here (event_child->stop_pc))
3638 debug_printf ("Stopped by GDB breakpoint.\n");
3639 if (debug_threads)
3640 debug_printf ("Hit a non-gdbserver trap event.\n");
3641 }
3642
3643 /* Alright, we're going to report a stop. */
3644
3645 /* Remove single-step breakpoints. */
3646 if (can_software_single_step ())
3647 {
3648       /* Decide whether to remove the single-step breakpoints.  If we
3649 	 do, stop all lwps first, so that other threads won't hit a
3650 	 breakpoint left in stale memory.  */
3651 int remove_single_step_breakpoints_p = 0;
3652
3653 if (non_stop)
3654 {
3655 remove_single_step_breakpoints_p
3656 = has_single_step_breakpoints (current_thread);
3657 }
3658 else
3659 {
3660 /* In all-stop, a stop reply cancels all previous resume
3661 requests. Delete all single-step breakpoints. */
3662
3663 find_thread ([&] (thread_info *thread) {
3664 if (has_single_step_breakpoints (thread))
3665 {
3666 remove_single_step_breakpoints_p = 1;
3667 return true;
3668 }
3669
3670 return false;
3671 });
3672 }
3673
3674 if (remove_single_step_breakpoints_p)
3675 {
3676 	  /* If we remove single-step breakpoints from memory, stop all lwps,
3677 	     so that other threads won't hit a breakpoint left in stale
3678 	     memory.  */
3679 stop_all_lwps (0, event_child);
3680
3681 if (non_stop)
3682 {
3683 gdb_assert (has_single_step_breakpoints (current_thread));
3684 delete_single_step_breakpoints (current_thread);
3685 }
3686 else
3687 {
3688 for_each_thread ([] (thread_info *thread){
3689 if (has_single_step_breakpoints (thread))
3690 delete_single_step_breakpoints (thread);
3691 });
3692 }
3693
3694 unstop_all_lwps (0, event_child);
3695 }
3696 }
3697
3698 if (!stabilizing_threads)
3699 {
3700 /* In all-stop, stop all threads. */
3701 if (!non_stop)
3702 stop_all_lwps (0, NULL);
3703
3704 if (step_over_finished)
3705 {
3706 if (!non_stop)
3707 {
3708 /* If we were doing a step-over, all other threads but
3709 the stepping one had been paused in start_step_over,
3710 with their suspend counts incremented. We don't want
3711 to do a full unstop/unpause, because we're in
3712 all-stop mode (so we want threads stopped), but we
3713 still need to unsuspend the other threads, to
3714 decrement their `suspended' count back. */
3715 unsuspend_all_lwps (event_child);
3716 }
3717 else
3718 {
3719 /* If we just finished a step-over, then all threads had
3720 been momentarily paused. In all-stop, that's fine,
3721 we want threads stopped by now anyway. In non-stop,
3722 we need to re-resume threads that GDB wanted to be
3723 running. */
3724 unstop_all_lwps (1, event_child);
3725 }
3726 }
3727
3728 /* If we're not waiting for a specific LWP, choose an event LWP
3729 from among those that have had events. Giving equal priority
3730 to all LWPs that have had events helps prevent
3731 starvation. */
3732 if (ptid_equal (ptid, minus_one_ptid))
3733 {
3734 event_child->status_pending_p = 1;
3735 event_child->status_pending = w;
3736
3737 select_event_lwp (&event_child);
3738
3739 /* current_thread and event_child must stay in sync. */
3740 current_thread = get_lwp_thread (event_child);
3741
3742 event_child->status_pending_p = 0;
3743 w = event_child->status_pending;
3744 }
3745
3746
3747 /* Stabilize threads (move out of jump pads). */
3748 if (!non_stop)
3749 stabilize_threads ();
3750 }
3751 else
3752 {
3753 /* If we just finished a step-over, then all threads had been
3754 momentarily paused. In all-stop, that's fine, we want
3755 threads stopped by now anyway. In non-stop, we need to
3756 re-resume threads that GDB wanted to be running. */
3757 if (step_over_finished)
3758 unstop_all_lwps (1, event_child);
3759 }
3760
3761 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3762 {
3763 /* If the reported event is an exit, fork, vfork or exec, let
3764 GDB know. */
3765
3766 /* Break the unreported fork relationship chain. */
3767 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3768 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3769 {
3770 event_child->fork_relative->fork_relative = NULL;
3771 event_child->fork_relative = NULL;
3772 }
3773
3774 *ourstatus = event_child->waitstatus;
3775 /* Clear the event lwp's waitstatus since we handled it already. */
3776 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3777 }
3778 else
3779 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3780
3781 /* Now that we've selected our final event LWP, un-adjust its PC if
3782 it was a software breakpoint, and the client doesn't know we can
3783 adjust the breakpoint ourselves. */
3784 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3785 && !swbreak_feature)
3786 {
3787 int decr_pc = the_low_target.decr_pc_after_break;
3788
3789 if (decr_pc != 0)
3790 {
3791 struct regcache *regcache
3792 = get_thread_regcache (current_thread, 1);
3793 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3794 }
3795 }
3796
3797 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3798 {
3799 get_syscall_trapinfo (event_child,
3800 &ourstatus->value.syscall_number);
3801 ourstatus->kind = event_child->syscall_state;
3802 }
3803 else if (current_thread->last_resume_kind == resume_stop
3804 && WSTOPSIG (w) == SIGSTOP)
3805 {
3806       /* A thread that has been requested to stop by GDB with vCont;t
3807 	 stopped cleanly, so report it as SIG0.  The use of SIGSTOP
3808 	 is an implementation detail.  */
3809 ourstatus->value.sig = GDB_SIGNAL_0;
3810 }
3811 else if (current_thread->last_resume_kind == resume_stop
3812 && WSTOPSIG (w) != SIGSTOP)
3813 {
3814       /* A thread that has been requested to stop by GDB with vCont;t,
3815 	 but stopped for some other reason.  */
3816 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3817 }
3818 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3819 {
3820 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3821 }
3822
3823 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3824
3825 if (debug_threads)
3826 {
3827 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3828 target_pid_to_str (ptid_of (current_thread)),
3829 ourstatus->kind, ourstatus->value.sig);
3830 debug_exit ();
3831 }
3832
3833 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3834 return filter_exit_event (event_child, ourstatus);
3835
3836 return ptid_of (current_thread);
3837 }
3838
3839 /* Get rid of any pending event in the pipe. */
3840 static void
3841 async_file_flush (void)
3842 {
3843 int ret;
3844 char buf;
3845
3846 do
3847 ret = read (linux_event_pipe[0], &buf, 1);
3848 while (ret >= 0 || (ret == -1 && errno == EINTR));
3849 }
3850
3851 /* Put something in the pipe, so the event loop wakes up. */
3852 static void
3853 async_file_mark (void)
3854 {
3855 int ret;
3856
3857 async_file_flush ();
3858
3859 do
3860 ret = write (linux_event_pipe[1], "+", 1);
3861 while (ret == 0 || (ret == -1 && errno == EINTR));
3862
3863 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3864 be awakened anyway. */
3865 }
3866
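/* Implementation of the wait target op.  Flush any pending async
   notification first, then call linux_wait_1, repeating until it
   reports an event unless TARGET_WNOHANG was requested.  */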
3867 static ptid_t
3868 linux_wait (ptid_t ptid,
3869 struct target_waitstatus *ourstatus, int target_options)
3870 {
3871 ptid_t event_ptid;
3872
3873 /* Flush the async file first. */
3874 if (target_is_async_p ())
3875 async_file_flush ();
3876
3877 do
3878 {
3879 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3880 }
3881 while ((target_options & TARGET_WNOHANG) == 0
3882 && ptid_equal (event_ptid, null_ptid)
3883 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3884
3885 /* If at least one stop was reported, there may be more. A single
3886 SIGCHLD can signal more than one child stop. */
3887 if (target_is_async_p ()
3888 && (target_options & TARGET_WNOHANG) != 0
3889 && !ptid_equal (event_ptid, null_ptid))
3890 async_file_mark ();
3891
3892 return event_ptid;
3893 }
3894
3895 /* Send a signal to an LWP. */
3896
3897 static int
3898 kill_lwp (unsigned long lwpid, int signo)
3899 {
3900 int ret;
3901
3902 errno = 0;
3903 ret = syscall (__NR_tkill, lwpid, signo);
3904 if (errno == ENOSYS)
3905 {
3906 /* If tkill fails, then we are not using nptl threads, a
3907 configuration we no longer support. */
3908 perror_with_name (("tkill"));
3909 }
3910 return ret;
3911 }
3912
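/* Ask LWP to stop, by queueing a SIGSTOP for it; see send_sigstop.  */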
3913 void
3914 linux_stop_lwp (struct lwp_info *lwp)
3915 {
3916 send_sigstop (lwp);
3917 }
3918
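/* Send a SIGSTOP to LWP, unless a stop is already expected for it.  */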
3919 static void
3920 send_sigstop (struct lwp_info *lwp)
3921 {
3922 int pid;
3923
3924 pid = lwpid_of (get_lwp_thread (lwp));
3925
3926 /* If we already have a pending stop signal for this process, don't
3927 send another. */
3928 if (lwp->stop_expected)
3929 {
3930 if (debug_threads)
3931 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3932
3933 return;
3934 }
3935
3936 if (debug_threads)
3937 debug_printf ("Sending sigstop to lwp %d\n", pid);
3938
3939 lwp->stop_expected = 1;
3940 kill_lwp (pid, SIGSTOP);
3941 }
3942
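/* for_each_thread callback: send a SIGSTOP to THREAD's LWP, unless it
   is EXCEPT or is already stopped.  */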
3943 static void
3944 send_sigstop (thread_info *thread, lwp_info *except)
3945 {
3946 struct lwp_info *lwp = get_thread_lwp (thread);
3947
3948 /* Ignore EXCEPT. */
3949 if (lwp == except)
3950 return;
3951
3952 if (lwp->stopped)
3953 return;
3954
3955 send_sigstop (lwp);
3956 }
3957
3958 /* Increment the suspend count of an LWP, and stop it, if not stopped
3959 yet. */
3960 static void
3961 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3962 {
3963 struct lwp_info *lwp = get_thread_lwp (thread);
3964
3965 /* Ignore EXCEPT. */
3966 if (lwp == except)
3967 return;
3968
3969 lwp_suspended_inc (lwp);
3970
3971 send_sigstop (thread, except);
3972 }
3973
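/* Record that LWP exited (or was killed by a signal) with wait status
   WSTAT, so the event can be reported later, and mark the LWP as
   stopped, with no further stops expected.  */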
3974 static void
3975 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3976 {
3977 /* Store the exit status for later. */
3978 lwp->status_pending_p = 1;
3979 lwp->status_pending = wstat;
3980
3981 /* Store in waitstatus as well, as there's nothing else to process
3982 for this event. */
3983 if (WIFEXITED (wstat))
3984 {
3985 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3986 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3987 }
3988 else if (WIFSIGNALED (wstat))
3989 {
3990 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3991 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3992 }
3993
3994 /* Prevent trying to stop it. */
3995 lwp->stopped = 1;
3996
3997 /* No further stops are expected from a dead lwp. */
3998 lwp->stop_expected = 0;
3999 }
4000
4001 /* Return true if LWP has exited already, and has a pending exit event
4002 to report to GDB. */
4003
4004 static int
4005 lwp_is_marked_dead (struct lwp_info *lwp)
4006 {
4007 return (lwp->status_pending_p
4008 && (WIFEXITED (lwp->status_pending)
4009 || WIFSIGNALED (lwp->status_pending)));
4010 }
4011
4012 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4013
4014 static void
4015 wait_for_sigstop (void)
4016 {
4017 struct thread_info *saved_thread;
4018 ptid_t saved_tid;
4019 int wstat;
4020 int ret;
4021
4022 saved_thread = current_thread;
4023 if (saved_thread != NULL)
4024 saved_tid = saved_thread->id;
4025 else
4026 saved_tid = null_ptid; /* avoid bogus unused warning */
4027
4028 if (debug_threads)
4029 debug_printf ("wait_for_sigstop: pulling events\n");
4030
4031 /* Passing NULL_PTID as filter indicates we want all events to be
4032 left pending. Eventually this returns when there are no
4033 unwaited-for children left. */
4034 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4035 &wstat, __WALL);
4036 gdb_assert (ret == -1);
4037
4038 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4039 current_thread = saved_thread;
4040 else
4041 {
4042 if (debug_threads)
4043 debug_printf ("Previously current thread died.\n");
4044
4045 /* We can't change the current inferior behind GDB's back,
4046 otherwise, a subsequent command may apply to the wrong
4047 process. */
4048 current_thread = NULL;
4049 }
4050 }
4051
4052 /* Returns true if THREAD is stopped in a jump pad, and we can't
4053 move it out, because we need to report the stop event to GDB. For
4054 example, if the user puts a breakpoint in the jump pad, it's
4055 because she wants to debug it. */
4056
4057 static bool
4058 stuck_in_jump_pad_callback (thread_info *thread)
4059 {
4060 struct lwp_info *lwp = get_thread_lwp (thread);
4061
4062 if (lwp->suspended != 0)
4063 {
4064 internal_error (__FILE__, __LINE__,
4065 "LWP %ld is suspended, suspended=%d\n",
4066 lwpid_of (thread), lwp->suspended);
4067 }
4068 gdb_assert (lwp->stopped);
4069
4070   /* Allow debugging the jump pad, gdb_collect, etc.  */
4071 return (supports_fast_tracepoints ()
4072 && agent_loaded_p ()
4073 && (gdb_breakpoint_here (lwp->stop_pc)
4074 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4075 || thread->last_resume_kind == resume_step)
4076 && (linux_fast_tracepoint_collecting (lwp, NULL)
4077 != fast_tpoint_collect_result::not_collecting));
4078 }
4079
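/* for_each_thread callback: if THREAD's LWP is stopped within a fast
   tracepoint jump pad and there's no reason to report the stop to
   GDB, defer any pending signal and resume the LWP so it can move out
   of the pad; otherwise, leave it suspended where it is.  */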
4080 static void
4081 move_out_of_jump_pad_callback (thread_info *thread)
4082 {
4083 struct thread_info *saved_thread;
4084 struct lwp_info *lwp = get_thread_lwp (thread);
4085 int *wstat;
4086
4087 if (lwp->suspended != 0)
4088 {
4089 internal_error (__FILE__, __LINE__,
4090 "LWP %ld is suspended, suspended=%d\n",
4091 lwpid_of (thread), lwp->suspended);
4092 }
4093 gdb_assert (lwp->stopped);
4094
4095 /* For gdb_breakpoint_here. */
4096 saved_thread = current_thread;
4097 current_thread = thread;
4098
4099 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4100
4101 /* Allow debugging the jump pad, gdb_collect, etc. */
4102 if (!gdb_breakpoint_here (lwp->stop_pc)
4103 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4104 && thread->last_resume_kind != resume_step
4105 && maybe_move_out_of_jump_pad (lwp, wstat))
4106 {
4107 if (debug_threads)
4108 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4109 lwpid_of (thread));
4110
4111 if (wstat)
4112 {
4113 lwp->status_pending_p = 0;
4114 enqueue_one_deferred_signal (lwp, wstat);
4115
4116 if (debug_threads)
4117 debug_printf ("Signal %d for LWP %ld deferred "
4118 "(in jump pad)\n",
4119 WSTOPSIG (*wstat), lwpid_of (thread));
4120 }
4121
4122 linux_resume_one_lwp (lwp, 0, 0, NULL);
4123 }
4124 else
4125 lwp_suspended_inc (lwp);
4126
4127 current_thread = saved_thread;
4128 }
4129
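/* Return true if THREAD's LWP is alive and currently running.  */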
4130 static bool
4131 lwp_running (thread_info *thread)
4132 {
4133 struct lwp_info *lwp = get_thread_lwp (thread);
4134
4135 if (lwp_is_marked_dead (lwp))
4136 return false;
4137
4138 return !lwp->stopped;
4139 }
4140
4141 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4142 If SUSPEND, then also increase the suspend count of every LWP,
4143 except EXCEPT. */
4144
4145 static void
4146 stop_all_lwps (int suspend, struct lwp_info *except)
4147 {
4148 /* Should not be called recursively. */
4149 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4150
4151 if (debug_threads)
4152 {
4153 debug_enter ();
4154 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4155 suspend ? "stop-and-suspend" : "stop",
4156 except != NULL
4157 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4158 : "none");
4159 }
4160
4161 stopping_threads = (suspend
4162 ? STOPPING_AND_SUSPENDING_THREADS
4163 : STOPPING_THREADS);
4164
4165 if (suspend)
4166 for_each_thread ([&] (thread_info *thread)
4167 {
4168 suspend_and_send_sigstop (thread, except);
4169 });
4170 else
4171 for_each_thread ([&] (thread_info *thread)
4172 {
4173 send_sigstop (thread, except);
4174 });
4175
4176 wait_for_sigstop ();
4177 stopping_threads = NOT_STOPPING_THREADS;
4178
4179 if (debug_threads)
4180 {
4181 debug_printf ("stop_all_lwps done, setting stopping_threads "
4182 "back to !stopping\n");
4183 debug_exit ();
4184 }
4185 }
4186
4187 /* Enqueue one signal in the chain of signals which need to be
4188 delivered to this process on next resume. */
4189
4190 static void
4191 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4192 {
4193 struct pending_signals *p_sig = XNEW (struct pending_signals);
4194
4195 p_sig->prev = lwp->pending_signals;
4196 p_sig->signal = signal;
4197 if (info == NULL)
4198 memset (&p_sig->info, 0, sizeof (siginfo_t));
4199 else
4200 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4201 lwp->pending_signals = p_sig;
4202 }
4203
4204 /* Install breakpoints for software single stepping. */
4205
4206 static void
4207 install_software_single_step_breakpoints (struct lwp_info *lwp)
4208 {
4209 struct thread_info *thread = get_lwp_thread (lwp);
4210 struct regcache *regcache = get_thread_regcache (thread, 1);
4211
4212 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4213
4214 current_thread = thread;
4215 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4216
4217 for (CORE_ADDR pc : next_pcs)
4218 set_single_step_breakpoint (pc, current_ptid);
4219 }
4220
4221 /* Single step via hardware or software single step.
4222 Return 1 if hardware single stepping, 0 if software single stepping
4223 or can't single step. */
4224
4225 static int
4226 single_step (struct lwp_info* lwp)
4227 {
4228 int step = 0;
4229
4230 if (can_hardware_single_step ())
4231 {
4232 step = 1;
4233 }
4234 else if (can_software_single_step ())
4235 {
4236 install_software_single_step_breakpoints (lwp);
4237 step = 0;
4238 }
4239 else
4240 {
4241 if (debug_threads)
4242 	debug_printf ("stepping is not implemented on this target\n");
4243 }
4244
4245 return step;
4246 }
4247
4248 /* The signal can be delivered to the inferior if we are not trying to
4249    finish a fast tracepoint collect.  Since a signal can be delivered
4250    during the step-over, the program may go to the signal handler and
4251    trap again after returning from the signal handler.  We can live
4252    with the spurious double traps.  */
4253
4254 static int
4255 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4256 {
4257 return (lwp->collecting_fast_tracepoint
4258 == fast_tpoint_collect_result::not_collecting);
4259 }
4260
4261 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4262 SIGNAL is nonzero, give it that signal. */
4263
4264 static void
4265 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4266 int step, int signal, siginfo_t *info)
4267 {
4268 struct thread_info *thread = get_lwp_thread (lwp);
4269 struct thread_info *saved_thread;
4270 int ptrace_request;
4271 struct process_info *proc = get_thread_process (thread);
4272
4273 /* Note that target description may not be initialised
4274 (proc->tdesc == NULL) at this point because the program hasn't
4275 stopped at the first instruction yet. It means GDBserver skips
4276 the extra traps from the wrapper program (see option --wrapper).
4277 Code in this function that requires register access should be
4278    guarded by a check that proc->tdesc is not NULL, or similar.  */
4279
4280 if (lwp->stopped == 0)
4281 return;
4282
4283 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4284
4285 fast_tpoint_collect_result fast_tp_collecting
4286 = lwp->collecting_fast_tracepoint;
4287
4288 gdb_assert (!stabilizing_threads
4289 || (fast_tp_collecting
4290 != fast_tpoint_collect_result::not_collecting));
4291
4292 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4293 user used the "jump" command, or "set $pc = foo"). */
4294 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4295 {
4296 /* Collecting 'while-stepping' actions doesn't make sense
4297 anymore. */
4298 release_while_stepping_state_list (thread);
4299 }
4300
4301 /* If we have pending signals or status, and a new signal, enqueue the
4302 signal. Also enqueue the signal if it can't be delivered to the
4303 inferior right now. */
4304 if (signal != 0
4305 && (lwp->status_pending_p
4306 || lwp->pending_signals != NULL
4307 || !lwp_signal_can_be_delivered (lwp)))
4308 {
4309 enqueue_pending_signal (lwp, signal, info);
4310
4311 /* Postpone any pending signal. It was enqueued above. */
4312 signal = 0;
4313 }
4314
4315 if (lwp->status_pending_p)
4316 {
4317 if (debug_threads)
4318 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4319 " has pending status\n",
4320 lwpid_of (thread), step ? "step" : "continue",
4321 lwp->stop_expected ? "expected" : "not expected");
4322 return;
4323 }
4324
4325 saved_thread = current_thread;
4326 current_thread = thread;
4327
4328 /* This bit needs some thinking about. If we get a signal that
4329 we must report while a single-step reinsert is still pending,
4330 we often end up resuming the thread. It might be better to
4331 (ew) allow a stack of pending events; then we could be sure that
4332 the reinsert happened right away and not lose any signals.
4333
4334 Making this stack would also shrink the window in which breakpoints are
4335 uninserted (see comment in linux_wait_for_lwp) but not enough for
4336 complete correctness, so it won't solve that problem. It may be
4337 worthwhile just to solve this one, however. */
4338 if (lwp->bp_reinsert != 0)
4339 {
4340 if (debug_threads)
4341 debug_printf (" pending reinsert at 0x%s\n",
4342 paddress (lwp->bp_reinsert));
4343
4344 if (can_hardware_single_step ())
4345 {
4346 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4347 {
4348 if (step == 0)
4349 warning ("BAD - reinserting but not stepping.");
4350 if (lwp->suspended)
4351 warning ("BAD - reinserting and suspended(%d).",
4352 lwp->suspended);
4353 }
4354 }
4355
4356 step = maybe_hw_step (thread);
4357 }
4358
4359 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4360 {
4361 if (debug_threads)
4362 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4363 " (exit-jump-pad-bkpt)\n",
4364 lwpid_of (thread));
4365 }
4366 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4367 {
4368 if (debug_threads)
4369 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4370 " single-stepping\n",
4371 lwpid_of (thread));
4372
4373 if (can_hardware_single_step ())
4374 step = 1;
4375 else
4376 {
4377 internal_error (__FILE__, __LINE__,
4378 "moving out of jump pad single-stepping"
4379 " not implemented on this target");
4380 }
4381 }
4382
4383   /* If we have while-stepping actions in this thread, set it stepping.
4384 If we have a signal to deliver, it may or may not be set to
4385 SIG_IGN, we don't know. Assume so, and allow collecting
4386 while-stepping into a signal handler. A possible smart thing to
4387 do would be to set an internal breakpoint at the signal return
4388 address, continue, and carry on catching this while-stepping
4389 action only when that breakpoint is hit. A future
4390 enhancement. */
4391 if (thread->while_stepping != NULL)
4392 {
4393 if (debug_threads)
4394 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4395 lwpid_of (thread));
4396
4397 step = single_step (lwp);
4398 }
4399
4400 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4401 {
4402 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4403
4404 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4405
4406 if (debug_threads)
4407 {
4408 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4409 (long) lwp->stop_pc);
4410 }
4411 }
4412
4413 /* If we have pending signals, consume one if it can be delivered to
4414 the inferior. */
4415 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4416 {
4417 struct pending_signals **p_sig;
4418
4419 p_sig = &lwp->pending_signals;
4420 while ((*p_sig)->prev != NULL)
4421 p_sig = &(*p_sig)->prev;
4422
4423 signal = (*p_sig)->signal;
4424 if ((*p_sig)->info.si_signo != 0)
4425 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4426 &(*p_sig)->info);
4427
4428 free (*p_sig);
4429 *p_sig = NULL;
4430 }
4431
4432 if (debug_threads)
4433 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4434 lwpid_of (thread), step ? "step" : "continue", signal,
4435 lwp->stop_expected ? "expected" : "not expected");
4436
4437 if (the_low_target.prepare_to_resume != NULL)
4438 the_low_target.prepare_to_resume (lwp);
4439
4440 regcache_invalidate_thread (thread);
4441 errno = 0;
4442 lwp->stepping = step;
4443 if (step)
4444 ptrace_request = PTRACE_SINGLESTEP;
4445 else if (gdb_catching_syscalls_p (lwp))
4446 ptrace_request = PTRACE_SYSCALL;
4447 else
4448 ptrace_request = PTRACE_CONT;
4449 ptrace (ptrace_request,
4450 lwpid_of (thread),
4451 (PTRACE_TYPE_ARG3) 0,
4452 /* Coerce to a uintptr_t first to avoid potential gcc warning
4453 of coercing an 8 byte integer to a 4 byte pointer. */
4454 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4455
4456 current_thread = saved_thread;
4457 if (errno)
4458 perror_with_name ("resuming thread");
4459
4460 /* Successfully resumed. Clear state that no longer makes sense,
4461 and mark the LWP as running. Must not do this before resuming
4462 otherwise if that fails other code will be confused. E.g., we'd
4463 later try to stop the LWP and hang forever waiting for a stop
4464 status. Note that we must not throw after this is cleared,
4465 otherwise handle_zombie_lwp_error would get confused. */
4466 lwp->stopped = 0;
4467 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4468 }
4469
4470 /* Called when we try to resume a stopped LWP and that errors out. If
4471 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4472 or about to become), discard the error, clear any pending status
4473 the LWP may have, and return true (we'll collect the exit status
4474 soon enough). Otherwise, return false. */
4475
4476 static int
4477 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4478 {
4479 struct thread_info *thread = get_lwp_thread (lp);
4480
4481 /* If we get an error after resuming the LWP successfully, we'd
4482 confuse !T state for the LWP being gone. */
4483 gdb_assert (lp->stopped);
4484
4485 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4486 because even if ptrace failed with ESRCH, the tracee may be "not
4487 yet fully dead", but already refusing ptrace requests. In that
4488 case the tracee has 'R (Running)' state for a little bit
4489 (observed in Linux 3.18). See also the note on ESRCH in the
4490 ptrace(2) man page. Instead, check whether the LWP has any state
4491 other than ptrace-stopped. */
4492
4493 /* Don't assume anything if /proc/PID/status can't be read. */
4494 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4495 {
4496 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4497 lp->status_pending_p = 0;
4498 return 1;
4499 }
4500 return 0;
4501 }
4502
4503 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4504 disappears while we try to resume it. */
4505
4506 static void
4507 linux_resume_one_lwp (struct lwp_info *lwp,
4508 int step, int signal, siginfo_t *info)
4509 {
4510 TRY
4511 {
4512 linux_resume_one_lwp_throw (lwp, step, signal, info);
4513 }
4514 CATCH (ex, RETURN_MASK_ERROR)
4515 {
4516 if (!check_ptrace_stopped_lwp_gone (lwp))
4517 throw_exception (ex);
4518 }
4519 END_CATCH
4520 }
4521
4522 /* This function is called once per thread via for_each_thread.
4523 We look up which resume request applies to THREAD and mark it with a
4524 pointer to the appropriate resume request.
4525
4526 This algorithm is O(threads * resume elements), but resume elements
4527 is small (and will remain small at least until GDB supports thread
4528 suspension). */
4529
4530 static void
4531 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4532 {
4533 struct lwp_info *lwp = get_thread_lwp (thread);
4534
4535   for (size_t ndx = 0; ndx < n; ndx++)
4536 {
4537 ptid_t ptid = resume[ndx].thread;
4538 if (ptid_equal (ptid, minus_one_ptid)
4539 || ptid == thread->id
4540 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4541 of PID'. */
4542 || (ptid_get_pid (ptid) == pid_of (thread)
4543 && (ptid_is_pid (ptid)
4544 || ptid_get_lwp (ptid) == -1)))
4545 {
4546 if (resume[ndx].kind == resume_stop
4547 && thread->last_resume_kind == resume_stop)
4548 {
4549 if (debug_threads)
4550 debug_printf ("already %s LWP %ld at GDB's request\n",
4551 (thread->last_status.kind
4552 == TARGET_WAITKIND_STOPPED)
4553 ? "stopped"
4554 : "stopping",
4555 lwpid_of (thread));
4556
4557 continue;
4558 }
4559
4560 /* Ignore (wildcard) resume requests for already-resumed
4561 threads. */
4562 if (resume[ndx].kind != resume_stop
4563 && thread->last_resume_kind != resume_stop)
4564 {
4565 if (debug_threads)
4566 debug_printf ("already %s LWP %ld at GDB's request\n",
4567 (thread->last_resume_kind
4568 == resume_step)
4569 ? "stepping"
4570 : "continuing",
4571 lwpid_of (thread));
4572 continue;
4573 }
4574
4575 /* Don't let wildcard resumes resume fork children that GDB
4576 does not yet know are new fork children. */
4577 if (lwp->fork_relative != NULL)
4578 {
4579 struct lwp_info *rel = lwp->fork_relative;
4580
4581 if (rel->status_pending_p
4582 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4583 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4584 {
4585 if (debug_threads)
4586 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4587 lwpid_of (thread));
4588 continue;
4589 }
4590 }
4591
4592 /* If the thread has a pending event that has already been
4593 reported to GDBserver core, but GDB has not pulled the
4594 event out of the vStopped queue yet, likewise, ignore the
4595 (wildcard) resume request. */
4596 if (in_queued_stop_replies (thread->id))
4597 {
4598 if (debug_threads)
4599 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4600 lwpid_of (thread));
4601 continue;
4602 }
4603
4604 lwp->resume = &resume[ndx];
4605 thread->last_resume_kind = lwp->resume->kind;
4606
4607 lwp->step_range_start = lwp->resume->step_range_start;
4608 lwp->step_range_end = lwp->resume->step_range_end;
4609
4610 /* If we had a deferred signal to report, dequeue one now.
4611 This can happen if LWP gets more than one signal while
4612 trying to get out of a jump pad. */
4613 if (lwp->stopped
4614 && !lwp->status_pending_p
4615 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4616 {
4617 lwp->status_pending_p = 1;
4618
4619 if (debug_threads)
4620 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4621 "leaving status pending.\n",
4622 WSTOPSIG (lwp->status_pending),
4623 lwpid_of (thread));
4624 }
4625
4626 return;
4627 }
4628 }
4629
4630 /* No resume action for this thread. */
4631 lwp->resume = NULL;
4632 }
4633
4634 /* find_thread callback for linux_resume. Return true if this lwp has an
4635 interesting status pending. */
4636
4637 static bool
4638 resume_status_pending_p (thread_info *thread)
4639 {
4640 struct lwp_info *lwp = get_thread_lwp (thread);
4641
4642 /* LWPs which will not be resumed are not interesting, because
4643 we might not wait for them next time through linux_wait. */
4644 if (lwp->resume == NULL)
4645 return false;
4646
4647 return thread_still_has_status_pending_p (thread);
4648 }
4649
4650 /* Return true if this lwp that GDB wants running is stopped at an
4651 internal breakpoint that we need to step over. It assumes that any
4652 required STOP_PC adjustment has already been propagated to the
4653 inferior's regcache. */
4654
4655 static bool
4656 need_step_over_p (thread_info *thread)
4657 {
4658 struct lwp_info *lwp = get_thread_lwp (thread);
4659 struct thread_info *saved_thread;
4660 CORE_ADDR pc;
4661 struct process_info *proc = get_thread_process (thread);
4662
4663 /* GDBserver is skipping the extra traps from the wrapper program,
4664      so we don't have to step over.  */
4665 if (proc->tdesc == NULL)
4666 return false;
4667
4668 /* LWPs which will not be resumed are not interesting, because we
4669 might not wait for them next time through linux_wait. */
4670
4671 if (!lwp->stopped)
4672 {
4673 if (debug_threads)
4674 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4675 lwpid_of (thread));
4676 return false;
4677 }
4678
4679 if (thread->last_resume_kind == resume_stop)
4680 {
4681 if (debug_threads)
4682 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4683 " stopped\n",
4684 lwpid_of (thread));
4685 return false;
4686 }
4687
4688 gdb_assert (lwp->suspended >= 0);
4689
4690 if (lwp->suspended)
4691 {
4692 if (debug_threads)
4693 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4694 lwpid_of (thread));
4695 return false;
4696 }
4697
4698 if (lwp->status_pending_p)
4699 {
4700 if (debug_threads)
4701 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4702 " status.\n",
4703 lwpid_of (thread));
4704 return false;
4705 }
4706
4707 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4708 or we have. */
4709 pc = get_pc (lwp);
4710
4711 /* If the PC has changed since we stopped, then don't do anything,
4712 and let the breakpoint/tracepoint be hit. This happens if, for
4713 instance, GDB handled the decr_pc_after_break subtraction itself,
4714 GDB is OOL stepping this thread, or the user has issued a "jump"
4715 command, or poked thread's registers herself. */
4716 if (pc != lwp->stop_pc)
4717 {
4718 if (debug_threads)
4719 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4720 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4721 lwpid_of (thread),
4722 paddress (lwp->stop_pc), paddress (pc));
4723 return false;
4724 }
4725
4726 /* On software single step target, resume the inferior with signal
4727 rather than stepping over. */
4728 if (can_software_single_step ()
4729 && lwp->pending_signals != NULL
4730 && lwp_signal_can_be_delivered (lwp))
4731 {
4732 if (debug_threads)
4733 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4734 " signals.\n",
4735 lwpid_of (thread));
4736
4737 return false;
4738 }
4739
4740 saved_thread = current_thread;
4741 current_thread = thread;
4742
4743 /* We can only step over breakpoints we know about. */
4744 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4745 {
4746 /* Don't step over a breakpoint that GDB expects to hit
4747 though. If the condition is being evaluated on the target's side
4748 	     and it evaluates to false, step over this breakpoint as well.  */
4749 if (gdb_breakpoint_here (pc)
4750 && gdb_condition_true_at_breakpoint (pc)
4751 && gdb_no_commands_at_breakpoint (pc))
4752 {
4753 if (debug_threads)
4754 debug_printf ("Need step over [LWP %ld]? yes, but found"
4755 " GDB breakpoint at 0x%s; skipping step over\n",
4756 lwpid_of (thread), paddress (pc));
4757
4758 current_thread = saved_thread;
4759 return false;
4760 }
4761 else
4762 {
4763 if (debug_threads)
4764 debug_printf ("Need step over [LWP %ld]? yes, "
4765 "found breakpoint at 0x%s\n",
4766 lwpid_of (thread), paddress (pc));
4767
4768 	  /* We've found an lwp that needs stepping over; return true so
4769 	     that find_thread stops looking.  */
4770 current_thread = saved_thread;
4771
4772 return true;
4773 }
4774 }
4775
4776 current_thread = saved_thread;
4777
4778 if (debug_threads)
4779 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4780 " at 0x%s\n",
4781 lwpid_of (thread), paddress (pc));
4782
4783 return false;
4784 }
4785
4786 /* Start a step-over operation on LWP.  When LWP is stopped at a
4787    breakpoint, to make progress, we need to move the breakpoint out
4788 of the way. If we let other threads run while we do that, they may
4789 pass by the breakpoint location and miss hitting it. To avoid
4790 that, a step-over momentarily stops all threads while LWP is
4791 single-stepped by either hardware or software while the breakpoint
4792 is temporarily uninserted from the inferior. When the single-step
4793 finishes, we reinsert the breakpoint, and let all threads that are
4794 supposed to be running, run again. */
4795
4796 static int
4797 start_step_over (struct lwp_info *lwp)
4798 {
4799 struct thread_info *thread = get_lwp_thread (lwp);
4800 struct thread_info *saved_thread;
4801 CORE_ADDR pc;
4802 int step;
4803
4804 if (debug_threads)
4805 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4806 lwpid_of (thread));
4807
4808 stop_all_lwps (1, lwp);
4809
4810 if (lwp->suspended != 0)
4811 {
4812 internal_error (__FILE__, __LINE__,
4813 "LWP %ld suspended=%d\n", lwpid_of (thread),
4814 lwp->suspended);
4815 }
4816
4817 if (debug_threads)
4818 debug_printf ("Done stopping all threads for step-over.\n");
4819
4820 /* Note, we should always reach here with an already adjusted PC,
4821 either by GDB (if we're resuming due to GDB's request), or by our
4822 caller, if we just finished handling an internal breakpoint GDB
4823 shouldn't care about. */
4824 pc = get_pc (lwp);
4825
4826 saved_thread = current_thread;
4827 current_thread = thread;
4828
4829 lwp->bp_reinsert = pc;
4830 uninsert_breakpoints_at (pc);
4831 uninsert_fast_tracepoint_jumps_at (pc);
4832
4833 step = single_step (lwp);
4834
4835 current_thread = saved_thread;
4836
4837 linux_resume_one_lwp (lwp, step, 0, NULL);
4838
4839 /* Require next event from this LWP. */
4840 step_over_bkpt = thread->id;
4841 return 1;
4842 }
4843
4844 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4845 start_step_over, if still there, and delete any single-step
4846 breakpoints we've set, on non hardware single-step targets. */
4847
4848 static int
4849 finish_step_over (struct lwp_info *lwp)
4850 {
4851 if (lwp->bp_reinsert != 0)
4852 {
4853 struct thread_info *saved_thread = current_thread;
4854
4855 if (debug_threads)
4856 debug_printf ("Finished step over.\n");
4857
4858 current_thread = get_lwp_thread (lwp);
4859
4860 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4861 may be no breakpoint to reinsert there by now. */
4862 reinsert_breakpoints_at (lwp->bp_reinsert);
4863 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4864
4865 lwp->bp_reinsert = 0;
4866
4867 /* Delete any single-step breakpoints. No longer needed. We
4868 don't have to worry about other threads hitting this trap,
4869 and later not being able to explain it, because we were
4870 stepping over a breakpoint, and we hold all threads but
4871 LWP stopped while doing that. */
4872 if (!can_hardware_single_step ())
4873 {
4874 gdb_assert (has_single_step_breakpoints (current_thread));
4875 delete_single_step_breakpoints (current_thread);
4876 }
4877
4878 step_over_bkpt = null_ptid;
4879 current_thread = saved_thread;
4880 return 1;
4881 }
4882 else
4883 return 0;
4884 }
4885
4886 /* If there's a step over in progress, wait until all threads stop
4887 (that is, until the stepping thread finishes its step), and
4888 unsuspend all lwps. The stepping thread ends with its status
4889 pending, which is processed later when we get back to processing
4890 events. */
4891
4892 static void
4893 complete_ongoing_step_over (void)
4894 {
4895 if (!ptid_equal (step_over_bkpt, null_ptid))
4896 {
4897 struct lwp_info *lwp;
4898 int wstat;
4899 int ret;
4900
4901 if (debug_threads)
4902 debug_printf ("detach: step over in progress, finish it first\n");
4903
4904 /* Passing NULL_PTID as filter indicates we want all events to
4905 be left pending. Eventually this returns when there are no
4906 unwaited-for children left. */
4907 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4908 &wstat, __WALL);
4909 gdb_assert (ret == -1);
4910
4911 lwp = find_lwp_pid (step_over_bkpt);
4912 if (lwp != NULL)
4913 finish_step_over (lwp);
4914 step_over_bkpt = null_ptid;
4915 unsuspend_all_lwps (lwp);
4916 }
4917 }
4918
4919 /* This function is called once per thread. We check the thread's resume
4920 request, which will tell us whether to resume, step, or leave the thread
4921 stopped; and what signal, if any, it should be sent.
4922
4923 For threads which we aren't explicitly told otherwise, we preserve
4924 the stepping flag; this is used for stepping over gdbserver-placed
4925 breakpoints.
4926
4927 If pending_flags was set in any thread, we queue any needed
4928 signals, since we won't actually resume. We already have a pending
4929 event to report, so we don't need to preserve any step requests;
4930 they should be re-issued if necessary. */
4931
4932 static void
4933 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4934 {
4935 struct lwp_info *lwp = get_thread_lwp (thread);
4936 int leave_pending;
4937
4938 if (lwp->resume == NULL)
4939 return;
4940
4941 if (lwp->resume->kind == resume_stop)
4942 {
4943 if (debug_threads)
4944 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4945
4946 if (!lwp->stopped)
4947 {
4948 if (debug_threads)
4949 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4950
4951 /* Stop the thread, and wait for the event asynchronously,
4952 through the event loop. */
4953 send_sigstop (lwp);
4954 }
4955 else
4956 {
4957 if (debug_threads)
4958 debug_printf ("already stopped LWP %ld\n",
4959 lwpid_of (thread));
4960
4961 /* The LWP may have been stopped in an internal event that
4962 was not meant to be notified back to GDB (e.g., gdbserver
4963 breakpoint), so we should be reporting a stop event in
4964 this case too. */
4965
4966 /* If the thread already has a pending SIGSTOP, this is a
4967 no-op. Otherwise, something later will presumably resume
4968 the thread and this will cause it to cancel any pending
4969 operation, due to last_resume_kind == resume_stop. If
4970 the thread already has a pending status to report, we
4971 will still report it the next time we wait - see
4972 status_pending_p_callback. */
4973
4974 /* If we already have a pending signal to report, then
4975 there's no need to queue a SIGSTOP, as this means we're
4976 midway through moving the LWP out of the jumppad, and we
4977 will report the pending signal as soon as that is
4978 finished. */
4979 if (lwp->pending_signals_to_report == NULL)
4980 send_sigstop (lwp);
4981 }
4982
4983 /* For stop requests, we're done. */
4984 lwp->resume = NULL;
4985 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4986 return;
4987 }
4988
4989 /* If this thread which is about to be resumed has a pending status,
4990 then don't resume it - we can just report the pending status.
4991 Likewise if it is suspended, because e.g., another thread is
4992 stepping past a breakpoint. Make sure to queue any signals that
4993 would otherwise be sent. In all-stop mode, we do this decision
4994 based on if *any* thread has a pending status. If there's a
4995 thread that needs the step-over-breakpoint dance, then don't
4996 resume any other thread but that particular one. */
4997 leave_pending = (lwp->suspended
4998 || lwp->status_pending_p
4999 || leave_all_stopped);
5000
5001 /* If we have a new signal, enqueue the signal. */
5002 if (lwp->resume->sig != 0)
5003 {
5004 siginfo_t info, *info_p;
5005
5006 /* If this is the same signal we were previously stopped by,
5007 make sure to queue its siginfo. */
5008 if (WIFSTOPPED (lwp->last_status)
5009 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5010 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5011 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5012 info_p = &info;
5013 else
5014 info_p = NULL;
5015
5016 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5017 }
5018
5019 if (!leave_pending)
5020 {
5021 if (debug_threads)
5022 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5023
5024 proceed_one_lwp (thread, NULL);
5025 }
5026 else
5027 {
5028 if (debug_threads)
5029 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5030 }
5031
5032 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5033 lwp->resume = NULL;
5034 }
5035
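/* Implementation of the resume target op.  Record the resume request
   that applies to each thread, then either resume the threads, or
   leave them all stopped if some thread has a pending status to
   report, or is stopped at a breakpoint that requires a step-over
   (which is then started here).  */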
5036 static void
5037 linux_resume (struct thread_resume *resume_info, size_t n)
5038 {
5039 struct thread_info *need_step_over = NULL;
5040
5041 if (debug_threads)
5042 {
5043 debug_enter ();
5044 debug_printf ("linux_resume:\n");
5045 }
5046
5047 for_each_thread ([&] (thread_info *thread)
5048 {
5049 linux_set_resume_request (thread, resume_info, n);
5050 });
5051
5052 /* If there is a thread which would otherwise be resumed, which has
5053 a pending status, then don't resume any threads - we can just
5054 report the pending status. Make sure to queue any signals that
5055 would otherwise be sent. In non-stop mode, we'll apply this
5056 logic to each thread individually. We consume all pending events
5057 	 before considering whether to start a step-over (in all-stop).  */
5058 bool any_pending = false;
5059 if (!non_stop)
5060 any_pending = find_thread (resume_status_pending_p) != NULL;
5061
5062 /* If there is a thread which would otherwise be resumed, which is
5063 stopped at a breakpoint that needs stepping over, then don't
5064 resume any threads - have it step over the breakpoint with all
5065 other threads stopped, then resume all threads again. Make sure
5066 to queue any signals that would otherwise be delivered or
5067 queued. */
5068 if (!any_pending && supports_breakpoints ())
5069 need_step_over = find_thread (need_step_over_p);
5070
5071 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5072
5073 if (debug_threads)
5074 {
5075 if (need_step_over != NULL)
5076 debug_printf ("Not resuming all, need step over\n");
5077 else if (any_pending)
5078 debug_printf ("Not resuming, all-stop and found "
5079 "an LWP with pending status\n");
5080 else
5081 debug_printf ("Resuming, no pending status or step over needed\n");
5082 }
5083
5084 /* Even if we're leaving threads stopped, queue all signals we'd
5085 otherwise deliver. */
5086 for_each_thread ([&] (thread_info *thread)
5087 {
5088 linux_resume_one_thread (thread, leave_all_stopped);
5089 });
5090
5091 if (need_step_over)
5092 start_step_over (get_thread_lwp (need_step_over));
5093
5094 if (debug_threads)
5095 {
5096 debug_printf ("linux_resume done\n");
5097 debug_exit ();
5098 }
5099
5100 /* We may have events that were pending that can/should be sent to
5101 the client now. Trigger a linux_wait call. */
5102 if (target_is_async_p ())
5103 async_file_mark ();
5104 }
5105
5106 /* This function is called once per thread. We check the thread's
5107 last resume request, which will tell us whether to resume, step, or
5108 leave the thread stopped. Any signal the client requested to be
5109 delivered has already been enqueued at this point.
5110
5111 If any thread that GDB wants running is stopped at an internal
5112 breakpoint that needs stepping over, we start a step-over operation
5113 on that particular thread, and leave all others stopped. */
5114
5115 static void
5116 proceed_one_lwp (thread_info *thread, lwp_info *except)
5117 {
5118 struct lwp_info *lwp = get_thread_lwp (thread);
5119 int step;
5120
5121 if (lwp == except)
5122 return;
5123
5124 if (debug_threads)
5125 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5126
5127 if (!lwp->stopped)
5128 {
5129 if (debug_threads)
5130 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5131 return;
5132 }
5133
5134 if (thread->last_resume_kind == resume_stop
5135 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5136 {
5137 if (debug_threads)
5138 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5139 lwpid_of (thread));
5140 return;
5141 }
5142
5143 if (lwp->status_pending_p)
5144 {
5145 if (debug_threads)
5146 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5147 lwpid_of (thread));
5148 return;
5149 }
5150
5151 gdb_assert (lwp->suspended >= 0);
5152
5153 if (lwp->suspended)
5154 {
5155 if (debug_threads)
5156 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5157 return;
5158 }
5159
5160 if (thread->last_resume_kind == resume_stop
5161 && lwp->pending_signals_to_report == NULL
5162 && (lwp->collecting_fast_tracepoint
5163 == fast_tpoint_collect_result::not_collecting))
5164 {
5165 /* We haven't reported this LWP as stopped yet (otherwise, the
5166 last_status.kind check above would catch it, and we wouldn't
5167 	 reach here).  This LWP may have been momentarily paused by a
5168 stop_all_lwps call while handling for example, another LWP's
5169 step-over. In that case, the pending expected SIGSTOP signal
5170 that was queued at vCont;t handling time will have already
5171 been consumed by wait_for_sigstop, and so we need to requeue
5172 another one here. Note that if the LWP already has a SIGSTOP
5173 pending, this is a no-op. */
5174
5175 if (debug_threads)
5176 debug_printf ("Client wants LWP %ld to stop. "
5177 "Making sure it has a SIGSTOP pending\n",
5178 lwpid_of (thread));
5179
5180 send_sigstop (lwp);
5181 }
5182
5183 if (thread->last_resume_kind == resume_step)
5184 {
5185 if (debug_threads)
5186 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5187 lwpid_of (thread));
5188
5189 /* If resume_step is requested by GDB, install single-step
5190 breakpoints when the thread is about to be actually resumed if
5191 the single-step breakpoints weren't removed. */
5192 if (can_software_single_step ()
5193 && !has_single_step_breakpoints (thread))
5194 install_software_single_step_breakpoints (lwp);
5195
5196 step = maybe_hw_step (thread);
5197 }
5198 else if (lwp->bp_reinsert != 0)
5199 {
5200 if (debug_threads)
5201 debug_printf (" stepping LWP %ld, reinsert set\n",
5202 lwpid_of (thread));
5203
5204 step = maybe_hw_step (thread);
5205 }
5206 else
5207 step = 0;
5208
5209 linux_resume_one_lwp (lwp, step, 0, NULL);
5210 }
5211
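/* for_each_thread callback: unless THREAD's LWP is EXCEPT, decrement
   its suspend count and let proceed_one_lwp decide whether to resume
   it.  */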
5212 static void
5213 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5214 {
5215 struct lwp_info *lwp = get_thread_lwp (thread);
5216
5217 if (lwp == except)
5218 return;
5219
5220 lwp_suspended_decr (lwp);
5221
5222 proceed_one_lwp (thread, except);
5223 }
5224
5225 /* When we finish a step-over, set threads running again. If there's
5226 another thread that may need a step-over, now's the time to start
5227 it. Eventually, we'll move all threads past their breakpoints. */
5228
5229 static void
5230 proceed_all_lwps (void)
5231 {
5232 struct thread_info *need_step_over;
5233
5234 /* If there is a thread which would otherwise be resumed, which is
5235 stopped at a breakpoint that needs stepping over, then don't
5236 resume any threads - have it step over the breakpoint with all
5237 other threads stopped, then resume all threads again. */
5238
5239 if (supports_breakpoints ())
5240 {
5241 need_step_over = find_thread (need_step_over_p);
5242
5243 if (need_step_over != NULL)
5244 {
5245 if (debug_threads)
5246 debug_printf ("proceed_all_lwps: found "
5247 "thread %ld needing a step-over\n",
5248 lwpid_of (need_step_over));
5249
5250 start_step_over (get_thread_lwp (need_step_over));
5251 return;
5252 }
5253 }
5254
5255 if (debug_threads)
5256 debug_printf ("Proceeding, no step-over needed\n");
5257
5258 for_each_thread ([] (thread_info *thread)
5259 {
5260 proceed_one_lwp (thread, NULL);
5261 });
5262 }
5263
5264 /* Stopped LWPs that the client wanted to be running, that don't have
5265 pending statuses, are set to run again, except for EXCEPT, if not
5266 NULL. This undoes a stop_all_lwps call. */
5267
5268 static void
5269 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5270 {
5271 if (debug_threads)
5272 {
5273 debug_enter ();
5274 if (except)
5275 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5276 lwpid_of (get_lwp_thread (except)));
5277 else
5278 debug_printf ("unstopping all lwps\n");
5279 }
5280
5281 if (unsuspend)
5282 for_each_thread ([&] (thread_info *thread)
5283 {
5284 unsuspend_and_proceed_one_lwp (thread, except);
5285 });
5286 else
5287 for_each_thread ([&] (thread_info *thread)
5288 {
5289 proceed_one_lwp (thread, except);
5290 });
5291
5292 if (debug_threads)
5293 {
5294 debug_printf ("unstop_all_lwps done\n");
5295 debug_exit ();
5296 }
5297 }
5298
5299
5300 #ifdef HAVE_LINUX_REGSETS
5301
5302 #define use_linux_regsets 1
5303
5304 /* Returns true if REGSET has been disabled. */
5305
5306 static int
5307 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5308 {
5309 return (info->disabled_regsets != NULL
5310 && info->disabled_regsets[regset - info->regsets]);
5311 }
5312
5313 /* Disable REGSET. */
5314
5315 static void
5316 disable_regset (struct regsets_info *info, struct regset_info *regset)
5317 {
5318 int dr_offset;
5319
5320 dr_offset = regset - info->regsets;
5321 if (info->disabled_regsets == NULL)
5322 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5323 info->disabled_regsets[dr_offset] = 1;
5324 }
5325
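/* Fetch registers from the current thread's LWP into REGCACHE, using
   every enabled regset in REGSETS_INFO.  Return 0 if a general
   registers regset was transferred, 1 otherwise.  */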
5326 static int
5327 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5328 struct regcache *regcache)
5329 {
5330 struct regset_info *regset;
5331 int saw_general_regs = 0;
5332 int pid;
5333 struct iovec iov;
5334
5335 pid = lwpid_of (current_thread);
5336 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5337 {
5338 void *buf, *data;
5339 int nt_type, res;
5340
5341 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5342 continue;
5343
5344 buf = xmalloc (regset->size);
5345
5346 nt_type = regset->nt_type;
5347 if (nt_type)
5348 {
5349 iov.iov_base = buf;
5350 iov.iov_len = regset->size;
5351 data = (void *) &iov;
5352 }
5353 else
5354 data = buf;
5355
5356 #ifndef __sparc__
5357 res = ptrace (regset->get_request, pid,
5358 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5359 #else
5360 res = ptrace (regset->get_request, pid, data, nt_type);
5361 #endif
5362 if (res < 0)
5363 {
5364 if (errno == EIO)
5365 {
5366 /* If we get EIO on a regset, do not try it again for
5367 this process mode. */
5368 disable_regset (regsets_info, regset);
5369 }
5370 else if (errno == ENODATA)
5371 {
5372 /* ENODATA may be returned if the regset is currently
5373 not "active". This can happen in normal operation,
5374 so suppress the warning in this case. */
5375 }
5376 else if (errno == ESRCH)
5377 {
5378 /* At this point, ESRCH should mean the process is
5379 already gone, in which case we simply ignore attempts
5380 to read its registers. */
5381 }
5382 else
5383 {
5384 char s[256];
5385 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5386 pid);
5387 perror (s);
5388 }
5389 }
5390 else
5391 {
5392 if (regset->type == GENERAL_REGS)
5393 saw_general_regs = 1;
5394 regset->store_function (regcache, buf);
5395 }
5396 free (buf);
5397 }
5398 if (saw_general_regs)
5399 return 0;
5400 else
5401 return 1;
5402 }
5403
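/* Write registers from REGCACHE back to the current thread's LWP,
   using every enabled regset in REGSETS_INFO that has a fill
   function.  Return 0 if a general registers regset was written (or
   the process is already gone), 1 otherwise.  */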
5404 static int
5405 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5406 struct regcache *regcache)
5407 {
5408 struct regset_info *regset;
5409 int saw_general_regs = 0;
5410 int pid;
5411 struct iovec iov;
5412
5413 pid = lwpid_of (current_thread);
5414 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5415 {
5416 void *buf, *data;
5417 int nt_type, res;
5418
5419 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5420 || regset->fill_function == NULL)
5421 continue;
5422
5423 buf = xmalloc (regset->size);
5424
5425 /* First fill the buffer with the current register set contents,
5426 in case there are any items in the kernel's regset that are
5427 not in gdbserver's regcache. */
5428
5429 nt_type = regset->nt_type;
5430 if (nt_type)
5431 {
5432 iov.iov_base = buf;
5433 iov.iov_len = regset->size;
5434 data = (void *) &iov;
5435 }
5436 else
5437 data = buf;
5438
5439 #ifndef __sparc__
5440 res = ptrace (regset->get_request, pid,
5441 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5442 #else
5443 res = ptrace (regset->get_request, pid, data, nt_type);
5444 #endif
5445
5446 if (res == 0)
5447 {
5448 /* Then overlay our cached registers on that. */
5449 regset->fill_function (regcache, buf);
5450
5451 /* Only now do we write the register set. */
5452 #ifndef __sparc__
5453 res = ptrace (regset->set_request, pid,
5454 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5455 #else
5456 res = ptrace (regset->set_request, pid, data, nt_type);
5457 #endif
5458 }
5459
5460 if (res < 0)
5461 {
5462 if (errno == EIO)
5463 {
5464 /* If we get EIO on a regset, do not try it again for
5465 this process mode. */
5466 disable_regset (regsets_info, regset);
5467 }
5468 else if (errno == ESRCH)
5469 {
5470 /* At this point, ESRCH should mean the process is
5471 already gone, in which case we simply ignore attempts
5472 to change its registers. See also the related
5473 comment in linux_resume_one_lwp. */
5474 free (buf);
5475 return 0;
5476 }
5477 else
5478 {
5479 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5480 }
5481 }
5482 else if (regset->type == GENERAL_REGS)
5483 saw_general_regs = 1;
5484 free (buf);
5485 }
5486 if (saw_general_regs)
5487 return 0;
5488 else
5489 return 1;
5490 }
5491
5492 #else /* !HAVE_LINUX_REGSETS */
5493
5494 #define use_linux_regsets 0
5495 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5496 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5497
5498 #endif
5499
5500 /* Return 1 if register REGNO is supported by one of the regset ptrace
5501 calls or 0 if it has to be transferred individually. */
5502
5503 static int
5504 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5505 {
5506 unsigned char mask = 1 << (regno % 8);
5507 size_t index = regno / 8;
5508
5509 return (use_linux_regsets
5510 && (regs_info->regset_bitmap == NULL
5511 || (regs_info->regset_bitmap[index] & mask) != 0));
5512 }
5513
5514 #ifdef HAVE_LINUX_USRREGS
5515
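/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM, as
   recorded in USRREGS's register map; -1 means the register cannot be
   accessed this way.  */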
5516 static int
5517 register_addr (const struct usrregs_info *usrregs, int regnum)
5518 {
5519 int addr;
5520
5521 if (regnum < 0 || regnum >= usrregs->num_regs)
5522 error ("Invalid register number %d.", regnum);
5523
5524 addr = usrregs->regmap[regnum];
5525
5526 return addr;
5527 }
5528
5529 /* Fetch one register. */
5530 static void
5531 fetch_register (const struct usrregs_info *usrregs,
5532 struct regcache *regcache, int regno)
5533 {
5534 CORE_ADDR regaddr;
5535 int i, size;
5536 char *buf;
5537 int pid;
5538
5539 if (regno >= usrregs->num_regs)
5540 return;
5541 if ((*the_low_target.cannot_fetch_register) (regno))
5542 return;
5543
5544 regaddr = register_addr (usrregs, regno);
5545 if (regaddr == -1)
5546 return;
5547
5548 size = ((register_size (regcache->tdesc, regno)
5549 + sizeof (PTRACE_XFER_TYPE) - 1)
5550 & -sizeof (PTRACE_XFER_TYPE));
5551 buf = (char *) alloca (size);
5552
5553 pid = lwpid_of (current_thread);
5554 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5555 {
5556 errno = 0;
5557 *(PTRACE_XFER_TYPE *) (buf + i) =
5558 ptrace (PTRACE_PEEKUSER, pid,
5559 /* Coerce to a uintptr_t first to avoid potential gcc warning
5560 about coercing an 8 byte integer to a 4 byte pointer. */
5561 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5562 regaddr += sizeof (PTRACE_XFER_TYPE);
5563 if (errno != 0)
5564 {
5565 /* Mark register REGNO unavailable. */
5566 supply_register (regcache, regno, NULL);
5567 return;
5568 }
5569 }
5570
5571 if (the_low_target.supply_ptrace_register)
5572 the_low_target.supply_ptrace_register (regcache, regno, buf);
5573 else
5574 supply_register (regcache, regno, buf);
5575 }
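
/* For illustration: the PTRACE_PEEKUSER protocol used by
   fetch_register above, reduced to reading one word from user-area
   offset REGADDR. errno must be cleared before the call because -1
   is a valid word value; a nonzero errno afterwards is the only
   reliable failure indication. example_peek_user_word is an
   illustrative name, not a gdbserver function. */

static int
example_peek_user_word (pid_t pid, long regaddr, long *value)
{
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKUSER, pid,
		 (void *) (uintptr_t) regaddr, (void *) 0);
  if (errno != 0)
    return -1;		/* Treat the register as unavailable.  */
  *value = word;
  return 0;
}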
5576
5577 /* Store one register. */
5578 static void
5579 store_register (const struct usrregs_info *usrregs,
5580 struct regcache *regcache, int regno)
5581 {
5582 CORE_ADDR regaddr;
5583 int i, size;
5584 char *buf;
5585 int pid;
5586
5587 if (regno >= usrregs->num_regs)
5588 return;
5589 if ((*the_low_target.cannot_store_register) (regno))
5590 return;
5591
5592 regaddr = register_addr (usrregs, regno);
5593 if (regaddr == -1)
5594 return;
5595
5596 size = ((register_size (regcache->tdesc, regno)
5597 + sizeof (PTRACE_XFER_TYPE) - 1)
5598 & -sizeof (PTRACE_XFER_TYPE));
5599 buf = (char *) alloca (size);
5600 memset (buf, 0, size);
5601
5602 if (the_low_target.collect_ptrace_register)
5603 the_low_target.collect_ptrace_register (regcache, regno, buf);
5604 else
5605 collect_register (regcache, regno, buf);
5606
5607 pid = lwpid_of (current_thread);
5608 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5609 {
5610 errno = 0;
5611 ptrace (PTRACE_POKEUSER, pid,
5612 /* Coerce to a uintptr_t first to avoid potential gcc warning
5613 about coercing an 8 byte integer to a 4 byte pointer. */
5614 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5615 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5616 if (errno != 0)
5617 {
5618 /* At this point, ESRCH should mean the process is
5619 already gone, in which case we simply ignore attempts
5620 to change its registers. See also the related
5621 comment in linux_resume_one_lwp. */
5622 if (errno == ESRCH)
5623 return;
5624
5625 if ((*the_low_target.cannot_store_register) (regno) == 0)
5626 error ("writing register %d: %s", regno, strerror (errno));
5627 }
5628 regaddr += sizeof (PTRACE_XFER_TYPE);
5629 }
5630 }
5631
5632 /* Fetch all registers, or just one, from the child process.
5633 If REGNO is -1, do this for all registers, skipping any that are
5634 assumed to have been retrieved by regsets_fetch_inferior_registers,
5635 unless ALL is non-zero.
5636 Otherwise, REGNO specifies which register (so we can save time). */
5637 static void
5638 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5639 struct regcache *regcache, int regno, int all)
5640 {
5641 struct usrregs_info *usr = regs_info->usrregs;
5642
5643 if (regno == -1)
5644 {
5645 for (regno = 0; regno < usr->num_regs; regno++)
5646 if (all || !linux_register_in_regsets (regs_info, regno))
5647 fetch_register (usr, regcache, regno);
5648 }
5649 else
5650 fetch_register (usr, regcache, regno);
5651 }
5652
5653 /* Store our register values back into the inferior.
5654 If REGNO is -1, do this for all registers, skipping any that are
5655 assumed to have been saved by regsets_store_inferior_registers,
5656 unless ALL is non-zero.
5657 Otherwise, REGNO specifies which register (so we can save time). */
5658 static void
5659 usr_store_inferior_registers (const struct regs_info *regs_info,
5660 struct regcache *regcache, int regno, int all)
5661 {
5662 struct usrregs_info *usr = regs_info->usrregs;
5663
5664 if (regno == -1)
5665 {
5666 for (regno = 0; regno < usr->num_regs; regno++)
5667 if (all || !linux_register_in_regsets (regs_info, regno))
5668 store_register (usr, regcache, regno);
5669 }
5670 else
5671 store_register (usr, regcache, regno);
5672 }
5673
5674 #else /* !HAVE_LINUX_USRREGS */
5675
5676 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5677 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5678
5679 #endif
5680
5681
5682 static void
5683 linux_fetch_registers (struct regcache *regcache, int regno)
5684 {
5685 int use_regsets;
5686 int all = 0;
5687 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5688
5689 if (regno == -1)
5690 {
5691 if (the_low_target.fetch_register != NULL
5692 && regs_info->usrregs != NULL)
5693 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5694 (*the_low_target.fetch_register) (regcache, regno);
5695
5696 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5697 if (regs_info->usrregs != NULL)
5698 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5699 }
5700 else
5701 {
5702 if (the_low_target.fetch_register != NULL
5703 && (*the_low_target.fetch_register) (regcache, regno))
5704 return;
5705
5706 use_regsets = linux_register_in_regsets (regs_info, regno);
5707 if (use_regsets)
5708 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5709 regcache);
5710 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5711 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5712 }
5713 }
5714
5715 static void
5716 linux_store_registers (struct regcache *regcache, int regno)
5717 {
5718 int use_regsets;
5719 int all = 0;
5720 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5721
5722 if (regno == -1)
5723 {
5724 all = regsets_store_inferior_registers (regs_info->regsets_info,
5725 regcache);
5726 if (regs_info->usrregs != NULL)
5727 usr_store_inferior_registers (regs_info, regcache, regno, all);
5728 }
5729 else
5730 {
5731 use_regsets = linux_register_in_regsets (regs_info, regno);
5732 if (use_regsets)
5733 all = regsets_store_inferior_registers (regs_info->regsets_info,
5734 regcache);
5735 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5736 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5737 }
5738 }
5739
5740
5741 /* Copy LEN bytes from the inferior's memory starting at MEMADDR
5742 to debugger memory starting at MYADDR. */
5743
5744 static int
5745 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5746 {
5747 int pid = lwpid_of (current_thread);
5748 PTRACE_XFER_TYPE *buffer;
5749 CORE_ADDR addr;
5750 int count;
5751 char filename[64];
5752 int i;
5753 int ret;
5754 int fd;
5755
5756 /* Try using /proc. Don't bother for reads of just a word or two. */
5757 if (len >= 3 * sizeof (long))
5758 {
5759 int bytes;
5760
5761 /* We could keep this file open and cache it - possibly one per
5762 thread. That requires some juggling, but is even faster. */
5763 sprintf (filename, "/proc/%d/mem", pid);
5764 fd = open (filename, O_RDONLY | O_LARGEFILE);
5765 if (fd == -1)
5766 goto no_proc;
5767
5768 /* If pread64 is available, use it. It's faster if the kernel
5769 supports it (only one syscall), and it's 64-bit safe even on
5770 32-bit platforms (for instance, SPARC debugging a SPARC64
5771 application). */
5772 #ifdef HAVE_PREAD64
5773 bytes = pread64 (fd, myaddr, len, memaddr);
5774 #else
5775 bytes = -1;
5776 if (lseek (fd, memaddr, SEEK_SET) != -1)
5777 bytes = read (fd, myaddr, len);
5778 #endif
5779
5780 close (fd);
5781 if (bytes == len)
5782 return 0;
5783
5784 /* Some data was read; we'll try to get the rest with ptrace. */
5785 if (bytes > 0)
5786 {
5787 memaddr += bytes;
5788 myaddr += bytes;
5789 len -= bytes;
5790 }
5791 }
5792
5793 no_proc:
5794 /* Round starting address down to longword boundary. */
5795 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5796 /* Round ending address up; get number of longwords that makes. */
5797 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5798 / sizeof (PTRACE_XFER_TYPE));
5799 /* Allocate buffer of that many longwords. */
5800 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5801
5802 /* Read all the longwords. */
5803 errno = 0;
5804 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5805 {
5806 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5807 about coercing an 8 byte integer to a 4 byte pointer. */
5808 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5809 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5810 (PTRACE_TYPE_ARG4) 0);
5811 if (errno)
5812 break;
5813 }
5814 ret = errno;
5815
5816 /* Copy appropriate bytes out of the buffer. */
5817 if (i > 0)
5818 {
5819 i *= sizeof (PTRACE_XFER_TYPE);
5820 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5821 memcpy (myaddr,
5822 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5823 i < len ? i : len);
5824 }
5825
5826 return ret;
5827 }
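
/* For illustration: the /proc/PID/mem fast path above as a
   self-contained sketch, using plain POSIX pread rather than the
   pread64 the code above prefers for 64-bit offsets on 32-bit hosts.
   The inferior must already be ptrace-stopped for the kernel to
   permit the read. example_read_proc_mem is an illustrative name. */

static int
example_read_proc_mem (pid_t pid, CORE_ADDR memaddr,
		       unsigned char *myaddr, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  xsnprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;
  bytes = pread (fd, myaddr, len, (off_t) memaddr);
  close (fd);
  return bytes == (ssize_t) len ? 0 : -1;
}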
5828
5829 /* Copy LEN bytes of data from debugger memory at MYADDR to the
5830 inferior's memory at MEMADDR. On failure (cannot write to the
5831 inferior) returns the value of errno. Always succeeds if LEN is zero. */
5832
5833 static int
5834 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5835 {
5836 int i;
5837 /* Round starting address down to longword boundary. */
5838 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5839 /* Round ending address up; get number of longwords that makes. */
5840 int count
5841 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5842 / sizeof (PTRACE_XFER_TYPE);
5843
5844 /* Allocate buffer of that many longwords. */
5845 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5846
5847 int pid = lwpid_of (current_thread);
5848
5849 if (len == 0)
5850 {
5851 /* Zero length write always succeeds. */
5852 return 0;
5853 }
5854
5855 if (debug_threads)
5856 {
5857 /* Dump up to four bytes. */
5858 char str[4 * 2 + 1];
5859 char *p = str;
5860 int dump = len < 4 ? len : 4;
5861
5862 for (i = 0; i < dump; i++)
5863 {
5864 sprintf (p, "%02x", myaddr[i]);
5865 p += 2;
5866 }
5867 *p = '\0';
5868
5869 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5870 str, (long) memaddr, pid);
5871 }
5872
5873 /* Fill start and end extra bytes of buffer with existing memory data. */
5874
5875 errno = 0;
5876 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5877 about coercing an 8 byte integer to a 4 byte pointer. */
5878 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5879 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5880 (PTRACE_TYPE_ARG4) 0);
5881 if (errno)
5882 return errno;
5883
5884 if (count > 1)
5885 {
5886 errno = 0;
5887 buffer[count - 1]
5888 = ptrace (PTRACE_PEEKTEXT, pid,
5889 /* Coerce to a uintptr_t first to avoid potential gcc warning
5890 about coercing an 8 byte integer to a 4 byte pointer. */
5891 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5892 * sizeof (PTRACE_XFER_TYPE)),
5893 (PTRACE_TYPE_ARG4) 0);
5894 if (errno)
5895 return errno;
5896 }
5897
5898 /* Copy data to be written over corresponding part of buffer. */
5899
5900 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5901 myaddr, len);
5902
5903 /* Write the entire buffer. */
5904
5905 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5906 {
5907 errno = 0;
5908 ptrace (PTRACE_POKETEXT, pid,
5909 /* Coerce to a uintptr_t first to avoid potential gcc warning
5910 about coercing an 8 byte integer to a 4 byte pointer. */
5911 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5912 (PTRACE_TYPE_ARG4) buffer[i]);
5913 if (errno)
5914 return errno;
5915 }
5916
5917 return 0;
5918 }
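
/* For illustration: the word-alignment arithmetic shared by
   linux_read_memory and linux_write_memory above, isolated into a
   helper. With 8-byte words, memaddr = 0x1003 and len = 0x10 yield
   addr = 0x1000 and count = 3, i.e. three whole words covering
   0x1000..0x1017 and thus the requested 0x1003..0x1012.
   example_aligned_span is an illustrative name. */

static void
example_aligned_span (CORE_ADDR memaddr, size_t len,
		      CORE_ADDR *addr, size_t *count)
{
  const size_t word = sizeof (PTRACE_XFER_TYPE);

  /* Round the start down to a word boundary...  */
  *addr = memaddr & -(CORE_ADDR) word;
  /* ... and round the end up, counting whole words in between.  */
  *count = ((memaddr + len) - *addr + word - 1) / word;
}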
5919
5920 static void
5921 linux_look_up_symbols (void)
5922 {
5923 #ifdef USE_THREAD_DB
5924 struct process_info *proc = current_process ();
5925
5926 if (proc->priv->thread_db != NULL)
5927 return;
5928
5929 thread_db_init ();
5930 #endif
5931 }
5932
5933 static void
5934 linux_request_interrupt (void)
5935 {
5936 /* Send a SIGINT to the process group. This acts just like the user
5937 typed a ^C on the controlling terminal. */
5938 kill (-signal_pid, SIGINT);
5939 }
5940
5941 /* Copy LEN bytes from the inferior's auxiliary vector starting at
5942 OFFSET to debugger memory starting at MYADDR. */
5943
5944 static int
5945 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5946 {
5947 char filename[PATH_MAX];
5948 int fd, n;
5949 int pid = lwpid_of (current_thread);
5950
5951 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5952
5953 fd = open (filename, O_RDONLY);
5954 if (fd < 0)
5955 return -1;
5956
5957 if (offset != (CORE_ADDR) 0
5958 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5959 n = -1;
5960 else
5961 n = read (fd, myaddr, len);
5962
5963 close (fd);
5964
5965 return n;
5966 }
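
/* For illustration: how raw bytes returned by linux_read_auxv can be
   scanned for one tag. The sketch assumes a 64-bit inferior and so
   uses Elf64_auxv_t from <elf.h>; get_phdr_phnum_from_proc_auxv
   below performs the same walk for both word sizes.
   example_scan_auxv is an illustrative name. */

static int
example_scan_auxv (const unsigned char *auxv, size_t len,
		   unsigned long tag, unsigned long *value)
{
  size_t pos;

  for (pos = 0; pos + sizeof (Elf64_auxv_t) <= len;
       pos += sizeof (Elf64_auxv_t))
    {
      const Elf64_auxv_t *aux = (const Elf64_auxv_t *) (auxv + pos);

      if (aux->a_type == AT_NULL)
	break;
      if (aux->a_type == tag)
	{
	  *value = aux->a_un.a_val;
	  return 0;
	}
    }
  return -1;		/* Tag not present.  */
}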
5967
5968 /* These breakpoint- and watchpoint-related wrapper functions simply
5969 pass on the function call if the target has registered a
5970 corresponding function. */
5971
5972 static int
5973 linux_supports_z_point_type (char z_type)
5974 {
5975 return (the_low_target.supports_z_point_type != NULL
5976 && the_low_target.supports_z_point_type (z_type));
5977 }
5978
5979 static int
5980 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5981 int size, struct raw_breakpoint *bp)
5982 {
5983 if (type == raw_bkpt_type_sw)
5984 return insert_memory_breakpoint (bp);
5985 else if (the_low_target.insert_point != NULL)
5986 return the_low_target.insert_point (type, addr, size, bp);
5987 else
5988 /* Unsupported (see target.h). */
5989 return 1;
5990 }
5991
5992 static int
5993 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5994 int size, struct raw_breakpoint *bp)
5995 {
5996 if (type == raw_bkpt_type_sw)
5997 return remove_memory_breakpoint (bp);
5998 else if (the_low_target.remove_point != NULL)
5999 return the_low_target.remove_point (type, addr, size, bp);
6000 else
6001 /* Unsupported (see target.h). */
6002 return 1;
6003 }
6004
6005 /* Implement the to_stopped_by_sw_breakpoint target_ops
6006 method. */
6007
6008 static int
6009 linux_stopped_by_sw_breakpoint (void)
6010 {
6011 struct lwp_info *lwp = get_thread_lwp (current_thread);
6012
6013 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6014 }
6015
6016 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6017 method. */
6018
6019 static int
6020 linux_supports_stopped_by_sw_breakpoint (void)
6021 {
6022 return USE_SIGTRAP_SIGINFO;
6023 }
6024
6025 /* Implement the to_stopped_by_hw_breakpoint target_ops
6026 method. */
6027
6028 static int
6029 linux_stopped_by_hw_breakpoint (void)
6030 {
6031 struct lwp_info *lwp = get_thread_lwp (current_thread);
6032
6033 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6034 }
6035
6036 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6037 method. */
6038
6039 static int
6040 linux_supports_stopped_by_hw_breakpoint (void)
6041 {
6042 return USE_SIGTRAP_SIGINFO;
6043 }
6044
6045 /* Implement the supports_hardware_single_step target_ops method. */
6046
6047 static int
6048 linux_supports_hardware_single_step (void)
6049 {
6050 return can_hardware_single_step ();
6051 }
6052
6053 static int
6054 linux_supports_software_single_step (void)
6055 {
6056 return can_software_single_step ();
6057 }
6058
6059 static int
6060 linux_stopped_by_watchpoint (void)
6061 {
6062 struct lwp_info *lwp = get_thread_lwp (current_thread);
6063
6064 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6065 }
6066
6067 static CORE_ADDR
6068 linux_stopped_data_address (void)
6069 {
6070 struct lwp_info *lwp = get_thread_lwp (current_thread);
6071
6072 return lwp->stopped_data_address;
6073 }
6074
6075 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6076 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6077 && defined(PT_TEXT_END_ADDR)
6078
6079 /* This is only used for targets that define PT_TEXT_ADDR,
6080 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6081 target presumably has other ways of acquiring this information,
6082 such as loadmaps. */
6083
6084 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6085 to tell gdb about. */
6086
6087 static int
6088 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6089 {
6090 unsigned long text, text_end, data;
6091 int pid = lwpid_of (current_thread);
6092
6093 errno = 0;
6094
6095 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6096 (PTRACE_TYPE_ARG4) 0);
6097 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6098 (PTRACE_TYPE_ARG4) 0);
6099 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6100 (PTRACE_TYPE_ARG4) 0);
6101
6102 if (errno == 0)
6103 {
6104 /* Both text and data offsets produced at compile-time (and so
6105 used by gdb) are relative to the beginning of the program,
6106 with the data segment immediately following the text segment.
6107 However, the actual runtime layout in memory may put the data
6108 somewhere else, so when we send gdb a data base-address, we
6109 use the real data base address and subtract the compile-time
6110 data base-address from it (which is just the length of the
6111 text segment). BSS immediately follows data in both
6112 cases. */
6113 *text_p = text;
6114 *data_p = data - (text_end - text);
6115
6116 return 1;
6117 }
6118 return 0;
6119 }
6120 #endif
6121
6122 static int
6123 linux_qxfer_osdata (const char *annex,
6124 unsigned char *readbuf, unsigned const char *writebuf,
6125 CORE_ADDR offset, int len)
6126 {
6127 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6128 }
6129
6130 /* Convert a native/host siginfo object into/from the siginfo in the
6131 layout of the inferior's architecture. */
6132
6133 static void
6134 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6135 {
6136 int done = 0;
6137
6138 if (the_low_target.siginfo_fixup != NULL)
6139 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6140
6141 /* If there was no callback, or the callback didn't do anything,
6142 then just do a straight memcpy. */
6143 if (!done)
6144 {
6145 if (direction == 1)
6146 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6147 else
6148 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6149 }
6150 }
6151
6152 static int
6153 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6154 unsigned const char *writebuf, CORE_ADDR offset, int len)
6155 {
6156 int pid;
6157 siginfo_t siginfo;
6158 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6159
6160 if (current_thread == NULL)
6161 return -1;
6162
6163 pid = lwpid_of (current_thread);
6164
6165 if (debug_threads)
6166 debug_printf ("%s siginfo for lwp %d.\n",
6167 readbuf != NULL ? "Reading" : "Writing",
6168 pid);
6169
6170 if (offset >= sizeof (siginfo))
6171 return -1;
6172
6173 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6174 return -1;
6175
6176 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6177 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6178 inferior with a 64-bit GDBSERVER should look the same as debugging it
6179 with a 32-bit GDBSERVER, we need to convert it. */
6180 siginfo_fixup (&siginfo, inf_siginfo, 0);
6181
6182 if (offset + len > sizeof (siginfo))
6183 len = sizeof (siginfo) - offset;
6184
6185 if (readbuf != NULL)
6186 memcpy (readbuf, inf_siginfo + offset, len);
6187 else
6188 {
6189 memcpy (inf_siginfo + offset, writebuf, len);
6190
6191 /* Convert back to ptrace layout before flushing it out. */
6192 siginfo_fixup (&siginfo, inf_siginfo, 1);
6193
6194 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6195 return -1;
6196 }
6197
6198 return len;
6199 }
6200
6201 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6202 it lets us notice when children change state, and it acts as the
6203 handler for the sigsuspend in my_waitpid. */
6204
6205 static void
6206 sigchld_handler (int signo)
6207 {
6208 int old_errno = errno;
6209
6210 if (debug_threads)
6211 {
6212 do
6213 {
6214 /* fprintf is not async-signal-safe, so call write
6215 directly. */
6216 if (write (2, "sigchld_handler\n",
6217 sizeof ("sigchld_handler\n") - 1) < 0)
6218 break; /* just ignore */
6219 } while (0);
6220 }
6221
6222 if (target_is_async_p ())
6223 async_file_mark (); /* trigger a linux_wait */
6224
6225 errno = old_errno;
6226 }
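
/* For illustration: sigchld_handler above and the pipe created in
   linux_async below together form the classic "self-pipe trick".
   The only work done in signal context is an async-signal-safe write
   to a non-blocking pipe (via async_file_mark), and the event loop's
   read end turns the signal into an ordinary file event. A minimal
   sketch of the pattern, with illustrative example_* names: */

static int example_event_pipe[2];	/* Made with pipe (), O_NONBLOCK.  */

static void
example_sigchld (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; fprintf and malloc are not. A full
     pipe is fine - the pending byte already guarantees a wakeup.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    ;				/* Ignored deliberately.  */
  errno = saved_errno;
}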
6227
6228 static int
6229 linux_supports_non_stop (void)
6230 {
6231 return 1;
6232 }
6233
6234 static int
6235 linux_async (int enable)
6236 {
6237 int previous = target_is_async_p ();
6238
6239 if (debug_threads)
6240 debug_printf ("linux_async (%d), previous=%d\n",
6241 enable, previous);
6242
6243 if (previous != enable)
6244 {
6245 sigset_t mask;
6246 sigemptyset (&mask);
6247 sigaddset (&mask, SIGCHLD);
6248
6249 sigprocmask (SIG_BLOCK, &mask, NULL);
6250
6251 if (enable)
6252 {
6253 if (pipe (linux_event_pipe) == -1)
6254 {
6255 linux_event_pipe[0] = -1;
6256 linux_event_pipe[1] = -1;
6257 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6258
6259 warning ("creating event pipe failed.");
6260 return previous;
6261 }
6262
6263 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6264 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6265
6266 /* Register the event loop handler. */
6267 add_file_handler (linux_event_pipe[0],
6268 handle_target_event, NULL);
6269
6270 /* Always trigger a linux_wait. */
6271 async_file_mark ();
6272 }
6273 else
6274 {
6275 delete_file_handler (linux_event_pipe[0]);
6276
6277 close (linux_event_pipe[0]);
6278 close (linux_event_pipe[1]);
6279 linux_event_pipe[0] = -1;
6280 linux_event_pipe[1] = -1;
6281 }
6282
6283 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6284 }
6285
6286 return previous;
6287 }
6288
6289 static int
6290 linux_start_non_stop (int nonstop)
6291 {
6292 /* Register or unregister from event-loop accordingly. */
6293 linux_async (nonstop);
6294
6295 if (target_is_async_p () != (nonstop != 0))
6296 return -1;
6297
6298 return 0;
6299 }
6300
6301 static int
6302 linux_supports_multi_process (void)
6303 {
6304 return 1;
6305 }
6306
6307 /* Check if fork events are supported. */
6308
6309 static int
6310 linux_supports_fork_events (void)
6311 {
6312 return linux_supports_tracefork ();
6313 }
6314
6315 /* Check if vfork events are supported. */
6316
6317 static int
6318 linux_supports_vfork_events (void)
6319 {
6320 return linux_supports_tracefork ();
6321 }
6322
6323 /* Check if exec events are supported. */
6324
6325 static int
6326 linux_supports_exec_events (void)
6327 {
6328 return linux_supports_traceexec ();
6329 }
6330
6331 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6332 ptrace flags for all inferiors. This is in case the new GDB connection
6333 doesn't support the same set of events that the previous one did. */
6334
6335 static void
6336 linux_handle_new_gdb_connection (void)
6337 {
6338 /* Request that all the lwps reset their ptrace options. */
6339 for_each_thread ([] (thread_info *thread)
6340 {
6341 struct lwp_info *lwp = get_thread_lwp (thread);
6342
6343 if (!lwp->stopped)
6344 {
6345 /* Stop the lwp so we can modify its ptrace options. */
6346 lwp->must_set_ptrace_flags = 1;
6347 linux_stop_lwp (lwp);
6348 }
6349 else
6350 {
6351 /* Already stopped; go ahead and set the ptrace options. */
6352 struct process_info *proc = find_process_pid (pid_of (thread));
6353 int options = linux_low_ptrace_options (proc->attached);
6354
6355 linux_enable_event_reporting (lwpid_of (thread), options);
6356 lwp->must_set_ptrace_flags = 0;
6357 }
6358 });
6359 }
6360
6361 static int
6362 linux_supports_disable_randomization (void)
6363 {
6364 #ifdef HAVE_PERSONALITY
6365 return 1;
6366 #else
6367 return 0;
6368 #endif
6369 }
6370
6371 static int
6372 linux_supports_agent (void)
6373 {
6374 return 1;
6375 }
6376
6377 static int
6378 linux_supports_range_stepping (void)
6379 {
6380 if (can_software_single_step ())
6381 return 1;
6382 if (the_low_target.supports_range_stepping == NULL)
6383 return 0;
6384
6385 return (*the_low_target.supports_range_stepping) ();
6386 }
6387
6388 /* Enumerate spufs IDs for process PID. */
6389 static int
6390 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6391 {
6392 int pos = 0;
6393 int written = 0;
6394 char path[128];
6395 DIR *dir;
6396 struct dirent *entry;
6397
6398 sprintf (path, "/proc/%ld/fd", pid);
6399 dir = opendir (path);
6400 if (!dir)
6401 return -1;
6402
6403 rewinddir (dir);
6404 while ((entry = readdir (dir)) != NULL)
6405 {
6406 struct stat st;
6407 struct statfs stfs;
6408 int fd;
6409
6410 fd = atoi (entry->d_name);
6411 if (!fd)
6412 continue;
6413
6414 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6415 if (stat (path, &st) != 0)
6416 continue;
6417 if (!S_ISDIR (st.st_mode))
6418 continue;
6419
6420 if (statfs (path, &stfs) != 0)
6421 continue;
6422 if (stfs.f_type != SPUFS_MAGIC)
6423 continue;
6424
6425 if (pos >= offset && pos + 4 <= offset + len)
6426 {
6427 *(unsigned int *)(buf + pos - offset) = fd;
6428 written += 4;
6429 }
6430 pos += 4;
6431 }
6432
6433 closedir (dir);
6434 return written;
6435 }
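
/* For illustration: the filesystem-type test used in the loop above,
   isolated. statfs fills f_type with a per-filesystem magic number,
   so comparing against SPUFS_MAGIC (defined near the top of this
   file) identifies SPU context directories among the inferior's open
   file descriptors. example_is_spufs_dir is an illustrative name. */

static int
example_is_spufs_dir (const char *path)
{
  struct stat st;
  struct statfs stfs;

  return (stat (path, &st) == 0
	  && S_ISDIR (st.st_mode)
	  && statfs (path, &stfs) == 0
	  && stfs.f_type == SPUFS_MAGIC);
}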
6436
6437 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6438 object type, using the /proc file system. */
6439 static int
6440 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6441 unsigned const char *writebuf,
6442 CORE_ADDR offset, int len)
6443 {
6444 long pid = lwpid_of (current_thread);
6445 char buf[128];
6446 int fd = 0;
6447 int ret = 0;
6448
6449 if (!writebuf && !readbuf)
6450 return -1;
6451
6452 if (!*annex)
6453 {
6454 if (!readbuf)
6455 return -1;
6456 else
6457 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6458 }
6459
6460 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6461 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6462 if (fd < 0)
6463 return -1;
6464
6465 if (offset != 0
6466 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6467 {
6468 close (fd);
6469 return 0;
6470 }
6471
6472 if (writebuf)
6473 ret = write (fd, writebuf, (size_t) len);
6474 else
6475 ret = read (fd, readbuf, (size_t) len);
6476
6477 close (fd);
6478 return ret;
6479 }
6480
6481 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6482 struct target_loadseg
6483 {
6484 /* Core address to which the segment is mapped. */
6485 Elf32_Addr addr;
6486 /* VMA recorded in the program header. */
6487 Elf32_Addr p_vaddr;
6488 /* Size of this segment in memory. */
6489 Elf32_Word p_memsz;
6490 };
6491
6492 # if defined PT_GETDSBT
6493 struct target_loadmap
6494 {
6495 /* Protocol version number, must be zero. */
6496 Elf32_Word version;
6497 /* Pointer to the DSBT table, its size, and the DSBT index. */
6498 unsigned *dsbt_table;
6499 unsigned dsbt_size, dsbt_index;
6500 /* Number of segments in this map. */
6501 Elf32_Word nsegs;
6502 /* The actual memory map. */
6503 struct target_loadseg segs[/*nsegs*/];
6504 };
6505 # define LINUX_LOADMAP PT_GETDSBT
6506 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6507 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6508 # else
6509 struct target_loadmap
6510 {
6511 /* Protocol version number, must be zero. */
6512 Elf32_Half version;
6513 /* Number of segments in this map. */
6514 Elf32_Half nsegs;
6515 /* The actual memory map. */
6516 struct target_loadseg segs[/*nsegs*/];
6517 };
6518 # define LINUX_LOADMAP PTRACE_GETFDPIC
6519 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6520 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6521 # endif
6522
6523 static int
6524 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6525 unsigned char *myaddr, unsigned int len)
6526 {
6527 int pid = lwpid_of (current_thread);
6528 int addr = -1;
6529 struct target_loadmap *data = NULL;
6530 unsigned int actual_length, copy_length;
6531
6532 if (strcmp (annex, "exec") == 0)
6533 addr = (int) LINUX_LOADMAP_EXEC;
6534 else if (strcmp (annex, "interp") == 0)
6535 addr = (int) LINUX_LOADMAP_INTERP;
6536 else
6537 return -1;
6538
6539 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6540 return -1;
6541
6542 if (data == NULL)
6543 return -1;
6544
6545 actual_length = sizeof (struct target_loadmap)
6546 + sizeof (struct target_loadseg) * data->nsegs;
6547
6548 if (offset < 0 || offset > actual_length)
6549 return -1;
6550
6551 copy_length = actual_length - offset < len ? actual_length - offset : len;
6552 memcpy (myaddr, (char *) data + offset, copy_length);
6553 return copy_length;
6554 }
6555 #else
6556 # define linux_read_loadmap NULL
6557 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6558
6559 static void
6560 linux_process_qsupported (char **features, int count)
6561 {
6562 if (the_low_target.process_qsupported != NULL)
6563 the_low_target.process_qsupported (features, count);
6564 }
6565
6566 static int
6567 linux_supports_catch_syscall (void)
6568 {
6569 return (the_low_target.get_syscall_trapinfo != NULL
6570 && linux_supports_tracesysgood ());
6571 }
6572
6573 static int
6574 linux_get_ipa_tdesc_idx (void)
6575 {
6576 if (the_low_target.get_ipa_tdesc_idx == NULL)
6577 return 0;
6578
6579 return (*the_low_target.get_ipa_tdesc_idx) ();
6580 }
6581
6582 static int
6583 linux_supports_tracepoints (void)
6584 {
6585 if (the_low_target.supports_tracepoints == NULL)
6586 return 0;
6587
6588 return (*the_low_target.supports_tracepoints) ();
6589 }
6590
6591 static CORE_ADDR
6592 linux_read_pc (struct regcache *regcache)
6593 {
6594 if (the_low_target.get_pc == NULL)
6595 return 0;
6596
6597 return (*the_low_target.get_pc) (regcache);
6598 }
6599
6600 static void
6601 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6602 {
6603 gdb_assert (the_low_target.set_pc != NULL);
6604
6605 (*the_low_target.set_pc) (regcache, pc);
6606 }
6607
6608 static int
6609 linux_thread_stopped (struct thread_info *thread)
6610 {
6611 return get_thread_lwp (thread)->stopped;
6612 }
6613
6614 /* This exposes stop-all-threads functionality to other modules. */
6615
6616 static void
6617 linux_pause_all (int freeze)
6618 {
6619 stop_all_lwps (freeze, NULL);
6620 }
6621
6622 /* This exposes unstop-all-threads functionality to other gdbserver
6623 modules. */
6624
6625 static void
6626 linux_unpause_all (int unfreeze)
6627 {
6628 unstop_all_lwps (unfreeze, NULL);
6629 }
6630
6631 static int
6632 linux_prepare_to_access_memory (void)
6633 {
6634 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6635 running LWP. */
6636 if (non_stop)
6637 linux_pause_all (1);
6638 return 0;
6639 }
6640
6641 static void
6642 linux_done_accessing_memory (void)
6643 {
6644 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6645 running LWP. */
6646 if (non_stop)
6647 linux_unpause_all (1);
6648 }
6649
6650 static int
6651 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6652 CORE_ADDR collector,
6653 CORE_ADDR lockaddr,
6654 ULONGEST orig_size,
6655 CORE_ADDR *jump_entry,
6656 CORE_ADDR *trampoline,
6657 ULONGEST *trampoline_size,
6658 unsigned char *jjump_pad_insn,
6659 ULONGEST *jjump_pad_insn_size,
6660 CORE_ADDR *adjusted_insn_addr,
6661 CORE_ADDR *adjusted_insn_addr_end,
6662 char *err)
6663 {
6664 return (*the_low_target.install_fast_tracepoint_jump_pad)
6665 (tpoint, tpaddr, collector, lockaddr, orig_size,
6666 jump_entry, trampoline, trampoline_size,
6667 jjump_pad_insn, jjump_pad_insn_size,
6668 adjusted_insn_addr, adjusted_insn_addr_end,
6669 err);
6670 }
6671
6672 static struct emit_ops *
6673 linux_emit_ops (void)
6674 {
6675 if (the_low_target.emit_ops != NULL)
6676 return (*the_low_target.emit_ops) ();
6677 else
6678 return NULL;
6679 }
6680
6681 static int
6682 linux_get_min_fast_tracepoint_insn_len (void)
6683 {
6684 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6685 }
6686
6687 /* Extract &phdr and num_phdr from the inferior's auxv. Return 0 on success. */
6688
6689 static int
6690 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6691 CORE_ADDR *phdr_memaddr, int *num_phdr)
6692 {
6693 char filename[PATH_MAX];
6694 int fd;
6695 const int auxv_size = is_elf64
6696 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6697 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6698
6699 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6700
6701 fd = open (filename, O_RDONLY);
6702 if (fd < 0)
6703 return 1;
6704
6705 *phdr_memaddr = 0;
6706 *num_phdr = 0;
6707 while (read (fd, buf, auxv_size) == auxv_size
6708 && (*phdr_memaddr == 0 || *num_phdr == 0))
6709 {
6710 if (is_elf64)
6711 {
6712 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6713
6714 switch (aux->a_type)
6715 {
6716 case AT_PHDR:
6717 *phdr_memaddr = aux->a_un.a_val;
6718 break;
6719 case AT_PHNUM:
6720 *num_phdr = aux->a_un.a_val;
6721 break;
6722 }
6723 }
6724 else
6725 {
6726 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6727
6728 switch (aux->a_type)
6729 {
6730 case AT_PHDR:
6731 *phdr_memaddr = aux->a_un.a_val;
6732 break;
6733 case AT_PHNUM:
6734 *num_phdr = aux->a_un.a_val;
6735 break;
6736 }
6737 }
6738 }
6739
6740 close (fd);
6741
6742 if (*phdr_memaddr == 0 || *num_phdr == 0)
6743 {
6744 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6745 "phdr_memaddr = %ld, phdr_num = %d",
6746 (long) *phdr_memaddr, *num_phdr);
6747 return 2;
6748 }
6749
6750 return 0;
6751 }
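
/* For illustration: for the current process the same two values are
   available directly from getauxval (glibc 2.16 and later), with no
   /proc parsing; the function above reads /proc/PID/auxv only
   because it inspects another process. The sketch assumes
   <sys/auxv.h>; example_own_phdr_phnum is an illustrative name. */

#include <sys/auxv.h>

static void
example_own_phdr_phnum (CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  *phdr_memaddr = (CORE_ADDR) getauxval (AT_PHDR);
  *num_phdr = (int) getauxval (AT_PHNUM);
}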
6752
6753 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6754
6755 static CORE_ADDR
6756 get_dynamic (const int pid, const int is_elf64)
6757 {
6758 CORE_ADDR phdr_memaddr, relocation;
6759 int num_phdr, i;
6760 unsigned char *phdr_buf;
6761 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6762
6763 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6764 return 0;
6765
6766 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6767 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6768
6769 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6770 return 0;
6771
6772 /* Compute relocation: it is expected to be 0 for "regular" executables,
6773 non-zero for PIE ones. */
6774 relocation = -1;
6775 for (i = 0; relocation == -1 && i < num_phdr; i++)
6776 if (is_elf64)
6777 {
6778 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6779
6780 if (p->p_type == PT_PHDR)
6781 relocation = phdr_memaddr - p->p_vaddr;
6782 }
6783 else
6784 {
6785 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6786
6787 if (p->p_type == PT_PHDR)
6788 relocation = phdr_memaddr - p->p_vaddr;
6789 }
6790
6791 if (relocation == -1)
6792 {
6793 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6794 real-world executables, including PIE executables, always have
6795 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6796 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6797 provides DT_DEBUG anyway (fpc binaries are statically linked).
6798
6799 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6800
6801 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6802
6803 return 0;
6804 }
6805
6806 for (i = 0; i < num_phdr; i++)
6807 {
6808 if (is_elf64)
6809 {
6810 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6811
6812 if (p->p_type == PT_DYNAMIC)
6813 return p->p_vaddr + relocation;
6814 }
6815 else
6816 {
6817 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6818
6819 if (p->p_type == PT_DYNAMIC)
6820 return p->p_vaddr + relocation;
6821 }
6822 }
6823
6824 return 0;
6825 }
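
/* For illustration: the relocation computed above (the runtime
   PT_PHDR address minus its recorded p_vaddr) is the load bias that
   dl_iterate_phdr reports as dlpi_addr when applied to the current
   process: 0 for fixed-address executables, non-zero for PIE. The
   sketch assumes <link.h> and <stdio.h>; example_print_bias is an
   illustrative name. */

#include <link.h>
#include <stdio.h>

static int
example_print_bias (struct dl_phdr_info *info, size_t size, void *data)
{
  fprintf (stderr, "%s: load bias 0x%lx\n",
	   info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main)",
	   (unsigned long) info->dlpi_addr);
  return 0;			/* Continue iterating.  */
}

/* Usage: dl_iterate_phdr (example_print_bias, NULL);  */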
6826
6827 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6828 can be 0 if the inferior does not yet have the library list initialized.
6829 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6830 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6831
6832 static CORE_ADDR
6833 get_r_debug (const int pid, const int is_elf64)
6834 {
6835 CORE_ADDR dynamic_memaddr;
6836 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6837 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6838 CORE_ADDR map = -1;
6839
6840 dynamic_memaddr = get_dynamic (pid, is_elf64);
6841 if (dynamic_memaddr == 0)
6842 return map;
6843
6844 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6845 {
6846 if (is_elf64)
6847 {
6848 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6849 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6850 union
6851 {
6852 Elf64_Xword map;
6853 unsigned char buf[sizeof (Elf64_Xword)];
6854 }
6855 rld_map;
6856 #endif
6857 #ifdef DT_MIPS_RLD_MAP
6858 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6859 {
6860 if (linux_read_memory (dyn->d_un.d_val,
6861 rld_map.buf, sizeof (rld_map.buf)) == 0)
6862 return rld_map.map;
6863 else
6864 break;
6865 }
6866 #endif /* DT_MIPS_RLD_MAP */
6867 #ifdef DT_MIPS_RLD_MAP_REL
6868 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6869 {
6870 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6871 rld_map.buf, sizeof (rld_map.buf)) == 0)
6872 return rld_map.map;
6873 else
6874 break;
6875 }
6876 #endif /* DT_MIPS_RLD_MAP_REL */
6877
6878 if (dyn->d_tag == DT_DEBUG && map == -1)
6879 map = dyn->d_un.d_val;
6880
6881 if (dyn->d_tag == DT_NULL)
6882 break;
6883 }
6884 else
6885 {
6886 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6887 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6888 union
6889 {
6890 Elf32_Word map;
6891 unsigned char buf[sizeof (Elf32_Word)];
6892 }
6893 rld_map;
6894 #endif
6895 #ifdef DT_MIPS_RLD_MAP
6896 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6897 {
6898 if (linux_read_memory (dyn->d_un.d_val,
6899 rld_map.buf, sizeof (rld_map.buf)) == 0)
6900 return rld_map.map;
6901 else
6902 break;
6903 }
6904 #endif /* DT_MIPS_RLD_MAP */
6905 #ifdef DT_MIPS_RLD_MAP_REL
6906 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6907 {
6908 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6909 rld_map.buf, sizeof (rld_map.buf)) == 0)
6910 return rld_map.map;
6911 else
6912 break;
6913 }
6914 #endif /* DT_MIPS_RLD_MAP_REL */
6915
6916 if (dyn->d_tag == DT_DEBUG && map == -1)
6917 map = dyn->d_un.d_val;
6918
6919 if (dyn->d_tag == DT_NULL)
6920 break;
6921 }
6922
6923 dynamic_memaddr += dyn_size;
6924 }
6925
6926 return map;
6927 }
6928
6929 /* Read one pointer from MEMADDR in the inferior. */
6930
6931 static int
6932 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6933 {
6934 int ret;
6935
6936 /* Go through a union so this works on either big or little endian
6937 hosts, when the inferior's pointer size is smaller than the size
6938 of CORE_ADDR. It is assumed the inferior's endianness is the
6939 same as the superior's. */
6940 union
6941 {
6942 CORE_ADDR core_addr;
6943 unsigned int ui;
6944 unsigned char uc;
6945 } addr;
6946
6947 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6948 if (ret == 0)
6949 {
6950 if (ptr_size == sizeof (CORE_ADDR))
6951 *ptr = addr.core_addr;
6952 else if (ptr_size == sizeof (unsigned int))
6953 *ptr = addr.ui;
6954 else
6955 gdb_assert_not_reached ("unhandled pointer size");
6956 }
6957 return ret;
6958 }
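
/* For illustration: why the union in read_one_ptr works. Every union
   member starts at offset 0, so the bytes of a narrow inferior
   pointer are decoded through the member of matching size. Reading
   them back through the wider CORE_ADDR member would misinterpret
   them on a big-endian host, where they would occupy the value's
   high-order half. example_union_decode is an illustrative name. */

static unsigned int
example_union_decode (const unsigned char *inferior_bytes)
{
  union { unsigned long wide; unsigned int narrow; } u;

  memcpy (&u, inferior_bytes, sizeof (u.narrow));
  return u.narrow;	/* Correct either way; u.wide would not be.  */
}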
6959
6960 struct link_map_offsets
6961 {
6962 /* Offset and size of r_debug.r_version. */
6963 int r_version_offset;
6964
6965 /* Offset and size of r_debug.r_map. */
6966 int r_map_offset;
6967
6968 /* Offset to l_addr field in struct link_map. */
6969 int l_addr_offset;
6970
6971 /* Offset to l_name field in struct link_map. */
6972 int l_name_offset;
6973
6974 /* Offset to l_ld field in struct link_map. */
6975 int l_ld_offset;
6976
6977 /* Offset to l_next field in struct link_map. */
6978 int l_next_offset;
6979
6980 /* Offset to l_prev field in struct link_map. */
6981 int l_prev_offset;
6982 };
6983
6984 /* Construct qXfer:libraries-svr4:read reply. */
6985
6986 static int
6987 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6988 unsigned const char *writebuf,
6989 CORE_ADDR offset, int len)
6990 {
6991 struct process_info_private *const priv = current_process ()->priv;
6992 char filename[PATH_MAX];
6993 int pid, is_elf64;
6994
6995 static const struct link_map_offsets lmo_32bit_offsets =
6996 {
6997 0, /* r_version offset. */
6998 4, /* r_debug.r_map offset. */
6999 0, /* l_addr offset in link_map. */
7000 4, /* l_name offset in link_map. */
7001 8, /* l_ld offset in link_map. */
7002 12, /* l_next offset in link_map. */
7003 16 /* l_prev offset in link_map. */
7004 };
7005
7006 static const struct link_map_offsets lmo_64bit_offsets =
7007 {
7008 0, /* r_version offset. */
7009 8, /* r_debug.r_map offset. */
7010 0, /* l_addr offset in link_map. */
7011 8, /* l_name offset in link_map. */
7012 16, /* l_ld offset in link_map. */
7013 24, /* l_next offset in link_map. */
7014 32 /* l_prev offset in link_map. */
7015 };
7016 const struct link_map_offsets *lmo;
7017 unsigned int machine;
7018 int ptr_size;
7019 CORE_ADDR lm_addr = 0, lm_prev = 0;
7020 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7021 int header_done = 0;
7022
7023 if (writebuf != NULL)
7024 return -2;
7025 if (readbuf == NULL)
7026 return -1;
7027
7028 pid = lwpid_of (current_thread);
7029 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7030 is_elf64 = elf_64_file_p (filename, &machine);
7031 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7032 ptr_size = is_elf64 ? 8 : 4;
7033
7034 while (annex[0] != '\0')
7035 {
7036 const char *sep;
7037 CORE_ADDR *addrp;
7038 int len;
7039
7040 sep = strchr (annex, '=');
7041 if (sep == NULL)
7042 break;
7043
7044 len = sep - annex;
7045 if (len == 5 && startswith (annex, "start"))
7046 addrp = &lm_addr;
7047 else if (len == 4 && startswith (annex, "prev"))
7048 addrp = &lm_prev;
7049 else
7050 {
7051 annex = strchr (sep, ';');
7052 if (annex == NULL)
7053 break;
7054 annex++;
7055 continue;
7056 }
7057
7058 annex = decode_address_to_semicolon (addrp, sep + 1);
7059 }
7060
7061 if (lm_addr == 0)
7062 {
7063 int r_version = 0;
7064
7065 if (priv->r_debug == 0)
7066 priv->r_debug = get_r_debug (pid, is_elf64);
7067
7068 /* We failed to find DT_DEBUG. This situation will not change
7069 for this inferior - do not retry it. Report it to GDB as
7070 E01; see GDB's solib-svr4.c for the reasons. */
7071 if (priv->r_debug == (CORE_ADDR) -1)
7072 return -1;
7073
7074 if (priv->r_debug != 0)
7075 {
7076 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7077 (unsigned char *) &r_version,
7078 sizeof (r_version)) != 0
7079 || r_version != 1)
7080 {
7081 warning ("unexpected r_debug version %d", r_version);
7082 }
7083 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7084 &lm_addr, ptr_size) != 0)
7085 {
7086 warning ("unable to read r_map from 0x%lx",
7087 (long) priv->r_debug + lmo->r_map_offset);
7088 }
7089 }
7090 }
7091
7092 std::string document = "<library-list-svr4 version=\"1.0\"";
7093
7094 while (lm_addr
7095 && read_one_ptr (lm_addr + lmo->l_name_offset,
7096 &l_name, ptr_size) == 0
7097 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7098 &l_addr, ptr_size) == 0
7099 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7100 &l_ld, ptr_size) == 0
7101 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7102 &l_prev, ptr_size) == 0
7103 && read_one_ptr (lm_addr + lmo->l_next_offset,
7104 &l_next, ptr_size) == 0)
7105 {
7106 unsigned char libname[PATH_MAX];
7107
7108 if (lm_prev != l_prev)
7109 {
7110 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7111 (long) lm_prev, (long) l_prev);
7112 break;
7113 }
7114
7115 /* Ignore the first entry even if it has a valid name, as it
7116 corresponds to the main executable. The first entry should not be
7117 skipped if the dynamic loader was loaded late by a static executable
7118 (see solib-svr4.c parameter ignore_first). But in that case the main
7119 executable does not have PT_DYNAMIC present, and this function has
7120 already exited above because get_r_debug failed. */
7121 if (lm_prev == 0)
7122 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7123 else
7124 {
7125 /* Not checking for error because reading may stop before
7126 we've got PATH_MAX worth of characters. */
7127 libname[0] = '\0';
7128 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7129 libname[sizeof (libname) - 1] = '\0';
7130 if (libname[0] != '\0')
7131 {
7132 if (!header_done)
7133 {
7134 /* Terminate `<library-list-svr4'. */
7135 document += '>';
7136 header_done = 1;
7137 }
7138
7139 std::string name = xml_escape_text ((char *) libname);
7140 string_appendf (document,
7141 "<library name=\"%s\" lm=\"0x%lx\" "
7142 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7143 name.c_str (), (unsigned long) lm_addr,
7144 (unsigned long) l_addr, (unsigned long) l_ld);
7145 }
7146 }
7147
7148 lm_prev = lm_addr;
7149 lm_addr = l_next;
7150 }
7151
7152 if (!header_done)
7153 {
7154 /* Empty list; terminate `<library-list-svr4'. */
7155 document += "/>";
7156 }
7157 else
7158 document += "</library-list-svr4>";
7159
7160 int document_len = document.length ();
7161 if (offset < document_len)
7162 document_len -= offset;
7163 else
7164 document_len = 0;
7165 if (len > document_len)
7166 len = document_len;
7167
7168 memcpy (readbuf, document.data () + offset, len);
7169
7170 return len;
7171 }
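
/* For illustration: the r_debug/link_map walk performed above with
   raw offsets and linux_read_memory, done natively for the current
   process, where <link.h> supplies the real structure layouts and
   glibc exports _r_debug directly. As above, the first entry is the
   main executable. The sketch assumes <link.h> and <stdio.h>;
   example_walk_own_solist is an illustrative name. */

#include <link.h>
#include <stdio.h>

static void
example_walk_own_solist (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    fprintf (stderr, "lm=%p l_addr=0x%lx name=\"%s\"\n",
	     (void *) lm, (unsigned long) lm->l_addr,
	     lm->l_name != NULL ? lm->l_name : "");
}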
7172
7173 #ifdef HAVE_LINUX_BTRACE
7174
7175 /* See to_disable_btrace target method. */
7176
7177 static int
7178 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7179 {
7180 enum btrace_error err;
7181
7182 err = linux_disable_btrace (tinfo);
7183 return (err == BTRACE_ERR_NONE ? 0 : -1);
7184 }
7185
7186 /* Encode an Intel Processor Trace configuration. */
7187
7188 static void
7189 linux_low_encode_pt_config (struct buffer *buffer,
7190 const struct btrace_data_pt_config *config)
7191 {
7192 buffer_grow_str (buffer, "<pt-config>\n");
7193
7194 switch (config->cpu.vendor)
7195 {
7196 case CV_INTEL:
7197 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7198 "model=\"%u\" stepping=\"%u\"/>\n",
7199 config->cpu.family, config->cpu.model,
7200 config->cpu.stepping);
7201 break;
7202
7203 default:
7204 break;
7205 }
7206
7207 buffer_grow_str (buffer, "</pt-config>\n");
7208 }
7209
7210 /* Encode a raw buffer. */
7211
7212 static void
7213 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7214 unsigned int size)
7215 {
7216 if (size == 0)
7217 return;
7218
7219 /* We use hex encoding - see common/rsp-low.h. */
7220 buffer_grow_str (buffer, "<raw>\n");
7221
7222 while (size-- > 0)
7223 {
7224 char elem[2];
7225
7226 elem[0] = tohex ((*data >> 4) & 0xf);
7227 elem[1] = tohex (*data++ & 0xf);
7228
7229 buffer_grow (buffer, elem, 2);
7230 }
7231
7232 buffer_grow_str (buffer, "</raw>\n");
7233 }
7234
7235 /* See to_read_btrace target method. */
7236
7237 static int
7238 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7239 enum btrace_read_type type)
7240 {
7241 struct btrace_data btrace;
7242 struct btrace_block *block;
7243 enum btrace_error err;
7244 int i;
7245
7246 btrace_data_init (&btrace);
7247
7248 err = linux_read_btrace (&btrace, tinfo, type);
7249 if (err != BTRACE_ERR_NONE)
7250 {
7251 if (err == BTRACE_ERR_OVERFLOW)
7252 buffer_grow_str0 (buffer, "E.Overflow.");
7253 else
7254 buffer_grow_str0 (buffer, "E.Generic Error.");
7255
7256 goto err;
7257 }
7258
7259 switch (btrace.format)
7260 {
7261 case BTRACE_FORMAT_NONE:
7262 buffer_grow_str0 (buffer, "E.No Trace.");
7263 goto err;
7264
7265 case BTRACE_FORMAT_BTS:
7266 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7267 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7268
7269 for (i = 0;
7270 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7271 i++)
7272 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7273 paddress (block->begin), paddress (block->end));
7274
7275 buffer_grow_str0 (buffer, "</btrace>\n");
7276 break;
7277
7278 case BTRACE_FORMAT_PT:
7279 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7280 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7281 buffer_grow_str (buffer, "<pt>\n");
7282
7283 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7284
7285 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7286 btrace.variant.pt.size);
7287
7288 buffer_grow_str (buffer, "</pt>\n");
7289 buffer_grow_str0 (buffer, "</btrace>\n");
7290 break;
7291
7292 default:
7293 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7294 goto err;
7295 }
7296
7297 btrace_data_fini (&btrace);
7298 return 0;
7299
7300 err:
7301 btrace_data_fini (&btrace);
7302 return -1;
7303 }
7304
7305 /* See to_btrace_conf target method. */
7306
7307 static int
7308 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7309 struct buffer *buffer)
7310 {
7311 const struct btrace_config *conf;
7312
7313 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7314 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7315
7316 conf = linux_btrace_conf (tinfo);
7317 if (conf != NULL)
7318 {
7319 switch (conf->format)
7320 {
7321 case BTRACE_FORMAT_NONE:
7322 break;
7323
7324 case BTRACE_FORMAT_BTS:
7325 buffer_xml_printf (buffer, "<bts");
7326 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7327 buffer_xml_printf (buffer, " />\n");
7328 break;
7329
7330 case BTRACE_FORMAT_PT:
7331 buffer_xml_printf (buffer, "<pt");
7332 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7333 buffer_xml_printf (buffer, "/>\n");
7334 break;
7335 }
7336 }
7337
7338 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7339 return 0;
7340 }
7341 #endif /* HAVE_LINUX_BTRACE */
7342
7343 /* See nat/linux-nat.h. */
7344
7345 ptid_t
7346 current_lwp_ptid (void)
7347 {
7348 return ptid_of (current_thread);
7349 }
7350
7351 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7352
7353 static int
7354 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7355 {
7356 if (the_low_target.breakpoint_kind_from_pc != NULL)
7357 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7358 else
7359 return default_breakpoint_kind_from_pc (pcptr);
7360 }
7361
7362 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7363
7364 static const gdb_byte *
7365 linux_sw_breakpoint_from_kind (int kind, int *size)
7366 {
7367 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7368
7369 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7370 }
7371
7372 /* Implementation of the target_ops method
7373 "breakpoint_kind_from_current_state". */
7374
7375 static int
7376 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7377 {
7378 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7379 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7380 else
7381 return linux_breakpoint_kind_from_pc (pcptr);
7382 }
7383
7384 /* Default implementation of linux_target_ops method "set_pc" for
7385 a 32-bit pc register that is literally named "pc". */
7386
7387 void
7388 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7389 {
7390 uint32_t newpc = pc;
7391
7392 supply_register_by_name (regcache, "pc", &newpc);
7393 }
7394
7395 /* Default implementation of linux_target_ops method "get_pc" for
7396 a 32-bit pc register that is literally named "pc". */
7397
7398 CORE_ADDR
7399 linux_get_pc_32bit (struct regcache *regcache)
7400 {
7401 uint32_t pc;
7402
7403 collect_register_by_name (regcache, "pc", &pc);
7404 if (debug_threads)
7405 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7406 return pc;
7407 }
7408
7409 /* Default implementation of linux_target_ops method "set_pc" for
7410 a 64-bit pc register that is literally named "pc". */
7411
7412 void
7413 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7414 {
7415 uint64_t newpc = pc;
7416
7417 supply_register_by_name (regcache, "pc", &newpc);
7418 }
7419
7420 /* Default implementation of linux_target_ops method "get_pc" for
7421 a 64-bit pc register that is literally named "pc". */
7422
7423 CORE_ADDR
7424 linux_get_pc_64bit (struct regcache *regcache)
7425 {
7426 uint64_t pc;
7427
7428 collect_register_by_name (regcache, "pc", &pc);
7429 if (debug_threads)
7430 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7431 return pc;
7432 }
7433
7434
7435 static struct target_ops linux_target_ops = {
7436 linux_create_inferior,
7437 linux_post_create_inferior,
7438 linux_attach,
7439 linux_kill,
7440 linux_detach,
7441 linux_mourn,
7442 linux_join,
7443 linux_thread_alive,
7444 linux_resume,
7445 linux_wait,
7446 linux_fetch_registers,
7447 linux_store_registers,
7448 linux_prepare_to_access_memory,
7449 linux_done_accessing_memory,
7450 linux_read_memory,
7451 linux_write_memory,
7452 linux_look_up_symbols,
7453 linux_request_interrupt,
7454 linux_read_auxv,
7455 linux_supports_z_point_type,
7456 linux_insert_point,
7457 linux_remove_point,
7458 linux_stopped_by_sw_breakpoint,
7459 linux_supports_stopped_by_sw_breakpoint,
7460 linux_stopped_by_hw_breakpoint,
7461 linux_supports_stopped_by_hw_breakpoint,
7462 linux_supports_hardware_single_step,
7463 linux_stopped_by_watchpoint,
7464 linux_stopped_data_address,
7465 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7466 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7467 && defined(PT_TEXT_END_ADDR)
7468 linux_read_offsets,
7469 #else
7470 NULL,
7471 #endif
7472 #ifdef USE_THREAD_DB
7473 thread_db_get_tls_address,
7474 #else
7475 NULL,
7476 #endif
7477 linux_qxfer_spu,
7478 hostio_last_error_from_errno,
7479 linux_qxfer_osdata,
7480 linux_xfer_siginfo,
7481 linux_supports_non_stop,
7482 linux_async,
7483 linux_start_non_stop,
7484 linux_supports_multi_process,
7485 linux_supports_fork_events,
7486 linux_supports_vfork_events,
7487 linux_supports_exec_events,
7488 linux_handle_new_gdb_connection,
7489 #ifdef USE_THREAD_DB
7490 thread_db_handle_monitor_command,
7491 #else
7492 NULL,
7493 #endif
7494 linux_common_core_of_thread,
7495 linux_read_loadmap,
7496 linux_process_qsupported,
7497 linux_supports_tracepoints,
7498 linux_read_pc,
7499 linux_write_pc,
7500 linux_thread_stopped,
7501 NULL,
7502 linux_pause_all,
7503 linux_unpause_all,
7504 linux_stabilize_threads,
7505 linux_install_fast_tracepoint_jump_pad,
7506 linux_emit_ops,
7507 linux_supports_disable_randomization,
7508 linux_get_min_fast_tracepoint_insn_len,
7509 linux_qxfer_libraries_svr4,
7510 linux_supports_agent,
7511 #ifdef HAVE_LINUX_BTRACE
7512 linux_enable_btrace,
7513 linux_low_disable_btrace,
7514 linux_low_read_btrace,
7515 linux_low_btrace_conf,
7516 #else
7517 NULL,
7518 NULL,
7519 NULL,
7520 NULL,
7521 #endif
7522 linux_supports_range_stepping,
7523 linux_proc_pid_to_exec_file,
7524 linux_mntns_open_cloexec,
7525 linux_mntns_unlink,
7526 linux_mntns_readlink,
7527 linux_breakpoint_kind_from_pc,
7528 linux_sw_breakpoint_from_kind,
7529 linux_proc_tid_get_name,
7530 linux_breakpoint_kind_from_current_state,
7531 linux_supports_software_single_step,
7532 linux_supports_catch_syscall,
7533 linux_get_ipa_tdesc_idx,
7534 #if USE_THREAD_DB
7535 thread_db_thread_handle,
7536 #else
7537 NULL,
7538 #endif
7539 };
7540
7541 #ifdef HAVE_LINUX_REGSETS
7542 void
7543 initialize_regsets_info (struct regsets_info *info)
7544 {
7545 for (info->num_regsets = 0;
7546 info->regsets[info->num_regsets].size >= 0;
7547 info->num_regsets++)
7548 ;
7549 }
7550 #endif
7551
7552 void
7553 initialize_low (void)
7554 {
7555 struct sigaction sigchld_action;
7556
7557 memset (&sigchld_action, 0, sizeof (sigchld_action));
7558 set_target_ops (&linux_target_ops);
7559
7560 linux_ptrace_init_warnings ();
7561
7562 sigchld_action.sa_handler = sigchld_handler;
7563 sigemptyset (&sigchld_action.sa_mask);
7564 sigchld_action.sa_flags = SA_RESTART;
7565 sigaction (SIGCHLD, &sigchld_action, NULL);
7566
7567 initialize_low_arch ();
7568
7569 linux_check_ptrace_features ();
7570 }