/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
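
/* A minimal usage sketch of the auxv types above (illustrative only;
   the function name is hypothetical and nothing in gdbserver calls it):
   /proc/PID/auxv is a flat array of auxv entries terminated by an
   AT_NULL entry, so finding e.g. AT_HWCAP2 is a linear scan.  This
   sketch assumes a 64-bit inferior; a real caller would pick
   Elf32_auxv_t or Elf64_auxv_t based on the inferior's word size.  */

static uint64_t ATTRIBUTE_UNUSED
example_read_auxv_entry (int pid, uint64_t type)
{
  char filename[64];
  Elf64_auxv_t entry;
  uint64_t value = 0;

  snprintf (filename, sizeof (filename), "/proc/%d/auxv", pid);
  int fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  /* Read entries one at a time; the file ends just after the
     terminating AT_NULL entry, so the loop ends at EOF at the
     latest.  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry))
    if (entry.a_type == type)
      {
	value = entry.a_un.a_val;
	break;
      }

  close (fd);
  return value;
}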

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
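
/* Illustrative sketch of how the list above is meant to be used
   (hypothetical helper, not called anywhere): one side records early
   stops with add_to_pid_list, and the fork/clone event handler later
   claims them -- handle_extended_wait below does exactly this dance
   with stopped_pids.  */

static int ATTRIBUTE_UNUSED
example_claim_new_child (int new_pid)
{
  int status;

  /* If a stop for NEW_PID was already recorded, take it from the
     list; otherwise block until the new child reports its initial
     SIGSTOP.  */
  if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
    my_waitpid (new_pid, &status, __WALL);

  return status;
}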

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is ELF but not
   64-bit, and -1 if it is not an ELF file at all.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
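
/* Usage sketch for the helpers above (illustrative only; the function
   name is hypothetical): probe the ELF class of a live process's
   executable via /proc.  A result of 1 means 64-bit, 0 means 32-bit
   (or a truncated read), and negative means the file was unreadable
   or not ELF.  */

static void ATTRIBUTE_UNUSED
example_report_exe_class (int pid)
{
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res < 0)
    printf ("pid %d: executable unreadable or not ELF\n", pid);
  else
    printf ("pid %d: %s-bit ELF, e_machine=%u\n",
	    pid, res ? "64" : "32", machine);
}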

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
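
/* Illustrative sketch (hypothetical helper, error handling omitted)
   of the classic PTRACE_TRACEME dance that fork_inferior performs
   around the callback above: the child requests tracing and execs,
   and the exec reports back to the parent as a ptrace stop.  */

static pid_t ATTRIBUTE_UNUSED
example_trace_new_inferior (const char *program, char *const argv[])
{
  pid_t child = fork ();

  if (child == 0)
    {
      /* In the child: request tracing (this is part of what
	 linux_ptrace_fun does), then exec the program.  */
      if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
		  (PTRACE_TYPE_ARG4) 0) < 0)
	_exit (127);
      execv (program, argv);
      _exit (127);
    }

  /* In the parent: wait for the child's initial stop.  */
  int status;
  waitpid (child, &status, 0);
  return child;
}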

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
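
/* A minimal sketch of the PTRACE_ATTACH handshake that attach_lwp
   builds on (hypothetical helper, illustrative only): attach, then
   consume the initial SIGSTOP before issuing further requests
   against the lwp.  */

static int ATTRIBUTE_UNUSED
example_attach_and_wait (int lwpid)
{
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  /* The attach is asynchronous; the lwp only enters ptrace-stop once
     the pending SIGSTOP is delivered.  */
  int status;
  if (my_waitpid (lwpid, &status, __WALL) != lwpid || !WIFSTOPPED (status))
    return -1;

  return 0;
}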

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
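
/* Illustrative sketch of a kill_lwp-style helper (the real kill_lwp
   is defined further down in this file): tgkill directs a signal at
   one specific thread, which plain kill cannot do for non-leader
   threads of a multi-threaded process.  */

static int ATTRIBUTE_UNUSED
example_kill_one_thread (int tgid, int tid, int signo)
{
  /* SYS_tgkill has been available since Linux 2.5.75.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}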

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it.  "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			  "num_lwps=%d, zombie=%d",
			  leader_pid, leader_lp != NULL, num_lwps (leader_pid),
			  linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	threads_debug_printf ("Thread group leader %d zombie "
			      "(it exited, or another thread execd).",
			      leader_pid);

	delete_lwp (leader_lp);
      }
  });
}
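
/* Simplified sketch of the /proc test that linux_proc_pid_is_zombie
   (from nat/linux-procfs.h) performs (hypothetical helper; the real
   one is more careful): read the "State:" line of /proc/PID/status
   and look for the 'Z' (zombie) state letter.  */

static int ATTRIBUTE_UNUSED
example_pid_is_zombie (int pid)
{
  char filename[64], line[128];
  int zombie = 0;

  snprintf (filename, sizeof (filename), "/proc/%d/status", pid);
  FILE *f = fopen (filename, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The single state letter follows the tag and whitespace,
	   e.g. "State:\tZ (zombie)".  */
	zombie = (strchr (line + 6, 'Z') != NULL);
	break;
      }

  fclose (f);
  return zombie;
}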

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (lwp->suspended > 4)
    threads_debug_printf
      ("LWP %ld has a suspiciously high suspend count, suspended=%d",
       lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */
1825
1826 static int
1827 handle_tracepoints (struct lwp_info *lwp)
1828 {
1829 struct thread_info *tinfo = get_lwp_thread (lwp);
1830 int tpoint_related_event = 0;
1831
1832 gdb_assert (lwp->suspended == 0);
1833
1834 /* If this tracepoint hit causes a tracing stop, we'll immediately
1835 uninsert tracepoints. To do this, we temporarily pause all
1836 threads, unpatch away, and then unpause threads. We need to make
1837 sure the unpausing doesn't resume LWP too. */
1838 lwp_suspended_inc (lwp);
1839
1840 /* And we need to be sure that any all-threads-stopping doesn't try
1841 to move threads out of the jump pads, as it could deadlock the
1842 inferior (LWP could be in the jump pad, maybe even holding the
1843 lock.) */
1844
1845 /* Do any necessary step collect actions. */
1846 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1847
1848 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1849
1850 /* See if we just hit a tracepoint and do its main collect
1851 actions. */
1852 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1853
1854 lwp_suspended_decr (lwp);
1855
1856 gdb_assert (lwp->suspended == 0);
1857 gdb_assert (!stabilizing_threads
1858 || (lwp->collecting_fast_tracepoint
1859 != fast_tpoint_collect_result::not_collecting));
1860
1861 if (tpoint_related_event)
1862 {
1863 threads_debug_printf ("got a tracepoint event");
1864 return 1;
1865 }
1866
1867 return 0;
1868 }
1869
1870 fast_tpoint_collect_result
1871 linux_process_target::linux_fast_tracepoint_collecting
1872 (lwp_info *lwp, fast_tpoint_collect_status *status)
1873 {
1874 CORE_ADDR thread_area;
1875 struct thread_info *thread = get_lwp_thread (lwp);
1876
1877 /* Get the thread area address. This is used to recognize which
1878 thread is which when tracing with the in-process agent library.
1879 We don't read anything from the address, and treat it as opaque;
1880 it's the address itself that we assume is unique per-thread. */
1881 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1882 return fast_tpoint_collect_result::not_collecting;
1883
1884 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1885 }
1886
1887 int
1888 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1889 {
1890 return -1;
1891 }
1892
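/* The default low_get_thread_area above simply fails; ports override
   it.  A hedged sketch (not built) of one way an x86-64 port could
   produce a unique per-thread address: the FS base points at the
   thread's TCB.  Assumes <sys/user.h> for struct user_regs_struct.  */
#if 0
static int
example_get_thread_area_amd64 (int lwpid, CORE_ADDR *addrp)
{
  struct user_regs_struct regs;

  if (ptrace (PTRACE_GETREGS, lwpid, (PTRACE_TYPE_ARG3) 0, &regs) != 0)
    return -1;

  /* The caller treats the value as opaque; uniqueness per thread is
     all that matters.  */
  *addrp = (CORE_ADDR) regs.fs_base;
  return 0;
}
#endif
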
1893 bool
1894 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1895 {
1896 scoped_restore_current_thread restore_thread;
1897 switch_to_thread (get_lwp_thread (lwp));
1898
1899 if ((wstat == NULL
1900 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1901 && supports_fast_tracepoints ()
1902 && agent_loaded_p ())
1903 {
1904 struct fast_tpoint_collect_status status;
1905
1906 threads_debug_printf
1907 ("Checking whether LWP %ld needs to move out of the jump pad.",
1908 lwpid_of (current_thread));
1909
1910 fast_tpoint_collect_result r
1911 = linux_fast_tracepoint_collecting (lwp, &status);
1912
1913 if (wstat == NULL
1914 || (WSTOPSIG (*wstat) != SIGILL
1915 && WSTOPSIG (*wstat) != SIGFPE
1916 && WSTOPSIG (*wstat) != SIGSEGV
1917 && WSTOPSIG (*wstat) != SIGBUS))
1918 {
1919 lwp->collecting_fast_tracepoint = r;
1920
1921 if (r != fast_tpoint_collect_result::not_collecting)
1922 {
1923 if (r == fast_tpoint_collect_result::before_insn
1924 && lwp->exit_jump_pad_bkpt == NULL)
1925 {
1926 /* Haven't executed the original instruction yet.
1927 Set breakpoint there, and wait till it's hit,
1928 then single-step until exiting the jump pad. */
1929 lwp->exit_jump_pad_bkpt
1930 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1931 }
1932
1933 threads_debug_printf
1934 ("Checking whether LWP %ld needs to move out of the jump pad..."
1935 " it does", lwpid_of (current_thread));
1936
1937 return true;
1938 }
1939 }
1940 else
1941 {
1942 /* If we get a synchronous signal while collecting, *and*
1943 while executing the (relocated) original instruction,
1944 reset the PC to point at the tpoint address, before
1945 reporting to GDB. Otherwise, it's an IPA lib bug: just
1946 report the signal to GDB, and pray for the best. */
1947
1948 lwp->collecting_fast_tracepoint
1949 = fast_tpoint_collect_result::not_collecting;
1950
1951 if (r != fast_tpoint_collect_result::not_collecting
1952 && (status.adjusted_insn_addr <= lwp->stop_pc
1953 && lwp->stop_pc < status.adjusted_insn_addr_end))
1954 {
1955 siginfo_t info;
1956 struct regcache *regcache;
1957
1958 /* The si_addr on a few signals references the address
1959 of the faulting instruction. Adjust that as
1960 well. */
1961 if ((WSTOPSIG (*wstat) == SIGILL
1962 || WSTOPSIG (*wstat) == SIGFPE
1963 || WSTOPSIG (*wstat) == SIGBUS
1964 || WSTOPSIG (*wstat) == SIGSEGV)
1965 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1966 (PTRACE_TYPE_ARG3) 0, &info) == 0
1967 /* Final check just to make sure we don't clobber
1968 the siginfo of non-kernel-sent signals. */
1969 && (uintptr_t) info.si_addr == lwp->stop_pc)
1970 {
1971 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1972 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1973 (PTRACE_TYPE_ARG3) 0, &info);
1974 }
1975
1976 regcache = get_thread_regcache (current_thread, 1);
1977 low_set_pc (regcache, status.tpoint_addr);
1978 lwp->stop_pc = status.tpoint_addr;
1979
1980 /* Cancel any fast tracepoint lock this thread was
1981 holding. */
1982 force_unlock_trace_buffer ();
1983 }
1984
1985 if (lwp->exit_jump_pad_bkpt != NULL)
1986 {
1987 threads_debug_printf
1988 ("Cancelling fast exit-jump-pad: removing bkpt."
1989 "stopping all threads momentarily.");
1990
1991 stop_all_lwps (1, lwp);
1992
1993 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1994 lwp->exit_jump_pad_bkpt = NULL;
1995
1996 unstop_all_lwps (1, lwp);
1997
1998 gdb_assert (lwp->suspended >= 0);
1999 }
2000 }
2001 }
2002
2003 threads_debug_printf
2004 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2005 lwpid_of (current_thread));
2006
2007 return false;
2008 }
2009
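/* A minimal sketch (not built) of the PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO round trip performed above when a synchronous
   signal lands inside the relocated instruction: fetch the siginfo,
   patch si_addr, and write it back.  NEW_ADDR is a hypothetical
   replacement address.  */
#if 0
static void
example_rewrite_si_addr (int lwpid, uintptr_t new_addr)
{
  siginfo_t info;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, (PTRACE_TYPE_ARG3) 0, &info) != 0)
    return;

  info.si_addr = (void *) new_addr;
  ptrace (PTRACE_SETSIGINFO, lwpid, (PTRACE_TYPE_ARG3) 0, &info);
}
#endif
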
2010 /* Enqueue one signal in the "signals to report later when out of the
2011 jump pad" list. */
2012
2013 static void
2014 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2015 {
2016 struct thread_info *thread = get_lwp_thread (lwp);
2017
2018 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2019 WSTOPSIG (*wstat), lwpid_of (thread));
2020
2021 if (debug_threads)
2022 {
2023 for (const auto &sig : lwp->pending_signals_to_report)
2024 threads_debug_printf (" Already queued %d", sig.signal);
2025
2026 threads_debug_printf (" (no more currently queued signals)");
2027 }
2028
2029 /* Don't enqueue non-RT signals if they are already in the deferred
2030 queue. (SIGSTOP being the easiest signal to see ending up here
2031 twice.) */
2032 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2033 {
2034 for (const auto &sig : lwp->pending_signals_to_report)
2035 {
2036 if (sig.signal == WSTOPSIG (*wstat))
2037 {
2038 threads_debug_printf
2039 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2040 sig.signal, lwpid_of (thread));
2041 return;
2042 }
2043 }
2044 }
2045
2046 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2047
2048 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2049 &lwp->pending_signals_to_report.back ().info);
2050 }
2051
2052 /* Dequeue one signal from the "signals to report later when out of
2053 the jump pad" list. */
2054
2055 static int
2056 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2057 {
2058 struct thread_info *thread = get_lwp_thread (lwp);
2059
2060 if (!lwp->pending_signals_to_report.empty ())
2061 {
2062 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2063
2064 *wstat = W_STOPCODE (p_sig.signal);
2065 if (p_sig.info.si_signo != 0)
2066 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2067 &p_sig.info);
2068
2069 lwp->pending_signals_to_report.pop_front ();
2070
2071 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2072 WSTOPSIG (*wstat), lwpid_of (thread));
2073
2074 if (debug_threads)
2075 {
2076 for (const auto &sig : lwp->pending_signals_to_report)
2077 threads_debug_printf (" Still queued %d", sig.signal);
2078
2079 threads_debug_printf (" (no more queued signals)");
2080 }
2081
2082 return 1;
2083 }
2084
2085 return 0;
2086 }
2087
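/* A small sketch (not built) of what the W_STOPCODE used above
   produces: a synthetic wait status that the standard WIFSTOPPED /
   WSTOPSIG macros decode back to the original signal.  On Linux,
   W_STOPCODE (sig) expands to ((sig) << 8 | 0x7f); gdb_wait.h supplies
   it where the system headers don't.  */
#if 0
static void
example_w_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif
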
2088 bool
2089 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2090 {
2091 scoped_restore_current_thread restore_thread;
2092 switch_to_thread (get_lwp_thread (child));
2093
2094 if (low_stopped_by_watchpoint ())
2095 {
2096 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2097 child->stopped_data_address = low_stopped_data_address ();
2098 }
2099
2100 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2101 }
2102
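/* check_stopped_by_watchpoint above consults low_stopped_by_watchpoint,
   whose default below just returns false; ports override it.  A hedged
   sketch (not built) of the kind of check an x86 port could make: read
   the DR6 debug status register from the ptrace user area; its low
   four bits record which debug register, if any, fired.  Assumes
   <sys/user.h> for struct user.  */
#if 0
static bool
example_x86_stopped_by_debug_reg (int lwpid)
{
  errno = 0;
  long dr6 = ptrace (PTRACE_PEEKUSER, lwpid,
                     offsetof (struct user, u_debugreg[6]), 0);

  return errno == 0 && (dr6 & 0xf) != 0;
}
#endif
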
2103 bool
2104 linux_process_target::low_stopped_by_watchpoint ()
2105 {
2106 return false;
2107 }
2108
2109 CORE_ADDR
2110 linux_process_target::low_stopped_data_address ()
2111 {
2112 return 0;
2113 }
2114
2115 /* Return the ptrace options that we want to try to enable. */
2116
2117 static int
2118 linux_low_ptrace_options (int attached)
2119 {
2120 client_state &cs = get_client_state ();
2121 int options = 0;
2122
2123 if (!attached)
2124 options |= PTRACE_O_EXITKILL;
2125
2126 if (cs.report_fork_events)
2127 options |= PTRACE_O_TRACEFORK;
2128
2129 if (cs.report_vfork_events)
2130 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2131
2132 if (cs.report_exec_events)
2133 options |= PTRACE_O_TRACEEXEC;
2134
2135 options |= PTRACE_O_TRACESYSGOOD;
2136
2137 return options;
2138 }
2139
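/* A hedged sketch (not built) of how the options computed above are
   applied: a single PTRACE_SETOPTIONS call on a stopped tracee.
   linux_enable_event_reporting (nat/linux-ptrace.c) wraps this,
   additionally masking out options the running kernel lacks.  */
#if 0
static void
example_apply_ptrace_options (int lwpid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
          (PTRACE_TYPE_ARG4) (uintptr_t) options);
}
#endif
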
2140 void
2141 linux_process_target::filter_event (int lwpid, int wstat)
2142 {
2143 client_state &cs = get_client_state ();
2144 struct lwp_info *child;
2145 struct thread_info *thread;
2146 int have_stop_pc = 0;
2147
2148 child = find_lwp_pid (ptid_t (lwpid));
2149
2150 /* Check for stop events reported by a process we didn't already
2151 know about - anything not already in our LWP list.
2152
2153 If we're expecting to receive stopped processes after
2154 fork, vfork, and clone events, then we'll just add the
2155 new one to our list and go back to waiting for the event
2156 to be reported - the stopped process might be returned
2157 from waitpid before or after the event is.
2158
2159 But note the case of a non-leader thread exec'ing after the
2160 leader having exited, and gone from our lists (because
2161 check_zombie_leaders deleted it). The non-leader thread
2162 changes its tid to the tgid. */
2163
2164 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2165 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2166 {
2167 ptid_t child_ptid;
2168
2169 /* A multi-thread exec after we had seen the leader exiting. */
2170 threads_debug_printf ("Re-adding thread group leader LWP %d after exec.",
2171 lwpid);
2172
2173 child_ptid = ptid_t (lwpid, lwpid);
2174 child = add_lwp (child_ptid);
2175 child->stopped = 1;
2176 switch_to_thread (child->thread);
2177 }
2178
2179 /* If we didn't find a process, one of two things presumably happened:
2180 - A process we started and then detached from has exited. Ignore it.
2181 - A process we are controlling has forked and the new child's stop
2182 was reported to us by the kernel. Save its PID. */
2183 if (child == NULL && WIFSTOPPED (wstat))
2184 {
2185 add_to_pid_list (&stopped_pids, lwpid, wstat);
2186 return;
2187 }
2188 else if (child == NULL)
2189 return;
2190
2191 thread = get_lwp_thread (child);
2192
2193 child->stopped = 1;
2194
2195 child->last_status = wstat;
2196
2197 /* Check if the thread has exited. */
2198 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2199 {
2200 threads_debug_printf ("%d exited", lwpid);
2201
2202 if (finish_step_over (child))
2203 {
2204 /* Unsuspend all other LWPs, and set them back running again. */
2205 unsuspend_all_lwps (child);
2206 }
2207
2208 /* If there is at least one more LWP, then the exit signal was
2209 not the end of the debugged application and should be
2210 ignored, unless GDB wants to hear about thread exits. */
2211 if (cs.report_thread_events
2212 || last_thread_of_process_p (pid_of (thread)))
2213 {
2214 /* Events are serialized to the GDB core, and we can't
2215 report this one right now, so leave the status pending for
2216 the next time we're able to report it. */
2217 mark_lwp_dead (child, wstat);
2218 return;
2219 }
2220 else
2221 {
2222 delete_lwp (child);
2223 return;
2224 }
2225 }
2226
2227 gdb_assert (WIFSTOPPED (wstat));
2228
2229 if (WIFSTOPPED (wstat))
2230 {
2231 struct process_info *proc;
2232
2233 /* Architecture-specific setup after the inferior is running. */
2234 proc = find_process_pid (pid_of (thread));
2235 if (proc->tdesc == NULL)
2236 {
2237 if (proc->attached)
2238 {
2239 /* This needs to happen after we have attached to the
2240 inferior and it is stopped for the first time, but
2241 before we access any inferior registers. */
2242 arch_setup_thread (thread);
2243 }
2244 else
2245 {
2246 /* The process is started, but GDBserver will do
2247 architecture-specific setup after the program stops at
2248 the first instruction. */
2249 child->status_pending_p = 1;
2250 child->status_pending = wstat;
2251 return;
2252 }
2253 }
2254 }
2255
2256 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2257 {
2258 struct process_info *proc = find_process_pid (pid_of (thread));
2259 int options = linux_low_ptrace_options (proc->attached);
2260
2261 linux_enable_event_reporting (lwpid, options);
2262 child->must_set_ptrace_flags = 0;
2263 }
2264
2265 /* Always update syscall_state, even if it will be filtered later. */
2266 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2267 {
2268 child->syscall_state
2269 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2270 ? TARGET_WAITKIND_SYSCALL_RETURN
2271 : TARGET_WAITKIND_SYSCALL_ENTRY);
2272 }
2273 else
2274 {
2275 /* Almost all other ptrace-stops are known to be outside of system
2276 calls, with further exceptions in handle_extended_wait. */
2277 child->syscall_state = TARGET_WAITKIND_IGNORE;
2278 }
2279
2280 /* Be careful to not overwrite stop_pc until save_stop_reason is
2281 called. */
2282 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2283 && linux_is_extended_waitstatus (wstat))
2284 {
2285 child->stop_pc = get_pc (child);
2286 if (handle_extended_wait (&child, wstat))
2287 {
2288 /* The event has been handled, so just return without
2289 reporting it. */
2290 return;
2291 }
2292 }
2293
2294 if (linux_wstatus_maybe_breakpoint (wstat))
2295 {
2296 if (save_stop_reason (child))
2297 have_stop_pc = 1;
2298 }
2299
2300 if (!have_stop_pc)
2301 child->stop_pc = get_pc (child);
2302
2303 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2304 && child->stop_expected)
2305 {
2306 threads_debug_printf ("Expected stop.");
2307
2308 child->stop_expected = 0;
2309
2310 if (thread->last_resume_kind == resume_stop)
2311 {
2312 /* We want to report the stop to the core. Treat the
2313 SIGSTOP as a normal event. */
2314 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2315 target_pid_to_str (ptid_of (thread)).c_str ());
2316 }
2317 else if (stopping_threads != NOT_STOPPING_THREADS)
2318 {
2319 /* Stopping threads. We don't want this SIGSTOP to end up
2320 pending. */
2321 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2322 target_pid_to_str (ptid_of (thread)).c_str ());
2323 return;
2324 }
2325 else
2326 {
2327 /* This is a delayed SIGSTOP. Filter out the event. */
2328 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2329 child->stepping ? "step" : "continue",
2330 target_pid_to_str (ptid_of (thread)).c_str ());
2331
2332 resume_one_lwp (child, child->stepping, 0, NULL);
2333 return;
2334 }
2335 }
2336
2337 child->status_pending_p = 1;
2338 child->status_pending = wstat;
2339 return;
2340 }
2341
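/* A minimal sketch (not built) of the syscall-stop toggle that
   filter_event implements above.  With PTRACE_O_TRACESYSGOOD set,
   syscall stops report WSTOPSIG == (SIGTRAP | 0x80), but entry and
   return stops look identical, so the tracer must remember which side
   it saw last and alternate.  */
#if 0
enum example_syscall_side { EXAMPLE_ENTRY, EXAMPLE_RETURN, EXAMPLE_NONE };

static enum example_syscall_side
example_next_syscall_side (enum example_syscall_side prev, int wstat)
{
  if (!(WIFSTOPPED (wstat) && WSTOPSIG (wstat) == (SIGTRAP | 0x80)))
    return EXAMPLE_NONE;	/* Not a syscall stop.  */

  return prev == EXAMPLE_ENTRY ? EXAMPLE_RETURN : EXAMPLE_ENTRY;
}
#endif
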
2342 bool
2343 linux_process_target::maybe_hw_step (thread_info *thread)
2344 {
2345 if (supports_hardware_single_step ())
2346 return true;
2347 else
2348 {
2349 /* GDBserver must insert a single-step breakpoint for software
2350 single step. */
2351 gdb_assert (has_single_step_breakpoints (thread));
2352 return false;
2353 }
2354 }
2355
2356 void
2357 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2358 {
2359 struct lwp_info *lp = get_thread_lwp (thread);
2360
2361 if (lp->stopped
2362 && !lp->suspended
2363 && !lp->status_pending_p
2364 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2365 {
2366 int step = 0;
2367
2368 if (thread->last_resume_kind == resume_step)
2369 step = maybe_hw_step (thread);
2370
2371 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2372 target_pid_to_str (ptid_of (thread)).c_str (),
2373 paddress (lp->stop_pc), step);
2374
2375 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2376 }
2377 }
2378
2379 int
2380 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2381 ptid_t filter_ptid,
2382 int *wstatp, int options)
2383 {
2384 struct thread_info *event_thread;
2385 struct lwp_info *event_child, *requested_child;
2386 sigset_t block_mask, prev_mask;
2387
2388 retry:
2389 /* N.B. event_thread points to the thread_info struct that contains
2390 event_child. Keep them in sync. */
2391 event_thread = NULL;
2392 event_child = NULL;
2393 requested_child = NULL;
2394
2395 /* Check for a lwp with a pending status. */
2396
2397 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2398 {
2399 event_thread = find_thread_in_random ([&] (thread_info *thread)
2400 {
2401 return status_pending_p_callback (thread, filter_ptid);
2402 });
2403
2404 if (event_thread != NULL)
2405 {
2406 event_child = get_thread_lwp (event_thread);
2407 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2408 }
2409 }
2410 else if (filter_ptid != null_ptid)
2411 {
2412 requested_child = find_lwp_pid (filter_ptid);
2413
2414 if (stopping_threads == NOT_STOPPING_THREADS
2415 && requested_child->status_pending_p
2416 && (requested_child->collecting_fast_tracepoint
2417 != fast_tpoint_collect_result::not_collecting))
2418 {
2419 enqueue_one_deferred_signal (requested_child,
2420 &requested_child->status_pending);
2421 requested_child->status_pending_p = 0;
2422 requested_child->status_pending = 0;
2423 resume_one_lwp (requested_child, 0, 0, NULL);
2424 }
2425
2426 if (requested_child->suspended
2427 && requested_child->status_pending_p)
2428 {
2429 internal_error (__FILE__, __LINE__,
2430 "requesting an event out of a"
2431 " suspended child?");
2432 }
2433
2434 if (requested_child->status_pending_p)
2435 {
2436 event_child = requested_child;
2437 event_thread = get_lwp_thread (event_child);
2438 }
2439 }
2440
2441 if (event_child != NULL)
2442 {
2443 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2444 lwpid_of (event_thread),
2445 event_child->status_pending);
2446
2447 *wstatp = event_child->status_pending;
2448 event_child->status_pending_p = 0;
2449 event_child->status_pending = 0;
2450 switch_to_thread (event_thread);
2451 return lwpid_of (event_thread);
2452 }
2453
2454 /* But if we don't find a pending event, we'll have to wait.
2455
2456 We only enter this loop if no process has a pending wait status.
2457 Thus any action taken in response to a wait status inside this
2458 loop is responding as soon as we detect the status, not after any
2459 pending events. */
2460
2461 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2462 all signals while here. */
2463 sigfillset (&block_mask);
2464 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2465
2466 /* Always pull all events out of the kernel. We'll randomly select
2467 an event LWP out of all that have events, to prevent
2468 starvation. */
2469 while (event_child == NULL)
2470 {
2471 pid_t ret = 0;
2472
2473 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2474 quirks:
2475
2476 - If the thread group leader exits while other threads in the
2477 thread group still exist, waitpid(TGID, ...) hangs. That
2478 waitpid won't return an exit status until the other threads
2479 in the group are reaped.
2480
2481 - When a non-leader thread execs, that thread just vanishes
2482 without reporting an exit (so we'd hang if we waited for it
2483 explicitly in that case). The exec event is reported to
2484 the TGID pid. */
2485 errno = 0;
2486 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2487
2488 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2489 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2490
2491 if (ret > 0)
2492 {
2493 threads_debug_printf ("waitpid %ld received %s",
2494 (long) ret, status_to_str (*wstatp).c_str ());
2495
2496 /* Filter all events. IOW, leave all events pending. We'll
2497 randomly select an event LWP out of all that have events
2498 below. */
2499 filter_event (ret, *wstatp);
2500 /* Retry until nothing comes out of waitpid. A single
2501 SIGCHLD can indicate more than one child stopped. */
2502 continue;
2503 }
2504
2505 /* Now that we've pulled all events out of the kernel, resume
2506 LWPs that don't have an interesting event to report. */
2507 if (stopping_threads == NOT_STOPPING_THREADS)
2508 for_each_thread ([this] (thread_info *thread)
2509 {
2510 resume_stopped_resumed_lwps (thread);
2511 });
2512
2513 /* ... and find an LWP with a status to report to the core, if
2514 any. */
2515 event_thread = find_thread_in_random ([&] (thread_info *thread)
2516 {
2517 return status_pending_p_callback (thread, filter_ptid);
2518 });
2519
2520 if (event_thread != NULL)
2521 {
2522 event_child = get_thread_lwp (event_thread);
2523 *wstatp = event_child->status_pending;
2524 event_child->status_pending_p = 0;
2525 event_child->status_pending = 0;
2526 break;
2527 }
2528
2529 /* Check for zombie thread group leaders. Those can't be reaped
2530 until all other threads in the thread group are. */
2531 check_zombie_leaders ();
2532
2533 auto not_stopped = [&] (thread_info *thread)
2534 {
2535 return not_stopped_callback (thread, wait_ptid);
2536 };
2537
2538 /* If there are no resumed children left in the set of LWPs we
2539 want to wait for, bail. We can't just block in
2540 waitpid/sigsuspend, because lwps might have been left stopped
2541 in trace-stop state, and we'd be stuck forever waiting for
2542 their status to change (which would only happen if we resumed
2543 them). Even if WNOHANG is set, this return code is preferred
2544 over 0 (below), as it is more detailed. */
2545 if (find_thread (not_stopped) == NULL)
2546 {
2547 threads_debug_printf ("exit (no unwaited-for LWP)");
2548
2549 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2550 return -1;
2551 }
2552
2553 /* No interesting event to report to the caller. */
2554 if ((options & WNOHANG))
2555 {
2556 threads_debug_printf ("WNOHANG set, no event found");
2557
2558 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2559 return 0;
2560 }
2561
2562 /* Block until we get an event reported with SIGCHLD. */
2563 threads_debug_printf ("sigsuspend'ing");
2564
2565 sigsuspend (&prev_mask);
2566 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2567 goto retry;
2568 }
2569
2570 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2571
2572 switch_to_thread (event_thread);
2573
2574 return lwpid_of (event_thread);
2575 }
2576
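/* A standalone sketch (not built) of the block-drain-suspend pattern
   wait_for_event_filtered uses above: block SIGCHLD, drain every
   pending status with WNOHANG, and only then sigsuspend with the old
   mask, so a SIGCHLD arriving between the last waitpid and the
   sigsuspend cannot be lost.  */
#if 0
static void
example_drain_then_suspend (void)
{
  sigset_t block_mask, prev_mask;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      int wstat;

      if (waitpid (-1, &wstat, __WALL | WNOHANG) > 0)
	continue;		/* Record the event; keep draining.  */

      /* Nothing pending: atomically unblock and wait for SIGCHLD.
	 A real caller eventually restores PREV_MASK and returns.  */
      sigsuspend (&prev_mask);
    }
}
#endif
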
2577 int
2578 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2579 {
2580 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2581 }
2582
2583 /* Select one LWP out of those that have events pending. */
2584
2585 static void
2586 select_event_lwp (struct lwp_info **orig_lp)
2587 {
2588 struct thread_info *event_thread = NULL;
2589
2590 /* In all-stop, give preference to the LWP that is being
2591 single-stepped. There will be at most one, and it's the LWP that
2592 the core is most interested in. If we didn't do this, then we'd
2593 have to handle pending step SIGTRAPs somehow in case the core
2594 later continues the previously-stepped thread, otherwise we'd
2595 report the pending SIGTRAP, and the core, not having stepped the
2596 thread, wouldn't understand what the trap was for, and therefore
2597 would report it to the user as a random signal. */
2598 if (!non_stop)
2599 {
2600 event_thread = find_thread ([] (thread_info *thread)
2601 {
2602 lwp_info *lp = get_thread_lwp (thread);
2603
2604 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2605 && thread->last_resume_kind == resume_step
2606 && lp->status_pending_p);
2607 });
2608
2609 if (event_thread != NULL)
2610 threads_debug_printf
2611 ("Select single-step %s",
2612 target_pid_to_str (ptid_of (event_thread)).c_str ());
2613 }
2614 if (event_thread == NULL)
2615 {
2616 /* No single-stepping LWP. Select one at random, out of those
2617 which have had events. */
2618
2619 event_thread = find_thread_in_random ([&] (thread_info *thread)
2620 {
2621 lwp_info *lp = get_thread_lwp (thread);
2622
2623 /* Only resumed LWPs that have an event pending. */
2624 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2625 && lp->status_pending_p);
2626 });
2627 }
2628
2629 if (event_thread != NULL)
2630 {
2631 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2632
2633 /* Switch the event LWP. */
2634 *orig_lp = event_lp;
2635 }
2636 }
2637
2638 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2639 non-NULL. */
2640
2641 static void
2642 unsuspend_all_lwps (struct lwp_info *except)
2643 {
2644 for_each_thread ([&] (thread_info *thread)
2645 {
2646 lwp_info *lwp = get_thread_lwp (thread);
2647
2648 if (lwp != except)
2649 lwp_suspended_decr (lwp);
2650 });
2651 }
2652
2653 static bool lwp_running (thread_info *thread);
2654
2655 /* Stabilize threads (move out of jump pads).
2656
2657 If a thread is midway collecting a fast tracepoint, we need to
2658 finish the collection and move it out of the jump pad before
2659 reporting the signal.
2660
2661 This avoids recursion while collecting (when a signal arrives
2662 midway, and the signal handler itself collects), which would trash
2663 the trace buffer. In case the user set a breakpoint in a signal
2664 handler, this avoids the backtrace showing the jump pad, etc.
2665 Most importantly, there are certain things we can't do safely if
2666 threads are stopped in a jump pad (or in its callees). For
2667 example:
2668
2669 - starting a new trace run. A thread still collecting the
2670 previous run could trash the trace buffer when resumed. The trace
2671 buffer control structures would have been reset but the thread had
2672 no way to tell. The thread could even be midway through memcpy'ing
2673 to the buffer, which would mean that when resumed, it would clobber
2674 the trace buffer that had been set up for a new run.
2675
2676 - we can't rewrite/reuse the jump pads for new tracepoints
2677 safely. Say you do tstart while a thread is stopped midway
2678 through collecting. When the thread is later resumed, it finishes the
2679 collection, and returns to the jump pad, to execute the original
2680 instruction that was under the tracepoint jump at the time the
2681 older run had been started. If the jump pad had been rewritten
2682 since for something else in the new run, the thread would now
2683 execute the wrong / random instructions. */
2684
2685 void
2686 linux_process_target::stabilize_threads ()
2687 {
2688 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2689 {
2690 return stuck_in_jump_pad (thread);
2691 });
2692
2693 if (thread_stuck != NULL)
2694 {
2695 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2696 lwpid_of (thread_stuck));
2697 return;
2698 }
2699
2700 scoped_restore_current_thread restore_thread;
2701
2702 stabilizing_threads = 1;
2703
2704 /* Kick 'em all. */
2705 for_each_thread ([this] (thread_info *thread)
2706 {
2707 move_out_of_jump_pad (thread);
2708 });
2709
2710 /* Loop until all are stopped out of the jump pads. */
2711 while (find_thread (lwp_running) != NULL)
2712 {
2713 struct target_waitstatus ourstatus;
2714 struct lwp_info *lwp;
2715 int wstat;
2716
2717 /* Note that we go through the full wait event loop. While
2718 moving threads out of jump pad, we need to be able to step
2719 over internal breakpoints and such. */
2720 wait_1 (minus_one_ptid, &ourstatus, 0);
2721
2722 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2723 {
2724 lwp = get_thread_lwp (current_thread);
2725
2726 /* Lock it. */
2727 lwp_suspended_inc (lwp);
2728
2729 if (ourstatus.sig () != GDB_SIGNAL_0
2730 || current_thread->last_resume_kind == resume_stop)
2731 {
2732 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2733 enqueue_one_deferred_signal (lwp, &wstat);
2734 }
2735 }
2736 }
2737
2738 unsuspend_all_lwps (NULL);
2739
2740 stabilizing_threads = 0;
2741
2742 if (debug_threads)
2743 {
2744 thread_stuck = find_thread ([this] (thread_info *thread)
2745 {
2746 return stuck_in_jump_pad (thread);
2747 });
2748
2749 if (thread_stuck != NULL)
2750 threads_debug_printf
2751 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2752 lwpid_of (thread_stuck));
2753 }
2754 }
2755
2756 /* Convenience function that is called when the kernel reports an
2757 event that is not passed out to GDB. */
2758
2759 static ptid_t
2760 ignore_event (struct target_waitstatus *ourstatus)
2761 {
2762 /* If we got an event, there may still be others, as a single
2763 SIGCHLD can indicate more than one child stopped. This forces
2764 another target_wait call. */
2765 async_file_mark ();
2766
2767 ourstatus->set_ignore ();
2768 return null_ptid;
2769 }
2770
2771 ptid_t
2772 linux_process_target::filter_exit_event (lwp_info *event_child,
2773 target_waitstatus *ourstatus)
2774 {
2775 client_state &cs = get_client_state ();
2776 struct thread_info *thread = get_lwp_thread (event_child);
2777 ptid_t ptid = ptid_of (thread);
2778
2779 if (!last_thread_of_process_p (pid_of (thread)))
2780 {
2781 if (cs.report_thread_events)
2782 ourstatus->set_thread_exited (0);
2783 else
2784 ourstatus->set_ignore ();
2785
2786 delete_lwp (event_child);
2787 }
2788 return ptid;
2789 }
2790
2791 /* Returns 1 if GDB is interested in any event_child syscalls. */
2792
2793 static int
2794 gdb_catching_syscalls_p (struct lwp_info *event_child)
2795 {
2796 struct thread_info *thread = get_lwp_thread (event_child);
2797 struct process_info *proc = get_thread_process (thread);
2798
2799 return !proc->syscalls_to_catch.empty ();
2800 }
2801
2802 bool
2803 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2804 {
2805 int sysno;
2806 struct thread_info *thread = get_lwp_thread (event_child);
2807 struct process_info *proc = get_thread_process (thread);
2808
2809 if (proc->syscalls_to_catch.empty ())
2810 return false;
2811
2812 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2813 return true;
2814
2815 get_syscall_trapinfo (event_child, &sysno);
2816
2817 for (int iter : proc->syscalls_to_catch)
2818 if (iter == sysno)
2819 return true;
2820
2821 return false;
2822 }
2823
2824 ptid_t
2825 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2826 target_wait_flags target_options)
2827 {
2828 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2829
2830 client_state &cs = get_client_state ();
2831 int w;
2832 struct lwp_info *event_child;
2833 int options;
2834 int pid;
2835 int step_over_finished;
2836 int bp_explains_trap;
2837 int maybe_internal_trap;
2838 int report_to_gdb;
2839 int trace_event;
2840 int in_step_range;
2841 int any_resumed;
2842
2843 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2844
2845 /* Translate generic target options into linux options. */
2846 options = __WALL;
2847 if (target_options & TARGET_WNOHANG)
2848 options |= WNOHANG;
2849
2850 bp_explains_trap = 0;
2851 trace_event = 0;
2852 in_step_range = 0;
2853 ourstatus->set_ignore ();
2854
2855 auto status_pending_p_any = [&] (thread_info *thread)
2856 {
2857 return status_pending_p_callback (thread, minus_one_ptid);
2858 };
2859
2860 auto not_stopped = [&] (thread_info *thread)
2861 {
2862 return not_stopped_callback (thread, minus_one_ptid);
2863 };
2864
2865 /* Find a resumed LWP, if any. */
2866 if (find_thread (status_pending_p_any) != NULL)
2867 any_resumed = 1;
2868 else if (find_thread (not_stopped) != NULL)
2869 any_resumed = 1;
2870 else
2871 any_resumed = 0;
2872
2873 if (step_over_bkpt == null_ptid)
2874 pid = wait_for_event (ptid, &w, options);
2875 else
2876 {
2877 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2878 target_pid_to_str (step_over_bkpt).c_str ());
2879 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2880 }
2881
2882 if (pid == 0 || (pid == -1 && !any_resumed))
2883 {
2884 gdb_assert (target_options & TARGET_WNOHANG);
2885
2886 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2887
2888 ourstatus->set_ignore ();
2889 return null_ptid;
2890 }
2891 else if (pid == -1)
2892 {
2893 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2894
2895 ourstatus->set_no_resumed ();
2896 return null_ptid;
2897 }
2898
2899 event_child = get_thread_lwp (current_thread);
2900
2901 /* wait_for_event only returns an exit status for the last
2902 child of a process. Report it. */
2903 if (WIFEXITED (w) || WIFSIGNALED (w))
2904 {
2905 if (WIFEXITED (w))
2906 {
2907 ourstatus->set_exited (WEXITSTATUS (w));
2908
2909 threads_debug_printf
2910 ("ret = %s, exited with retcode %d",
2911 target_pid_to_str (ptid_of (current_thread)).c_str (),
2912 WEXITSTATUS (w));
2913 }
2914 else
2915 {
2916 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
2917
2918 threads_debug_printf
2919 ("ret = %s, terminated with signal %d",
2920 target_pid_to_str (ptid_of (current_thread)).c_str (),
2921 WTERMSIG (w));
2922 }
2923
2924 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
2925 return filter_exit_event (event_child, ourstatus);
2926
2927 return ptid_of (current_thread);
2928 }
2929
2930 /* If a step-over executed a breakpoint instruction, then in the case
2931 of a hardware single step it means a gdb/gdbserver breakpoint had
2932 been planted on top of a permanent breakpoint, while in the case of
2933 a software single step it may just mean that gdbserver hit the
2934 reinsert breakpoint. The PC has been adjusted by save_stop_reason
2935 to point at the breakpoint address.
2936 So for a hardware single step, advance the PC manually past the
2937 breakpoint; for a software single step, advance it only if it's not
2938 the single_step_breakpoint we are hitting.
2939 This prevents the program from trapping on a permanent breakpoint
2940 forever. */
2941 if (step_over_bkpt != null_ptid
2942 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2943 && (event_child->stepping
2944 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
2945 {
2946 int increment_pc = 0;
2947 int breakpoint_kind = 0;
2948 CORE_ADDR stop_pc = event_child->stop_pc;
2949
2950 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
2951 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
2952
2953 threads_debug_printf
2954 ("step-over for %s executed software breakpoint",
2955 target_pid_to_str (ptid_of (current_thread)).c_str ());
2956
2957 if (increment_pc != 0)
2958 {
2959 struct regcache *regcache
2960 = get_thread_regcache (current_thread, 1);
2961
2962 event_child->stop_pc += increment_pc;
2963 low_set_pc (regcache, event_child->stop_pc);
2964
2965 if (!low_breakpoint_at (event_child->stop_pc))
2966 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2967 }
2968 }
2969
2970 /* If this event was not handled before, and is not a SIGTRAP, we
2971 report it. SIGILL and SIGSEGV are also treated as traps in case
2972 a breakpoint is inserted at the current PC. If this target does
2973 not support internal breakpoints at all, we also report the
2974 SIGTRAP without further processing; it's of no concern to us. */
2975 maybe_internal_trap
2976 = (low_supports_breakpoints ()
2977 && (WSTOPSIG (w) == SIGTRAP
2978 || ((WSTOPSIG (w) == SIGILL
2979 || WSTOPSIG (w) == SIGSEGV)
2980 && low_breakpoint_at (event_child->stop_pc))));
2981
2982 if (maybe_internal_trap)
2983 {
2984 /* Handle anything that requires bookkeeping before deciding to
2985 report the event or continue waiting. */
2986
2987 /* First check if we can explain the SIGTRAP with an internal
2988 breakpoint, or if we should possibly report the event to GDB.
2989 Do this before anything that may remove or insert a
2990 breakpoint. */
2991 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2992
2993 /* We have a SIGTRAP, possibly a step-over dance has just
2994 finished. If so, tweak the state machine accordingly,
2995 reinsert breakpoints and delete any single-step
2996 breakpoints. */
2997 step_over_finished = finish_step_over (event_child);
2998
2999 /* Now invoke the callbacks of any internal breakpoints there. */
3000 check_breakpoints (event_child->stop_pc);
3001
3002 /* Handle tracepoint data collecting. This may overflow the
3003 trace buffer, and cause a tracing stop, removing
3004 breakpoints. */
3005 trace_event = handle_tracepoints (event_child);
3006
3007 if (bp_explains_trap)
3008 threads_debug_printf ("Hit a gdbserver breakpoint.");
3009 }
3010 else
3011 {
3012 /* We have some other signal, possibly a step-over dance was in
3013 progress, and it should be cancelled too. */
3014 step_over_finished = finish_step_over (event_child);
3015 }
3016
3017 /* We have all the data we need. Either report the event to GDB, or
3018 resume threads and keep waiting for more. */
3019
3020 /* If we're collecting a fast tracepoint, finish the collection and
3021 move out of the jump pad before delivering a signal. See
3022 linux_stabilize_threads. */
3023
3024 if (WIFSTOPPED (w)
3025 && WSTOPSIG (w) != SIGTRAP
3026 && supports_fast_tracepoints ()
3027 && agent_loaded_p ())
3028 {
3029 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3030 "to defer or adjust it.",
3031 WSTOPSIG (w), lwpid_of (current_thread));
3032
3033 /* Allow debugging the jump pad itself. */
3034 if (current_thread->last_resume_kind != resume_step
3035 && maybe_move_out_of_jump_pad (event_child, &w))
3036 {
3037 enqueue_one_deferred_signal (event_child, &w);
3038
3039 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3040 WSTOPSIG (w), lwpid_of (current_thread));
3041
3042 resume_one_lwp (event_child, 0, 0, NULL);
3043
3044 return ignore_event (ourstatus);
3045 }
3046 }
3047
3048 if (event_child->collecting_fast_tracepoint
3049 != fast_tpoint_collect_result::not_collecting)
3050 {
3051 threads_debug_printf
3052 ("LWP %ld was trying to move out of the jump pad (%d). "
3053 "Check if we're already there.",
3054 lwpid_of (current_thread),
3055 (int) event_child->collecting_fast_tracepoint);
3056
3057 trace_event = 1;
3058
3059 event_child->collecting_fast_tracepoint
3060 = linux_fast_tracepoint_collecting (event_child, NULL);
3061
3062 if (event_child->collecting_fast_tracepoint
3063 != fast_tpoint_collect_result::before_insn)
3064 {
3065 /* No longer need this breakpoint. */
3066 if (event_child->exit_jump_pad_bkpt != NULL)
3067 {
3068 threads_debug_printf
3069 ("No longer need exit-jump-pad bkpt; removing it."
3070 "stopping all threads momentarily.");
3071
3072 /* Other running threads could hit this breakpoint.
3073 We don't handle moribund locations like GDB does,
3074 instead we always pause all threads when removing
3075 breakpoints, so that any step-over or
3076 decr_pc_after_break adjustment is always taken
3077 care of while the breakpoint is still
3078 inserted. */
3079 stop_all_lwps (1, event_child);
3080
3081 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3082 event_child->exit_jump_pad_bkpt = NULL;
3083
3084 unstop_all_lwps (1, event_child);
3085
3086 gdb_assert (event_child->suspended >= 0);
3087 }
3088 }
3089
3090 if (event_child->collecting_fast_tracepoint
3091 == fast_tpoint_collect_result::not_collecting)
3092 {
3093 threads_debug_printf
3094 ("fast tracepoint finished collecting successfully.");
3095
3096 /* We may have a deferred signal to report. */
3097 if (dequeue_one_deferred_signal (event_child, &w))
3098 threads_debug_printf ("dequeued one signal.");
3099 else
3100 {
3101 threads_debug_printf ("no deferred signals.");
3102
3103 if (stabilizing_threads)
3104 {
3105 ourstatus->set_stopped (GDB_SIGNAL_0);
3106
3107 threads_debug_printf
3108 ("ret = %s, stopped while stabilizing threads",
3109 target_pid_to_str (ptid_of (current_thread)).c_str ());
3110
3111 return ptid_of (current_thread);
3112 }
3113 }
3114 }
3115 }
3116
3117 /* Check whether GDB would be interested in this event. */
3118
3119 /* Check if GDB is interested in this syscall. */
3120 if (WIFSTOPPED (w)
3121 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3122 && !gdb_catch_this_syscall (event_child))
3123 {
3124 threads_debug_printf ("Ignored syscall for LWP %ld.",
3125 lwpid_of (current_thread));
3126
3127 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3128
3129 return ignore_event (ourstatus);
3130 }
3131
3132 /* If GDB is not interested in this signal, don't stop other
3133 threads, and don't report it to GDB. Just resume the inferior
3134 right away. We do this for threading-related signals as well as
3135 any that GDB specifically requested we ignore. But never ignore
3136 SIGSTOP if we sent it ourselves, and do not ignore signals when
3137 stepping - they may require special handling to skip the signal
3138 handler. Also never ignore signals that could be caused by a
3139 breakpoint. */
3140 if (WIFSTOPPED (w)
3141 && current_thread->last_resume_kind != resume_step
3142 && (
3143 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3144 (current_process ()->priv->thread_db != NULL
3145 && (WSTOPSIG (w) == __SIGRTMIN
3146 || WSTOPSIG (w) == __SIGRTMIN + 1))
3147 ||
3148 #endif
3149 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3150 && !(WSTOPSIG (w) == SIGSTOP
3151 && current_thread->last_resume_kind == resume_stop)
3152 && !linux_wstatus_maybe_breakpoint (w))))
3153 {
3154 siginfo_t info, *info_p;
3155
3156 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3157 WSTOPSIG (w), lwpid_of (current_thread));
3158
3159 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3160 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3161 info_p = &info;
3162 else
3163 info_p = NULL;
3164
3165 if (step_over_finished)
3166 {
3167 /* We cancelled this thread's step-over above. We still
3168 need to unsuspend all other LWPs, and set them back
3169 running again while the signal handler runs. */
3170 unsuspend_all_lwps (event_child);
3171
3172 /* Enqueue the pending signal info so that proceed_all_lwps
3173 doesn't lose it. */
3174 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3175
3176 proceed_all_lwps ();
3177 }
3178 else
3179 {
3180 resume_one_lwp (event_child, event_child->stepping,
3181 WSTOPSIG (w), info_p);
3182 }
3183
3184 return ignore_event (ourstatus);
3185 }
3186
3187 /* Note that all addresses are always "out of the step range" when
3188 there's no range to begin with. */
3189 in_step_range = lwp_in_step_range (event_child);
3190
3191 /* If GDB wanted this thread to single step, and the thread is out
3192 of the step range, we always want to report the SIGTRAP, and let
3193 GDB handle it. Watchpoints should always be reported. So should
3194 signals we can't explain. A SIGTRAP we can't explain could be a
3195 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3196 we do, we'll be able to handle GDB breakpoints on top of internal
3197 breakpoints, by handling the internal breakpoint and still
3198 reporting the event to GDB. If we don't, we're out of luck, GDB
3199 won't see the breakpoint hit. If we see a single-step event but
3200 the thread should be continuing, don't pass the trap to gdb.
3201 That indicates that we had previously finished a single-step but
3202 left the single-step pending -- see
3203 complete_ongoing_step_over. */
3204 report_to_gdb = (!maybe_internal_trap
3205 || (current_thread->last_resume_kind == resume_step
3206 && !in_step_range)
3207 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3208 || (!in_step_range
3209 && !bp_explains_trap
3210 && !trace_event
3211 && !step_over_finished
3212 && !(current_thread->last_resume_kind == resume_continue
3213 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3214 || (gdb_breakpoint_here (event_child->stop_pc)
3215 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3216 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3217 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3218
3219 run_breakpoint_commands (event_child->stop_pc);
3220
3221 /* We found no reason GDB would want us to stop. We either hit one
3222 of our own breakpoints, or finished an internal step GDB
3223 shouldn't know about. */
3224 if (!report_to_gdb)
3225 {
3226 if (bp_explains_trap)
3227 threads_debug_printf ("Hit a gdbserver breakpoint.");
3228
3229 if (step_over_finished)
3230 threads_debug_printf ("Step-over finished.");
3231
3232 if (trace_event)
3233 threads_debug_printf ("Tracepoint event.");
3234
3235 if (lwp_in_step_range (event_child))
3236 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3237 paddress (event_child->stop_pc),
3238 paddress (event_child->step_range_start),
3239 paddress (event_child->step_range_end));
3240
3241 /* We're not reporting this breakpoint to GDB, so apply the
3242 decr_pc_after_break adjustment to the inferior's regcache
3243 ourselves. */
3244
3245 if (low_supports_breakpoints ())
3246 {
3247 struct regcache *regcache
3248 = get_thread_regcache (current_thread, 1);
3249 low_set_pc (regcache, event_child->stop_pc);
3250 }
3251
3252 if (step_over_finished)
3253 {
3254 /* If we have finished stepping over a breakpoint, we've
3255 stopped and suspended all LWPs momentarily except the
3256 stepping one. This is where we resume them all again.
3257 We're going to keep waiting, so use proceed, which
3258 handles stepping over the next breakpoint. */
3259 unsuspend_all_lwps (event_child);
3260 }
3261 else
3262 {
3263 /* Remove the single-step breakpoints if any. Note that
3264 there isn't single-step breakpoint if we finished stepping
3265 over. */
3266 if (supports_software_single_step ()
3267 && has_single_step_breakpoints (current_thread))
3268 {
3269 stop_all_lwps (0, event_child);
3270 delete_single_step_breakpoints (current_thread);
3271 unstop_all_lwps (0, event_child);
3272 }
3273 }
3274
3275 threads_debug_printf ("proceeding all threads.");
3276
3277 proceed_all_lwps ();
3278
3279 return ignore_event (ourstatus);
3280 }
3281
3282 if (debug_threads)
3283 {
3284 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3285 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3286 lwpid_of (get_lwp_thread (event_child)),
3287 event_child->waitstatus.to_string ().c_str ());
3288
3289 if (current_thread->last_resume_kind == resume_step)
3290 {
3291 if (event_child->step_range_start == event_child->step_range_end)
3292 threads_debug_printf
3293 ("GDB wanted to single-step, reporting event.");
3294 else if (!lwp_in_step_range (event_child))
3295 threads_debug_printf ("Out of step range, reporting event.");
3296 }
3297
3298 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3299 threads_debug_printf ("Stopped by watchpoint.");
3300 else if (gdb_breakpoint_here (event_child->stop_pc))
3301 threads_debug_printf ("Stopped by GDB breakpoint.");
3302 }
3303
3304 threads_debug_printf ("Hit a non-gdbserver trap event.");
3305
3306 /* Alright, we're going to report a stop. */
3307
3308 /* Remove single-step breakpoints. */
3309 if (supports_software_single_step ())
3310 {
3311 /* Decide whether to remove single-step breakpoints. If so, stop
3312 all LWPs momentarily, so that other threads won't hit a
3313 breakpoint left in stale memory. */
3314 int remove_single_step_breakpoints_p = 0;
3315
3316 if (non_stop)
3317 {
3318 remove_single_step_breakpoints_p
3319 = has_single_step_breakpoints (current_thread);
3320 }
3321 else
3322 {
3323 /* In all-stop, a stop reply cancels all previous resume
3324 requests. Delete all single-step breakpoints. */
3325
3326 find_thread ([&] (thread_info *thread) {
3327 if (has_single_step_breakpoints (thread))
3328 {
3329 remove_single_step_breakpoints_p = 1;
3330 return true;
3331 }
3332
3333 return false;
3334 });
3335 }
3336
3337 if (remove_single_step_breakpoints_p)
3338 {
3339 /* If we remove single-step breakpoints from memory, stop all lwps,
3340 so that other threads won't hit the breakpoint in the stale
3341 memory. */
3342 stop_all_lwps (0, event_child);
3343
3344 if (non_stop)
3345 {
3346 gdb_assert (has_single_step_breakpoints (current_thread));
3347 delete_single_step_breakpoints (current_thread);
3348 }
3349 else
3350 {
3351 for_each_thread ([] (thread_info *thread){
3352 if (has_single_step_breakpoints (thread))
3353 delete_single_step_breakpoints (thread);
3354 });
3355 }
3356
3357 unstop_all_lwps (0, event_child);
3358 }
3359 }
3360
3361 if (!stabilizing_threads)
3362 {
3363 /* In all-stop, stop all threads. */
3364 if (!non_stop)
3365 stop_all_lwps (0, NULL);
3366
3367 if (step_over_finished)
3368 {
3369 if (!non_stop)
3370 {
3371 /* If we were doing a step-over, all other threads but
3372 the stepping one had been paused in start_step_over,
3373 with their suspend counts incremented. We don't want
3374 to do a full unstop/unpause, because we're in
3375 all-stop mode (so we want threads stopped), but we
3376 still need to unsuspend the other threads, to
3377 decrement their `suspended' count back. */
3378 unsuspend_all_lwps (event_child);
3379 }
3380 else
3381 {
3382 /* If we just finished a step-over, then all threads had
3383 been momentarily paused. In all-stop, that's fine,
3384 we want threads stopped by now anyway. In non-stop,
3385 we need to re-resume threads that GDB wanted to be
3386 running. */
3387 unstop_all_lwps (1, event_child);
3388 }
3389 }
3390
3391 /* If we're not waiting for a specific LWP, choose an event LWP
3392 from among those that have had events. Giving equal priority
3393 to all LWPs that have had events helps prevent
3394 starvation. */
3395 if (ptid == minus_one_ptid)
3396 {
3397 event_child->status_pending_p = 1;
3398 event_child->status_pending = w;
3399
3400 select_event_lwp (&event_child);
3401
3402 /* current_thread and event_child must stay in sync. */
3403 switch_to_thread (get_lwp_thread (event_child));
3404
3405 event_child->status_pending_p = 0;
3406 w = event_child->status_pending;
3407 }
3408
3409
3410 /* Stabilize threads (move out of jump pads). */
3411 if (!non_stop)
3412 target_stabilize_threads ();
3413 }
3414 else
3415 {
3416 /* If we just finished a step-over, then all threads had been
3417 momentarily paused. In all-stop, that's fine, we want
3418 threads stopped by now anyway. In non-stop, we need to
3419 re-resume threads that GDB wanted to be running. */
3420 if (step_over_finished)
3421 unstop_all_lwps (1, event_child);
3422 }
3423
3424 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3425 {
3426 /* If the reported event is an exit, fork, vfork or exec, let
3427 GDB know. */
3428
3429 /* Break the unreported fork relationship chain. */
3430 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3431 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3432 {
3433 event_child->fork_relative->fork_relative = NULL;
3434 event_child->fork_relative = NULL;
3435 }
3436
3437 *ourstatus = event_child->waitstatus;
3438 /* Clear the event lwp's waitstatus since we handled it already. */
3439 event_child->waitstatus.set_ignore ();
3440 }
3441 else
3442 {
3443 /* The actual stop signal is overwritten below. */
3444 ourstatus->set_stopped (GDB_SIGNAL_0);
3445 }
3446
3447 /* Now that we've selected our final event LWP, un-adjust its PC if
3448 it was a software breakpoint, and the client doesn't know we can
3449 adjust the breakpoint ourselves. */
3450 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3451 && !cs.swbreak_feature)
3452 {
3453 int decr_pc = low_decr_pc_after_break ();
3454
3455 if (decr_pc != 0)
3456 {
3457 struct regcache *regcache
3458 = get_thread_regcache (current_thread, 1);
3459 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3460 }
3461 }
3462
3463 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3464 {
3465 int syscall_number;
3466
3467 get_syscall_trapinfo (event_child, &syscall_number);
3468 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3469 ourstatus->set_syscall_entry (syscall_number);
3470 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3471 ourstatus->set_syscall_return (syscall_number);
3472 else
3473 gdb_assert_not_reached ("unexpected syscall state");
3474 }
3475 else if (current_thread->last_resume_kind == resume_stop
3476 && WSTOPSIG (w) == SIGSTOP)
3477 {
3478 /* A thread that has been requested to stop by GDB with vCont;t,
3479 and it stopped cleanly, so report as SIG0. The use of
3480 SIGSTOP is an implementation detail. */
3481 ourstatus->set_stopped (GDB_SIGNAL_0);
3482 }
3483 else if (current_thread->last_resume_kind == resume_stop
3484 && WSTOPSIG (w) != SIGSTOP)
3485 {
3486 /* A thread that has been requested to stop by GDB with vCont;t,
3487 but it stopped for other reasons. */
3488 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3489 }
3490 else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
3491 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3492
3493 gdb_assert (step_over_bkpt == null_ptid);
3494
3495 threads_debug_printf ("ret = %s, %d, %d",
3496 target_pid_to_str (ptid_of (current_thread)).c_str (),
3497 ourstatus->kind (), ourstatus->sig ());
3498
3499 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3500 return filter_exit_event (event_child, ourstatus);
3501
3502 return ptid_of (current_thread);
3503 }
3504
3505 /* Get rid of any pending event in the pipe. */
3506 static void
3507 async_file_flush (void)
3508 {
3509 int ret;
3510 char buf;
3511
3512 do
3513 ret = read (linux_event_pipe[0], &buf, 1);
3514 while (ret >= 0 || (ret == -1 && errno == EINTR));
3515 }
3516
3517 /* Put something in the pipe, so the event loop wakes up. */
3518 static void
3519 async_file_mark (void)
3520 {
3521 int ret;
3522
3523 async_file_flush ();
3524
3525 do
3526 ret = write (linux_event_pipe[1], "+", 1);
3527 while (ret == 0 || (ret == -1 && errno == EINTR));
3528
3529 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3530 be awakened anyway. */
3531 }
3532
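/* A hedged sketch (not built) of setting up the self-pipe that
   async_file_flush and async_file_mark above operate on.  Both ends
   are made non-blocking so that flushing can read until EAGAIN and
   marking can ignore a full pipe; the real setup lives in the
   target's async hook.  */
#if 0
static int
example_open_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif
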
3533 ptid_t
3534 linux_process_target::wait (ptid_t ptid,
3535 target_waitstatus *ourstatus,
3536 target_wait_flags target_options)
3537 {
3538 ptid_t event_ptid;
3539
3540 /* Flush the async file first. */
3541 if (target_is_async_p ())
3542 async_file_flush ();
3543
3544 do
3545 {
3546 event_ptid = wait_1 (ptid, ourstatus, target_options);
3547 }
3548 while ((target_options & TARGET_WNOHANG) == 0
3549 && event_ptid == null_ptid
3550 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3551
3552 /* If at least one stop was reported, there may be more. A single
3553 SIGCHLD can signal more than one child stop. */
3554 if (target_is_async_p ()
3555 && (target_options & TARGET_WNOHANG) != 0
3556 && event_ptid != null_ptid)
3557 async_file_mark ();
3558
3559 return event_ptid;
3560 }
3561
3562 /* Send a signal to an LWP. */
3563
3564 static int
3565 kill_lwp (unsigned long lwpid, int signo)
3566 {
3567 int ret;
3568
3569 errno = 0;
3570 ret = syscall (__NR_tkill, lwpid, signo);
3571 if (errno == ENOSYS)
3572 {
3573 /* If tkill fails, then we are not using nptl threads, a
3574 configuration we no longer support. */
3575 perror_with_name (("tkill"));
3576 }
3577 return ret;
3578 }
3579
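/* A small sketch (not built) of why kill_lwp above uses the tkill
   syscall rather than kill(): kill() addresses a whole thread group,
   while tkill() addresses one specific thread, which is what stopping
   a single LWP requires.  tgkill() is the race-safer variant.  */
#if 0
static int
example_signal_one_thread (int tgid, int tid, int signo)
{
  /* tgkill verifies that TID still belongs to TGID, guarding against
     the TID having been recycled by an unrelated thread.  */
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif
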
3580 void
3581 linux_stop_lwp (struct lwp_info *lwp)
3582 {
3583 send_sigstop (lwp);
3584 }
3585
3586 static void
3587 send_sigstop (struct lwp_info *lwp)
3588 {
3589 int pid;
3590
3591 pid = lwpid_of (get_lwp_thread (lwp));
3592
3593 /* If we already have a pending stop signal for this process, don't
3594 send another. */
3595 if (lwp->stop_expected)
3596 {
3597 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3598
3599 return;
3600 }
3601
3602 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3603
3604 lwp->stop_expected = 1;
3605 kill_lwp (pid, SIGSTOP);
3606 }
3607
3608 static void
3609 send_sigstop (thread_info *thread, lwp_info *except)
3610 {
3611 struct lwp_info *lwp = get_thread_lwp (thread);
3612
3613 /* Ignore EXCEPT. */
3614 if (lwp == except)
3615 return;
3616
3617 if (lwp->stopped)
3618 return;
3619
3620 send_sigstop (lwp);
3621 }
3622
3623 /* Increment the suspend count of an LWP, and stop it, if not stopped
3624 yet. */
3625 static void
3626 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3627 {
3628 struct lwp_info *lwp = get_thread_lwp (thread);
3629
3630 /* Ignore EXCEPT. */
3631 if (lwp == except)
3632 return;
3633
3634 lwp_suspended_inc (lwp);
3635
3636 send_sigstop (thread, except);
3637 }
3638
3639 static void
3640 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3641 {
3642 /* Store the exit status for later. */
3643 lwp->status_pending_p = 1;
3644 lwp->status_pending = wstat;
3645
3646 /* Store in waitstatus as well, as there's nothing else to process
3647 for this event. */
3648 if (WIFEXITED (wstat))
3649 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3650 else if (WIFSIGNALED (wstat))
3651 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3652
3653 /* Prevent trying to stop it. */
3654 lwp->stopped = 1;
3655
3656 /* No further stops are expected from a dead lwp. */
3657 lwp->stop_expected = 0;
3658 }
3659
3660 /* Return true if LWP has exited already, and has a pending exit event
3661 to report to GDB. */
3662
3663 static int
3664 lwp_is_marked_dead (struct lwp_info *lwp)
3665 {
3666 return (lwp->status_pending_p
3667 && (WIFEXITED (lwp->status_pending)
3668 || WIFSIGNALED (lwp->status_pending)));
3669 }
3670
3671 void
3672 linux_process_target::wait_for_sigstop ()
3673 {
3674 struct thread_info *saved_thread;
3675 ptid_t saved_tid;
3676 int wstat;
3677 int ret;
3678
3679 saved_thread = current_thread;
3680 if (saved_thread != NULL)
3681 saved_tid = saved_thread->id;
3682 else
3683 saved_tid = null_ptid; /* avoid bogus unused warning */
3684
3685 scoped_restore_current_thread restore_thread;
3686
3687 threads_debug_printf ("pulling events");
3688
3689 /* Passing NULL_PTID as filter indicates we want all events to be
3690 left pending. Eventually this returns when there are no
3691 unwaited-for children left. */
3692 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3693 gdb_assert (ret == -1);
3694
3695 if (saved_thread == NULL || mythread_alive (saved_tid))
3696 return;
3697 else
3698 {
3699 threads_debug_printf ("Previously current thread died.");
3700
3701 /* We can't change the current inferior behind GDB's back,
3702 otherwise, a subsequent command may apply to the wrong
3703 process. */
3704 restore_thread.dont_restore ();
3705 switch_to_thread (nullptr);
3706 }
3707 }
3708
3709 bool
3710 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3711 {
3712 struct lwp_info *lwp = get_thread_lwp (thread);
3713
3714 if (lwp->suspended != 0)
3715 {
3716 internal_error (__FILE__, __LINE__,
3717 "LWP %ld is suspended, suspended=%d\n",
3718 lwpid_of (thread), lwp->suspended);
3719 }
3720 gdb_assert (lwp->stopped);
3721
3722 /* Allow debugging the jump pad, gdb_collect, etc. */
3723 return (supports_fast_tracepoints ()
3724 && agent_loaded_p ()
3725 && (gdb_breakpoint_here (lwp->stop_pc)
3726 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3727 || thread->last_resume_kind == resume_step)
3728 && (linux_fast_tracepoint_collecting (lwp, NULL)
3729 != fast_tpoint_collect_result::not_collecting));
3730 }
3731
3732 void
3733 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3734 {
3735 struct lwp_info *lwp = get_thread_lwp (thread);
3736 int *wstat;
3737
3738 if (lwp->suspended != 0)
3739 {
3740 internal_error (__FILE__, __LINE__,
3741 "LWP %ld is suspended, suspended=%d\n",
3742 lwpid_of (thread), lwp->suspended);
3743 }
3744 gdb_assert (lwp->stopped);
3745
3746 /* For gdb_breakpoint_here. */
3747 scoped_restore_current_thread restore_thread;
3748 switch_to_thread (thread);
3749
3750 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3751
3752 /* Allow debugging the jump pad, gdb_collect, etc. */
3753 if (!gdb_breakpoint_here (lwp->stop_pc)
3754 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3755 && thread->last_resume_kind != resume_step
3756 && maybe_move_out_of_jump_pad (lwp, wstat))
3757 {
3758 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3759 lwpid_of (thread));
3760
3761 if (wstat)
3762 {
3763 lwp->status_pending_p = 0;
3764 enqueue_one_deferred_signal (lwp, wstat);
3765
3766 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3767 WSTOPSIG (*wstat), lwpid_of (thread));
3768 }
3769
3770 resume_one_lwp (lwp, 0, 0, NULL);
3771 }
3772 else
3773 lwp_suspended_inc (lwp);
3774 }
3775
3776 static bool
3777 lwp_running (thread_info *thread)
3778 {
3779 struct lwp_info *lwp = get_thread_lwp (thread);
3780
3781 if (lwp_is_marked_dead (lwp))
3782 return false;
3783
3784 return !lwp->stopped;
3785 }
3786
3787 void
3788 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3789 {
3790 /* Should not be called recursively. */
3791 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3792
3793 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3794
3795 threads_debug_printf
3796 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3797 (except != NULL
3798 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3799 : "none"));
3800
3801 stopping_threads = (suspend
3802 ? STOPPING_AND_SUSPENDING_THREADS
3803 : STOPPING_THREADS);
3804
3805 if (suspend)
3806 for_each_thread ([&] (thread_info *thread)
3807 {
3808 suspend_and_send_sigstop (thread, except);
3809 });
3810 else
3811 for_each_thread ([&] (thread_info *thread)
3812 {
3813 send_sigstop (thread, except);
3814 });
3815
3816 wait_for_sigstop ();
3817 stopping_threads = NOT_STOPPING_THREADS;
3818
3819 threads_debug_printf ("setting stopping_threads back to !stopping");
3820 }
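
/* Compiled-out sketch of the stop-all protocol above (names
   hypothetical): send SIGSTOP to every LWP, then wait until each
   reports a stop.  The real code leaves the collected events pending
   for later processing rather than discarding them, skips LWPs that
   are already stopped, and tolerates LWPs exiting mid-way.  */
#if 0
static void
example_stop_all (const pid_t *lwpids, size_t n)
{
  for (size_t i = 0; i < n; i++)
    syscall (SYS_tkill, lwpids[i], SIGSTOP);
  for (size_t i = 0; i < n; i++)
    {
      int wstat;
      waitpid (lwpids[i], &wstat, __WALL);	/* Expect WIFSTOPPED.  */
    }
}
#endif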
3821
3822 /* Enqueue one signal in the chain of signals which need to be
3823 delivered to this process on next resume. */
3824
3825 static void
3826 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3827 {
3828 lwp->pending_signals.emplace_back (signal);
3829 if (info == nullptr)
3830 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3831 else
3832 lwp->pending_signals.back ().info = *info;
3833 }
3834
3835 void
3836 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3837 {
3838 struct thread_info *thread = get_lwp_thread (lwp);
3839 struct regcache *regcache = get_thread_regcache (thread, 1);
3840
3841 scoped_restore_current_thread restore_thread;
3842
3843 switch_to_thread (thread);
3844 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3845
3846 for (CORE_ADDR pc : next_pcs)
3847 set_single_step_breakpoint (pc, current_ptid);
3848 }
3849
3850 int
3851 linux_process_target::single_step (lwp_info* lwp)
3852 {
3853 int step = 0;
3854
3855 if (supports_hardware_single_step ())
3856 {
3857 step = 1;
3858 }
3859 else if (supports_software_single_step ())
3860 {
3861 install_software_single_step_breakpoints (lwp);
3862 step = 0;
3863 }
3864 else
3865 threads_debug_printf ("stepping is not implemented on this target");
3866
3867 return step;
3868 }
3869
3870 /* The signal can be delivered to the inferior if we are not trying to
3871 finish a fast tracepoint collect. Since a signal can be delivered
3872 during the step-over, the program may enter the signal handler and
3873 trap again after returning from it. We can live with the spurious
3874 double traps. */
3875
3876 static int
3877 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3878 {
3879 return (lwp->collecting_fast_tracepoint
3880 == fast_tpoint_collect_result::not_collecting);
3881 }
3882
3883 void
3884 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3885 int signal, siginfo_t *info)
3886 {
3887 struct thread_info *thread = get_lwp_thread (lwp);
3888 int ptrace_request;
3889 struct process_info *proc = get_thread_process (thread);
3890
3891 /* Note that the target description may not be initialised
3892 (proc->tdesc == NULL) at this point because the program hasn't
3893 stopped at the first instruction yet. That is the case while
3894 GDBserver is skipping the extra traps from the wrapper program (see
3895 option --wrapper). Code in this function that requires register
3896 access should be guarded by a check of proc->tdesc, or similar. */
3897
3898 if (lwp->stopped == 0)
3899 return;
3900
3901 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3902
3903 fast_tpoint_collect_result fast_tp_collecting
3904 = lwp->collecting_fast_tracepoint;
3905
3906 gdb_assert (!stabilizing_threads
3907 || (fast_tp_collecting
3908 != fast_tpoint_collect_result::not_collecting));
3909
3910 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3911 user used the "jump" command, or "set $pc = foo"). */
3912 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3913 {
3914 /* Collecting 'while-stepping' actions doesn't make sense
3915 anymore. */
3916 release_while_stepping_state_list (thread);
3917 }
3918
3919 /* If we have pending signals or status, and a new signal, enqueue the
3920 signal. Also enqueue the signal if it can't be delivered to the
3921 inferior right now. */
3922 if (signal != 0
3923 && (lwp->status_pending_p
3924 || !lwp->pending_signals.empty ()
3925 || !lwp_signal_can_be_delivered (lwp)))
3926 {
3927 enqueue_pending_signal (lwp, signal, info);
3928
3929 /* Postpone any pending signal. It was enqueued above. */
3930 signal = 0;
3931 }
3932
3933 if (lwp->status_pending_p)
3934 {
3935 threads_debug_printf
3936 ("Not resuming lwp %ld (%s, stop %s); has pending status",
3937 lwpid_of (thread), step ? "step" : "continue",
3938 lwp->stop_expected ? "expected" : "not expected");
3939 return;
3940 }
3941
3942 scoped_restore_current_thread restore_thread;
3943 switch_to_thread (thread);
3944
3945 /* This bit needs some thinking about. If we get a signal that
3946 we must report while a single-step reinsert is still pending,
3947 we often end up resuming the thread. It might be better to
3948 (ew) allow a stack of pending events; then we could be sure that
3949 the reinsert happened right away and not lose any signals.
3950
3951 Making this stack would also shrink the window in which breakpoints are
3952 uninserted (see comment in linux_wait_for_lwp) but not enough for
3953 complete correctness, so it won't solve that problem. It may be
3954 worthwhile just to solve this one, however. */
3955 if (lwp->bp_reinsert != 0)
3956 {
3957 threads_debug_printf (" pending reinsert at 0x%s",
3958 paddress (lwp->bp_reinsert));
3959
3960 if (supports_hardware_single_step ())
3961 {
3962 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
3963 {
3964 if (step == 0)
3965 warning ("BAD - reinserting but not stepping.");
3966 if (lwp->suspended)
3967 warning ("BAD - reinserting and suspended(%d).",
3968 lwp->suspended);
3969 }
3970 }
3971
3972 step = maybe_hw_step (thread);
3973 }
3974
3975 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
3976 threads_debug_printf
3977 ("lwp %ld wants to get out of fast tracepoint jump pad "
3978 "(exit-jump-pad-bkpt)", lwpid_of (thread));
3979
3980 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
3981 {
3982 threads_debug_printf
3983 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
3984 lwpid_of (thread));
3985
3986 if (supports_hardware_single_step ())
3987 step = 1;
3988 else
3989 {
3990 internal_error (__FILE__, __LINE__,
3991 "moving out of jump pad single-stepping"
3992 " not implemented on this target");
3993 }
3994 }
3995
3996 /* If we have while-stepping actions in this thread, set it stepping.
3997 If we have a signal to deliver, it may or may not be set to
3998 SIG_IGN; we don't know. Assume so, and allow collecting
3999 while-stepping into a signal handler. A possible smart thing to
4000 do would be to set an internal breakpoint at the signal return
4001 address, continue, and carry on catching this while-stepping
4002 action only when that breakpoint is hit. A future
4003 enhancement. */
4004 if (thread->while_stepping != NULL)
4005 {
4006 threads_debug_printf
4007 ("lwp %ld has a while-stepping action -> forcing step.",
4008 lwpid_of (thread));
4009
4010 step = single_step (lwp);
4011 }
4012
4013 if (proc->tdesc != NULL && low_supports_breakpoints ())
4014 {
4015 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4016
4017 lwp->stop_pc = low_get_pc (regcache);
4018
4019 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4020 (long) lwp->stop_pc);
4021 }
4022
4023 /* If we have pending signals, consume one if it can be delivered to
4024 the inferior. */
4025 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4026 {
4027 const pending_signal &p_sig = lwp->pending_signals.front ();
4028
4029 signal = p_sig.signal;
4030 if (p_sig.info.si_signo != 0)
4031 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4032 &p_sig.info);
4033
4034 lwp->pending_signals.pop_front ();
4035 }
4036
4037 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4038 lwpid_of (thread), step ? "step" : "continue", signal,
4039 lwp->stop_expected ? "expected" : "not expected");
4040
4041 low_prepare_to_resume (lwp);
4042
4043 regcache_invalidate_thread (thread);
4044 errno = 0;
4045 lwp->stepping = step;
4046 if (step)
4047 ptrace_request = PTRACE_SINGLESTEP;
4048 else if (gdb_catching_syscalls_p (lwp))
4049 ptrace_request = PTRACE_SYSCALL;
4050 else
4051 ptrace_request = PTRACE_CONT;
4052 ptrace (ptrace_request,
4053 lwpid_of (thread),
4054 (PTRACE_TYPE_ARG3) 0,
4055 /* Coerce to a uintptr_t first to avoid potential gcc warning
4056 of coercing an 8 byte integer to a 4 byte pointer. */
4057 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4058
4059 if (errno)
4060 perror_with_name ("resuming thread");
4061
4062 /* Successfully resumed. Clear state that no longer makes sense,
4063 and mark the LWP as running. Must not do this before resuming
4064 otherwise if that fails other code will be confused. E.g., we'd
4065 later try to stop the LWP and hang forever waiting for a stop
4066 status. Note that we must not throw after this is cleared,
4067 otherwise handle_zombie_lwp_error would get confused. */
4068 lwp->stopped = 0;
4069 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4070 }
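
/* Compiled-out sketch of the final ptrace dispatch above (helper name
   hypothetical): one of three requests resumes the LWP, and the
   fourth ptrace argument carries the signal to inject, or 0.  */
#if 0
static long
example_resume (pid_t lwpid, int step, int catch_syscalls, int sig)
{
  if (step)			/* Execute a single instruction.  */
    return ptrace (PTRACE_SINGLESTEP, lwpid, (void *) 0,
		   (void *) (uintptr_t) sig);
  if (catch_syscalls)		/* Also stop at syscall entry/exit.  */
    return ptrace (PTRACE_SYSCALL, lwpid, (void *) 0,
		   (void *) (uintptr_t) sig);
  return ptrace (PTRACE_CONT, lwpid, (void *) 0,
		 (void *) (uintptr_t) sig);
}
#endif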
4071
4072 void
4073 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4074 {
4075 /* Nop. */
4076 }
4077
4078 /* Called when we try to resume a stopped LWP and that errors out. If
4079 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4080 or about to become), discard the error, clear any pending status
4081 the LWP may have, and return true (we'll collect the exit status
4082 soon enough). Otherwise, return false. */
4083
4084 static int
4085 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4086 {
4087 struct thread_info *thread = get_lwp_thread (lp);
4088
4089 /* If we get an error after resuming the LWP successfully, we'd
4090 confuse !T state for the LWP being gone. */
4091 gdb_assert (lp->stopped);
4092
4093 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4094 because even if ptrace failed with ESRCH, the tracee may be "not
4095 yet fully dead", but already refusing ptrace requests. In that
4096 case the tracee has 'R (Running)' state for a little bit
4097 (observed in Linux 3.18). See also the note on ESRCH in the
4098 ptrace(2) man page. Instead, check whether the LWP has any state
4099 other than ptrace-stopped. */
4100
4101 /* Don't assume anything if /proc/PID/status can't be read. */
4102 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4103 {
4104 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4105 lp->status_pending_p = 0;
4106 return 1;
4107 }
4108 return 0;
4109 }
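
/* Compiled-out sketch of the /proc test relied on above (helper name
   hypothetical): read /proc/PID/status and check whether the "State:"
   line reports a tracing stop ('t', or 'T' on older kernels).  The
   real check lives in linux_proc_pid_is_trace_stopped_nowarn.  */
#if 0
static int
example_is_trace_stopped (pid_t pid)
{
  char path[64], line[128];
  snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
  FILE *f = fopen (path, "r");
  if (f == NULL)
    return -1;			/* Can't tell; assume nothing.  */
  int stopped = 0;
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;
	while (*p == ' ' || *p == '\t')
	  p++;
	stopped = (*p == 't' || *p == 'T');
	break;
      }
  fclose (f);
  return stopped;
}
#endif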
4110
4111 void
4112 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4113 siginfo_t *info)
4114 {
4115 try
4116 {
4117 resume_one_lwp_throw (lwp, step, signal, info);
4118 }
4119 catch (const gdb_exception_error &ex)
4120 {
4121 if (!check_ptrace_stopped_lwp_gone (lwp))
4122 throw;
4123 }
4124 }
4125
4126 /* This function is called once per thread via for_each_thread.
4127 We look up which resume request applies to THREAD and mark it with a
4128 pointer to the appropriate resume request.
4129
4130 This algorithm is O(threads * resume elements), but resume elements
4131 is small (and will remain small at least until GDB supports thread
4132 suspension). */
4133
4134 static void
4135 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4136 {
4137 struct lwp_info *lwp = get_thread_lwp (thread);
4138
4139 for (int ndx = 0; ndx < n; ndx++)
4140 {
4141 ptid_t ptid = resume[ndx].thread;
4142 if (ptid == minus_one_ptid
4143 || ptid == thread->id
4144 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4145 of PID'. */
4146 || (ptid.pid () == pid_of (thread)
4147 && (ptid.is_pid ()
4148 || ptid.lwp () == -1)))
4149 {
4150 if (resume[ndx].kind == resume_stop
4151 && thread->last_resume_kind == resume_stop)
4152 {
4153 threads_debug_printf
4154 ("already %s LWP %ld at GDB's request",
4155 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4156 ? "stopped" : "stopping"),
4157 lwpid_of (thread));
4158
4159 continue;
4160 }
4161
4162 /* Ignore (wildcard) resume requests for already-resumed
4163 threads. */
4164 if (resume[ndx].kind != resume_stop
4165 && thread->last_resume_kind != resume_stop)
4166 {
4167 threads_debug_printf
4168 ("already %s LWP %ld at GDB's request",
4169 (thread->last_resume_kind == resume_step
4170 ? "stepping" : "continuing"),
4171 lwpid_of (thread));
4172 continue;
4173 }
4174
4175 /* Don't let wildcard resumes resume fork children that GDB
4176 does not yet know about. */
4177 if (lwp->fork_relative != NULL)
4178 {
4179 struct lwp_info *rel = lwp->fork_relative;
4180
4181 if (rel->status_pending_p
4182 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4183 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4184 {
4185 threads_debug_printf
4186 ("not resuming LWP %ld: has queued stop reply",
4187 lwpid_of (thread));
4188 continue;
4189 }
4190 }
4191
4192 /* If the thread has a pending event that has already been
4193 reported to GDBserver core, but GDB has not pulled the
4194 event out of the vStopped queue yet, likewise, ignore the
4195 (wildcard) resume request. */
4196 if (in_queued_stop_replies (thread->id))
4197 {
4198 threads_debug_printf
4199 ("not resuming LWP %ld: has queued stop reply",
4200 lwpid_of (thread));
4201 continue;
4202 }
4203
4204 lwp->resume = &resume[ndx];
4205 thread->last_resume_kind = lwp->resume->kind;
4206
4207 lwp->step_range_start = lwp->resume->step_range_start;
4208 lwp->step_range_end = lwp->resume->step_range_end;
4209
4210 /* If we had a deferred signal to report, dequeue one now.
4211 This can happen if LWP gets more than one signal while
4212 trying to get out of a jump pad. */
4213 if (lwp->stopped
4214 && !lwp->status_pending_p
4215 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4216 {
4217 lwp->status_pending_p = 1;
4218
4219 threads_debug_printf
4220 ("Dequeueing deferred signal %d for LWP %ld, "
4221 "leaving status pending.",
4222 WSTOPSIG (lwp->status_pending),
4223 lwpid_of (thread));
4224 }
4225
4226 return;
4227 }
4228 }
4229
4230 /* No resume action for this thread. */
4231 lwp->resume = NULL;
4232 }
4233
4234 bool
4235 linux_process_target::resume_status_pending (thread_info *thread)
4236 {
4237 struct lwp_info *lwp = get_thread_lwp (thread);
4238
4239 /* LWPs which will not be resumed are not interesting, because
4240 we might not wait for them next time through linux_wait. */
4241 if (lwp->resume == NULL)
4242 return false;
4243
4244 return thread_still_has_status_pending (thread);
4245 }
4246
4247 bool
4248 linux_process_target::thread_needs_step_over (thread_info *thread)
4249 {
4250 struct lwp_info *lwp = get_thread_lwp (thread);
4251 CORE_ADDR pc;
4252 struct process_info *proc = get_thread_process (thread);
4253
4254 /* GDBserver is skipping the extra traps from the wrapper program,
4255 don't have to do step over. */
4256 if (proc->tdesc == NULL)
4257 return false;
4258
4259 /* LWPs which will not be resumed are not interesting, because we
4260 might not wait for them next time through linux_wait. */
4261
4262 if (!lwp->stopped)
4263 {
4264 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4265 lwpid_of (thread));
4266 return false;
4267 }
4268
4269 if (thread->last_resume_kind == resume_stop)
4270 {
4271 threads_debug_printf
4272 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4273 lwpid_of (thread));
4274 return false;
4275 }
4276
4277 gdb_assert (lwp->suspended >= 0);
4278
4279 if (lwp->suspended)
4280 {
4281 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4282 lwpid_of (thread));
4283 return false;
4284 }
4285
4286 if (lwp->status_pending_p)
4287 {
4288 threads_debug_printf
4289 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4290 lwpid_of (thread));
4291 return false;
4292 }
4293
4294 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4295 or we have. */
4296 pc = get_pc (lwp);
4297
4298 /* If the PC has changed since we stopped, then don't do anything,
4299 and let the breakpoint/tracepoint be hit. This happens if, for
4300 instance, GDB handled the decr_pc_after_break subtraction itself,
4301 GDB is out-of-line (OOL) stepping this thread, or the user has
4302 issued a "jump" command, or poked the thread's registers herself. */
4303 if (pc != lwp->stop_pc)
4304 {
4305 threads_debug_printf
4306 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4307 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4308 paddress (lwp->stop_pc), paddress (pc));
4309 return false;
4310 }
4311
4312 /* On software single step target, resume the inferior with signal
4313 rather than stepping over. */
4314 if (supports_software_single_step ()
4315 && !lwp->pending_signals.empty ()
4316 && lwp_signal_can_be_delivered (lwp))
4317 {
4318 threads_debug_printf
4319 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4320 lwpid_of (thread));
4321
4322 return false;
4323 }
4324
4325 scoped_restore_current_thread restore_thread;
4326 switch_to_thread (thread);
4327
4328 /* We can only step over breakpoints we know about. */
4329 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4330 {
4331 /* Don't step over a breakpoint that GDB expects to hit
4332 though. If the condition is being evaluated on the target's side
4333 and it evaluates to false, step over this breakpoint as well. */
4334 if (gdb_breakpoint_here (pc)
4335 && gdb_condition_true_at_breakpoint (pc)
4336 && gdb_no_commands_at_breakpoint (pc))
4337 {
4338 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4339 " GDB breakpoint at 0x%s; skipping step over",
4340 lwpid_of (thread), paddress (pc));
4341
4342 return false;
4343 }
4344 else
4345 {
4346 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4347 "found breakpoint at 0x%s",
4348 lwpid_of (thread), paddress (pc));
4349
4350 /* We've found an LWP that needs stepping over --- return true so
4351 that find_thread stops looking. */
4352 return true;
4353 }
4354 }
4355
4356 threads_debug_printf
4357 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4358 lwpid_of (thread), paddress (pc));
4359
4360 return false;
4361 }
4362
4363 void
4364 linux_process_target::start_step_over (lwp_info *lwp)
4365 {
4366 struct thread_info *thread = get_lwp_thread (lwp);
4367 CORE_ADDR pc;
4368
4369 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4370 lwpid_of (thread));
4371
4372 stop_all_lwps (1, lwp);
4373
4374 if (lwp->suspended != 0)
4375 {
4376 internal_error (__FILE__, __LINE__,
4377 "LWP %ld suspended=%d\n", lwpid_of (thread),
4378 lwp->suspended);
4379 }
4380
4381 threads_debug_printf ("Done stopping all threads for step-over.");
4382
4383 /* Note, we should always reach here with an already adjusted PC,
4384 either by GDB (if we're resuming due to GDB's request), or by our
4385 caller, if we just finished handling an internal breakpoint GDB
4386 shouldn't care about. */
4387 pc = get_pc (lwp);
4388
4389 bool step = false;
4390 {
4391 scoped_restore_current_thread restore_thread;
4392 switch_to_thread (thread);
4393
4394 lwp->bp_reinsert = pc;
4395 uninsert_breakpoints_at (pc);
4396 uninsert_fast_tracepoint_jumps_at (pc);
4397
4398 step = single_step (lwp);
4399 }
4400
4401 resume_one_lwp (lwp, step, 0, NULL);
4402
4403 /* Require next event from this LWP. */
4404 step_over_bkpt = thread->id;
4405 }
4406
4407 bool
4408 linux_process_target::finish_step_over (lwp_info *lwp)
4409 {
4410 if (lwp->bp_reinsert != 0)
4411 {
4412 scoped_restore_current_thread restore_thread;
4413
4414 threads_debug_printf ("Finished step over.");
4415
4416 switch_to_thread (get_lwp_thread (lwp));
4417
4418 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4419 may be no breakpoint to reinsert there by now. */
4420 reinsert_breakpoints_at (lwp->bp_reinsert);
4421 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4422
4423 lwp->bp_reinsert = 0;
4424
4425 /* Delete any single-step breakpoints. No longer needed. We
4426 don't have to worry about other threads hitting this trap,
4427 and later not being able to explain it, because we were
4428 stepping over a breakpoint, and we hold all threads but
4429 LWP stopped while doing that. */
4430 if (!supports_hardware_single_step ())
4431 {
4432 gdb_assert (has_single_step_breakpoints (current_thread));
4433 delete_single_step_breakpoints (current_thread);
4434 }
4435
4436 step_over_bkpt = null_ptid;
4437 return true;
4438 }
4439 else
4440 return false;
4441 }
4442
4443 void
4444 linux_process_target::complete_ongoing_step_over ()
4445 {
4446 if (step_over_bkpt != null_ptid)
4447 {
4448 struct lwp_info *lwp;
4449 int wstat;
4450 int ret;
4451
4452 threads_debug_printf ("detach: step over in progress, finish it first");
4453
4454 /* Passing NULL_PTID as filter indicates we want all events to
4455 be left pending. Eventually this returns when there are no
4456 unwaited-for children left. */
4457 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4458 __WALL);
4459 gdb_assert (ret == -1);
4460
4461 lwp = find_lwp_pid (step_over_bkpt);
4462 if (lwp != NULL)
4463 {
4464 finish_step_over (lwp);
4465
4466 /* If we got our step SIGTRAP, don't leave it pending,
4467 otherwise we would report it to GDB as a spurious
4468 SIGTRAP. */
4469 gdb_assert (lwp->status_pending_p);
4470 if (WIFSTOPPED (lwp->status_pending)
4471 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4472 {
4473 thread_info *thread = get_lwp_thread (lwp);
4474 if (thread->last_resume_kind != resume_step)
4475 {
4476 threads_debug_printf ("detach: discard step-over SIGTRAP");
4477
4478 lwp->status_pending_p = 0;
4479 lwp->status_pending = 0;
4480 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4481 }
4482 else
4483 threads_debug_printf
4484 ("detach: resume_step, not discarding step-over SIGTRAP");
4485 }
4486 }
4487 step_over_bkpt = null_ptid;
4488 unsuspend_all_lwps (lwp);
4489 }
4490 }
4491
4492 void
4493 linux_process_target::resume_one_thread (thread_info *thread,
4494 bool leave_all_stopped)
4495 {
4496 struct lwp_info *lwp = get_thread_lwp (thread);
4497 int leave_pending;
4498
4499 if (lwp->resume == NULL)
4500 return;
4501
4502 if (lwp->resume->kind == resume_stop)
4503 {
4504 threads_debug_printf ("resume_stop request for LWP %ld",
4505 lwpid_of (thread));
4506
4507 if (!lwp->stopped)
4508 {
4509 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4510
4511 /* Stop the thread, and wait for the event asynchronously,
4512 through the event loop. */
4513 send_sigstop (lwp);
4514 }
4515 else
4516 {
4517 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4518
4519 /* The LWP may have been stopped in an internal event that
4520 was not meant to be notified back to GDB (e.g., gdbserver
4521 breakpoint), so we should be reporting a stop event in
4522 this case too. */
4523
4524 /* If the thread already has a pending SIGSTOP, this is a
4525 no-op. Otherwise, something later will presumably resume
4526 the thread and this will cause it to cancel any pending
4527 operation, due to last_resume_kind == resume_stop. If
4528 the thread already has a pending status to report, we
4529 will still report it the next time we wait - see
4530 status_pending_p_callback. */
4531
4532 /* If we already have a pending signal to report, then
4533 there's no need to queue a SIGSTOP, as this means we're
4534 midway through moving the LWP out of the jumppad, and we
4535 will report the pending signal as soon as that is
4536 finished. */
4537 if (lwp->pending_signals_to_report.empty ())
4538 send_sigstop (lwp);
4539 }
4540
4541 /* For stop requests, we're done. */
4542 lwp->resume = NULL;
4543 thread->last_status.set_ignore ();
4544 return;
4545 }
4546
4547 /* If this thread which is about to be resumed has a pending status,
4548 then don't resume it - we can just report the pending status.
4549 Likewise if it is suspended, because e.g., another thread is
4550 stepping past a breakpoint. Make sure to queue any signals that
4551 would otherwise be sent. In all-stop mode, we make this decision
4552 based on whether *any* thread has a pending status. If there's a
4553 thread that needs the step-over-breakpoint dance, then don't
4554 resume any other thread but that particular one. */
4555 leave_pending = (lwp->suspended
4556 || lwp->status_pending_p
4557 || leave_all_stopped);
4558
4559 /* If we have a new signal, enqueue the signal. */
4560 if (lwp->resume->sig != 0)
4561 {
4562 siginfo_t info, *info_p;
4563
4564 /* If this is the same signal we were previously stopped by,
4565 make sure to queue its siginfo. */
4566 if (WIFSTOPPED (lwp->last_status)
4567 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4568 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4569 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4570 info_p = &info;
4571 else
4572 info_p = NULL;
4573
4574 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4575 }
4576
4577 if (!leave_pending)
4578 {
4579 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4580
4581 proceed_one_lwp (thread, NULL);
4582 }
4583 else
4584 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4585
4586 thread->last_status.set_ignore ();
4587 lwp->resume = NULL;
4588 }
4589
4590 void
4591 linux_process_target::resume (thread_resume *resume_info, size_t n)
4592 {
4593 struct thread_info *need_step_over = NULL;
4594
4595 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4596
4597 for_each_thread ([&] (thread_info *thread)
4598 {
4599 linux_set_resume_request (thread, resume_info, n);
4600 });
4601
4602 /* If there is a thread which would otherwise be resumed, which has
4603 a pending status, then don't resume any threads - we can just
4604 report the pending status. Make sure to queue any signals that
4605 would otherwise be sent. In non-stop mode, we'll apply this
4606 logic to each thread individually. We consume all pending events
4607 before considering to start a step-over (in all-stop). */
4608 bool any_pending = false;
4609 if (!non_stop)
4610 any_pending = find_thread ([this] (thread_info *thread)
4611 {
4612 return resume_status_pending (thread);
4613 }) != nullptr;
4614
4615 /* If there is a thread which would otherwise be resumed, which is
4616 stopped at a breakpoint that needs stepping over, then don't
4617 resume any threads - have it step over the breakpoint with all
4618 other threads stopped, then resume all threads again. Make sure
4619 to queue any signals that would otherwise be delivered or
4620 queued. */
4621 if (!any_pending && low_supports_breakpoints ())
4622 need_step_over = find_thread ([this] (thread_info *thread)
4623 {
4624 return thread_needs_step_over (thread);
4625 });
4626
4627 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4628
4629 if (need_step_over != NULL)
4630 threads_debug_printf ("Not resuming all, need step over");
4631 else if (any_pending)
4632 threads_debug_printf ("Not resuming, all-stop and found "
4633 "an LWP with pending status");
4634 else
4635 threads_debug_printf ("Resuming, no pending status or step over needed");
4636
4637 /* Even if we're leaving threads stopped, queue all signals we'd
4638 otherwise deliver. */
4639 for_each_thread ([&] (thread_info *thread)
4640 {
4641 resume_one_thread (thread, leave_all_stopped);
4642 });
4643
4644 if (need_step_over)
4645 start_step_over (get_thread_lwp (need_step_over));
4646
4647 /* We may have events that were pending that can/should be sent to
4648 the client now. Trigger a linux_wait call. */
4649 if (target_is_async_p ())
4650 async_file_mark ();
4651 }
4652
4653 void
4654 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4655 {
4656 struct lwp_info *lwp = get_thread_lwp (thread);
4657 int step;
4658
4659 if (lwp == except)
4660 return;
4661
4662 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4663
4664 if (!lwp->stopped)
4665 {
4666 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4667 return;
4668 }
4669
4670 if (thread->last_resume_kind == resume_stop
4671 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4672 {
4673 threads_debug_printf (" client wants LWP %ld to remain stopped",
4674 lwpid_of (thread));
4675 return;
4676 }
4677
4678 if (lwp->status_pending_p)
4679 {
4680 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4681 lwpid_of (thread));
4682 return;
4683 }
4684
4685 gdb_assert (lwp->suspended >= 0);
4686
4687 if (lwp->suspended)
4688 {
4689 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4690 return;
4691 }
4692
4693 if (thread->last_resume_kind == resume_stop
4694 && lwp->pending_signals_to_report.empty ()
4695 && (lwp->collecting_fast_tracepoint
4696 == fast_tpoint_collect_result::not_collecting))
4697 {
4698 /* We haven't reported this LWP as stopped yet (otherwise, the
4699 last_status.kind check above would catch it, and we wouldn't
4700 reach here). This LWP may have been momentarily paused by a
4701 stop_all_lwps call while handling for example, another LWP's
4702 step-over. In that case, the pending expected SIGSTOP signal
4703 that was queued at vCont;t handling time will have already
4704 been consumed by wait_for_sigstop, and so we need to requeue
4705 another one here. Note that if the LWP already has a SIGSTOP
4706 pending, this is a no-op. */
4707
4708 threads_debug_printf
4709 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4710 lwpid_of (thread));
4711
4712 send_sigstop (lwp);
4713 }
4714
4715 if (thread->last_resume_kind == resume_step)
4716 {
4717 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4718 lwpid_of (thread));
4719
4720 /* If resume_step is requested by GDB, install single-step
4721 breakpoints when the thread is about to be actually resumed if
4722 the single-step breakpoints weren't removed. */
4723 if (supports_software_single_step ()
4724 && !has_single_step_breakpoints (thread))
4725 install_software_single_step_breakpoints (lwp);
4726
4727 step = maybe_hw_step (thread);
4728 }
4729 else if (lwp->bp_reinsert != 0)
4730 {
4731 threads_debug_printf (" stepping LWP %ld, reinsert set",
4732 lwpid_of (thread));
4733
4734 step = maybe_hw_step (thread);
4735 }
4736 else
4737 step = 0;
4738
4739 resume_one_lwp (lwp, step, 0, NULL);
4740 }
4741
4742 void
4743 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4744 lwp_info *except)
4745 {
4746 struct lwp_info *lwp = get_thread_lwp (thread);
4747
4748 if (lwp == except)
4749 return;
4750
4751 lwp_suspended_decr (lwp);
4752
4753 proceed_one_lwp (thread, except);
4754 }
4755
4756 void
4757 linux_process_target::proceed_all_lwps ()
4758 {
4759 struct thread_info *need_step_over;
4760
4761 /* If there is a thread which would otherwise be resumed, which is
4762 stopped at a breakpoint that needs stepping over, then don't
4763 resume any threads - have it step over the breakpoint with all
4764 other threads stopped, then resume all threads again. */
4765
4766 if (low_supports_breakpoints ())
4767 {
4768 need_step_over = find_thread ([this] (thread_info *thread)
4769 {
4770 return thread_needs_step_over (thread);
4771 });
4772
4773 if (need_step_over != NULL)
4774 {
4775 threads_debug_printf ("found thread %ld needing a step-over",
4776 lwpid_of (need_step_over));
4777
4778 start_step_over (get_thread_lwp (need_step_over));
4779 return;
4780 }
4781 }
4782
4783 threads_debug_printf ("Proceeding, no step-over needed");
4784
4785 for_each_thread ([this] (thread_info *thread)
4786 {
4787 proceed_one_lwp (thread, NULL);
4788 });
4789 }
4790
4791 void
4792 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4793 {
4794 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4795
4796 if (except)
4797 threads_debug_printf ("except=(LWP %ld)",
4798 lwpid_of (get_lwp_thread (except)));
4799 else
4800 threads_debug_printf ("except=nullptr");
4801
4802 if (unsuspend)
4803 for_each_thread ([&] (thread_info *thread)
4804 {
4805 unsuspend_and_proceed_one_lwp (thread, except);
4806 });
4807 else
4808 for_each_thread ([&] (thread_info *thread)
4809 {
4810 proceed_one_lwp (thread, except);
4811 });
4812 }
4813
4814
4815 #ifdef HAVE_LINUX_REGSETS
4816
4817 #define use_linux_regsets 1
4818
4819 /* Returns true if REGSET has been disabled. */
4820
4821 static int
4822 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4823 {
4824 return (info->disabled_regsets != NULL
4825 && info->disabled_regsets[regset - info->regsets]);
4826 }
4827
4828 /* Disable REGSET. */
4829
4830 static void
4831 disable_regset (struct regsets_info *info, struct regset_info *regset)
4832 {
4833 int dr_offset;
4834
4835 dr_offset = regset - info->regsets;
4836 if (info->disabled_regsets == NULL)
4837 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4838 info->disabled_regsets[dr_offset] = 1;
4839 }
4840
4841 static int
4842 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4843 struct regcache *regcache)
4844 {
4845 struct regset_info *regset;
4846 int saw_general_regs = 0;
4847 int pid;
4848 struct iovec iov;
4849
4850 pid = lwpid_of (current_thread);
4851 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4852 {
4853 void *buf, *data;
4854 int nt_type, res;
4855
4856 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4857 continue;
4858
4859 buf = xmalloc (regset->size);
4860
4861 nt_type = regset->nt_type;
4862 if (nt_type)
4863 {
4864 iov.iov_base = buf;
4865 iov.iov_len = regset->size;
4866 data = (void *) &iov;
4867 }
4868 else
4869 data = buf;
4870
4871 #ifndef __sparc__
4872 res = ptrace (regset->get_request, pid,
4873 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4874 #else
4875 res = ptrace (regset->get_request, pid, data, nt_type);
4876 #endif
4877 if (res < 0)
4878 {
4879 if (errno == EIO
4880 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4881 {
4882 /* If we get EIO on a regset, or an EINVAL and the regset is
4883 optional, do not try it again for this process mode. */
4884 disable_regset (regsets_info, regset);
4885 }
4886 else if (errno == ENODATA)
4887 {
4888 /* ENODATA may be returned if the regset is currently
4889 not "active". This can happen in normal operation,
4890 so suppress the warning in this case. */
4891 }
4892 else if (errno == ESRCH)
4893 {
4894 /* At this point, ESRCH should mean the process is
4895 already gone, in which case we simply ignore attempts
4896 to read its registers. */
4897 }
4898 else
4899 {
4900 char s[256];
4901 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4902 pid);
4903 perror (s);
4904 }
4905 }
4906 else
4907 {
4908 if (regset->type == GENERAL_REGS)
4909 saw_general_regs = 1;
4910 regset->store_function (regcache, buf);
4911 }
4912 free (buf);
4913 }
4914 if (saw_general_regs)
4915 return 0;
4916 else
4917 return 1;
4918 }
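
/* Compiled-out sketch of the regset transfer style used above on
   non-SPARC (helper name hypothetical): for NT_*-typed regsets the
   kernel expects a struct iovec, and updates iov_len to the number of
   bytes it actually wrote.  NT_PRSTATUS selects the general-purpose
   registers.  */
#if 0
static long
example_fetch_gregs (pid_t lwpid, void *buf, size_t size)
{
  struct iovec iov;
  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, lwpid,
		 (void *) (uintptr_t) NT_PRSTATUS, &iov);
}
#endif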
4919
4920 static int
4921 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4922 struct regcache *regcache)
4923 {
4924 struct regset_info *regset;
4925 int saw_general_regs = 0;
4926 int pid;
4927 struct iovec iov;
4928
4929 pid = lwpid_of (current_thread);
4930 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4931 {
4932 void *buf, *data;
4933 int nt_type, res;
4934
4935 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4936 || regset->fill_function == NULL)
4937 continue;
4938
4939 buf = xmalloc (regset->size);
4940
4941 /* First fill the buffer with the current register set contents,
4942 in case there are any items in the kernel's regset that are
4943 not in gdbserver's regcache. */
4944
4945 nt_type = regset->nt_type;
4946 if (nt_type)
4947 {
4948 iov.iov_base = buf;
4949 iov.iov_len = regset->size;
4950 data = (void *) &iov;
4951 }
4952 else
4953 data = buf;
4954
4955 #ifndef __sparc__
4956 res = ptrace (regset->get_request, pid,
4957 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4958 #else
4959 res = ptrace (regset->get_request, pid, data, nt_type);
4960 #endif
4961
4962 if (res == 0)
4963 {
4964 /* Then overlay our cached registers on that. */
4965 regset->fill_function (regcache, buf);
4966
4967 /* Only now do we write the register set. */
4968 #ifndef __sparc__
4969 res = ptrace (regset->set_request, pid,
4970 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4971 #else
4972 res = ptrace (regset->set_request, pid, data, nt_type);
4973 #endif
4974 }
4975
4976 if (res < 0)
4977 {
4978 if (errno == EIO
4979 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4980 {
4981 /* If we get EIO on a regset, or an EINVAL and the regset is
4982 optional, do not try it again for this process mode. */
4983 disable_regset (regsets_info, regset);
4984 }
4985 else if (errno == ESRCH)
4986 {
4987 /* At this point, ESRCH should mean the process is
4988 already gone, in which case we simply ignore attempts
4989 to change its registers. See also the related
4990 comment in resume_one_lwp. */
4991 free (buf);
4992 return 0;
4993 }
4994 else
4995 {
4996 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4997 }
4998 }
4999 else if (regset->type == GENERAL_REGS)
5000 saw_general_regs = 1;
5001 free (buf);
5002 }
5003 if (saw_general_regs)
5004 return 0;
5005 else
5006 return 1;
5007 }
5008
5009 #else /* !HAVE_LINUX_REGSETS */
5010
5011 #define use_linux_regsets 0
5012 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5013 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5014
5015 #endif
5016
5017 /* Return 1 if register REGNO is supported by one of the regset ptrace
5018 calls or 0 if it has to be transferred individually. */
5019
5020 static int
5021 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5022 {
5023 unsigned char mask = 1 << (regno % 8);
5024 size_t index = regno / 8;
5025
5026 return (use_linux_regsets
5027 && (regs_info->regset_bitmap == NULL
5028 || (regs_info->regset_bitmap[index] & mask) != 0));
5029 }
5030
5031 #ifdef HAVE_LINUX_USRREGS
5032
5033 static int
5034 register_addr (const struct usrregs_info *usrregs, int regnum)
5035 {
5036 int addr;
5037
5038 if (regnum < 0 || regnum >= usrregs->num_regs)
5039 error ("Invalid register number %d.", regnum);
5040
5041 addr = usrregs->regmap[regnum];
5042
5043 return addr;
5044 }
5045
5046
5047 void
5048 linux_process_target::fetch_register (const usrregs_info *usrregs,
5049 regcache *regcache, int regno)
5050 {
5051 CORE_ADDR regaddr;
5052 int i, size;
5053 char *buf;
5054 int pid;
5055
5056 if (regno >= usrregs->num_regs)
5057 return;
5058 if (low_cannot_fetch_register (regno))
5059 return;
5060
5061 regaddr = register_addr (usrregs, regno);
5062 if (regaddr == -1)
5063 return;
5064
5065 size = ((register_size (regcache->tdesc, regno)
5066 + sizeof (PTRACE_XFER_TYPE) - 1)
5067 & -sizeof (PTRACE_XFER_TYPE));
5068 buf = (char *) alloca (size);
5069
5070 pid = lwpid_of (current_thread);
5071 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5072 {
5073 errno = 0;
5074 *(PTRACE_XFER_TYPE *) (buf + i) =
5075 ptrace (PTRACE_PEEKUSER, pid,
5076 /* Coerce to a uintptr_t first to avoid potential gcc warning
5077 of coercing an 8 byte integer to a 4 byte pointer. */
5078 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5079 regaddr += sizeof (PTRACE_XFER_TYPE);
5080 if (errno != 0)
5081 {
5082 /* Mark register REGNO unavailable. */
5083 supply_register (regcache, regno, NULL);
5084 return;
5085 }
5086 }
5087
5088 low_supply_ptrace_register (regcache, regno, buf);
5089 }
5090
5091 void
5092 linux_process_target::store_register (const usrregs_info *usrregs,
5093 regcache *regcache, int regno)
5094 {
5095 CORE_ADDR regaddr;
5096 int i, size;
5097 char *buf;
5098 int pid;
5099
5100 if (regno >= usrregs->num_regs)
5101 return;
5102 if (low_cannot_store_register (regno))
5103 return;
5104
5105 regaddr = register_addr (usrregs, regno);
5106 if (regaddr == -1)
5107 return;
5108
5109 size = ((register_size (regcache->tdesc, regno)
5110 + sizeof (PTRACE_XFER_TYPE) - 1)
5111 & -sizeof (PTRACE_XFER_TYPE));
5112 buf = (char *) alloca (size);
5113 memset (buf, 0, size);
5114
5115 low_collect_ptrace_register (regcache, regno, buf);
5116
5117 pid = lwpid_of (current_thread);
5118 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5119 {
5120 errno = 0;
5121 ptrace (PTRACE_POKEUSER, pid,
5122 /* Coerce to a uintptr_t first to avoid potential gcc warning
5123 about coercing an 8 byte integer to a 4 byte pointer. */
5124 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5125 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5126 if (errno != 0)
5127 {
5128 /* At this point, ESRCH should mean the process is
5129 already gone, in which case we simply ignore attempts
5130 to change its registers. See also the related
5131 comment in resume_one_lwp. */
5132 if (errno == ESRCH)
5133 return;
5134
5136 if (!low_cannot_store_register (regno))
5137 error ("writing register %d: %s", regno, safe_strerror (errno));
5138 }
5139 regaddr += sizeof (PTRACE_XFER_TYPE);
5140 }
5141 }
5142 #endif /* HAVE_LINUX_USRREGS */
5143
5144 void
5145 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5146 int regno, char *buf)
5147 {
5148 collect_register (regcache, regno, buf);
5149 }
5150
5151 void
5152 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5153 int regno, const char *buf)
5154 {
5155 supply_register (regcache, regno, buf);
5156 }
5157
5158 void
5159 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5160 regcache *regcache,
5161 int regno, int all)
5162 {
5163 #ifdef HAVE_LINUX_USRREGS
5164 struct usrregs_info *usr = regs_info->usrregs;
5165
5166 if (regno == -1)
5167 {
5168 for (regno = 0; regno < usr->num_regs; regno++)
5169 if (all || !linux_register_in_regsets (regs_info, regno))
5170 fetch_register (usr, regcache, regno);
5171 }
5172 else
5173 fetch_register (usr, regcache, regno);
5174 #endif
5175 }
5176
5177 void
5178 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5179 regcache *regcache,
5180 int regno, int all)
5181 {
5182 #ifdef HAVE_LINUX_USRREGS
5183 struct usrregs_info *usr = regs_info->usrregs;
5184
5185 if (regno == -1)
5186 {
5187 for (regno = 0; regno < usr->num_regs; regno++)
5188 if (all || !linux_register_in_regsets (regs_info, regno))
5189 store_register (usr, regcache, regno);
5190 }
5191 else
5192 store_register (usr, regcache, regno);
5193 #endif
5194 }
5195
5196 void
5197 linux_process_target::fetch_registers (regcache *regcache, int regno)
5198 {
5199 int use_regsets;
5200 int all = 0;
5201 const regs_info *regs_info = get_regs_info ();
5202
5203 if (regno == -1)
5204 {
5205 if (regs_info->usrregs != NULL)
5206 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5207 low_fetch_register (regcache, regno);
5208
5209 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5210 if (regs_info->usrregs != NULL)
5211 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5212 }
5213 else
5214 {
5215 if (low_fetch_register (regcache, regno))
5216 return;
5217
5218 use_regsets = linux_register_in_regsets (regs_info, regno);
5219 if (use_regsets)
5220 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5221 regcache);
5222 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5223 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5224 }
5225 }
5226
5227 void
5228 linux_process_target::store_registers (regcache *regcache, int regno)
5229 {
5230 int use_regsets;
5231 int all = 0;
5232 const regs_info *regs_info = get_regs_info ();
5233
5234 if (regno == -1)
5235 {
5236 all = regsets_store_inferior_registers (regs_info->regsets_info,
5237 regcache);
5238 if (regs_info->usrregs != NULL)
5239 usr_store_inferior_registers (regs_info, regcache, regno, all);
5240 }
5241 else
5242 {
5243 use_regsets = linux_register_in_regsets (regs_info, regno);
5244 if (use_regsets)
5245 all = regsets_store_inferior_registers (regs_info->regsets_info,
5246 regcache);
5247 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5248 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5249 }
5250 }
5251
5252 bool
5253 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5254 {
5255 return false;
5256 }
5257
5258 /* A wrapper for the read_memory target op. */
5259
5260 static int
5261 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5262 {
5263 return the_target->read_memory (memaddr, myaddr, len);
5264 }
5265
5266 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5267 to debugger memory starting at MYADDR. */
5268
5269 int
5270 linux_process_target::read_memory (CORE_ADDR memaddr,
5271 unsigned char *myaddr, int len)
5272 {
5273 int pid = lwpid_of (current_thread);
5274 PTRACE_XFER_TYPE *buffer;
5275 CORE_ADDR addr;
5276 int count;
5277 char filename[64];
5278 int i;
5279 int ret;
5280 int fd;
5281
5282 /* Try using /proc. Don't bother for one word. */
5283 if (len >= 3 * sizeof (long))
5284 {
5285 int bytes;
5286
5287 /* We could keep this file open and cache it - possibly one per
5288 thread. That requires some juggling, but is even faster. */
5289 sprintf (filename, "/proc/%d/mem", pid);
5290 fd = open (filename, O_RDONLY | O_LARGEFILE);
5291 if (fd == -1)
5292 goto no_proc;
5293
5294 /* If pread64 is available, use it. It's faster if the kernel
5295 supports it (only one syscall), and it's 64-bit safe even on
5296 32-bit platforms (for instance, SPARC debugging a SPARC64
5297 application). */
5298 #ifdef HAVE_PREAD64
5299 bytes = pread64 (fd, myaddr, len, memaddr);
5300 #else
5301 bytes = -1;
5302 if (lseek (fd, memaddr, SEEK_SET) != -1)
5303 bytes = read (fd, myaddr, len);
5304 #endif
5305
5306 close (fd);
5307 if (bytes == len)
5308 return 0;
5309
5310 /* Some data was read; we'll try to get the rest with ptrace. */
5311 if (bytes > 0)
5312 {
5313 memaddr += bytes;
5314 myaddr += bytes;
5315 len -= bytes;
5316 }
5317 }
5318
5319 no_proc:
5320 /* Round starting address down to longword boundary. */
5321 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5322 /* Round ending address up; get number of longwords that makes. */
5323 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5324 / sizeof (PTRACE_XFER_TYPE));
5325 /* Allocate buffer of that many longwords. */
5326 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5327
5328 /* Read all the longwords. */
5329 errno = 0;
5330 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5331 {
5332 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5333 about coercing an 8 byte integer to a 4 byte pointer. */
5334 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5335 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5336 (PTRACE_TYPE_ARG4) 0);
5337 if (errno)
5338 break;
5339 }
5340 ret = errno;
5341
5342 /* Copy appropriate bytes out of the buffer. */
5343 if (i > 0)
5344 {
5345 i *= sizeof (PTRACE_XFER_TYPE);
5346 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5347 memcpy (myaddr,
5348 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5349 i < len ? i : len);
5350 }
5351
5352 return ret;
5353 }
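
/* A related alternative, shown as a compiled-out sketch (helper name
   hypothetical): on Linux >= 3.2, process_vm_readv copies inferior
   memory in a single syscall, subject to the same ptrace access
   checks, and avoids the word-at-a-time PEEKTEXT fallback entirely.
   gdbserver's /proc/PID/mem path above plays the same role.  */
#if 0
static ssize_t
example_read_memory (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  struct iovec local = { buf, len };
  struct iovec remote = { (void *) addr, len };
  return process_vm_readv (pid, &local, 1, &remote, 1, 0);
}
#endif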
5354
5355 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5356 memory at MEMADDR. On failure (cannot write to the inferior)
5357 returns the value of errno. Always succeeds if LEN is zero. */
5358
5359 int
5360 linux_process_target::write_memory (CORE_ADDR memaddr,
5361 const unsigned char *myaddr, int len)
5362 {
5363 int i;
5364 /* Round starting address down to longword boundary. */
5365 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5366 /* Round ending address up; get number of longwords that makes. */
5367 int count
5368 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5369 / sizeof (PTRACE_XFER_TYPE);
5370
5371 /* Allocate buffer of that many longwords. */
5372 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5373
5374 int pid = lwpid_of (current_thread);
5375
5376 if (len == 0)
5377 {
5378 /* Zero length write always succeeds. */
5379 return 0;
5380 }
5381
5382 if (debug_threads)
5383 {
5384 /* Dump up to four bytes. */
5385 char str[4 * 2 + 1];
5386 char *p = str;
5387 int dump = len < 4 ? len : 4;
5388
5389 for (i = 0; i < dump; i++)
5390 {
5391 sprintf (p, "%02x", myaddr[i]);
5392 p += 2;
5393 }
5394 *p = '\0';
5395
5396 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5397 str, (long) memaddr, pid);
5398 }
5399
5400 /* Fill start and end extra bytes of buffer with existing memory data. */
5401
5402 errno = 0;
5403 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5404 about coercing an 8 byte integer to a 4 byte pointer. */
5405 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5406 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5407 (PTRACE_TYPE_ARG4) 0);
5408 if (errno)
5409 return errno;
5410
5411 if (count > 1)
5412 {
5413 errno = 0;
5414 buffer[count - 1]
5415 = ptrace (PTRACE_PEEKTEXT, pid,
5416 /* Coerce to a uintptr_t first to avoid potential gcc warning
5417 about coercing an 8 byte integer to a 4 byte pointer. */
5418 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5419 * sizeof (PTRACE_XFER_TYPE)),
5420 (PTRACE_TYPE_ARG4) 0);
5421 if (errno)
5422 return errno;
5423 }
5424
5425 /* Copy data to be written over corresponding part of buffer. */
5426
5427 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5428 myaddr, len);
5429
5430 /* Write the entire buffer. */
5431
5432 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5433 {
5434 errno = 0;
5435 ptrace (PTRACE_POKETEXT, pid,
5436 /* Coerce to a uintptr_t first to avoid potential gcc warning
5437 about coercing an 8 byte integer to a 4 byte pointer. */
5438 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5439 (PTRACE_TYPE_ARG4) buffer[i]);
5440 if (errno)
5441 return errno;
5442 }
5443
5444 return 0;
5445 }
5446
5447 void
5448 linux_process_target::look_up_symbols ()
5449 {
5450 #ifdef USE_THREAD_DB
5451 struct process_info *proc = current_process ();
5452
5453 if (proc->priv->thread_db != NULL)
5454 return;
5455
5456 thread_db_init ();
5457 #endif
5458 }
5459
5460 void
5461 linux_process_target::request_interrupt ()
5462 {
5463 /* Send a SIGINT to the process group. This acts just like the user
5464 typed a ^C on the controlling terminal. */
5465 ::kill (-signal_pid, SIGINT);
5466 }
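
/* For reference (compiled out; helper name hypothetical): a negative
   pid argument to kill(2) addresses the process group -pid, which is
   why the call above reaches every process in the inferior's group,
   just as a terminal-generated SIGINT would.  */
#if 0
static int
example_interrupt_group (pid_t group_leader)
{
  return kill (-group_leader, SIGINT);
}
#endif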
5467
5468 bool
5469 linux_process_target::supports_read_auxv ()
5470 {
5471 return true;
5472 }
5473
5474 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5475 to debugger memory starting at MYADDR. */
5476
5477 int
5478 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5479 unsigned int len)
5480 {
5481 char filename[PATH_MAX];
5482 int fd, n;
5483 int pid = lwpid_of (current_thread);
5484
5485 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5486
5487 fd = open (filename, O_RDONLY);
5488 if (fd < 0)
5489 return -1;
5490
5491 if (offset != (CORE_ADDR) 0
5492 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5493 n = -1;
5494 else
5495 n = read (fd, myaddr, len);
5496
5497 close (fd);
5498
5499 return n;
5500 }
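
/* Compiled-out sketch of consuming the bytes returned above (helper
   name hypothetical, assuming a 64-bit inferior): /proc/PID/auxv is
   an array of auxv entries terminated by AT_NULL, so e.g. AT_HWCAP
   can be looked up like this.  */
#if 0
static unsigned long
example_auxv_lookup (const Elf64_auxv_t *auxv, size_t nbytes,
		     unsigned long type)
{
  size_t n = nbytes / sizeof (auxv[0]);
  for (size_t i = 0; i < n && auxv[i].a_type != AT_NULL; i++)
    if (auxv[i].a_type == type)
      return auxv[i].a_un.a_val;
  return 0;
}
#endif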
5501
5502 int
5503 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5504 int size, raw_breakpoint *bp)
5505 {
5506 if (type == raw_bkpt_type_sw)
5507 return insert_memory_breakpoint (bp);
5508 else
5509 return low_insert_point (type, addr, size, bp);
5510 }
5511
5512 int
5513 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5514 int size, raw_breakpoint *bp)
5515 {
5516 /* Unsupported (see target.h). */
5517 return 1;
5518 }
5519
5520 int
5521 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5522 int size, raw_breakpoint *bp)
5523 {
5524 if (type == raw_bkpt_type_sw)
5525 return remove_memory_breakpoint (bp);
5526 else
5527 return low_remove_point (type, addr, size, bp);
5528 }
5529
5530 int
5531 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5532 int size, raw_breakpoint *bp)
5533 {
5534 /* Unsupported (see target.h). */
5535 return 1;
5536 }
5537
5538 /* Implement the stopped_by_sw_breakpoint target_ops
5539 method. */
5540
5541 bool
5542 linux_process_target::stopped_by_sw_breakpoint ()
5543 {
5544 struct lwp_info *lwp = get_thread_lwp (current_thread);
5545
5546 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5547 }
5548
5549 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5550 method. */
5551
5552 bool
5553 linux_process_target::supports_stopped_by_sw_breakpoint ()
5554 {
5555 return USE_SIGTRAP_SIGINFO;
5556 }
5557
5558 /* Implement the stopped_by_hw_breakpoint target_ops
5559 method. */
5560
5561 bool
5562 linux_process_target::stopped_by_hw_breakpoint ()
5563 {
5564 struct lwp_info *lwp = get_thread_lwp (current_thread);
5565
5566 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5567 }
5568
5569 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5570 method. */
5571
5572 bool
5573 linux_process_target::supports_stopped_by_hw_breakpoint ()
5574 {
5575 return USE_SIGTRAP_SIGINFO;
5576 }
5577
5578 /* Implement the supports_hardware_single_step target_ops method. */
5579
5580 bool
5581 linux_process_target::supports_hardware_single_step ()
5582 {
5583 return true;
5584 }
5585
5586 bool
5587 linux_process_target::stopped_by_watchpoint ()
5588 {
5589 struct lwp_info *lwp = get_thread_lwp (current_thread);
5590
5591 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5592 }
5593
5594 CORE_ADDR
5595 linux_process_target::stopped_data_address ()
5596 {
5597 struct lwp_info *lwp = get_thread_lwp (current_thread);
5598
5599 return lwp->stopped_data_address;
5600 }
5601
5602 /* This is only used for targets that define PT_TEXT_ADDR,
5603 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5604 target presumably has other ways of acquiring this information,
5605 such as loadmaps. */
5606
5607 bool
5608 linux_process_target::supports_read_offsets ()
5609 {
5610 #ifdef SUPPORTS_READ_OFFSETS
5611 return true;
5612 #else
5613 return false;
5614 #endif
5615 }
5616
5617 /* Under uClinux, programs are loaded at non-zero offsets, and we need
5618 to tell GDB about them. */
5619
5620 int
5621 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5622 {
5623 #ifdef SUPPORTS_READ_OFFSETS
5624 unsigned long text, text_end, data;
5625 int pid = lwpid_of (current_thread);
5626
5627 errno = 0;
5628
5629 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5630 (PTRACE_TYPE_ARG4) 0);
5631 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5632 (PTRACE_TYPE_ARG4) 0);
5633 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5634 (PTRACE_TYPE_ARG4) 0);
5635
5636 if (errno == 0)
5637 {
5638 /* Both text and data offsets produced at compile-time (and so
5639 used by gdb) are relative to the beginning of the program,
5640 with the data segment immediately following the text segment.
5641 However, the actual runtime layout in memory may put the data
5642 somewhere else, so when we send gdb a data base-address, we
5643 use the real data base address and subtract the compile-time
5644 data base-address from it (which is just the length of the
5645 text segment). BSS immediately follows data in both
5646 cases. */
5647 *text_p = text;
5648 *data_p = data - (text_end - text);
5649
5650 return 1;
5651 }
5652 return 0;
5653 #else
5654 gdb_assert_not_reached ("target op read_offsets not supported");
5655 #endif
5656 }
5657
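/* Worked example for read_offsets (illustrative numbers): if the text
   segment runs from TEXT = 0x10000 to TEXT_END = 0x18000 and the data
   segment was placed at DATA = 0x20000 at runtime, we report
   *TEXT_P = 0x10000 and *DATA_P = 0x20000 - 0x8000 = 0x18000.  GDB
   then adds its compile-time data offset (the text length, 0x8000) to
   *DATA_P and arrives back at the real runtime address 0x20000.  */
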
5658 bool
5659 linux_process_target::supports_get_tls_address ()
5660 {
5661 #ifdef USE_THREAD_DB
5662 return true;
5663 #else
5664 return false;
5665 #endif
5666 }
5667
5668 int
5669 linux_process_target::get_tls_address (thread_info *thread,
5670 CORE_ADDR offset,
5671 CORE_ADDR load_module,
5672 CORE_ADDR *address)
5673 {
5674 #ifdef USE_THREAD_DB
5675 return thread_db_get_tls_address (thread, offset, load_module, address);
5676 #else
5677 return -1;
5678 #endif
5679 }
5680
5681 bool
5682 linux_process_target::supports_qxfer_osdata ()
5683 {
5684 return true;
5685 }
5686
5687 int
5688 linux_process_target::qxfer_osdata (const char *annex,
5689 unsigned char *readbuf,
5690 unsigned const char *writebuf,
5691 CORE_ADDR offset, int len)
5692 {
5693 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5694 }
5695
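/* Convert SIGINFO between the layout ptrace uses and the layout the
   inferior expects.  As called below, DIRECTION 0 converts from the
   ptrace ("native") layout in SIGINFO to the inferior layout in
   INF_SIGINFO, and DIRECTION 1 converts back.  */
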
5696 void
5697 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5698 gdb_byte *inf_siginfo, int direction)
5699 {
5700 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5701
5702 /* If the low target did not handle the conversion (or did nothing),
5703 just do a straight memcpy. */
5704 if (!done)
5705 {
5706 if (direction == 1)
5707 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5708 else
5709 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5710 }
5711 }
5712
5713 bool
5714 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5715 int direction)
5716 {
5717 return false;
5718 }
5719
5720 bool
5721 linux_process_target::supports_qxfer_siginfo ()
5722 {
5723 return true;
5724 }
5725
5726 int
5727 linux_process_target::qxfer_siginfo (const char *annex,
5728 unsigned char *readbuf,
5729 unsigned const char *writebuf,
5730 CORE_ADDR offset, int len)
5731 {
5732 int pid;
5733 siginfo_t siginfo;
5734 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5735
5736 if (current_thread == NULL)
5737 return -1;
5738
5739 pid = lwpid_of (current_thread);
5740
5741 threads_debug_printf ("%s siginfo for lwp %d.",
5742 readbuf != NULL ? "Reading" : "Writing",
5743 pid);
5744
5745 if (offset >= sizeof (siginfo))
5746 return -1;
5747
5748 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5749 return -1;
5750
5751 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5752 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5753 inferior with a 64-bit GDBSERVER should look the same as debugging it
5754 with a 32-bit GDBSERVER, we need to convert it. */
5755 siginfo_fixup (&siginfo, inf_siginfo, 0);
5756
5757 if (offset + len > sizeof (siginfo))
5758 len = sizeof (siginfo) - offset;
5759
5760 if (readbuf != NULL)
5761 memcpy (readbuf, inf_siginfo + offset, len);
5762 else
5763 {
5764 memcpy (inf_siginfo + offset, writebuf, len);
5765
5766 /* Convert back to ptrace layout before flushing it out. */
5767 siginfo_fixup (&siginfo, inf_siginfo, 1);
5768
5769 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5770 return -1;
5771 }
5772
5773 return len;
5774 }
5775
5776 /* SIGCHLD handler that serves two purposes: in non-stop/async mode it
5777 notifies us when children change state, and it acts as the handler
5778 for the sigsuspend in my_waitpid. */
5779
5780 static void
5781 sigchld_handler (int signo)
5782 {
5783 int old_errno = errno;
5784
5785 if (debug_threads)
5786 {
5787 do
5788 {
5789 /* Use the async-signal-safe debug function. */
5790 if (debug_write ("sigchld_handler\n",
5791 sizeof ("sigchld_handler\n") - 1) < 0)
5792 break; /* just ignore */
5793 } while (0);
5794 }
5795
5796 if (target_is_async_p ())
5797 async_file_mark (); /* trigger a linux_wait */
5798
5799 errno = old_errno;
5800 }
5801
5802 bool
5803 linux_process_target::supports_non_stop ()
5804 {
5805 return true;
5806 }
5807
5808 bool
5809 linux_process_target::async (bool enable)
5810 {
5811 bool previous = target_is_async_p ();
5812
5813 threads_debug_printf ("async (%d), previous=%d",
5814 enable, previous);
5815
5816 if (previous != enable)
5817 {
5818 sigset_t mask;
5819 sigemptyset (&mask);
5820 sigaddset (&mask, SIGCHLD);
5821
5822 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5823
5824 if (enable)
5825 {
5826 if (pipe (linux_event_pipe) == -1)
5827 {
5828 linux_event_pipe[0] = -1;
5829 linux_event_pipe[1] = -1;
5830 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5831
5832 warning ("creating event pipe failed.");
5833 return previous;
5834 }
5835
5836 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5837 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5838
5839 /* Register the event loop handler. */
5840 add_file_handler (linux_event_pipe[0],
5841 handle_target_event, NULL,
5842 "linux-low");
5843
5844 /* Always trigger a linux_wait. */
5845 async_file_mark ();
5846 }
5847 else
5848 {
5849 delete_file_handler (linux_event_pipe[0]);
5850
5851 close (linux_event_pipe[0]);
5852 close (linux_event_pipe[1]);
5853 linux_event_pipe[0] = -1;
5854 linux_event_pipe[1] = -1;
5855 }
5856
5857 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5858 }
5859
5860 return previous;
5861 }
5862
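/* The event pipe managed above is the classic self-pipe trick:
   sigchld_handler calls async_file_mark, which (presumably by writing
   a token into the pipe's write end) wakes up the event loop, and the
   read end registered with add_file_handler makes SIGCHLD arrivals
   visible to the select/poll-based loop without doing any unsafe work
   inside the signal handler itself.  */
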
5863 int
5864 linux_process_target::start_non_stop (bool nonstop)
5865 {
5866 /* Register or unregister from event-loop accordingly. */
5867 target_async (nonstop);
5868
5869 if (target_is_async_p () != (nonstop != false))
5870 return -1;
5871
5872 return 0;
5873 }
5874
5875 bool
5876 linux_process_target::supports_multi_process ()
5877 {
5878 return true;
5879 }
5880
5881 /* Check if fork events are supported. */
5882
5883 bool
5884 linux_process_target::supports_fork_events ()
5885 {
5886 return true;
5887 }
5888
5889 /* Check if vfork events are supported. */
5890
5891 bool
5892 linux_process_target::supports_vfork_events ()
5893 {
5894 return true;
5895 }
5896
5897 /* Check if exec events are supported. */
5898
5899 bool
5900 linux_process_target::supports_exec_events ()
5901 {
5902 return true;
5903 }
5904
5905 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5906 ptrace flags for all inferiors. This is in case the new GDB connection
5907 doesn't support the same set of events that the previous one did. */
5908
5909 void
5910 linux_process_target::handle_new_gdb_connection ()
5911 {
5912 /* Request that all the lwps reset their ptrace options. */
5913 for_each_thread ([] (thread_info *thread)
5914 {
5915 struct lwp_info *lwp = get_thread_lwp (thread);
5916
5917 if (!lwp->stopped)
5918 {
5919 /* Stop the lwp so we can modify its ptrace options. */
5920 lwp->must_set_ptrace_flags = 1;
5921 linux_stop_lwp (lwp);
5922 }
5923 else
5924 {
5925 /* Already stopped; go ahead and set the ptrace options. */
5926 struct process_info *proc = find_process_pid (pid_of (thread));
5927 int options = linux_low_ptrace_options (proc->attached);
5928
5929 linux_enable_event_reporting (lwpid_of (thread), options);
5930 lwp->must_set_ptrace_flags = 0;
5931 }
5932 });
5933 }
5934
5935 int
5936 linux_process_target::handle_monitor_command (char *mon)
5937 {
5938 #ifdef USE_THREAD_DB
5939 return thread_db_handle_monitor_command (mon);
5940 #else
5941 return 0;
5942 #endif
5943 }
5944
5945 int
5946 linux_process_target::core_of_thread (ptid_t ptid)
5947 {
5948 return linux_common_core_of_thread (ptid);
5949 }
5950
5951 bool
5952 linux_process_target::supports_disable_randomization ()
5953 {
5954 return true;
5955 }
5956
5957 bool
5958 linux_process_target::supports_agent ()
5959 {
5960 return true;
5961 }
5962
5963 bool
5964 linux_process_target::supports_range_stepping ()
5965 {
5966 if (supports_software_single_step ())
5967 return true;
5968
5969 return low_supports_range_stepping ();
5970 }
5971
5972 bool
5973 linux_process_target::low_supports_range_stepping ()
5974 {
5975 return false;
5976 }
5977
5978 bool
5979 linux_process_target::supports_pid_to_exec_file ()
5980 {
5981 return true;
5982 }
5983
5984 const char *
5985 linux_process_target::pid_to_exec_file (int pid)
5986 {
5987 return linux_proc_pid_to_exec_file (pid);
5988 }
5989
5990 bool
5991 linux_process_target::supports_multifs ()
5992 {
5993 return true;
5994 }
5995
5996 int
5997 linux_process_target::multifs_open (int pid, const char *filename,
5998 int flags, mode_t mode)
5999 {
6000 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6001 }
6002
6003 int
6004 linux_process_target::multifs_unlink (int pid, const char *filename)
6005 {
6006 return linux_mntns_unlink (pid, filename);
6007 }
6008
6009 ssize_t
6010 linux_process_target::multifs_readlink (int pid, const char *filename,
6011 char *buf, size_t bufsiz)
6012 {
6013 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6014 }
6015
6016 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6017 struct target_loadseg
6018 {
6019 /* Core address to which the segment is mapped. */
6020 Elf32_Addr addr;
6021 /* VMA recorded in the program header. */
6022 Elf32_Addr p_vaddr;
6023 /* Size of this segment in memory. */
6024 Elf32_Word p_memsz;
6025 };
6026
6027 # if defined PT_GETDSBT
6028 struct target_loadmap
6029 {
6030 /* Protocol version number, must be zero. */
6031 Elf32_Word version;
6032 /* Pointer to the DSBT table, its size, and the DSBT index. */
6033 unsigned *dsbt_table;
6034 unsigned dsbt_size, dsbt_index;
6035 /* Number of segments in this map. */
6036 Elf32_Word nsegs;
6037 /* The actual memory map. */
6038 struct target_loadseg segs[/*nsegs*/];
6039 };
6040 # define LINUX_LOADMAP PT_GETDSBT
6041 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6042 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6043 # else
6044 struct target_loadmap
6045 {
6046 /* Protocol version number, must be zero. */
6047 Elf32_Half version;
6048 /* Number of segments in this map. */
6049 Elf32_Half nsegs;
6050 /* The actual memory map. */
6051 struct target_loadseg segs[/*nsegs*/];
6052 };
6053 # define LINUX_LOADMAP PTRACE_GETFDPIC
6054 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6055 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6056 # endif
6057
6058 bool
6059 linux_process_target::supports_read_loadmap ()
6060 {
6061 return true;
6062 }
6063
6064 int
6065 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6066 unsigned char *myaddr, unsigned int len)
6067 {
6068 int pid = lwpid_of (current_thread);
6069 int addr = -1;
6070 struct target_loadmap *data = NULL;
6071 unsigned int actual_length, copy_length;
6072
6073 if (strcmp (annex, "exec") == 0)
6074 addr = (int) LINUX_LOADMAP_EXEC;
6075 else if (strcmp (annex, "interp") == 0)
6076 addr = (int) LINUX_LOADMAP_INTERP;
6077 else
6078 return -1;
6079
6080 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6081 return -1;
6082
6083 if (data == NULL)
6084 return -1;
6085
6086 actual_length = sizeof (struct target_loadmap)
6087 + sizeof (struct target_loadseg) * data->nsegs;
6088
6089 if (offset < 0 || offset > actual_length)
6090 return -1;
6091
6092 copy_length = actual_length - offset < len ? actual_length - offset : len;
6093 memcpy (myaddr, (char *) data + offset, copy_length);
6094 return copy_length;
6095 }
6096 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6097
6098 bool
6099 linux_process_target::supports_catch_syscall ()
6100 {
6101 return low_supports_catch_syscall ();
6102 }
6103
6104 bool
6105 linux_process_target::low_supports_catch_syscall ()
6106 {
6107 return false;
6108 }
6109
6110 CORE_ADDR
6111 linux_process_target::read_pc (regcache *regcache)
6112 {
6113 if (!low_supports_breakpoints ())
6114 return 0;
6115
6116 return low_get_pc (regcache);
6117 }
6118
6119 void
6120 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6121 {
6122 gdb_assert (low_supports_breakpoints ());
6123
6124 low_set_pc (regcache, pc);
6125 }
6126
6127 bool
6128 linux_process_target::supports_thread_stopped ()
6129 {
6130 return true;
6131 }
6132
6133 bool
6134 linux_process_target::thread_stopped (thread_info *thread)
6135 {
6136 return get_thread_lwp (thread)->stopped;
6137 }
6138
6139 /* This exposes stop-all-threads functionality to other modules. */
6140
6141 void
6142 linux_process_target::pause_all (bool freeze)
6143 {
6144 stop_all_lwps (freeze, NULL);
6145 }
6146
6147 /* This exposes unstop-all-threads functionality to other gdbserver
6148 modules. */
6149
6150 void
6151 linux_process_target::unpause_all (bool unfreeze)
6152 {
6153 unstop_all_lwps (unfreeze, NULL);
6154 }
6155
6156 int
6157 linux_process_target::prepare_to_access_memory ()
6158 {
6159 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6160 running LWP. */
6161 if (non_stop)
6162 target_pause_all (true);
6163 return 0;
6164 }
6165
6166 void
6167 linux_process_target::done_accessing_memory ()
6168 {
6169 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6170 running LWP. */
6171 if (non_stop)
6172 target_unpause_all (true);
6173 }
6174
6175 /* Extract the program header address (&phdr) and count (num_phdr) from the inferior's auxv. Return 0 on success. */
6176
6177 static int
6178 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6179 CORE_ADDR *phdr_memaddr, int *num_phdr)
6180 {
6181 char filename[PATH_MAX];
6182 int fd;
6183 const int auxv_size = is_elf64
6184 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6185 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6186
6187 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6188
6189 fd = open (filename, O_RDONLY);
6190 if (fd < 0)
6191 return 1;
6192
6193 *phdr_memaddr = 0;
6194 *num_phdr = 0;
6195 while (read (fd, buf, auxv_size) == auxv_size
6196 && (*phdr_memaddr == 0 || *num_phdr == 0))
6197 {
6198 if (is_elf64)
6199 {
6200 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6201
6202 switch (aux->a_type)
6203 {
6204 case AT_PHDR:
6205 *phdr_memaddr = aux->a_un.a_val;
6206 break;
6207 case AT_PHNUM:
6208 *num_phdr = aux->a_un.a_val;
6209 break;
6210 }
6211 }
6212 else
6213 {
6214 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6215
6216 switch (aux->a_type)
6217 {
6218 case AT_PHDR:
6219 *phdr_memaddr = aux->a_un.a_val;
6220 break;
6221 case AT_PHNUM:
6222 *num_phdr = aux->a_un.a_val;
6223 break;
6224 }
6225 }
6226 }
6227
6228 close (fd);
6229
6230 if (*phdr_memaddr == 0 || *num_phdr == 0)
6231 {
6232 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6233 "phdr_memaddr = %ld, phdr_num = %d",
6234 (long) *phdr_memaddr, *num_phdr);
6235 return 2;
6236 }
6237
6238 return 0;
6239 }
6240
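/* Each record read above is an (a_type, a_un.a_val) pair; the kernel
   terminates the vector with an AT_NULL entry, so a well-formed auxv
   either yields both AT_PHDR and AT_PHNUM or runs out of data.  */
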
6241 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6242
6243 static CORE_ADDR
6244 get_dynamic (const int pid, const int is_elf64)
6245 {
6246 CORE_ADDR phdr_memaddr, relocation;
6247 int num_phdr, i;
6248 unsigned char *phdr_buf;
6249 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6250
6251 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6252 return 0;
6253
6254 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6255 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6256
6257 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6258 return 0;
6259
6260 /* Compute relocation: it is expected to be 0 for "regular" executables,
6261 non-zero for PIE ones. */
6262 relocation = -1;
6263 for (i = 0; relocation == -1 && i < num_phdr; i++)
6264 if (is_elf64)
6265 {
6266 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6267
6268 if (p->p_type == PT_PHDR)
6269 relocation = phdr_memaddr - p->p_vaddr;
6270 }
6271 else
6272 {
6273 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6274
6275 if (p->p_type == PT_PHDR)
6276 relocation = phdr_memaddr - p->p_vaddr;
6277 }
6278
6279 if (relocation == -1)
6280 {
6281 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6282 all real-world executables, including PIE executables, always have
6283 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6284 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6285 provides DT_DEBUG anyway (fpc binaries are statically linked).
6286
6287 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6288
6289 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6290
6291 return 0;
6292 }
6293
6294 for (i = 0; i < num_phdr; i++)
6295 {
6296 if (is_elf64)
6297 {
6298 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6299
6300 if (p->p_type == PT_DYNAMIC)
6301 return p->p_vaddr + relocation;
6302 }
6303 else
6304 {
6305 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6306
6307 if (p->p_type == PT_DYNAMIC)
6308 return p->p_vaddr + relocation;
6309 }
6310 }
6311
6312 return 0;
6313 }
6314
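/* Worked example for get_dynamic (illustrative addresses): for a PIE
   loaded at 0x555555554000 whose PT_PHDR has p_vaddr = 0x40 while
   AT_PHDR reports 0x555555554040, RELOCATION computes to
   0x555555554040 - 0x40 = 0x555555554000, which added to the
   PT_DYNAMIC p_vaddr gives the runtime address of _DYNAMIC.  */
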
6315 /* Return &_r_debug in the inferior, or -1 if not present. The return
6316 value can be 0 if the inferior has not yet initialized its library
6317 list. We look for DT_MIPS_RLD_MAP first: MIPS executables use it instead
6318 of DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6319
6320 static CORE_ADDR
6321 get_r_debug (const int pid, const int is_elf64)
6322 {
6323 CORE_ADDR dynamic_memaddr;
6324 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6325 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6326 CORE_ADDR map = -1;
6327
6328 dynamic_memaddr = get_dynamic (pid, is_elf64);
6329 if (dynamic_memaddr == 0)
6330 return map;
6331
6332 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6333 {
6334 if (is_elf64)
6335 {
6336 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6337 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6338 union
6339 {
6340 Elf64_Xword map;
6341 unsigned char buf[sizeof (Elf64_Xword)];
6342 }
6343 rld_map;
6344 #endif
6345 #ifdef DT_MIPS_RLD_MAP
6346 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6347 {
6348 if (linux_read_memory (dyn->d_un.d_val,
6349 rld_map.buf, sizeof (rld_map.buf)) == 0)
6350 return rld_map.map;
6351 else
6352 break;
6353 }
6354 #endif /* DT_MIPS_RLD_MAP */
6355 #ifdef DT_MIPS_RLD_MAP_REL
6356 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6357 {
6358 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6359 rld_map.buf, sizeof (rld_map.buf)) == 0)
6360 return rld_map.map;
6361 else
6362 break;
6363 }
6364 #endif /* DT_MIPS_RLD_MAP_REL */
6365
6366 if (dyn->d_tag == DT_DEBUG && map == -1)
6367 map = dyn->d_un.d_val;
6368
6369 if (dyn->d_tag == DT_NULL)
6370 break;
6371 }
6372 else
6373 {
6374 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6375 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6376 union
6377 {
6378 Elf32_Word map;
6379 unsigned char buf[sizeof (Elf32_Word)];
6380 }
6381 rld_map;
6382 #endif
6383 #ifdef DT_MIPS_RLD_MAP
6384 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6385 {
6386 if (linux_read_memory (dyn->d_un.d_val,
6387 rld_map.buf, sizeof (rld_map.buf)) == 0)
6388 return rld_map.map;
6389 else
6390 break;
6391 }
6392 #endif /* DT_MIPS_RLD_MAP */
6393 #ifdef DT_MIPS_RLD_MAP_REL
6394 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6395 {
6396 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6397 rld_map.buf, sizeof (rld_map.buf)) == 0)
6398 return rld_map.map;
6399 else
6400 break;
6401 }
6402 #endif /* DT_MIPS_RLD_MAP_REL */
6403
6404 if (dyn->d_tag == DT_DEBUG && map == -1)
6405 map = dyn->d_un.d_val;
6406
6407 if (dyn->d_tag == DT_NULL)
6408 break;
6409 }
6410
6411 dynamic_memaddr += dyn_size;
6412 }
6413
6414 return map;
6415 }
6416
6417 /* Read one pointer from MEMADDR in the inferior. */
6418
6419 static int
6420 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6421 {
6422 int ret;
6423
6424 /* Go through a union so this works on either big- or little-endian
6425 hosts, when the inferior's pointer size is smaller than the size
6426 of CORE_ADDR. It is assumed the inferior's endianness is the
6427 same as the superior's. */
6428 union
6429 {
6430 CORE_ADDR core_addr;
6431 unsigned int ui;
6432 unsigned char uc;
6433 } addr;
6434
6435 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6436 if (ret == 0)
6437 {
6438 if (ptr_size == sizeof (CORE_ADDR))
6439 *ptr = addr.core_addr;
6440 else if (ptr_size == sizeof (unsigned int))
6441 *ptr = addr.ui;
6442 else
6443 gdb_assert_not_reached ("unhandled pointer size");
6444 }
6445 return ret;
6446 }
6447
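/* Worked example for read_one_ptr (illustrative): on a little-endian
   host with an 8-byte CORE_ADDR, reading a 4-byte inferior pointer
   0x12345678 places the bytes 78 56 34 12 at the start of the union;
   ADDR.UI then recovers 0x12345678, whereas reading ADDR.CORE_ADDR
   would also pick up the four uninitialized bytes that follow.  */
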
6448 bool
6449 linux_process_target::supports_qxfer_libraries_svr4 ()
6450 {
6451 return true;
6452 }
6453
6454 struct link_map_offsets
6455 {
6456 /* Offset and size of r_debug.r_version. */
6457 int r_version_offset;
6458
6459 /* Offset and size of r_debug.r_map. */
6460 int r_map_offset;
6461
6462 /* Offset to l_addr field in struct link_map. */
6463 int l_addr_offset;
6464
6465 /* Offset to l_name field in struct link_map. */
6466 int l_name_offset;
6467
6468 /* Offset to l_ld field in struct link_map. */
6469 int l_ld_offset;
6470
6471 /* Offset to l_next field in struct link_map. */
6472 int l_next_offset;
6473
6474 /* Offset to l_prev field in struct link_map. */
6475 int l_prev_offset;
6476 };
6477
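/* The offset tables below mirror the glibc <link.h> layouts, roughly:

     struct r_debug { int r_version; struct link_map *r_map; ... };
     struct link_map
     {
       ElfW(Addr) l_addr;
       char *l_name;
       ElfW(Dyn) *l_ld;
       struct link_map *l_next, *l_prev;
     };

   with 4-byte words and pointers for 32-bit inferiors, and 8-byte ones
   (plus padding after r_version) for 64-bit inferiors.  */
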
6478 /* Construct qXfer:libraries-svr4:read reply. */
6479
6480 int
6481 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6482 unsigned char *readbuf,
6483 unsigned const char *writebuf,
6484 CORE_ADDR offset, int len)
6485 {
6486 struct process_info_private *const priv = current_process ()->priv;
6487 char filename[PATH_MAX];
6488 int pid, is_elf64;
6489
6490 static const struct link_map_offsets lmo_32bit_offsets =
6491 {
6492 0, /* r_version offset. */
6493 4, /* r_debug.r_map offset. */
6494 0, /* l_addr offset in link_map. */
6495 4, /* l_name offset in link_map. */
6496 8, /* l_ld offset in link_map. */
6497 12, /* l_next offset in link_map. */
6498 16 /* l_prev offset in link_map. */
6499 };
6500
6501 static const struct link_map_offsets lmo_64bit_offsets =
6502 {
6503 0, /* r_version offset. */
6504 8, /* r_debug.r_map offset. */
6505 0, /* l_addr offset in link_map. */
6506 8, /* l_name offset in link_map. */
6507 16, /* l_ld offset in link_map. */
6508 24, /* l_next offset in link_map. */
6509 32 /* l_prev offset in link_map. */
6510 };
6511 const struct link_map_offsets *lmo;
6512 unsigned int machine;
6513 int ptr_size;
6514 CORE_ADDR lm_addr = 0, lm_prev = 0;
6515 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6516 int header_done = 0;
6517
6518 if (writebuf != NULL)
6519 return -2;
6520 if (readbuf == NULL)
6521 return -1;
6522
6523 pid = lwpid_of (current_thread);
6524 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6525 is_elf64 = elf_64_file_p (filename, &machine);
6526 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6527 ptr_size = is_elf64 ? 8 : 4;
6528
6529 while (annex[0] != '\0')
6530 {
6531 const char *sep;
6532 CORE_ADDR *addrp;
6533 int name_len;
6534
6535 sep = strchr (annex, '=');
6536 if (sep == NULL)
6537 break;
6538
6539 name_len = sep - annex;
6540 if (name_len == 5 && startswith (annex, "start"))
6541 addrp = &lm_addr;
6542 else if (name_len == 4 && startswith (annex, "prev"))
6543 addrp = &lm_prev;
6544 else
6545 {
6546 annex = strchr (sep, ';');
6547 if (annex == NULL)
6548 break;
6549 annex++;
6550 continue;
6551 }
6552
6553 annex = decode_address_to_semicolon (addrp, sep + 1);
6554 }
6555
6556 if (lm_addr == 0)
6557 {
6558 int r_version = 0;
6559
6560 if (priv->r_debug == 0)
6561 priv->r_debug = get_r_debug (pid, is_elf64);
6562
6563 /* We failed to find DT_DEBUG. That will not change for this
6564 inferior, so do not retry. Report it to GDB as E01; see GDB's
6565 solib-svr4.c for the reasons. */
6566 if (priv->r_debug == (CORE_ADDR) -1)
6567 return -1;
6568
6569 if (priv->r_debug != 0)
6570 {
6571 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6572 (unsigned char *) &r_version,
6573 sizeof (r_version)) != 0
6574 || r_version < 1)
6575 {
6576 warning ("unexpected r_debug version %d", r_version);
6577 }
6578 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6579 &lm_addr, ptr_size) != 0)
6580 {
6581 warning ("unable to read r_map from 0x%lx",
6582 (long) priv->r_debug + lmo->r_map_offset);
6583 }
6584 }
6585 }
6586
6587 std::string document = "<library-list-svr4 version=\"1.0\"";
6588
6589 while (lm_addr
6590 && read_one_ptr (lm_addr + lmo->l_name_offset,
6591 &l_name, ptr_size) == 0
6592 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6593 &l_addr, ptr_size) == 0
6594 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6595 &l_ld, ptr_size) == 0
6596 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6597 &l_prev, ptr_size) == 0
6598 && read_one_ptr (lm_addr + lmo->l_next_offset,
6599 &l_next, ptr_size) == 0)
6600 {
6601 unsigned char libname[PATH_MAX];
6602
6603 if (lm_prev != l_prev)
6604 {
6605 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6606 (long) lm_prev, (long) l_prev);
6607 break;
6608 }
6609
6610 /* Ignore the first entry even if it has a valid name, as it
6611 corresponds to the main executable. The first entry should not be
6612 skipped if the dynamic loader was loaded late by a static executable
6613 (see the solib-svr4.c parameter ignore_first), but in that case the
6614 main executable has no PT_DYNAMIC present, and this function has
6615 already returned above due to a failed get_r_debug. */
6616 if (lm_prev == 0)
6617 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6618 else
6619 {
6620 /* Not checking for error because reading may stop before
6621 we've got PATH_MAX worth of characters. */
6622 libname[0] = '\0';
6623 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6624 libname[sizeof (libname) - 1] = '\0';
6625 if (libname[0] != '\0')
6626 {
6627 if (!header_done)
6628 {
6629 /* Terminate `<library-list-svr4'. */
6630 document += '>';
6631 header_done = 1;
6632 }
6633
6634 string_appendf (document, "<library name=\"");
6635 xml_escape_text_append (&document, (char *) libname);
6636 string_appendf (document, "\" lm=\"0x%lx\" "
6637 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6638 (unsigned long) lm_addr, (unsigned long) l_addr,
6639 (unsigned long) l_ld);
6640 }
6641 }
6642
6643 lm_prev = lm_addr;
6644 lm_addr = l_next;
6645 }
6646
6647 if (!header_done)
6648 {
6649 /* Empty list; terminate `<library-list-svr4'. */
6650 document += "/>";
6651 }
6652 else
6653 document += "</library-list-svr4>";
6654
6655 int document_len = document.length ();
6656 if (offset < document_len)
6657 document_len -= offset;
6658 else
6659 document_len = 0;
6660 if (len > document_len)
6661 len = document_len;
6662
6663 memcpy (readbuf, document.data () + offset, len);
6664
6665 return len;
6666 }
6667
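/* A reply built above looks like this (illustrative addresses):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
      l_addr="0x7ffff7d80000" l_ld="0x7ffff7f6ee80"/>
     </library-list-svr4>

   except that the real document contains no line breaks between
   elements.  */
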
6668 #ifdef HAVE_LINUX_BTRACE
6669
6670 btrace_target_info *
6671 linux_process_target::enable_btrace (thread_info *tp,
6672 const btrace_config *conf)
6673 {
6674 return linux_enable_btrace (tp->id, conf);
6675 }
6676
6677 /* See to_disable_btrace target method. */
6678
6679 int
6680 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6681 {
6682 enum btrace_error err;
6683
6684 err = linux_disable_btrace (tinfo);
6685 return (err == BTRACE_ERR_NONE ? 0 : -1);
6686 }
6687
6688 /* Encode an Intel Processor Trace configuration. */
6689
6690 static void
6691 linux_low_encode_pt_config (struct buffer *buffer,
6692 const struct btrace_data_pt_config *config)
6693 {
6694 buffer_grow_str (buffer, "<pt-config>\n");
6695
6696 switch (config->cpu.vendor)
6697 {
6698 case CV_INTEL:
6699 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6700 "model=\"%u\" stepping=\"%u\"/>\n",
6701 config->cpu.family, config->cpu.model,
6702 config->cpu.stepping);
6703 break;
6704
6705 default:
6706 break;
6707 }
6708
6709 buffer_grow_str (buffer, "</pt-config>\n");
6710 }
6711
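/* For an Intel CPU the encoder above emits, e.g. (illustrative
   values):

     <pt-config>
     <cpu vendor="GenuineIntel" family="6" model="142" stepping="10"/>
     </pt-config>  */
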
6712 /* Encode a raw buffer. */
6713
6714 static void
6715 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6716 unsigned int size)
6717 {
6718 if (size == 0)
6719 return;
6720
6721 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6722 buffer_grow_str (buffer, "<raw>\n");
6723
6724 while (size-- > 0)
6725 {
6726 char elem[2];
6727
6728 elem[0] = tohex ((*data >> 4) & 0xf);
6729 elem[1] = tohex (*data++ & 0xf);
6730
6731 buffer_grow (buffer, elem, 2);
6732 }
6733
6734 buffer_grow_str (buffer, "</raw>\n");
6735 }
6736
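/* For example, the two bytes 0xde 0xad are emitted above as the text
   "<raw>\ndead</raw>\n" (lowercase hex, two characters per byte).  */
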
6737 /* See to_read_btrace target method. */
6738
6739 int
6740 linux_process_target::read_btrace (btrace_target_info *tinfo,
6741 buffer *buffer,
6742 enum btrace_read_type type)
6743 {
6744 struct btrace_data btrace;
6745 enum btrace_error err;
6746
6747 err = linux_read_btrace (&btrace, tinfo, type);
6748 if (err != BTRACE_ERR_NONE)
6749 {
6750 if (err == BTRACE_ERR_OVERFLOW)
6751 buffer_grow_str0 (buffer, "E.Overflow.");
6752 else
6753 buffer_grow_str0 (buffer, "E.Generic Error.");
6754
6755 return -1;
6756 }
6757
6758 switch (btrace.format)
6759 {
6760 case BTRACE_FORMAT_NONE:
6761 buffer_grow_str0 (buffer, "E.No Trace.");
6762 return -1;
6763
6764 case BTRACE_FORMAT_BTS:
6765 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6766 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6767
6768 for (const btrace_block &block : *btrace.variant.bts.blocks)
6769 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6770 paddress (block.begin), paddress (block.end));
6771
6772 buffer_grow_str0 (buffer, "</btrace>\n");
6773 break;
6774
6775 case BTRACE_FORMAT_PT:
6776 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6777 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6778 buffer_grow_str (buffer, "<pt>\n");
6779
6780 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6781
6782 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6783 btrace.variant.pt.size);
6784
6785 buffer_grow_str (buffer, "</pt>\n");
6786 buffer_grow_str0 (buffer, "</btrace>\n");
6787 break;
6788
6789 default:
6790 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6791 return -1;
6792 }
6793
6794 return 0;
6795 }
6796
6797 /* See to_btrace_conf target method. */
6798
6799 int
6800 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6801 buffer *buffer)
6802 {
6803 const struct btrace_config *conf;
6804
6805 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6806 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6807
6808 conf = linux_btrace_conf (tinfo);
6809 if (conf != NULL)
6810 {
6811 switch (conf->format)
6812 {
6813 case BTRACE_FORMAT_NONE:
6814 break;
6815
6816 case BTRACE_FORMAT_BTS:
6817 buffer_xml_printf (buffer, "<bts");
6818 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6819 buffer_xml_printf (buffer, " />\n");
6820 break;
6821
6822 case BTRACE_FORMAT_PT:
6823 buffer_xml_printf (buffer, "<pt");
6824 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6825 buffer_xml_printf (buffer, "/>\n");
6826 break;
6827 }
6828 }
6829
6830 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6831 return 0;
6832 }
6833 #endif /* HAVE_LINUX_BTRACE */
6834
6835 /* See nat/linux-nat.h. */
6836
6837 ptid_t
6838 current_lwp_ptid (void)
6839 {
6840 return ptid_of (current_thread);
6841 }
6842
6843 const char *
6844 linux_process_target::thread_name (ptid_t thread)
6845 {
6846 return linux_proc_tid_get_name (thread);
6847 }
6848
6849 #if USE_THREAD_DB
6850 bool
6851 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6852 int *handle_len)
6853 {
6854 return thread_db_thread_handle (ptid, handle, handle_len);
6855 }
6856 #endif
6857
6858 thread_info *
6859 linux_process_target::thread_pending_parent (thread_info *thread)
6860 {
6861 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6862
6863 if (parent == nullptr)
6864 return nullptr;
6865
6866 return get_lwp_thread (parent);
6867 }
6868
6869 thread_info *
6870 linux_process_target::thread_pending_child (thread_info *thread)
6871 {
6872 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6873
6874 if (child == nullptr)
6875 return nullptr;
6876
6877 return get_lwp_thread (child);
6878 }
6879
6880 /* Default implementation of linux_target_ops method "set_pc" for a
6881 32-bit pc register that is literally named "pc". */
6882
6883 void
6884 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6885 {
6886 uint32_t newpc = pc;
6887
6888 supply_register_by_name (regcache, "pc", &newpc);
6889 }
6890
6891 /* Default implementation of linux_target_ops method "get_pc" for a
6892 32-bit pc register that is literally named "pc". */
6893
6894 CORE_ADDR
6895 linux_get_pc_32bit (struct regcache *regcache)
6896 {
6897 uint32_t pc;
6898
6899 collect_register_by_name (regcache, "pc", &pc);
6900 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6901 return pc;
6902 }
6903
6904 /* Default implementation of linux_target_ops method "set_pc" for a
6905 64-bit pc register that is literally named "pc". */
6906
6907 void
6908 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6909 {
6910 uint64_t newpc = pc;
6911
6912 supply_register_by_name (regcache, "pc", &newpc);
6913 }
6914
6915 /* Default implementation of linux_target_ops method "get_pc" for a
6916 64-bit pc register that is literally named "pc". */
6917
6918 CORE_ADDR
6919 linux_get_pc_64bit (struct regcache *regcache)
6920 {
6921 uint64_t pc;
6922
6923 collect_register_by_name (regcache, "pc", &pc);
6924 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6925 return pc;
6926 }
6927
6928 /* See linux-low.h. */
6929
6930 int
6931 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6932 {
6933 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6934 int offset = 0;
6935
6936 gdb_assert (wordsize == 4 || wordsize == 8);
6937
6938 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6939 {
6940 if (wordsize == 4)
6941 {
6942 uint32_t *data_p = (uint32_t *) data;
6943 if (data_p[0] == match)
6944 {
6945 *valp = data_p[1];
6946 return 1;
6947 }
6948 }
6949 else
6950 {
6951 uint64_t *data_p = (uint64_t *) data;
6952 if (data_p[0] == match)
6953 {
6954 *valp = data_p[1];
6955 return 1;
6956 }
6957 }
6958
6959 offset += 2 * wordsize;
6960 }
6961
6962 return 0;
6963 }
6964
6965 /* See linux-low.h. */
6966
6967 CORE_ADDR
6968 linux_get_hwcap (int wordsize)
6969 {
6970 CORE_ADDR hwcap = 0;
6971 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
6972 return hwcap;
6973 }
6974
6975 /* See linux-low.h. */
6976
6977 CORE_ADDR
6978 linux_get_hwcap2 (int wordsize)
6979 {
6980 CORE_ADDR hwcap2 = 0;
6981 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
6982 return hwcap2;
6983 }
6984
6985 #ifdef HAVE_LINUX_REGSETS
6986 void
6987 initialize_regsets_info (struct regsets_info *info)
6988 {
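/* Count the regsets: the array is terminated by a sentinel entry
whose size is negative, hence the >= 0 test. */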
6989 for (info->num_regsets = 0;
6990 info->regsets[info->num_regsets].size >= 0;
6991 info->num_regsets++)
6992 ;
6993 }
6994 #endif
6995
6996 void
6997 initialize_low (void)
6998 {
6999 struct sigaction sigchld_action;
7000
7001 memset (&sigchld_action, 0, sizeof (sigchld_action));
7002 set_target_ops (the_linux_target);
7003
7004 linux_ptrace_init_warnings ();
7005 linux_proc_init_warnings ();
7006
7007 sigchld_action.sa_handler = sigchld_handler;
7008 sigemptyset (&sigchld_action.sa_mask);
7009 sigchld_action.sa_flags = SA_RESTART;
7010 sigaction (SIGCHLD, &sigchld_action, NULL);
7011
7012 initialize_low_arch ();
7013
7014 linux_check_ptrace_features ();
7015 }