/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common-inferior.h"
#include "nat/fork-inferior.h"
#include "environ.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
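
/* Illustrative sketch only (kept out of the build with #if 0): how
   the stopped_pids bookkeeping above is typically driven.  A stop
   reported for a not-yet-known pid is parked on the list; when the
   matching fork/clone event arrives, the cached waitpid status is
   claimed back instead of waiting again.  The wrapper function name
   is hypothetical.  */
#if 0
static void
example_stopped_pids_usage (int pid, int wait_status)
{
  int cached_status;

  /* Park a stop we cannot yet match to a known LWP.  */
  add_to_pid_list (&stopped_pids, pid, wait_status);

  /* Later, when a fork/clone event identifies PID, claim the parked
     status rather than calling waitpid a second time.  */
  if (pull_pid_from_list (&stopped_pids, pid, &cached_status))
    {
      /* CACHED_STATUS now holds the status waitpid reported.  */
    }
}
#endif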

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static int proceed_one_lwp (thread_info *thread, void *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
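
/* Illustrative sketch only (kept out of the build with #if 0): the
   capability checks above all follow one pattern -- each architecture
   backend fills in the optional hooks of the_low_target, and a NULL
   hook means "not supported".  Real backends do this in a static
   initializer of struct linux_target_ops; the backend function below
   is made up for illustration.  */
#if 0
static int
example_supports_hardware_single_step (void)
{
  return 1;
}

/* With the_low_target.supports_hardware_single_step pointing at the
   function above, can_hardware_single_step () returns 1; with the
   hook left NULL, it returns 0.  */
#endif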

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  Store the machine
   type in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF file
   or its header cannot be read, and -1 if it cannot be opened or is
   not an ELF file at all.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
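
/* Illustrative sketch only (kept out of the build with #if 0): a
   caller typically uses the predicate above to pick between 32-bit
   and 64-bit register layouts for an attached process.  The function
   name and printed strings are made up for illustration.  */
#if 0
static void
example_elf_64_check (int pid)
{
  unsigned int machine;

  if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1)
    fprintf (stderr, "pid %d runs a 64-bit ELF (e_machine=%u)\n",
	     pid, machine);
  else
    fprintf (stderr, "pid %d is 32-bit, inaccessible, or not ELF\n", pid);
}
#endif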

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, the child
		 will hit them, so uninsert single-step breakpoints
		 from the parent (and child).  Once the vfork child is
		 done, reinsert them back in the parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once the
	     vforked child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      thread_db_notice_clone (event_thr, ptid);

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
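
/* Illustrative sketch only (kept out of the build with #if 0): the
   extended events handled above are only delivered if the tracer has
   enabled them with PTRACE_SETOPTIONS.  gdbserver computes its actual
   flag set in linux_low_ptrace_options (declared above, defined
   elsewhere in this file); this minimal stand-alone version simply
   enables every event handle_extended_wait understands.  */
#if 0
static void
example_enable_extended_events (pid_t pid)
{
  long flags = (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
		| PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC
		| PTRACE_O_TRACEVFORKDONE);

  /* PID must be in a ptrace-stop for this to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) flags) != 0)
    perror ("PTRACE_SETOPTIONS");
}
#endif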

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the number of the syscall trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap, check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
		       const std::vector<char *> &program_args)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);
  std::string str_program_args = stringify_argv (program_args);

  pid = fork_inferior (program,
		       str_program_args.c_str (),
		       get_environ ()->envp (), linux_ptrace_fun,
		       NULL, NULL, NULL, NULL);

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (thread_info *thread, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (thread->id.pid () == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
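
/* Illustrative sketch only (kept out of the build with #if 0):
   find_inferior walks all_threads and returns the first thread for
   which the callback returns non-zero, as second_thread_of_pid_p
   shows above.  A hypothetical query for any stopped thread of PID
   would follow the same shape; both function names below are made
   up for illustration.  */
#if 0
static int
example_stopped_thread_of_pid_p (thread_info *thread, void *args)
{
  int pid = *(int *) args;

  return (thread->id.pid () == pid
	  && get_thread_lwp (thread)->stopped);
}

static thread_info *
example_find_stopped_thread (int pid)
{
  return find_inferior (&all_threads,
			example_stopped_thread_of_pid_p, &pid);
}
#endif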

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (thread_info *thread, void *args)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (thread->id.pid () != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (thread_info *thread, void *args)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (thread->id.pid () != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (thread_info *thread, void *proc)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (thread_info *thread, void *arg)
{
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (thread_info *thread, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (thread->id.lwp () == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
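
/* Illustrative sketch only (kept out of the build with #if 0):
   find_lwp_pid accepts either a (pid, lwp) ptid or a bare pid ptid,
   as same_lwp above shows, so for a single-threaded process both
   lookups below name the same LWP.  The function name is made up
   for illustration.  */
#if 0
static void
example_find_lwp_pid_usage (int pid)
{
  struct lwp_info *by_pid = find_lwp_pid (pid_to_ptid (pid));
  struct lwp_info *by_lwp = find_lwp_pid (ptid_build (pid, pid, 0));

  gdb_assert (by_pid == by_lwp);
}
#endif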

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      return callback (lwp, data);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
1898
1899 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1900 their exits until all other threads in the group have exited. */
1901
1902 static void
1903 check_zombie_leaders (void)
1904 {
1905 for_each_process ([] (process_info *proc) {
1906 pid_t leader_pid = pid_of (proc);
1907 struct lwp_info *leader_lp;
1908
1909 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1910
1911 if (debug_threads)
1912 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1913 "num_lwps=%d, zombie=%d\n",
1914 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1915 linux_proc_pid_is_zombie (leader_pid));
1916
1917 if (leader_lp != NULL && !leader_lp->stopped
1918 /* Check if there are other threads in the group, as we may
1919 have raced with the inferior simply exiting. */
1920 && !last_thread_of_process_p (leader_pid)
1921 && linux_proc_pid_is_zombie (leader_pid))
1922 {
1923 /* A leader zombie can mean one of two things:
1924
1925 - It exited, and there's an exit status pending and
1926 available, or only the leader exited (not the whole
1927 program). In the latter case, we can't waitpid the
1928 leader's exit status until all other threads are gone.
1929
1930 - There are 3 or more threads in the group, and a thread
1931 other than the leader exec'd. On an exec, the Linux
1932 kernel destroys all other threads (except the execing
1933 one) in the thread group, and resets the execing thread's
1934 tid to the tgid. No exit notification is sent for the
1935 execing thread -- from the ptracer's perspective, it
1936 appears as though the execing thread just vanishes.
1937 Until we reap all other threads except the leader and the
1938 execing thread, the leader will be zombie, and the
1939 execing thread will be in `D (disc sleep)'. As soon as
1940 all other threads are reaped, the execing thread changes
1941 its tid to the tgid, and the previous (zombie) leader
1942 vanishes, giving place to the "new" leader. We could try
1943 distinguishing the exit and exec cases, by waiting once
1944 more, and seeing if something comes out, but it doesn't
1945 sound useful. The previous leader _does_ go away, and
1946 we'll re-add the new one once we see the exec event
1947 (which is just the same as what would happen if the
1948 previous leader did exit voluntarily before some other
1949 thread execs). */
1950
1951 if (debug_threads)
1952 debug_printf ("CZL: Thread group leader %d zombie "
1953 "(it exited, or another thread execd).\n",
1954 leader_pid);
1955
1956 delete_lwp (leader_lp);
1957 }
1958 });
1959 }
1960
1961 /* Callback for `find_inferior'. Returns the first LWP that is not
1962 stopped. ARG is a PTID filter. */
1963
1964 static int
1965 not_stopped_callback (thread_info *thread, void *arg)
1966 {
1967 struct lwp_info *lwp;
1968 ptid_t filter = *(ptid_t *) arg;
1969
1970 if (!ptid_match (ptid_of (thread), filter))
1971 return 0;
1972
1973 lwp = get_thread_lwp (thread);
1974 if (!lwp->stopped)
1975 return 1;
1976
1977 return 0;
1978 }
1979
1980 /* Increment LWP's suspend count. */
1981
1982 static void
1983 lwp_suspended_inc (struct lwp_info *lwp)
1984 {
1985 lwp->suspended++;
1986
1987 if (debug_threads && lwp->suspended > 4)
1988 {
1989 struct thread_info *thread = get_lwp_thread (lwp);
1990
1991 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1992 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1993 }
1994 }
1995
1996 /* Decrement LWP's suspend count. */
1997
1998 static void
1999 lwp_suspended_decr (struct lwp_info *lwp)
2000 {
2001 lwp->suspended--;
2002
2003 if (lwp->suspended < 0)
2004 {
2005 struct thread_info *thread = get_lwp_thread (lwp);
2006
2007 internal_error (__FILE__, __LINE__,
2008 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2009 lwp->suspended);
2010 }
2011 }
2012
2013 /* This function should only be called if the LWP got a SIGTRAP.
2014
2015 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2016 event was handled, 0 otherwise. */
2017
2018 static int
2019 handle_tracepoints (struct lwp_info *lwp)
2020 {
2021 struct thread_info *tinfo = get_lwp_thread (lwp);
2022 int tpoint_related_event = 0;
2023
2024 gdb_assert (lwp->suspended == 0);
2025
2026 /* If this tracepoint hit causes a tracing stop, we'll immediately
2027 uninsert tracepoints. To do this, we temporarily pause all
2028 threads, unpatch away, and then unpause threads. We need to make
2029 sure the unpausing doesn't resume LWP too. */
2030 lwp_suspended_inc (lwp);
2031
2032 /* And we need to be sure that any all-threads-stopping doesn't try
2033 to move threads out of the jump pads, as it could deadlock the
2034 inferior (LWP could be in the jump pad, maybe even holding the
2035 lock.) */
2036
2037 /* Do any necessary step collect actions. */
2038 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2039
2040 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2041
2042 /* See if we just hit a tracepoint and do its main collect
2043 actions. */
2044 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2045
2046 lwp_suspended_decr (lwp);
2047
2048 gdb_assert (lwp->suspended == 0);
2049 gdb_assert (!stabilizing_threads
2050 || (lwp->collecting_fast_tracepoint
2051 != fast_tpoint_collect_result::not_collecting));
2052
2053 if (tpoint_related_event)
2054 {
2055 if (debug_threads)
2056 debug_printf ("got a tracepoint event\n");
2057 return 1;
2058 }
2059
2060 return 0;
2061 }
2062
2063 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2064 collection status. */
2065
2066 static fast_tpoint_collect_result
2067 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2068 struct fast_tpoint_collect_status *status)
2069 {
2070 CORE_ADDR thread_area;
2071 struct thread_info *thread = get_lwp_thread (lwp);
2072
2073 if (the_low_target.get_thread_area == NULL)
2074 return fast_tpoint_collect_result::not_collecting;
2075
2076 /* Get the thread area address. This is used to recognize which
2077 thread is which when tracing with the in-process agent library.
2078 We don't read anything from the address, and treat it as opaque;
2079 it's the address itself that we assume is unique per-thread. */
2080 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2081 return fast_tpoint_collect_result::not_collecting;
2082
2083 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2084 }
2085
2086 /* The reason we resume in the caller is that we want to be able
2087 to pass lwp->status_pending as WSTAT, and we need to clear
2088 status_pending_p before resuming, otherwise, linux_resume_one_lwp
2089 refuses to resume. */
2090
2091 static int
2092 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2093 {
2094 struct thread_info *saved_thread;
2095
2096 saved_thread = current_thread;
2097 current_thread = get_lwp_thread (lwp);
2098
2099 if ((wstat == NULL
2100 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2101 && supports_fast_tracepoints ()
2102 && agent_loaded_p ())
2103 {
2104 struct fast_tpoint_collect_status status;
2105
2106 if (debug_threads)
2107 debug_printf ("Checking whether LWP %ld needs to move out of the "
2108 "jump pad.\n",
2109 lwpid_of (current_thread));
2110
2111 fast_tpoint_collect_result r
2112 = linux_fast_tracepoint_collecting (lwp, &status);
2113
2114 if (wstat == NULL
2115 || (WSTOPSIG (*wstat) != SIGILL
2116 && WSTOPSIG (*wstat) != SIGFPE
2117 && WSTOPSIG (*wstat) != SIGSEGV
2118 && WSTOPSIG (*wstat) != SIGBUS))
2119 {
2120 lwp->collecting_fast_tracepoint = r;
2121
2122 if (r != fast_tpoint_collect_result::not_collecting)
2123 {
2124 if (r == fast_tpoint_collect_result::before_insn
2125 && lwp->exit_jump_pad_bkpt == NULL)
2126 {
2127 /* Haven't executed the original instruction yet.
2128 Set breakpoint there, and wait till it's hit,
2129 then single-step until exiting the jump pad. */
2130 lwp->exit_jump_pad_bkpt
2131 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2132 }
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of "
2136 "the jump pad...it does\n",
2137 lwpid_of (current_thread));
2138 current_thread = saved_thread;
2139
2140 return 1;
2141 }
2142 }
2143 else
2144 {
2145 /* If we get a synchronous signal while collecting, *and*
2146 while executing the (relocated) original instruction,
2147 reset the PC to point at the tpoint address, before
2148 reporting to GDB. Otherwise, it's an IPA lib bug: just
2149 report the signal to GDB, and pray for the best. */
2150
2151 lwp->collecting_fast_tracepoint
2152 = fast_tpoint_collect_result::not_collecting;
2153
2154 if (r != fast_tpoint_collect_result::not_collecting
2155 && (status.adjusted_insn_addr <= lwp->stop_pc
2156 && lwp->stop_pc < status.adjusted_insn_addr_end))
2157 {
2158 siginfo_t info;
2159 struct regcache *regcache;
2160
2161 /* The si_addr on a few signals references the address
2162 of the faulting instruction. Adjust that as
2163 well. */
2164 if ((WSTOPSIG (*wstat) == SIGILL
2165 || WSTOPSIG (*wstat) == SIGFPE
2166 || WSTOPSIG (*wstat) == SIGBUS
2167 || WSTOPSIG (*wstat) == SIGSEGV)
2168 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2169 (PTRACE_TYPE_ARG3) 0, &info) == 0
2170 /* Final check just to make sure we don't clobber
2171 the siginfo of non-kernel-sent signals. */
2172 && (uintptr_t) info.si_addr == lwp->stop_pc)
2173 {
2174 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2175 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2176 (PTRACE_TYPE_ARG3) 0, &info);
2177 }
2178
2179 regcache = get_thread_regcache (current_thread, 1);
2180 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2181 lwp->stop_pc = status.tpoint_addr;
2182
2183 /* Cancel any fast tracepoint lock this thread was
2184 holding. */
2185 force_unlock_trace_buffer ();
2186 }
2187
2188 if (lwp->exit_jump_pad_bkpt != NULL)
2189 {
2190 if (debug_threads)
2191 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2192 "stopping all threads momentarily.\n");
2193
2194 stop_all_lwps (1, lwp);
2195
2196 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2197 lwp->exit_jump_pad_bkpt = NULL;
2198
2199 unstop_all_lwps (1, lwp);
2200
2201 gdb_assert (lwp->suspended >= 0);
2202 }
2203 }
2204 }
2205
2206 if (debug_threads)
2207 debug_printf ("Checking whether LWP %ld needs to move out of the "
2208 "jump pad...no\n",
2209 lwpid_of (current_thread));
2210
2211 current_thread = saved_thread;
2212 return 0;
2213 }
2214
2215 /* Enqueue one signal in the "signals to report later when out of the
2216 jump pad" list. */
2217
2218 static void
2219 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2220 {
2221 struct pending_signals *p_sig;
2222 struct thread_info *thread = get_lwp_thread (lwp);
2223
2224 if (debug_threads)
2225 debug_printf ("Deferring signal %d for LWP %ld.\n",
2226 WSTOPSIG (*wstat), lwpid_of (thread));
2227
2228 if (debug_threads)
2229 {
2230 struct pending_signals *sig;
2231
2232 for (sig = lwp->pending_signals_to_report;
2233 sig != NULL;
2234 sig = sig->prev)
2235 debug_printf (" Already queued %d\n",
2236 sig->signal);
2237
2238 debug_printf (" (no more currently queued signals)\n");
2239 }
2240
2241 /* Don't enqueue non-RT signals if they are already in the deferred
2242 queue. (SIGSTOP being the easiest signal to see ending up here
2243 twice) */
2244 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2245 {
2246 struct pending_signals *sig;
2247
2248 for (sig = lwp->pending_signals_to_report;
2249 sig != NULL;
2250 sig = sig->prev)
2251 {
2252 if (sig->signal == WSTOPSIG (*wstat))
2253 {
2254 if (debug_threads)
2255 debug_printf ("Not requeuing already queued non-RT signal %d"
2256 " for LWP %ld\n",
2257 sig->signal,
2258 lwpid_of (thread));
2259 return;
2260 }
2261 }
2262 }
2263
2264 p_sig = XCNEW (struct pending_signals);
2265 p_sig->prev = lwp->pending_signals_to_report;
2266 p_sig->signal = WSTOPSIG (*wstat);
2267
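/* Stash the signal's siginfo too, so that it can be restored with
PTRACE_SETSIGINFO when the signal is eventually reported. */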
2268 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2269 &p_sig->info);
2270
2271 lwp->pending_signals_to_report = p_sig;
2272 }
2273
2274 /* Dequeue one signal from the "signals to report later when out of
2275 the jump pad" list. */
2276
2277 static int
2278 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2279 {
2280 struct thread_info *thread = get_lwp_thread (lwp);
2281
2282 if (lwp->pending_signals_to_report != NULL)
2283 {
2284 struct pending_signals **p_sig;
2285
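/* The pending list is linked newest-first via the `prev' pointers,
so walk to its tail and report the oldest deferred signal first. */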
2286 p_sig = &lwp->pending_signals_to_report;
2287 while ((*p_sig)->prev != NULL)
2288 p_sig = &(*p_sig)->prev;
2289
2290 *wstat = W_STOPCODE ((*p_sig)->signal);
2291 if ((*p_sig)->info.si_signo != 0)
2292 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2293 &(*p_sig)->info);
2294 free (*p_sig);
2295 *p_sig = NULL;
2296
2297 if (debug_threads)
2298 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2299 WSTOPSIG (*wstat), lwpid_of (thread));
2300
2301 if (debug_threads)
2302 {
2303 struct pending_signals *sig;
2304
2305 for (sig = lwp->pending_signals_to_report;
2306 sig != NULL;
2307 sig = sig->prev)
2308 debug_printf (" Still queued %d\n",
2309 sig->signal);
2310
2311 debug_printf (" (no more queued signals)\n");
2312 }
2313
2314 return 1;
2315 }
2316
2317 return 0;
2318 }
2319
2320 /* Fetch the possibly triggered data watchpoint info and store it in
2321 CHILD.
2322
2323 On some archs, like x86, that use debug registers to set
2324 watchpoints, it's possible that the way to know which watched
2325 address trapped is to check the register that is used to select
2326 which address to watch. Problem is, between setting the watchpoint
2327 and reading back which data address trapped, the user may change
2328 the set of watchpoints, and, as a consequence, GDB changes the
2329 debug registers in the inferior. To avoid reading back a stale
2330 stopped-data-address when that happens, we cache in CHILD the fact
2331 that a watchpoint trapped, and the corresponding data address, as
2332 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2333 registers meanwhile, we have the cached data we can rely on. */
2334
2335 static int
2336 check_stopped_by_watchpoint (struct lwp_info *child)
2337 {
2338 if (the_low_target.stopped_by_watchpoint != NULL)
2339 {
2340 struct thread_info *saved_thread;
2341
2342 saved_thread = current_thread;
2343 current_thread = get_lwp_thread (child);
2344
2345 if (the_low_target.stopped_by_watchpoint ())
2346 {
2347 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2348
2349 if (the_low_target.stopped_data_address != NULL)
2350 child->stopped_data_address
2351 = the_low_target.stopped_data_address ();
2352 else
2353 child->stopped_data_address = 0;
2354 }
2355
2356 current_thread = saved_thread;
2357 }
2358
2359 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2360 }
2361
2362 /* Return the ptrace options that we want to try to enable. */
2363
2364 static int
2365 linux_low_ptrace_options (int attached)
2366 {
2367 int options = 0;
2368
2369 if (!attached)
2370 options |= PTRACE_O_EXITKILL;
2371
2372 if (report_fork_events)
2373 options |= PTRACE_O_TRACEFORK;
2374
2375 if (report_vfork_events)
2376 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2377
2378 if (report_exec_events)
2379 options |= PTRACE_O_TRACEEXEC;
2380
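/* With PTRACE_O_TRACESYSGOOD the kernel reports syscall stops as
SIGTRAP | 0x80 (SYSCALL_SIGTRAP), so they can be told apart from
real SIGTRAPs. */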
2381 options |= PTRACE_O_TRACESYSGOOD;
2382
2383 return options;
2384 }
2385
2386 /* Do low-level handling of the event, and check if we should go on
2387 and pass it to caller code. Return the affected lwp if so, or
2388 NULL otherwise. */
2389
2390 static struct lwp_info *
2391 linux_low_filter_event (int lwpid, int wstat)
2392 {
2393 struct lwp_info *child;
2394 struct thread_info *thread;
2395 int have_stop_pc = 0;
2396
2397 child = find_lwp_pid (pid_to_ptid (lwpid));
2398
2399 /* Check for stop events reported by a process we didn't already
2400 know about - anything not already in our LWP list.
2401
2402 If we're expecting to receive stopped processes after
2403 fork, vfork, and clone events, then we'll just add the
2404 new one to our list and go back to waiting for the event
2405 to be reported - the stopped process might be returned
2406 from waitpid before or after the event is.
2407
2408 But note the case of a non-leader thread exec'ing after the
2409 leader having exited, and gone from our lists (because
2410 check_zombie_leaders deleted it). The non-leader thread
2411 changes its tid to the tgid. */
2412
2413 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2414 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2415 {
2416 ptid_t child_ptid;
2417
2418 /* A multi-thread exec after we had seen the leader exiting. */
2419 if (debug_threads)
2420 {
2421 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2422 "after exec.\n", lwpid);
2423 }
2424
2425 child_ptid = ptid_build (lwpid, lwpid, 0);
2426 child = add_lwp (child_ptid);
2427 child->stopped = 1;
2428 current_thread = child->thread;
2429 }
2430
2431 /* If we didn't find a process, one of two things presumably happened:
2432 - A process we started and then detached from has exited. Ignore it.
2433 - A process we are controlling has forked and the new child's stop
2434 was reported to us by the kernel. Save its PID. */
2435 if (child == NULL && WIFSTOPPED (wstat))
2436 {
2437 add_to_pid_list (&stopped_pids, lwpid, wstat);
2438 return NULL;
2439 }
2440 else if (child == NULL)
2441 return NULL;
2442
2443 thread = get_lwp_thread (child);
2444
2445 child->stopped = 1;
2446
2447 child->last_status = wstat;
2448
2449 /* Check if the thread has exited. */
2450 if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
2451 {
2452 if (debug_threads)
2453 debug_printf ("LLFE: %d exited.\n", lwpid);
2454
2455 if (finish_step_over (child))
2456 {
2457 /* Unsuspend all other LWPs, and set them back running again. */
2458 unsuspend_all_lwps (child);
2459 }
2460
2461 /* If there is at least one more LWP, then the exit signal was
2462 not the end of the debugged application and should be
2463 ignored, unless GDB wants to hear about thread exits. */
2464 if (report_thread_events
2465 || last_thread_of_process_p (pid_of (thread)))
2466 {
2467 /* Events are serialized to the GDB core, so we can't
2468 report this one right now. Leave the status pending for
2469 the next time we're able to report it. */
2470 mark_lwp_dead (child, wstat);
2471 return child;
2472 }
2473 else
2474 {
2475 delete_lwp (child);
2476 return NULL;
2477 }
2478 }
2479
2480 gdb_assert (WIFSTOPPED (wstat));
2481
2482 if (WIFSTOPPED (wstat))
2483 {
2484 struct process_info *proc;
2485
2486 /* Architecture-specific setup after inferior is running. */
2487 proc = find_process_pid (pid_of (thread));
2488 if (proc->tdesc == NULL)
2489 {
2490 if (proc->attached)
2491 {
2492 /* This needs to happen after we have attached to the
2493 inferior and it is stopped for the first time, but
2494 before we access any inferior registers. */
2495 linux_arch_setup_thread (thread);
2496 }
2497 else
2498 {
2499 /* The process is started, but GDBserver will do
2500 architecture-specific setup after the program stops at
2501 the first instruction. */
2502 child->status_pending_p = 1;
2503 child->status_pending = wstat;
2504 return child;
2505 }
2506 }
2507 }
2508
2509 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2510 {
2511 struct process_info *proc = find_process_pid (pid_of (thread));
2512 int options = linux_low_ptrace_options (proc->attached);
2513
2514 linux_enable_event_reporting (lwpid, options);
2515 child->must_set_ptrace_flags = 0;
2516 }
2517
2518 /* Always update syscall_state, even if it will be filtered later. */
2519 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2520 {
2521 child->syscall_state
2522 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2523 ? TARGET_WAITKIND_SYSCALL_RETURN
2524 : TARGET_WAITKIND_SYSCALL_ENTRY);
2525 }
2526 else
2527 {
2528 /* Almost all other ptrace-stops are known to be outside of system
2529 calls, with further exceptions in handle_extended_wait. */
2530 child->syscall_state = TARGET_WAITKIND_IGNORE;
2531 }
2532
2533 /* Be careful to not overwrite stop_pc until save_stop_reason is
2534 called. */
2535 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2536 && linux_is_extended_waitstatus (wstat))
2537 {
2538 child->stop_pc = get_pc (child);
2539 if (handle_extended_wait (&child, wstat))
2540 {
2541 /* The event has been handled, so just return without
2542 reporting it. */
2543 return NULL;
2544 }
2545 }
2546
2547 if (linux_wstatus_maybe_breakpoint (wstat))
2548 {
2549 if (save_stop_reason (child))
2550 have_stop_pc = 1;
2551 }
2552
2553 if (!have_stop_pc)
2554 child->stop_pc = get_pc (child);
2555
2556 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2557 && child->stop_expected)
2558 {
2559 if (debug_threads)
2560 debug_printf ("Expected stop.\n");
2561 child->stop_expected = 0;
2562
2563 if (thread->last_resume_kind == resume_stop)
2564 {
2565 /* We want to report the stop to the core. Treat the
2566 SIGSTOP as a normal event. */
2567 if (debug_threads)
2568 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2569 target_pid_to_str (ptid_of (thread)));
2570 }
2571 else if (stopping_threads != NOT_STOPPING_THREADS)
2572 {
2573 /* Stopping threads. We don't want this SIGSTOP to end up
2574 pending. */
2575 if (debug_threads)
2576 debug_printf ("LLW: SIGSTOP caught for %s "
2577 "while stopping threads.\n",
2578 target_pid_to_str (ptid_of (thread)));
2579 return NULL;
2580 }
2581 else
2582 {
2583 /* This is a delayed SIGSTOP. Filter out the event. */
2584 if (debug_threads)
2585 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2586 child->stepping ? "step" : "continue",
2587 target_pid_to_str (ptid_of (thread)));
2588
2589 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2590 return NULL;
2591 }
2592 }
2593
2594 child->status_pending_p = 1;
2595 child->status_pending = wstat;
2596 return child;
2597 }
2598
2599 /* Return true if THREAD is doing hardware single step. */
2600
2601 static int
2602 maybe_hw_step (struct thread_info *thread)
2603 {
2604 if (can_hardware_single_step ())
2605 return 1;
2606 else
2607 {
2608 /* GDBserver must insert a single-step breakpoint for software
2609 single step. */
2610 gdb_assert (has_single_step_breakpoints (thread));
2611 return 0;
2612 }
2613 }
2614
2615 /* Resume LWPs that are currently stopped without any pending status
2616 to report, but are resumed from the core's perspective. */
2617
2618 static void
2619 resume_stopped_resumed_lwps (thread_info *thread)
2620 {
2621 struct lwp_info *lp = get_thread_lwp (thread);
2622
2623 if (lp->stopped
2624 && !lp->suspended
2625 && !lp->status_pending_p
2626 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2627 {
2628 int step = 0;
2629
2630 if (thread->last_resume_kind == resume_step)
2631 step = maybe_hw_step (thread);
2632
2633 if (debug_threads)
2634 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2635 target_pid_to_str (ptid_of (thread)),
2636 paddress (lp->stop_pc),
2637 step);
2638
2639 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2640 }
2641 }
2642
2643 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2644 match FILTER_PTID (leaving others pending). The PTIDs can be:
2645 minus_one_ptid, to specify any child; a pid PTID, specifying all
2646 lwps of a thread group; or a PTID representing a single lwp. Store
2647 the stop status through the status pointer WSTAT. OPTIONS is
2648 passed to the waitpid call. Return 0 if no event was found and
2649 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2650 were found. Return the PID of the stopped child otherwise. */
2651
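/* For example, passing minus_one_ptid for both arguments waits for
any event from any child, while a pid-only ptid (as built by
pid_to_ptid) matches every LWP of that thread group. */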
2652 static int
2653 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2654 int *wstatp, int options)
2655 {
2656 struct thread_info *event_thread;
2657 struct lwp_info *event_child, *requested_child;
2658 sigset_t block_mask, prev_mask;
2659
2660 retry:
2661 /* N.B. event_thread points to the thread_info struct that contains
2662 event_child. Keep them in sync. */
2663 event_thread = NULL;
2664 event_child = NULL;
2665 requested_child = NULL;
2666
2667 /* Check for a lwp with a pending status. */
2668
2669 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2670 {
2671 event_thread = (struct thread_info *)
2672 find_inferior_in_random (&all_threads, status_pending_p_callback,
2673 &filter_ptid);
2674 if (event_thread != NULL)
2675 event_child = get_thread_lwp (event_thread);
2676 if (debug_threads && event_thread)
2677 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2678 }
2679 else if (!ptid_equal (filter_ptid, null_ptid))
2680 {
2681 requested_child = find_lwp_pid (filter_ptid);
2682
2683 if (stopping_threads == NOT_STOPPING_THREADS
2684 && requested_child->status_pending_p
2685 && (requested_child->collecting_fast_tracepoint
2686 != fast_tpoint_collect_result::not_collecting))
2687 {
2688 enqueue_one_deferred_signal (requested_child,
2689 &requested_child->status_pending);
2690 requested_child->status_pending_p = 0;
2691 requested_child->status_pending = 0;
2692 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2693 }
2694
2695 if (requested_child->suspended
2696 && requested_child->status_pending_p)
2697 {
2698 internal_error (__FILE__, __LINE__,
2699 "requesting an event out of a"
2700 " suspended child?");
2701 }
2702
2703 if (requested_child->status_pending_p)
2704 {
2705 event_child = requested_child;
2706 event_thread = get_lwp_thread (event_child);
2707 }
2708 }
2709
2710 if (event_child != NULL)
2711 {
2712 if (debug_threads)
2713 debug_printf ("Got an event from pending child %ld (%04x)\n",
2714 lwpid_of (event_thread), event_child->status_pending);
2715 *wstatp = event_child->status_pending;
2716 event_child->status_pending_p = 0;
2717 event_child->status_pending = 0;
2718 current_thread = event_thread;
2719 return lwpid_of (event_thread);
2720 }
2721
2722 /* But if we don't find a pending event, we'll have to wait.
2723
2724 We only enter this loop if no process has a pending wait status.
2725 Thus any action taken in response to a wait status inside this
2726 loop is responding as soon as we detect the status, not after any
2727 pending events. */
2728
2729 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2730 all signals while here. */
2731 sigfillset (&block_mask);
2732 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2733
2734 /* Always pull all events out of the kernel. We'll randomly select
2735 an event LWP out of all that have events, to prevent
2736 starvation. */
2737 while (event_child == NULL)
2738 {
2739 pid_t ret = 0;
2740
2741 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2742 quirks:
2743
2744 - If the thread group leader exits while other threads in the
2745 thread group still exist, waitpid(TGID, ...) hangs. That
2746 waitpid won't return an exit status until the other threads
2747 in the group are reaped.
2748
2749 - When a non-leader thread execs, that thread just vanishes
2750 without reporting an exit (so we'd hang if we waited for it
2751 explicitly in that case). The exec event is reported to
2752 the TGID pid. */
2753 errno = 0;
2754 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2755
2756 if (debug_threads)
2757 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2758 ret, errno ? strerror (errno) : "ERRNO-OK");
2759
2760 if (ret > 0)
2761 {
2762 if (debug_threads)
2763 {
2764 debug_printf ("LLW: waitpid %ld received %s\n",
2765 (long) ret, status_to_str (*wstatp));
2766 }
2767
2768 /* Filter all events. IOW, leave all events pending. We'll
2769 randomly select an event LWP out of all that have events
2770 below. */
2771 linux_low_filter_event (ret, *wstatp);
2772 /* Retry until nothing comes out of waitpid. A single
2773 SIGCHLD can indicate more than one child stopped. */
2774 continue;
2775 }
2776
2777 /* Now that we've pulled all events out of the kernel, resume
2778 LWPs that don't have an interesting event to report. */
2779 if (stopping_threads == NOT_STOPPING_THREADS)
2780 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2781
2782 /* ... and find an LWP with a status to report to the core, if
2783 any. */
2784 event_thread = (struct thread_info *)
2785 find_inferior_in_random (&all_threads, status_pending_p_callback,
2786 &filter_ptid);
2787 if (event_thread != NULL)
2788 {
2789 event_child = get_thread_lwp (event_thread);
2790 *wstatp = event_child->status_pending;
2791 event_child->status_pending_p = 0;
2792 event_child->status_pending = 0;
2793 break;
2794 }
2795
2796 /* Check for zombie thread group leaders. Those can't be reaped
2797 until all other threads in the thread group are. */
2798 check_zombie_leaders ();
2799
2800 /* If there are no resumed children left in the set of LWPs we
2801 want to wait for, bail. We can't just block in
2802 waitpid/sigsuspend, because lwps might have been left stopped
2803 in trace-stop state, and we'd be stuck forever waiting for
2804 their status to change (which would only happen if we resumed
2805 them). Even if WNOHANG is set, this return code is preferred
2806 over 0 (below), as it is more detailed. */
2807 if ((find_inferior (&all_threads,
2808 not_stopped_callback,
2809 &wait_ptid) == NULL))
2810 {
2811 if (debug_threads)
2812 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2813 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2814 return -1;
2815 }
2816
2817 /* No interesting event to report to the caller. */
2818 if ((options & WNOHANG))
2819 {
2820 if (debug_threads)
2821 debug_printf ("WNOHANG set, no event found\n");
2822
2823 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2824 return 0;
2825 }
2826
2827 /* Block until we get an event reported with SIGCHLD. */
2828 if (debug_threads)
2829 debug_printf ("sigsuspend'ing\n");
2830
2831 sigsuspend (&prev_mask);
2832 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2833 goto retry;
2834 }
2835
2836 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2837
2838 current_thread = event_thread;
2839
2840 return lwpid_of (event_thread);
2841 }
2842
2843 /* Wait for an event from child(ren) PTID. PTIDs can be:
2844 minus_one_ptid, to specify any child; a pid PTID, specifying all
2845 lwps of a thread group; or a PTID representing a single lwp. Store
2846 the stop status through the status pointer WSTAT. OPTIONS is
2847 passed to the waitpid call. Return 0 if no event was found and
2848 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2849 were found. Return the PID of the stopped child otherwise. */
2850
2851 static int
2852 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2853 {
2854 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2855 }
2856
2857 /* Count the LWPs that have had events. */
2858
2859 static int
2860 count_events_callback (thread_info *thread, void *data)
2861 {
2862 struct lwp_info *lp = get_thread_lwp (thread);
2863 int *count = (int *) data;
2864
2865 gdb_assert (count != NULL);
2866
2867 /* Count only resumed LWPs that have an event pending. */
2868 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2869 && lp->status_pending_p)
2870 (*count)++;
2871
2872 return 0;
2873 }
2874
2875 /* Select the LWP (if any) that is currently being single-stepped. */
2876
2877 static int
2878 select_singlestep_lwp_callback (thread_info *thread, void *data)
2879 {
2880 struct lwp_info *lp = get_thread_lwp (thread);
2881
2882 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2883 && thread->last_resume_kind == resume_step
2884 && lp->status_pending_p)
2885 return 1;
2886 else
2887 return 0;
2888 }
2889
2890 /* Select the Nth LWP that has had an event. */
2891
2892 static int
2893 select_event_lwp_callback (thread_info *thread, void *data)
2894 {
2895 struct lwp_info *lp = get_thread_lwp (thread);
2896 int *selector = (int *) data;
2897
2898 gdb_assert (selector != NULL);
2899
2900 /* Select only resumed LWPs that have an event pending. */
2901 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2902 && lp->status_pending_p)
2903 if ((*selector)-- == 0)
2904 return 1;
2905
2906 return 0;
2907 }
2908
2909 /* Select one LWP out of those that have events pending. */
2910
2911 static void
2912 select_event_lwp (struct lwp_info **orig_lp)
2913 {
2914 int num_events = 0;
2915 int random_selector;
2916 struct thread_info *event_thread = NULL;
2917
2918 /* In all-stop, give preference to the LWP that is being
2919 single-stepped. There will be at most one, and it's the LWP that
2920 the core is most interested in. If we didn't do this, then we'd
2921 have to handle pending step SIGTRAPs somehow in case the core
2922 later continues the previously-stepped thread, otherwise we'd
2923 report the pending SIGTRAP, and the core, not having stepped the
2924 thread, wouldn't understand what the trap was for, and therefore
2925 would report it to the user as a random signal. */
2926 if (!non_stop)
2927 {
2928 event_thread
2929 = (struct thread_info *) find_inferior (&all_threads,
2930 select_singlestep_lwp_callback,
2931 NULL);
2932 if (event_thread != NULL)
2933 {
2934 if (debug_threads)
2935 debug_printf ("SEL: Select single-step %s\n",
2936 target_pid_to_str (ptid_of (event_thread)));
2937 }
2938 }
2939 if (event_thread == NULL)
2940 {
2941 /* No single-stepping LWP. Select one at random, out of those
2942 which have had events. */
2943
2944 /* First see how many events we have. */
2945 find_inferior (&all_threads, count_events_callback, &num_events);
2946 gdb_assert (num_events > 0);
2947
2948 /* Now randomly pick a LWP out of those that have had
2949 events. */
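/* rand () / (RAND_MAX + 1.0) lies in [0, 1), so the result below
is an index in [0, num_events). */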
2950 random_selector = (int)
2951 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2952
2953 if (debug_threads && num_events > 1)
2954 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2955 num_events, random_selector);
2956
2957 event_thread
2958 = (struct thread_info *) find_inferior (&all_threads,
2959 select_event_lwp_callback,
2960 &random_selector);
2961 }
2962
2963 if (event_thread != NULL)
2964 {
2965 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2966
2967 /* Switch the event LWP. */
2968 *orig_lp = event_lp;
2969 }
2970 }
2971
2972 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2973 non-NULL. */
2974
2975 static void
2976 unsuspend_all_lwps (struct lwp_info *except)
2977 {
2978 for_each_thread ([&] (thread_info *thread)
2979 {
2980 lwp_info *lwp = get_thread_lwp (thread);
2981
2982 if (lwp != except)
2983 lwp_suspended_decr (lwp);
2984 });
2985 }
2986
2987 static void move_out_of_jump_pad_callback (thread_info *thread);
2988 static bool stuck_in_jump_pad_callback (thread_info *thread);
2989 static int lwp_running (thread_info *thread, void *data);
2990 static ptid_t linux_wait_1 (ptid_t ptid,
2991 struct target_waitstatus *ourstatus,
2992 int target_options);
2993
2994 /* Stabilize threads (move out of jump pads).
2995
2996 If a thread is midway through collecting a fast tracepoint, we need to
2997 finish the collection and move it out of the jump pad before
2998 reporting the signal.
2999
3000 This avoids recursion while collecting (when a signal arrives
3001 midway, and the signal handler itself collects), which would trash
3002 the trace buffer. In case the user set a breakpoint in a signal
3003 handler, this avoids the backtrace showing the jump pad, etc..
3004 Most importantly, there are certain things we can't do safely if
3005 threads are stopped in a jump pad (or in its callees). For
3006 example:
3007
3008 - starting a new trace run. A thread still collecting from the
3009 previous run could trash the trace buffer when resumed. The trace
3010 buffer control structures would have been reset but the thread had
3011 no way to tell. The thread could even be midway through memcpy'ing
3012 to the buffer, which would mean that when resumed, it would clobber
3013 the trace buffer that had been set up for the new run.
3014
3015 - we can't rewrite/reuse the jump pads for new tracepoints
3016 safely. Say you do tstart while a thread is stopped midway through
3017 collecting. When the thread is later resumed, it finishes the
3018 collection, and returns to the jump pad, to execute the original
3019 instruction that was under the tracepoint jump at the time the
3020 older run had been started. If the jump pad had been rewritten
3021 since for something else in the new run, the thread would now
3022 execute the wrong / random instructions. */
3023
3024 static void
3025 linux_stabilize_threads (void)
3026 {
3027 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
3028
3029 if (thread_stuck != NULL)
3030 {
3031 if (debug_threads)
3032 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3033 lwpid_of (thread_stuck));
3034 return;
3035 }
3036
3037 thread_info *saved_thread = current_thread;
3038
3039 stabilizing_threads = 1;
3040
3041 /* Kick 'em all. */
3042 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3043
3044 /* Loop until all are stopped out of the jump pads. */
3045 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3046 {
3047 struct target_waitstatus ourstatus;
3048 struct lwp_info *lwp;
3049 int wstat;
3050
3051 /* Note that we go through the full wait event loop. While
3052 moving threads out of the jump pad, we need to be able to step
3053 over internal breakpoints and such. */
3054 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3055
3056 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3057 {
3058 lwp = get_thread_lwp (current_thread);
3059
3060 /* Lock it. */
3061 lwp_suspended_inc (lwp);
3062
3063 if (ourstatus.value.sig != GDB_SIGNAL_0
3064 || current_thread->last_resume_kind == resume_stop)
3065 {
3066 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3067 enqueue_one_deferred_signal (lwp, &wstat);
3068 }
3069 }
3070 }
3071
3072 unsuspend_all_lwps (NULL);
3073
3074 stabilizing_threads = 0;
3075
3076 current_thread = saved_thread;
3077
3078 if (debug_threads)
3079 {
3080 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3081
3082 if (thread_stuck != NULL)
3083 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3084 lwpid_of (thread_stuck));
3085 }
3086 }
3087
3088 /* Convenience function that is called when the kernel reports an
3089 event that is not passed out to GDB. */
3090
3091 static ptid_t
3092 ignore_event (struct target_waitstatus *ourstatus)
3093 {
3094 /* If we got an event, there may still be others, as a single
3095 SIGCHLD can indicate more than one child stopped. This forces
3096 another target_wait call. */
3097 async_file_mark ();
3098
3099 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3100 return null_ptid;
3101 }
3102
3103 /* Convenience function that is called when the kernel reports an exit
3104 event. This decides whether to report the event to GDB as a
3105 process exit event, a thread exit event, or to suppress the
3106 event. */
3107
3108 static ptid_t
3109 filter_exit_event (struct lwp_info *event_child,
3110 struct target_waitstatus *ourstatus)
3111 {
3112 struct thread_info *thread = get_lwp_thread (event_child);
3113 ptid_t ptid = ptid_of (thread);
3114
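/* If this is the last thread of the process, leave OURSTATUS
untouched: the caller already set it up as a process exit event. */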
3115 if (!last_thread_of_process_p (pid_of (thread)))
3116 {
3117 if (report_thread_events)
3118 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3119 else
3120 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3121
3122 delete_lwp (event_child);
3123 }
3124 return ptid;
3125 }
3126
3127 /* Returns 1 if GDB is interested in any event_child syscalls. */
3128
3129 static int
3130 gdb_catching_syscalls_p (struct lwp_info *event_child)
3131 {
3132 struct thread_info *thread = get_lwp_thread (event_child);
3133 struct process_info *proc = get_thread_process (thread);
3134
3135 return !proc->syscalls_to_catch.empty ();
3136 }
3137
3138 /* Returns 1 if GDB is interested in the event_child syscall.
3139 Only to be called when the stop signal is SYSCALL_SIGTRAP. */
3140
3141 static int
3142 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3143 {
3144 int sysno;
3145 struct thread_info *thread = get_lwp_thread (event_child);
3146 struct process_info *proc = get_thread_process (thread);
3147
3148 if (proc->syscalls_to_catch.empty ())
3149 return 0;
3150
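/* A leading ANY_SYSCALL entry means GDB asked to catch all
syscalls. */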
3151 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3152 return 1;
3153
3154 get_syscall_trapinfo (event_child, &sysno);
3155
3156 for (int iter : proc->syscalls_to_catch)
3157 if (iter == sysno)
3158 return 1;
3159
3160 return 0;
3161 }
3162
3163 /* Wait for the process, and return its status. */
3164
3165 static ptid_t
3166 linux_wait_1 (ptid_t ptid,
3167 struct target_waitstatus *ourstatus, int target_options)
3168 {
3169 int w;
3170 struct lwp_info *event_child;
3171 int options;
3172 int pid;
3173 int step_over_finished;
3174 int bp_explains_trap;
3175 int maybe_internal_trap;
3176 int report_to_gdb;
3177 int trace_event;
3178 int in_step_range;
3179 int any_resumed;
3180
3181 if (debug_threads)
3182 {
3183 debug_enter ();
3184 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3185 }
3186
3187 /* Translate generic target options into linux options. */
3188 options = __WALL;
3189 if (target_options & TARGET_WNOHANG)
3190 options |= WNOHANG;
3191
3192 bp_explains_trap = 0;
3193 trace_event = 0;
3194 in_step_range = 0;
3195 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3196
3197 /* Find a resumed LWP, if any. */
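/* An LWP counts as resumed here if it has an event pending to
report, or if it is not stopped at all. */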
3198 if (find_inferior (&all_threads,
3199 status_pending_p_callback,
3200 &minus_one_ptid) != NULL)
3201 any_resumed = 1;
3202 else if ((find_inferior (&all_threads,
3203 not_stopped_callback,
3204 &minus_one_ptid) != NULL))
3205 any_resumed = 1;
3206 else
3207 any_resumed = 0;
3208
3209 if (ptid_equal (step_over_bkpt, null_ptid))
3210 pid = linux_wait_for_event (ptid, &w, options);
3211 else
3212 {
3213 if (debug_threads)
3214 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3215 target_pid_to_str (step_over_bkpt));
3216 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3217 }
3218
3219 if (pid == 0 || (pid == -1 && !any_resumed))
3220 {
3221 gdb_assert (target_options & TARGET_WNOHANG);
3222
3223 if (debug_threads)
3224 {
3225 debug_printf ("linux_wait_1 ret = null_ptid, "
3226 "TARGET_WAITKIND_IGNORE\n");
3227 debug_exit ();
3228 }
3229
3230 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3231 return null_ptid;
3232 }
3233 else if (pid == -1)
3234 {
3235 if (debug_threads)
3236 {
3237 debug_printf ("linux_wait_1 ret = null_ptid, "
3238 "TARGET_WAITKIND_NO_RESUMED\n");
3239 debug_exit ();
3240 }
3241
3242 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3243 return null_ptid;
3244 }
3245
3246 event_child = get_thread_lwp (current_thread);
3247
3248 /* linux_wait_for_event only returns an exit status for the last
3249 child of a process. Report it. */
3250 if (WIFEXITED (w) || WIFSIGNALED (w))
3251 {
3252 if (WIFEXITED (w))
3253 {
3254 ourstatus->kind = TARGET_WAITKIND_EXITED;
3255 ourstatus->value.integer = WEXITSTATUS (w);
3256
3257 if (debug_threads)
3258 {
3259 debug_printf ("linux_wait_1 ret = %s, exited with "
3260 "retcode %d\n",
3261 target_pid_to_str (ptid_of (current_thread)),
3262 WEXITSTATUS (w));
3263 debug_exit ();
3264 }
3265 }
3266 else
3267 {
3268 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3269 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3270
3271 if (debug_threads)
3272 {
3273 debug_printf ("linux_wait_1 ret = %s, terminated with "
3274 "signal %d\n",
3275 target_pid_to_str (ptid_of (current_thread)),
3276 WTERMSIG (w));
3277 debug_exit ();
3278 }
3279 }
3280
3281 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3282 return filter_exit_event (event_child, ourstatus);
3283
3284 return ptid_of (current_thread);
3285 }
3286
3287 /* If a step-over executed a breakpoint instruction, then in the case
3288 of hardware single step it means a gdb/gdbserver breakpoint had been
3289 planted on top of a permanent breakpoint, while in the case of
3290 software single step it may just mean that gdbserver hit the reinsert
3291 breakpoint. The PC has been adjusted by save_stop_reason to point at
3292 the breakpoint address.
3293 So in the case of hardware single step, advance the PC manually past
3294 the breakpoint, and in the case of software single step, advance it
3295 only if we are not hitting the single_step_breakpoint.
3296 This prevents a program from trapping on a permanent breakpoint
3297 forever. */
3298 if (!ptid_equal (step_over_bkpt, null_ptid)
3299 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3300 && (event_child->stepping
3301 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3302 {
3303 int increment_pc = 0;
3304 int breakpoint_kind = 0;
3305 CORE_ADDR stop_pc = event_child->stop_pc;
3306
3307 breakpoint_kind
3308 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3309 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3310
3311 if (debug_threads)
3312 {
3313 debug_printf ("step-over for %s executed software breakpoint\n",
3314 target_pid_to_str (ptid_of (current_thread)));
3315 }
3316
3317 if (increment_pc != 0)
3318 {
3319 struct regcache *regcache
3320 = get_thread_regcache (current_thread, 1);
3321
3322 event_child->stop_pc += increment_pc;
3323 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3324
3325 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3326 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3327 }
3328 }
3329
3330 /* If this event was not handled before, and is not a SIGTRAP, we
3331 report it. SIGILL and SIGSEGV are also treated as traps in case
3332 a breakpoint is inserted at the current PC. If this target does
3333 not support internal breakpoints at all, we also report the
3334 SIGTRAP without further processing; it's of no concern to us. */
3335 maybe_internal_trap
3336 = (supports_breakpoints ()
3337 && (WSTOPSIG (w) == SIGTRAP
3338 || ((WSTOPSIG (w) == SIGILL
3339 || WSTOPSIG (w) == SIGSEGV)
3340 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3341
3342 if (maybe_internal_trap)
3343 {
3344 /* Handle anything that requires bookkeeping before deciding to
3345 report the event or continue waiting. */
3346
3347 /* First check if we can explain the SIGTRAP with an internal
3348 breakpoint, or if we should possibly report the event to GDB.
3349 Do this before anything that may remove or insert a
3350 breakpoint. */
3351 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3352
3353 /* We have a SIGTRAP, possibly a step-over dance has just
3354 finished. If so, tweak the state machine accordingly,
3355 reinsert breakpoints and delete any single-step
3356 breakpoints. */
3357 step_over_finished = finish_step_over (event_child);
3358
3359 /* Now invoke the callbacks of any internal breakpoints there. */
3360 check_breakpoints (event_child->stop_pc);
3361
3362 /* Handle tracepoint data collecting. This may overflow the
3363 trace buffer, and cause a tracing stop, removing
3364 breakpoints. */
3365 trace_event = handle_tracepoints (event_child);
3366
3367 if (bp_explains_trap)
3368 {
3369 if (debug_threads)
3370 debug_printf ("Hit a gdbserver breakpoint.\n");
3371 }
3372 }
3373 else
3374 {
3375 /* We have some other signal, possibly a step-over dance was in
3376 progress, and it should be cancelled too. */
3377 step_over_finished = finish_step_over (event_child);
3378 }
3379
3380 /* We have all the data we need. Either report the event to GDB, or
3381 resume threads and keep waiting for more. */
3382
3383 /* If we're collecting a fast tracepoint, finish the collection and
3384 move out of the jump pad before delivering a signal. See
3385 linux_stabilize_threads. */
3386
3387 if (WIFSTOPPED (w)
3388 && WSTOPSIG (w) != SIGTRAP
3389 && supports_fast_tracepoints ()
3390 && agent_loaded_p ())
3391 {
3392 if (debug_threads)
3393 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3394 "to defer or adjust it.\n",
3395 WSTOPSIG (w), lwpid_of (current_thread));
3396
3397 /* Allow debugging the jump pad itself. */
3398 if (current_thread->last_resume_kind != resume_step
3399 && maybe_move_out_of_jump_pad (event_child, &w))
3400 {
3401 enqueue_one_deferred_signal (event_child, &w);
3402
3403 if (debug_threads)
3404 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3405 WSTOPSIG (w), lwpid_of (current_thread));
3406
3407 linux_resume_one_lwp (event_child, 0, 0, NULL);
3408
3409 if (debug_threads)
3410 debug_exit ();
3411 return ignore_event (ourstatus);
3412 }
3413 }
3414
3415 if (event_child->collecting_fast_tracepoint
3416 != fast_tpoint_collect_result::not_collecting)
3417 {
3418 if (debug_threads)
3419 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3420 "Check if we're already there.\n",
3421 lwpid_of (current_thread),
3422 (int) event_child->collecting_fast_tracepoint);
3423
3424 trace_event = 1;
3425
3426 event_child->collecting_fast_tracepoint
3427 = linux_fast_tracepoint_collecting (event_child, NULL);
3428
3429 if (event_child->collecting_fast_tracepoint
3430 != fast_tpoint_collect_result::before_insn)
3431 {
3432 /* No longer need this breakpoint. */
3433 if (event_child->exit_jump_pad_bkpt != NULL)
3434 {
3435 if (debug_threads)
3436 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3437 "stopping all threads momentarily.\n");
3438
3439 /* Other running threads could hit this breakpoint.
3440 We don't handle moribund locations like GDB does,
3441 instead we always pause all threads when removing
3442 breakpoints, so that any step-over or
3443 decr_pc_after_break adjustment is always taken
3444 care of while the breakpoint is still
3445 inserted. */
3446 stop_all_lwps (1, event_child);
3447
3448 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3449 event_child->exit_jump_pad_bkpt = NULL;
3450
3451 unstop_all_lwps (1, event_child);
3452
3453 gdb_assert (event_child->suspended >= 0);
3454 }
3455 }
3456
3457 if (event_child->collecting_fast_tracepoint
3458 == fast_tpoint_collect_result::not_collecting)
3459 {
3460 if (debug_threads)
3461 debug_printf ("fast tracepoint finished "
3462 "collecting successfully.\n");
3463
3464 /* We may have a deferred signal to report. */
3465 if (dequeue_one_deferred_signal (event_child, &w))
3466 {
3467 if (debug_threads)
3468 debug_printf ("dequeued one signal.\n");
3469 }
3470 else
3471 {
3472 if (debug_threads)
3473 debug_printf ("no deferred signals.\n");
3474
3475 if (stabilizing_threads)
3476 {
3477 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3478 ourstatus->value.sig = GDB_SIGNAL_0;
3479
3480 if (debug_threads)
3481 {
3482 debug_printf ("linux_wait_1 ret = %s, stopped "
3483 "while stabilizing threads\n",
3484 target_pid_to_str (ptid_of (current_thread)));
3485 debug_exit ();
3486 }
3487
3488 return ptid_of (current_thread);
3489 }
3490 }
3491 }
3492 }
3493
3494 /* Check whether GDB would be interested in this event. */
3495
3496 /* Check if GDB is interested in this syscall. */
3497 if (WIFSTOPPED (w)
3498 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3499 && !gdb_catch_this_syscall_p (event_child))
3500 {
3501 if (debug_threads)
3502 {
3503 debug_printf ("Ignored syscall for LWP %ld.\n",
3504 lwpid_of (current_thread));
3505 }
3506
3507 linux_resume_one_lwp (event_child, event_child->stepping,
3508 0, NULL);
3509
3510 if (debug_threads)
3511 debug_exit ();
3512 return ignore_event (ourstatus);
3513 }
3514
3515 /* If GDB is not interested in this signal, don't stop other
3516 threads, and don't report it to GDB. Just resume the inferior
3517 right away. We do this for threading-related signals as well as
3518 any that GDB specifically requested we ignore. But never ignore
3519 SIGSTOP if we sent it ourselves, and do not ignore signals when
3520 stepping - they may require special handling to skip the signal
3521 handler. Also never ignore signals that could be caused by a
3522 breakpoint. */
3523 if (WIFSTOPPED (w)
3524 && current_thread->last_resume_kind != resume_step
3525 && (
3526 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3527 (current_process ()->priv->thread_db != NULL
3528 && (WSTOPSIG (w) == __SIGRTMIN
3529 || WSTOPSIG (w) == __SIGRTMIN + 1))
3530 ||
3531 #endif
3532 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3533 && !(WSTOPSIG (w) == SIGSTOP
3534 && current_thread->last_resume_kind == resume_stop)
3535 && !linux_wstatus_maybe_breakpoint (w))))
3536 {
3537 siginfo_t info, *info_p;
3538
3539 if (debug_threads)
3540 debug_printf ("Ignored signal %d for LWP %ld.\n",
3541 WSTOPSIG (w), lwpid_of (current_thread));
3542
3543 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3544 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3545 info_p = &info;
3546 else
3547 info_p = NULL;
3548
3549 if (step_over_finished)
3550 {
3551 /* We cancelled this thread's step-over above. We still
3552 need to unsuspend all other LWPs, and set them back
3553 running again while the signal handler runs. */
3554 unsuspend_all_lwps (event_child);
3555
3556 /* Enqueue the pending signal info so that proceed_all_lwps
3557 doesn't lose it. */
3558 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3559
3560 proceed_all_lwps ();
3561 }
3562 else
3563 {
3564 linux_resume_one_lwp (event_child, event_child->stepping,
3565 WSTOPSIG (w), info_p);
3566 }
3567
3568 if (debug_threads)
3569 debug_exit ();
3570
3571 return ignore_event (ourstatus);
3572 }
3573
3574 /* Note that all addresses are always "out of the step range" when
3575 there's no range to begin with. */
3576 in_step_range = lwp_in_step_range (event_child);
3577
3578 /* If GDB wanted this thread to single step, and the thread is out
3579 of the step range, we always want to report the SIGTRAP, and let
3580 GDB handle it. Watchpoints should always be reported. So should
3581 signals we can't explain. A SIGTRAP we can't explain could be a
3582 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3583 do, we'd be able to handle GDB breakpoints on top of internal
3584 breakpoints, by handling the internal breakpoint and still
3585 reporting the event to GDB. If we don't, we're out of luck, GDB
3586 won't see the breakpoint hit. If we see a single-step event but
3587 the thread should be continuing, don't pass the trap to gdb.
3588 That indicates that we had previously finished a single-step but
3589 left the single-step pending -- see
3590 complete_ongoing_step_over. */
3591 report_to_gdb = (!maybe_internal_trap
3592 || (current_thread->last_resume_kind == resume_step
3593 && !in_step_range)
3594 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3595 || (!in_step_range
3596 && !bp_explains_trap
3597 && !trace_event
3598 && !step_over_finished
3599 && !(current_thread->last_resume_kind == resume_continue
3600 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3601 || (gdb_breakpoint_here (event_child->stop_pc)
3602 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3603 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3604 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3605
3606 run_breakpoint_commands (event_child->stop_pc);
3607
3608 /* We found no reason GDB would want us to stop. We either hit one
3609 of our own breakpoints, or finished an internal step GDB
3610 shouldn't know about. */
3611 if (!report_to_gdb)
3612 {
3613 if (debug_threads)
3614 {
3615 if (bp_explains_trap)
3616 debug_printf ("Hit a gdbserver breakpoint.\n");
3617 if (step_over_finished)
3618 debug_printf ("Step-over finished.\n");
3619 if (trace_event)
3620 debug_printf ("Tracepoint event.\n");
3621 if (lwp_in_step_range (event_child))
3622 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3623 paddress (event_child->stop_pc),
3624 paddress (event_child->step_range_start),
3625 paddress (event_child->step_range_end));
3626 }
3627
3628 /* We're not reporting this breakpoint to GDB, so apply the
3629 decr_pc_after_break adjustment to the inferior's regcache
3630 ourselves. */
3631
3632 if (the_low_target.set_pc != NULL)
3633 {
3634 struct regcache *regcache
3635 = get_thread_regcache (current_thread, 1);
3636 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3637 }
3638
3639 if (step_over_finished)
3640 {
3641 /* If we have finished stepping over a breakpoint, we've
3642 stopped and suspended all LWPs momentarily except the
3643 stepping one. This is where we resume them all again.
3644 We're going to keep waiting, so use proceed, which
3645 handles stepping over the next breakpoint. */
3646 unsuspend_all_lwps (event_child);
3647 }
3648 else
3649 {
3650 /* Remove the single-step breakpoints, if any. Note that there
3651 are no single-step breakpoints if we just finished stepping
3652 over. */
3653 if (can_software_single_step ()
3654 && has_single_step_breakpoints (current_thread))
3655 {
3656 stop_all_lwps (0, event_child);
3657 delete_single_step_breakpoints (current_thread);
3658 unstop_all_lwps (0, event_child);
3659 }
3660 }
3661
3662 if (debug_threads)
3663 debug_printf ("proceeding all threads.\n");
3664 proceed_all_lwps ();
3665
3666 if (debug_threads)
3667 debug_exit ();
3668
3669 return ignore_event (ourstatus);
3670 }
3671
3672 if (debug_threads)
3673 {
3674 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3675 {
3676 std::string str
3677 = target_waitstatus_to_string (&event_child->waitstatus);
3678
3679 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3680 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3681 }
3682 if (current_thread->last_resume_kind == resume_step)
3683 {
3684 if (event_child->step_range_start == event_child->step_range_end)
3685 debug_printf ("GDB wanted to single-step, reporting event.\n");
3686 else if (!lwp_in_step_range (event_child))
3687 debug_printf ("Out of step range, reporting event.\n");
3688 }
3689 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3690 debug_printf ("Stopped by watchpoint.\n");
3691 else if (gdb_breakpoint_here (event_child->stop_pc))
3692 debug_printf ("Stopped by GDB breakpoint.\n");
3693 debug_printf ("Hit a non-gdbserver trap event.\n");
3695 }
3696
3697 /* Alright, we're going to report a stop. */
3698
3699 /* Remove single-step breakpoints. */
3700 if (can_software_single_step ())
3701 {
3702 /* Whether to remove the single-step breakpoints. If true, stop
3703 all lwps first, so that other threads won't hit a breakpoint
3704 left in stale memory. */
3705 int remove_single_step_breakpoints_p = 0;
3706
3707 if (non_stop)
3708 {
3709 remove_single_step_breakpoints_p
3710 = has_single_step_breakpoints (current_thread);
3711 }
3712 else
3713 {
3714 /* In all-stop, a stop reply cancels all previous resume
3715 requests. Delete all single-step breakpoints. */
3716
3717 find_thread ([&] (thread_info *thread) {
3718 if (has_single_step_breakpoints (thread))
3719 {
3720 remove_single_step_breakpoints_p = 1;
3721 return true;
3722 }
3723
3724 return false;
3725 });
3726 }
3727
3728 if (remove_single_step_breakpoints_p)
3729 {
3730 /* If we remove single-step breakpoints from memory, stop all lwps,
3731 so that other threads won't hit the breakpoint in the stale
3732 memory. */
3733 stop_all_lwps (0, event_child);
3734
3735 if (non_stop)
3736 {
3737 gdb_assert (has_single_step_breakpoints (current_thread));
3738 delete_single_step_breakpoints (current_thread);
3739 }
3740 else
3741 {
3742 for_each_thread ([] (thread_info *thread){
3743 if (has_single_step_breakpoints (thread))
3744 delete_single_step_breakpoints (thread);
3745 });
3746 }
3747
3748 unstop_all_lwps (0, event_child);
3749 }
3750 }
3751
3752 if (!stabilizing_threads)
3753 {
3754 /* In all-stop, stop all threads. */
3755 if (!non_stop)
3756 stop_all_lwps (0, NULL);
3757
3758 if (step_over_finished)
3759 {
3760 if (!non_stop)
3761 {
3762 /* If we were doing a step-over, all other threads but
3763 the stepping one had been paused in start_step_over,
3764 with their suspend counts incremented. We don't want
3765 to do a full unstop/unpause, because we're in
3766 all-stop mode (so we want threads stopped), but we
3767 still need to unsuspend the other threads, to
3768 decrement their `suspended' count back. */
3769 unsuspend_all_lwps (event_child);
3770 }
3771 else
3772 {
3773 /* If we just finished a step-over, then all threads had
3774 been momentarily paused. In all-stop, that's fine,
3775 we want threads stopped by now anyway. In non-stop,
3776 we need to re-resume threads that GDB wanted to be
3777 running. */
3778 unstop_all_lwps (1, event_child);
3779 }
3780 }
3781
3782 /* If we're not waiting for a specific LWP, choose an event LWP
3783 from among those that have had events. Giving equal priority
3784 to all LWPs that have had events helps prevent
3785 starvation. */
3786 if (ptid_equal (ptid, minus_one_ptid))
3787 {
3788 event_child->status_pending_p = 1;
3789 event_child->status_pending = w;
3790
3791 select_event_lwp (&event_child);
3792
3793 /* current_thread and event_child must stay in sync. */
3794 current_thread = get_lwp_thread (event_child);
3795
3796 event_child->status_pending_p = 0;
3797 w = event_child->status_pending;
3798 }
3799
3800
3801 /* Stabilize threads (move out of jump pads). */
3802 if (!non_stop)
3803 stabilize_threads ();
3804 }
3805 else
3806 {
3807 /* If we just finished a step-over, then all threads had been
3808 momentarily paused. In all-stop, that's fine, we want
3809 threads stopped by now anyway. In non-stop, we need to
3810 re-resume threads that GDB wanted to be running. */
3811 if (step_over_finished)
3812 unstop_all_lwps (1, event_child);
3813 }
3814
3815 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3816 {
3817 /* If the reported event is an exit, fork, vfork or exec, let
3818 GDB know. */
3819
3820 /* Break the unreported fork relationship chain. */
3821 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3822 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3823 {
3824 event_child->fork_relative->fork_relative = NULL;
3825 event_child->fork_relative = NULL;
3826 }
3827
3828 *ourstatus = event_child->waitstatus;
3829 /* Clear the event lwp's waitstatus since we handled it already. */
3830 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3831 }
3832 else
3833 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3834
3835 /* Now that we've selected our final event LWP, un-adjust its PC if
3836 it was a software breakpoint, and the client doesn't know we can
3837 adjust the breakpoint ourselves. */
3838 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3839 && !swbreak_feature)
3840 {
3841 int decr_pc = the_low_target.decr_pc_after_break;
3842
3843 if (decr_pc != 0)
3844 {
3845 struct regcache *regcache
3846 = get_thread_regcache (current_thread, 1);
3847 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3848 }
3849 }
3850
3851 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3852 {
3853 get_syscall_trapinfo (event_child,
3854 &ourstatus->value.syscall_number);
3855 ourstatus->kind = event_child->syscall_state;
3856 }
3857 else if (current_thread->last_resume_kind == resume_stop
3858 && WSTOPSIG (w) == SIGSTOP)
3859 {
3860 /* A thread that has been requested to stop by GDB with vCont;t,
3861 and that stopped cleanly, so report it as SIG0. The use of
3862 SIGSTOP is an implementation detail. */
3863 ourstatus->value.sig = GDB_SIGNAL_0;
3864 }
3865 else if (current_thread->last_resume_kind == resume_stop
3866 && WSTOPSIG (w) != SIGSTOP)
3867 {
3868 /* A thread that has been requested to stop by GDB with vCont;t,
3869 but that stopped for other reasons. */
3870 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3871 }
3872 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3873 {
3874 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3875 }
3876
3877 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3878
3879 if (debug_threads)
3880 {
3881 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3882 target_pid_to_str (ptid_of (current_thread)),
3883 ourstatus->kind, ourstatus->value.sig);
3884 debug_exit ();
3885 }
3886
3887 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3888 return filter_exit_event (event_child, ourstatus);
3889
3890 return ptid_of (current_thread);
3891 }
3892
3893 /* Get rid of any pending event in the pipe. */
3894 static void
3895 async_file_flush (void)
3896 {
3897 int ret;
3898 char buf;
3899
3900 do
3901 ret = read (linux_event_pipe[0], &buf, 1);
3902 while (ret >= 0 || (ret == -1 && errno == EINTR));
3903 }
3904
3905 /* Put something in the pipe, so the event loop wakes up. */
3906 static void
3907 async_file_mark (void)
3908 {
3909 int ret;
3910
3911 async_file_flush ();
3912
3913 do
3914 ret = write (linux_event_pipe[1], "+", 1);
3915 while (ret == 0 || (ret == -1 && errno == EINTR));
3916
3917 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3918 be awakened anyway. */
3919 }
3920
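/* Illustrative sketch (not part of linux-low.c): the self-pipe
   wakeup pattern that async_file_flush/async_file_mark implement
   above, reduced to standalone POSIX calls. All names here are
   hypothetical. */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int toy_event_pipe[2];

/* Create the pipe and make both ends non-blocking, so that neither
   draining nor marking can ever block the server. */
static int
toy_event_pipe_init (void)
{
  if (pipe (toy_event_pipe) != 0)
    return -1;
  fcntl (toy_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (toy_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

/* Drain stale wakeups, then write a single byte. EAGAIN on write is
   harmless: a full pipe means the event loop will wake up anyway. */
static void
toy_event_pipe_mark (void)
{
  char buf;
  int ret;

  do
    ret = read (toy_event_pipe[0], &buf, 1);
  while (ret > 0 || (ret == -1 && errno == EINTR));

  do
    ret = write (toy_event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));
}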
3921 static ptid_t
3922 linux_wait (ptid_t ptid,
3923 struct target_waitstatus *ourstatus, int target_options)
3924 {
3925 ptid_t event_ptid;
3926
3927 /* Flush the async file first. */
3928 if (target_is_async_p ())
3929 async_file_flush ();
3930
3931 do
3932 {
3933 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3934 }
3935 while ((target_options & TARGET_WNOHANG) == 0
3936 && ptid_equal (event_ptid, null_ptid)
3937 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3938
3939 /* If at least one stop was reported, there may be more. A single
3940 SIGCHLD can signal more than one child stop. */
3941 if (target_is_async_p ()
3942 && (target_options & TARGET_WNOHANG) != 0
3943 && !ptid_equal (event_ptid, null_ptid))
3944 async_file_mark ();
3945
3946 return event_ptid;
3947 }
3948
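/* Illustrative sketch (not part of linux-low.c) of why linux_wait
   re-marks the event pipe above: one SIGCHLD can coalesce several
   child stops, so a robust consumer keeps calling waitpid with
   WNOHANG until nothing is left. toy_handle_stop is a hypothetical
   stand-in for the real event processing. */

#include <sys/types.h>
#include <sys/wait.h>

extern void toy_handle_stop (pid_t pid, int status);	/* hypothetical */

static void
toy_drain_child_events (void)
{
  for (;;)
    {
      int status;
      /* __WALL asks for events from all children, including clone
	 children (threads); it is Linux-specific. */
      pid_t pid = waitpid (-1, &status, WNOHANG | __WALL);

      if (pid <= 0)
	break;	/* 0: nothing pending; -1: no children (ECHILD). */
      toy_handle_stop (pid, status);
    }
}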
3949 /* Send a signal to an LWP. */
3950
3951 static int
3952 kill_lwp (unsigned long lwpid, int signo)
3953 {
3954 int ret;
3955
3956 errno = 0;
3957 ret = syscall (__NR_tkill, lwpid, signo);
3958 if (errno == ENOSYS)
3959 {
3960 /* If tkill fails, then we are not using nptl threads, a
3961 configuration we no longer support. */
3962 perror_with_name (("tkill"));
3963 }
3964 return ret;
3965 }
3966
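/* Illustrative sketch (not part of linux-low.c): kill(2) targets a
   whole thread group, so kill_lwp above must use the tkill syscall
   to direct a signal at one specific thread. tkill has no glibc
   wrapper, hence syscall(). tgkill(2) is the modern variant that
   additionally checks the thread-group ID to guard against TID
   reuse. */

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
toy_signal_one_thread (pid_t tid, int signo)
{
  return syscall (SYS_tkill, tid, signo);
}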
3967 void
3968 linux_stop_lwp (struct lwp_info *lwp)
3969 {
3970 send_sigstop (lwp);
3971 }
3972
3973 static void
3974 send_sigstop (struct lwp_info *lwp)
3975 {
3976 int pid;
3977
3978 pid = lwpid_of (get_lwp_thread (lwp));
3979
3980 /* If we already have a pending stop signal for this process, don't
3981 send another. */
3982 if (lwp->stop_expected)
3983 {
3984 if (debug_threads)
3985 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3986
3987 return;
3988 }
3989
3990 if (debug_threads)
3991 debug_printf ("Sending sigstop to lwp %d\n", pid);
3992
3993 lwp->stop_expected = 1;
3994 kill_lwp (pid, SIGSTOP);
3995 }
3996
3997 static int
3998 send_sigstop_callback (thread_info *thread, void *except)
3999 {
4000 struct lwp_info *lwp = get_thread_lwp (thread);
4001
4002 /* Ignore EXCEPT. */
4003 if (lwp == except)
4004 return 0;
4005
4006 if (lwp->stopped)
4007 return 0;
4008
4009 send_sigstop (lwp);
4010 return 0;
4011 }
4012
4013 /* Increment the suspend count of an LWP, and stop it, if not stopped
4014 yet. */
4015 static int
4016 suspend_and_send_sigstop_callback (thread_info *thread, void *except)
4017 {
4018 struct lwp_info *lwp = get_thread_lwp (thread);
4019
4020 /* Ignore EXCEPT. */
4021 if (lwp == except)
4022 return 0;
4023
4024 lwp_suspended_inc (lwp);
4025
4026 return send_sigstop_callback (thread, except);
4027 }
4028
4029 static void
4030 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4031 {
4032 /* Store the exit status for later. */
4033 lwp->status_pending_p = 1;
4034 lwp->status_pending = wstat;
4035
4036 /* Store in waitstatus as well, as there's nothing else to process
4037 for this event. */
4038 if (WIFEXITED (wstat))
4039 {
4040 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4041 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4042 }
4043 else if (WIFSIGNALED (wstat))
4044 {
4045 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4046 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4047 }
4048
4049 /* Prevent trying to stop it. */
4050 lwp->stopped = 1;
4051
4052 /* No further stops are expected from a dead lwp. */
4053 lwp->stop_expected = 0;
4054 }
4055
4056 /* Return true if LWP has exited already, and has a pending exit event
4057 to report to GDB. */
4058
4059 static int
4060 lwp_is_marked_dead (struct lwp_info *lwp)
4061 {
4062 return (lwp->status_pending_p
4063 && (WIFEXITED (lwp->status_pending)
4064 || WIFSIGNALED (lwp->status_pending)));
4065 }
4066
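/* Illustrative sketch (not part of linux-low.c) of the wait-status
   decoding mark_lwp_dead relies on above: a raw waitpid status is
   exactly one of exited / terminated-by-signal / stopped, and the
   W* macros pick it apart. */

#include <stdio.h>
#include <sys/wait.h>

static void
toy_describe_wait_status (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("terminated by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}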
4067 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4068
4069 static void
4070 wait_for_sigstop (void)
4071 {
4072 struct thread_info *saved_thread;
4073 ptid_t saved_tid;
4074 int wstat;
4075 int ret;
4076
4077 saved_thread = current_thread;
4078 if (saved_thread != NULL)
4079 saved_tid = saved_thread->id;
4080 else
4081 saved_tid = null_ptid; /* avoid bogus unused warning */
4082
4083 if (debug_threads)
4084 debug_printf ("wait_for_sigstop: pulling events\n");
4085
4086 /* Passing NULL_PTID as filter indicates we want all events to be
4087 left pending. Eventually this returns when there are no
4088 unwaited-for children left. */
4089 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4090 &wstat, __WALL);
4091 gdb_assert (ret == -1);
4092
4093 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4094 current_thread = saved_thread;
4095 else
4096 {
4097 if (debug_threads)
4098 debug_printf ("Previously current thread died.\n");
4099
4100 /* We can't change the current inferior behind GDB's back,
4101 otherwise, a subsequent command may apply to the wrong
4102 process. */
4103 current_thread = NULL;
4104 }
4105 }
4106
4107 /* Returns true if THREAD is stopped in a jump pad, and we can't
4108 move it out, because we need to report the stop event to GDB. For
4109 example, if the user puts a breakpoint in the jump pad, it's
4110 because she wants to debug it. */
4111
4112 static bool
4113 stuck_in_jump_pad_callback (thread_info *thread)
4114 {
4115 struct lwp_info *lwp = get_thread_lwp (thread);
4116
4117 if (lwp->suspended != 0)
4118 {
4119 internal_error (__FILE__, __LINE__,
4120 "LWP %ld is suspended, suspended=%d\n",
4121 lwpid_of (thread), lwp->suspended);
4122 }
4123 gdb_assert (lwp->stopped);
4124
4125 /* Allow debugging the jump pad, gdb_collect, etc. */
4126 return (supports_fast_tracepoints ()
4127 && agent_loaded_p ()
4128 && (gdb_breakpoint_here (lwp->stop_pc)
4129 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4130 || thread->last_resume_kind == resume_step)
4131 && (linux_fast_tracepoint_collecting (lwp, NULL)
4132 != fast_tpoint_collect_result::not_collecting));
4133 }
4134
4135 static void
4136 move_out_of_jump_pad_callback (thread_info *thread)
4137 {
4138 struct thread_info *saved_thread;
4139 struct lwp_info *lwp = get_thread_lwp (thread);
4140 int *wstat;
4141
4142 if (lwp->suspended != 0)
4143 {
4144 internal_error (__FILE__, __LINE__,
4145 "LWP %ld is suspended, suspended=%d\n",
4146 lwpid_of (thread), lwp->suspended);
4147 }
4148 gdb_assert (lwp->stopped);
4149
4150 /* For gdb_breakpoint_here. */
4151 saved_thread = current_thread;
4152 current_thread = thread;
4153
4154 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4155
4156 /* Allow debugging the jump pad, gdb_collect, etc. */
4157 if (!gdb_breakpoint_here (lwp->stop_pc)
4158 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4159 && thread->last_resume_kind != resume_step
4160 && maybe_move_out_of_jump_pad (lwp, wstat))
4161 {
4162 if (debug_threads)
4163 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4164 lwpid_of (thread));
4165
4166 if (wstat)
4167 {
4168 lwp->status_pending_p = 0;
4169 enqueue_one_deferred_signal (lwp, wstat);
4170
4171 if (debug_threads)
4172 debug_printf ("Signal %d for LWP %ld deferred "
4173 "(in jump pad)\n",
4174 WSTOPSIG (*wstat), lwpid_of (thread));
4175 }
4176
4177 linux_resume_one_lwp (lwp, 0, 0, NULL);
4178 }
4179 else
4180 lwp_suspended_inc (lwp);
4181
4182 current_thread = saved_thread;
4183 }
4184
4185 static int
4186 lwp_running (thread_info *thread, void *data)
4187 {
4188 struct lwp_info *lwp = get_thread_lwp (thread);
4189
4190 if (lwp_is_marked_dead (lwp))
4191 return 0;
4192 if (lwp->stopped)
4193 return 0;
4194 return 1;
4195 }
4196
4197 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4198 If SUSPEND, then also increase the suspend count of every LWP,
4199 except EXCEPT. */
4200
4201 static void
4202 stop_all_lwps (int suspend, struct lwp_info *except)
4203 {
4204 /* Should not be called recursively. */
4205 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4206
4207 if (debug_threads)
4208 {
4209 debug_enter ();
4210 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4211 suspend ? "stop-and-suspend" : "stop",
4212 except != NULL
4213 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4214 : "none");
4215 }
4216
4217 stopping_threads = (suspend
4218 ? STOPPING_AND_SUSPENDING_THREADS
4219 : STOPPING_THREADS);
4220
4221 if (suspend)
4222 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4223 else
4224 find_inferior (&all_threads, send_sigstop_callback, except);
4225 wait_for_sigstop ();
4226 stopping_threads = NOT_STOPPING_THREADS;
4227
4228 if (debug_threads)
4229 {
4230 debug_printf ("stop_all_lwps done, setting stopping_threads "
4231 "back to !stopping\n");
4232 debug_exit ();
4233 }
4234 }
4235
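/* Illustrative sketch (not part of linux-low.c): the two-phase
   stop-all protocol above, condensed. Phase one sends SIGSTOP to
   every LWP not already stopped; phase two reaps stop events. The
   toy_lwp array is a hypothetical stand-in for gdbserver's thread
   list, and unlike wait_for_sigstop this toy version does not keep
   non-SIGSTOP events pending for later reporting. */

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

struct toy_lwp { pid_t tid; int stopped; int stop_expected; };

static void
toy_stop_all (struct toy_lwp *lwps, int nlwps)
{
  int i;

  for (i = 0; i < nlwps; i++)
    if (!lwps[i].stopped && !lwps[i].stop_expected)
      {
	lwps[i].stop_expected = 1;
	syscall (SYS_tkill, lwps[i].tid, SIGSTOP);
      }

  for (i = 0; i < nlwps; i++)
    while (!lwps[i].stopped)
      {
	int status;

	if (waitpid (lwps[i].tid, &status, __WALL) < 0)
	  break;
	if (WIFSTOPPED (status))
	  lwps[i].stopped = 1;
      }
}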
4236 /* Enqueue one signal in the chain of signals which need to be
4237 delivered to this process on next resume. */
4238
4239 static void
4240 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4241 {
4242 struct pending_signals *p_sig = XNEW (struct pending_signals);
4243
4244 p_sig->prev = lwp->pending_signals;
4245 p_sig->signal = signal;
4246 if (info == NULL)
4247 memset (&p_sig->info, 0, sizeof (siginfo_t));
4248 else
4249 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4250 lwp->pending_signals = p_sig;
4251 }
4252
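/* Illustrative sketch (not part of linux-low.c) of how the chain
   built above is consumed: entries are pushed at the head, so the
   oldest signal sits at the tail of the 'prev' chain. Delivery (see
   linux_resume_one_lwp_throw below) walks to that tail and unlinks
   it, which yields FIFO signal order. Toy types only. */

#include <stdlib.h>

struct toy_pending { struct toy_pending *prev; int signal; };

/* HEAD must be non-NULL; returns the oldest queued signal. */
static int
toy_dequeue_oldest (struct toy_pending **head)
{
  struct toy_pending **p = head;
  int sig;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;		/* walk to the tail (oldest entry) */

  sig = (*p)->signal;
  free (*p);
  *p = NULL;			/* unlink it */
  return sig;
}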
4253 /* Install breakpoints for software single stepping. */
4254
4255 static void
4256 install_software_single_step_breakpoints (struct lwp_info *lwp)
4257 {
4258 struct thread_info *thread = get_lwp_thread (lwp);
4259 struct regcache *regcache = get_thread_regcache (thread, 1);
4260 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4261
4262 current_thread = thread;
4263 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4264
4265 for (CORE_ADDR pc : next_pcs)
4266 set_single_step_breakpoint (pc, current_ptid);
4267
4268 do_cleanups (old_chain);
4269 }
4270
4271 /* Single step via hardware or software single step.
4272 Return 1 if hardware single stepping, 0 if software single stepping
4273 or can't single step. */
4274
4275 static int
4276 single_step (struct lwp_info* lwp)
4277 {
4278 int step = 0;
4279
4280 if (can_hardware_single_step ())
4281 {
4282 step = 1;
4283 }
4284 else if (can_software_single_step ())
4285 {
4286 install_software_single_step_breakpoints (lwp);
4287 step = 0;
4288 }
4289 else
4290 {
4291 if (debug_threads)
4292 debug_printf ("stepping is not implemented on this target\n");
4293 }
4294
4295 return step;
4296 }
4297
4298 /* The signal can be delivered to the inferior if we are not trying to
4299 finish a fast tracepoint collect. Since a signal can be delivered
4300 during the step-over, the program may enter the signal handler and
4301 trap again after returning from it. We can live with the spurious
4302 double traps. */
4303
4304 static int
4305 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4306 {
4307 return (lwp->collecting_fast_tracepoint
4308 == fast_tpoint_collect_result::not_collecting);
4309 }
4310
4311 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4312 SIGNAL is nonzero, give it that signal. */
4313
4314 static void
4315 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4316 int step, int signal, siginfo_t *info)
4317 {
4318 struct thread_info *thread = get_lwp_thread (lwp);
4319 struct thread_info *saved_thread;
4320 int ptrace_request;
4321 struct process_info *proc = get_thread_process (thread);
4322
4323 /* Note that target description may not be initialised
4324 (proc->tdesc == NULL) at this point because the program hasn't
4325 stopped at the first instruction yet. It means GDBserver skips
4326 the extra traps from the wrapper program (see option --wrapper).
4327 Code in this function that requires register access should be
4328 guarded by a check such as proc->tdesc != NULL. */
4329
4330 if (lwp->stopped == 0)
4331 return;
4332
4333 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4334
4335 fast_tpoint_collect_result fast_tp_collecting
4336 = lwp->collecting_fast_tracepoint;
4337
4338 gdb_assert (!stabilizing_threads
4339 || (fast_tp_collecting
4340 != fast_tpoint_collect_result::not_collecting));
4341
4342 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4343 user used the "jump" command, or "set $pc = foo"). */
4344 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4345 {
4346 /* Collecting 'while-stepping' actions doesn't make sense
4347 anymore. */
4348 release_while_stepping_state_list (thread);
4349 }
4350
4351 /* If we have pending signals or status, and a new signal, enqueue the
4352 signal. Also enqueue the signal if it can't be delivered to the
4353 inferior right now. */
4354 if (signal != 0
4355 && (lwp->status_pending_p
4356 || lwp->pending_signals != NULL
4357 || !lwp_signal_can_be_delivered (lwp)))
4358 {
4359 enqueue_pending_signal (lwp, signal, info);
4360
4361 /* Postpone any pending signal. It was enqueued above. */
4362 signal = 0;
4363 }
4364
4365 if (lwp->status_pending_p)
4366 {
4367 if (debug_threads)
4368 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4369 " has pending status\n",
4370 lwpid_of (thread), step ? "step" : "continue",
4371 lwp->stop_expected ? "expected" : "not expected");
4372 return;
4373 }
4374
4375 saved_thread = current_thread;
4376 current_thread = thread;
4377
4378 /* This bit needs some thinking about. If we get a signal that
4379 we must report while a single-step reinsert is still pending,
4380 we often end up resuming the thread. It might be better to
4381 (ew) allow a stack of pending events; then we could be sure that
4382 the reinsert happened right away and not lose any signals.
4383
4384 Making this stack would also shrink the window in which breakpoints are
4385 uninserted (see comment in linux_wait_for_lwp) but not enough for
4386 complete correctness, so it won't solve that problem. It may be
4387 worthwhile just to solve this one, however. */
4388 if (lwp->bp_reinsert != 0)
4389 {
4390 if (debug_threads)
4391 debug_printf (" pending reinsert at 0x%s\n",
4392 paddress (lwp->bp_reinsert));
4393
4394 if (can_hardware_single_step ())
4395 {
4396 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4397 {
4398 if (step == 0)
4399 warning ("BAD - reinserting but not stepping.");
4400 if (lwp->suspended)
4401 warning ("BAD - reinserting and suspended(%d).",
4402 lwp->suspended);
4403 }
4404 }
4405
4406 step = maybe_hw_step (thread);
4407 }
4408
4409 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4410 {
4411 if (debug_threads)
4412 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4413 " (exit-jump-pad-bkpt)\n",
4414 lwpid_of (thread));
4415 }
4416 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4417 {
4418 if (debug_threads)
4419 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4420 " single-stepping\n",
4421 lwpid_of (thread));
4422
4423 if (can_hardware_single_step ())
4424 step = 1;
4425 else
4426 {
4427 internal_error (__FILE__, __LINE__,
4428 "moving out of jump pad single-stepping"
4429 " not implemented on this target");
4430 }
4431 }
4432
4433 /* If we have while-stepping actions in this thread, set it stepping.
4434 If we have a signal to deliver, it may or may not be set to
4435 SIG_IGN, we don't know. Assume so, and allow collecting
4436 while-stepping into a signal handler. A possible smart thing to
4437 do would be to set an internal breakpoint at the signal return
4438 address, continue, and carry on catching this while-stepping
4439 action only when that breakpoint is hit. A future
4440 enhancement. */
4441 if (thread->while_stepping != NULL)
4442 {
4443 if (debug_threads)
4444 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4445 lwpid_of (thread));
4446
4447 step = single_step (lwp);
4448 }
4449
4450 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4451 {
4452 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4453
4454 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4455
4456 if (debug_threads)
4457 {
4458 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4459 (long) lwp->stop_pc);
4460 }
4461 }
4462
4463 /* If we have pending signals, consume one if it can be delivered to
4464 the inferior. */
4465 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4466 {
4467 struct pending_signals **p_sig;
4468
4469 p_sig = &lwp->pending_signals;
4470 while ((*p_sig)->prev != NULL)
4471 p_sig = &(*p_sig)->prev;
4472
4473 signal = (*p_sig)->signal;
4474 if ((*p_sig)->info.si_signo != 0)
4475 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4476 &(*p_sig)->info);
4477
4478 free (*p_sig);
4479 *p_sig = NULL;
4480 }
4481
4482 if (debug_threads)
4483 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4484 lwpid_of (thread), step ? "step" : "continue", signal,
4485 lwp->stop_expected ? "expected" : "not expected");
4486
4487 if (the_low_target.prepare_to_resume != NULL)
4488 the_low_target.prepare_to_resume (lwp);
4489
4490 regcache_invalidate_thread (thread);
4491 errno = 0;
4492 lwp->stepping = step;
4493 if (step)
4494 ptrace_request = PTRACE_SINGLESTEP;
4495 else if (gdb_catching_syscalls_p (lwp))
4496 ptrace_request = PTRACE_SYSCALL;
4497 else
4498 ptrace_request = PTRACE_CONT;
4499 ptrace (ptrace_request,
4500 lwpid_of (thread),
4501 (PTRACE_TYPE_ARG3) 0,
4502 /* Coerce to a uintptr_t first to avoid potential gcc warning
4503 of coercing an 8 byte integer to a 4 byte pointer. */
4504 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4505
4506 current_thread = saved_thread;
4507 if (errno)
4508 perror_with_name ("resuming thread");
4509
4510 /* Successfully resumed. Clear state that no longer makes sense,
4511 and mark the LWP as running. Must not do this before resuming
4512 otherwise if that fails other code will be confused. E.g., we'd
4513 later try to stop the LWP and hang forever waiting for a stop
4514 status. Note that we must not throw after this is cleared,
4515 otherwise handle_zombie_lwp_error would get confused. */
4516 lwp->stopped = 0;
4517 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4518 }
4519
4520 /* Called when we try to resume a stopped LWP and that errors out. If
4521 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4522 or about to become), discard the error, clear any pending status
4523 the LWP may have, and return true (we'll collect the exit status
4524 soon enough). Otherwise, return false. */
4525
4526 static int
4527 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4528 {
4529 struct thread_info *thread = get_lwp_thread (lp);
4530
4531 /* If we get an error after resuming the LWP successfully, we'd
4532 confuse !T state for the LWP being gone. */
4533 gdb_assert (lp->stopped);
4534
4535 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4536 because even if ptrace failed with ESRCH, the tracee may be "not
4537 yet fully dead", but already refusing ptrace requests. In that
4538 case the tracee has 'R (Running)' state for a little bit
4539 (observed in Linux 3.18). See also the note on ESRCH in the
4540 ptrace(2) man page. Instead, check whether the LWP has any state
4541 other than ptrace-stopped. */
4542
4543 /* Don't assume anything if /proc/PID/status can't be read. */
4544 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4545 {
4546 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4547 lp->status_pending_p = 0;
4548 return 1;
4549 }
4550 return 0;
4551 }
4552
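/* Illustrative sketch (not part of linux-low.c): the kind of
   /proc/PID/status probe that check_ptrace_stopped_lwp_gone depends
   on (the real helper is linux_proc_pid_is_trace_stopped_nowarn in
   nat/linux-procfs.c). Returns 1 if the LWP is in tracing stop, 0
   if not, -1 if the file can't be read -- in which case, per the
   comment above, the caller should not assume anything. */

#include <stdio.h>
#include <string.h>

static int
toy_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)"; older kernels report
	   'T' for both tracing and job-control stops, so this
	   check is only approximate there. */
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	result = (*p == 't' || *p == 'T');
	break;
      }

  fclose (f);
  return result;
}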
4553 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4554 disappears while we try to resume it. */
4555
4556 static void
4557 linux_resume_one_lwp (struct lwp_info *lwp,
4558 int step, int signal, siginfo_t *info)
4559 {
4560 TRY
4561 {
4562 linux_resume_one_lwp_throw (lwp, step, signal, info);
4563 }
4564 CATCH (ex, RETURN_MASK_ERROR)
4565 {
4566 if (!check_ptrace_stopped_lwp_gone (lwp))
4567 throw_exception (ex);
4568 }
4569 END_CATCH
4570 }
4571
4572 /* This function is called once per thread via for_each_thread.
4573 We look up which resume request applies to THREAD and mark it with a
4574 pointer to the appropriate resume request.
4575
4576 This algorithm is O(threads * resume elements), but resume elements
4577 is small (and will remain small at least until GDB supports thread
4578 suspension). */
4579
4580 static void
4581 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4582 {
4583 struct lwp_info *lwp = get_thread_lwp (thread);
4584
4585 for (int ndx = 0; ndx < n; ndx++)
4586 {
4587 ptid_t ptid = resume[ndx].thread;
4588 if (ptid_equal (ptid, minus_one_ptid)
4589 || ptid == thread->id
4590 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4591 of PID'. */
4592 || (ptid_get_pid (ptid) == pid_of (thread)
4593 && (ptid_is_pid (ptid)
4594 || ptid_get_lwp (ptid) == -1)))
4595 {
4596 if (resume[ndx].kind == resume_stop
4597 && thread->last_resume_kind == resume_stop)
4598 {
4599 if (debug_threads)
4600 debug_printf ("already %s LWP %ld at GDB's request\n",
4601 (thread->last_status.kind
4602 == TARGET_WAITKIND_STOPPED)
4603 ? "stopped"
4604 : "stopping",
4605 lwpid_of (thread));
4606
4607 continue;
4608 }
4609
4610 /* Ignore (wildcard) resume requests for already-resumed
4611 threads. */
4612 if (resume[ndx].kind != resume_stop
4613 && thread->last_resume_kind != resume_stop)
4614 {
4615 if (debug_threads)
4616 debug_printf ("already %s LWP %ld at GDB's request\n",
4617 (thread->last_resume_kind
4618 == resume_step)
4619 ? "stepping"
4620 : "continuing",
4621 lwpid_of (thread));
4622 continue;
4623 }
4624
4625 /* Don't let wildcard resumes resume fork children that GDB
4626 does not yet know are new fork children. */
4627 if (lwp->fork_relative != NULL)
4628 {
4629 struct lwp_info *rel = lwp->fork_relative;
4630
4631 if (rel->status_pending_p
4632 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4633 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4634 {
4635 if (debug_threads)
4636 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4637 lwpid_of (thread));
4638 continue;
4639 }
4640 }
4641
4642 /* If the thread has a pending event that has already been
4643 reported to GDBserver core, but GDB has not pulled the
4644 event out of the vStopped queue yet, likewise, ignore the
4645 (wildcard) resume request. */
4646 if (in_queued_stop_replies (thread->id))
4647 {
4648 if (debug_threads)
4649 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4650 lwpid_of (thread));
4651 continue;
4652 }
4653
4654 lwp->resume = &resume[ndx];
4655 thread->last_resume_kind = lwp->resume->kind;
4656
4657 lwp->step_range_start = lwp->resume->step_range_start;
4658 lwp->step_range_end = lwp->resume->step_range_end;
4659
4660 /* If we had a deferred signal to report, dequeue one now.
4661 This can happen if LWP gets more than one signal while
4662 trying to get out of a jump pad. */
4663 if (lwp->stopped
4664 && !lwp->status_pending_p
4665 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4666 {
4667 lwp->status_pending_p = 1;
4668
4669 if (debug_threads)
4670 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4671 "leaving status pending.\n",
4672 WSTOPSIG (lwp->status_pending),
4673 lwpid_of (thread));
4674 }
4675
4676 return;
4677 }
4678 }
4679
4680 /* No resume action for this thread. */
4681 lwp->resume = NULL;
4682 }
4683
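/* Illustrative sketch (not part of linux-low.c) of the matching
   rules linux_set_resume_request applies above: a request for
   minus_one_ptid matches every thread; 'pPID' (lwp 0) and 'pPID.-1'
   both mean "all threads of PID"; anything else must match exactly.
   toy_ptid is a hypothetical stand-in for ptid_t. */

struct toy_ptid { int pid; long lwp; };

static int
toy_resume_request_matches (struct toy_ptid req, struct toy_ptid thr)
{
  if (req.pid == -1)			/* minus_one_ptid: everything */
    return 1;
  if (req.pid == thr.pid
      && (req.lwp == 0			/* 'pPID' */
	  || req.lwp == -1))		/* 'pPID.-1' */
    return 1;				/* all threads of PID */
  return req.pid == thr.pid && req.lwp == thr.lwp;
}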
4684 /* find_inferior callback for linux_resume.
4685 Set *FLAG_P if this lwp has an interesting status pending. */
4686
4687 static int
4688 resume_status_pending_p (thread_info *thread, void *flag_p)
4689 {
4690 struct lwp_info *lwp = get_thread_lwp (thread);
4691
4692 /* LWPs which will not be resumed are not interesting, because
4693 we might not wait for them next time through linux_wait. */
4694 if (lwp->resume == NULL)
4695 return 0;
4696
4697 if (thread_still_has_status_pending_p (thread))
4698 * (int *) flag_p = 1;
4699
4700 return 0;
4701 }
4702
4703 /* Return 1 if this lwp that GDB wants running is stopped at an
4704 internal breakpoint that we need to step over. It assumes that any
4705 required STOP_PC adjustment has already been propagated to the
4706 inferior's regcache. */
4707
4708 static int
4709 need_step_over_p (thread_info *thread, void *dummy)
4710 {
4711 struct lwp_info *lwp = get_thread_lwp (thread);
4712 struct thread_info *saved_thread;
4713 CORE_ADDR pc;
4714 struct process_info *proc = get_thread_process (thread);
4715
4716 /* GDBserver is skipping the extra traps from the wrapper program,
4717 so we don't have to do a step-over. */
4718 if (proc->tdesc == NULL)
4719 return 0;
4720
4721 /* LWPs which will not be resumed are not interesting, because we
4722 might not wait for them next time through linux_wait. */
4723
4724 if (!lwp->stopped)
4725 {
4726 if (debug_threads)
4727 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4728 lwpid_of (thread));
4729 return 0;
4730 }
4731
4732 if (thread->last_resume_kind == resume_stop)
4733 {
4734 if (debug_threads)
4735 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4736 " stopped\n",
4737 lwpid_of (thread));
4738 return 0;
4739 }
4740
4741 gdb_assert (lwp->suspended >= 0);
4742
4743 if (lwp->suspended)
4744 {
4745 if (debug_threads)
4746 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4747 lwpid_of (thread));
4748 return 0;
4749 }
4750
4751 if (lwp->status_pending_p)
4752 {
4753 if (debug_threads)
4754 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4755 " status.\n",
4756 lwpid_of (thread));
4757 return 0;
4758 }
4759
4760 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4761 or we have. */
4762 pc = get_pc (lwp);
4763
4764 /* If the PC has changed since we stopped, then don't do anything,
4765 and let the breakpoint/tracepoint be hit. This happens if, for
4766 instance, GDB handled the decr_pc_after_break subtraction itself,
4767 GDB is OOL stepping this thread, or the user has issued a "jump"
4768 command, or poked thread's registers herself. */
4769 if (pc != lwp->stop_pc)
4770 {
4771 if (debug_threads)
4772 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4773 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4774 lwpid_of (thread),
4775 paddress (lwp->stop_pc), paddress (pc));
4776 return 0;
4777 }
4778
4779 /* On software single step target, resume the inferior with signal
4780 rather than stepping over. */
4781 if (can_software_single_step ()
4782 && lwp->pending_signals != NULL
4783 && lwp_signal_can_be_delivered (lwp))
4784 {
4785 if (debug_threads)
4786 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4787 " signals.\n",
4788 lwpid_of (thread));
4789
4790 return 0;
4791 }
4792
4793 saved_thread = current_thread;
4794 current_thread = thread;
4795
4796 /* We can only step over breakpoints we know about. */
4797 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4798 {
4799 /* Don't step over a breakpoint that GDB expects to hit
4800 though. If the condition is being evaluated on the target's side
4801 and it evaluates to false, step over this breakpoint as well. */
4802 if (gdb_breakpoint_here (pc)
4803 && gdb_condition_true_at_breakpoint (pc)
4804 && gdb_no_commands_at_breakpoint (pc))
4805 {
4806 if (debug_threads)
4807 debug_printf ("Need step over [LWP %ld]? yes, but found"
4808 " GDB breakpoint at 0x%s; skipping step over\n",
4809 lwpid_of (thread), paddress (pc));
4810
4811 current_thread = saved_thread;
4812 return 0;
4813 }
4814 else
4815 {
4816 if (debug_threads)
4817 debug_printf ("Need step over [LWP %ld]? yes, "
4818 "found breakpoint at 0x%s\n",
4819 lwpid_of (thread), paddress (pc));
4820
4821 /* We've found an lwp that needs stepping over --- return 1 so
4822 that find_inferior stops looking. */
4823 current_thread = saved_thread;
4824
4825 return 1;
4826 }
4827 }
4828
4829 current_thread = saved_thread;
4830
4831 if (debug_threads)
4832 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4833 " at 0x%s\n",
4834 lwpid_of (thread), paddress (pc));
4835
4836 return 0;
4837 }
4838
4839 /* Start a step-over operation on LWP. When LWP is stopped at a
4840 breakpoint, to make progress, we need to move the breakpoint out
4841 of the way. If we let other threads run while we do that, they may
4842 pass by the breakpoint location and miss hitting it. To avoid
4843 that, a step-over momentarily stops all threads while LWP is
4844 single-stepped by either hardware or software while the breakpoint
4845 is temporarily uninserted from the inferior. When the single-step
4846 finishes, we reinsert the breakpoint, and let all threads that are
4847 supposed to be running, run again. */
4848
4849 static int
4850 start_step_over (struct lwp_info *lwp)
4851 {
4852 struct thread_info *thread = get_lwp_thread (lwp);
4853 struct thread_info *saved_thread;
4854 CORE_ADDR pc;
4855 int step;
4856
4857 if (debug_threads)
4858 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4859 lwpid_of (thread));
4860
4861 stop_all_lwps (1, lwp);
4862
4863 if (lwp->suspended != 0)
4864 {
4865 internal_error (__FILE__, __LINE__,
4866 "LWP %ld suspended=%d\n", lwpid_of (thread),
4867 lwp->suspended);
4868 }
4869
4870 if (debug_threads)
4871 debug_printf ("Done stopping all threads for step-over.\n");
4872
4873 /* Note, we should always reach here with an already adjusted PC,
4874 either by GDB (if we're resuming due to GDB's request), or by our
4875 caller, if we just finished handling an internal breakpoint GDB
4876 shouldn't care about. */
4877 pc = get_pc (lwp);
4878
4879 saved_thread = current_thread;
4880 current_thread = thread;
4881
4882 lwp->bp_reinsert = pc;
4883 uninsert_breakpoints_at (pc);
4884 uninsert_fast_tracepoint_jumps_at (pc);
4885
4886 step = single_step (lwp);
4887
4888 current_thread = saved_thread;
4889
4890 linux_resume_one_lwp (lwp, step, 0, NULL);
4891
4892 /* Require next event from this LWP. */
4893 step_over_bkpt = thread->id;
4894 return 1;
4895 }
4896
4897 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4898 start_step_over, if still there, and delete any single-step
4899 breakpoints we've set, on non-hardware single-step targets. */
4900
4901 static int
4902 finish_step_over (struct lwp_info *lwp)
4903 {
4904 if (lwp->bp_reinsert != 0)
4905 {
4906 struct thread_info *saved_thread = current_thread;
4907
4908 if (debug_threads)
4909 debug_printf ("Finished step over.\n");
4910
4911 current_thread = get_lwp_thread (lwp);
4912
4913 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4914 may be no breakpoint to reinsert there by now. */
4915 reinsert_breakpoints_at (lwp->bp_reinsert);
4916 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4917
4918 lwp->bp_reinsert = 0;
4919
4920 /* Delete any single-step breakpoints. No longer needed. We
4921 don't have to worry about other threads hitting this trap,
4922 and later not being able to explain it, because we were
4923 stepping over a breakpoint, and we hold all threads but
4924 LWP stopped while doing that. */
4925 if (!can_hardware_single_step ())
4926 {
4927 gdb_assert (has_single_step_breakpoints (current_thread));
4928 delete_single_step_breakpoints (current_thread);
4929 }
4930
4931 step_over_bkpt = null_ptid;
4932 current_thread = saved_thread;
4933 return 1;
4934 }
4935 else
4936 return 0;
4937 }
4938
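/* Illustrative sketch (not part of linux-low.c): the full step-over
   lifecycle that start_step_over and finish_step_over implement
   above, flattened into one sequence. Every helper named here is
   hypothetical; the point is the ordering, which prevents other
   threads from running past the spot where the breakpoint is
   temporarily gone. */

struct toy_thread;
extern unsigned long toy_get_pc (struct toy_thread *);
extern void toy_stop_all_others (struct toy_thread *);
extern void toy_uninsert_breakpoint (unsigned long pc);
extern void toy_single_step (struct toy_thread *);
extern void toy_wait_for_stop (struct toy_thread *);
extern void toy_reinsert_breakpoint (unsigned long pc);
extern void toy_resume_all (void);

static void
toy_step_over (struct toy_thread *t)
{
  unsigned long pc = toy_get_pc (t);

  toy_stop_all_others (t);	/* nobody else may run meanwhile */
  toy_uninsert_breakpoint (pc);	/* restore the original insn */
  toy_single_step (t);		/* hw step, or a sw single-step
				   breakpoint on the next insn */
  toy_wait_for_stop (t);	/* the step completes */
  toy_reinsert_breakpoint (pc);	/* put the trap back */
  toy_resume_all ();		/* let everyone move again */
}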
4939 /* If there's a step over in progress, wait until all threads stop
4940 (that is, until the stepping thread finishes its step), and
4941 unsuspend all lwps. The stepping thread ends with its status
4942 pending, which is processed later when we get back to processing
4943 events. */
4944
4945 static void
4946 complete_ongoing_step_over (void)
4947 {
4948 if (!ptid_equal (step_over_bkpt, null_ptid))
4949 {
4950 struct lwp_info *lwp;
4951 int wstat;
4952 int ret;
4953
4954 if (debug_threads)
4955 debug_printf ("detach: step over in progress, finish it first\n");
4956
4957 /* Passing NULL_PTID as filter indicates we want all events to
4958 be left pending. Eventually this returns when there are no
4959 unwaited-for children left. */
4960 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4961 &wstat, __WALL);
4962 gdb_assert (ret == -1);
4963
4964 lwp = find_lwp_pid (step_over_bkpt);
4965 if (lwp != NULL)
4966 finish_step_over (lwp);
4967 step_over_bkpt = null_ptid;
4968 unsuspend_all_lwps (lwp);
4969 }
4970 }
4971
4972 /* This function is called once per thread. We check the thread's resume
4973 request, which will tell us whether to resume, step, or leave the thread
4974 stopped; and what signal, if any, it should be sent.
4975
4976 For threads which we aren't explicitly told otherwise, we preserve
4977 the stepping flag; this is used for stepping over gdbserver-placed
4978 breakpoints.
4979
4980 If pending_flags was set in any thread, we queue any needed
4981 signals, since we won't actually resume. We already have a pending
4982 event to report, so we don't need to preserve any step requests;
4983 they should be re-issued if necessary. */
4984
4985 static int
4986 linux_resume_one_thread (thread_info *thread, void *arg)
4987 {
4988 struct lwp_info *lwp = get_thread_lwp (thread);
4989 int leave_all_stopped = * (int *) arg;
4990 int leave_pending;
4991
4992 if (lwp->resume == NULL)
4993 return 0;
4994
4995 if (lwp->resume->kind == resume_stop)
4996 {
4997 if (debug_threads)
4998 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4999
5000 if (!lwp->stopped)
5001 {
5002 if (debug_threads)
5003 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5004
5005 /* Stop the thread, and wait for the event asynchronously,
5006 through the event loop. */
5007 send_sigstop (lwp);
5008 }
5009 else
5010 {
5011 if (debug_threads)
5012 debug_printf ("already stopped LWP %ld\n",
5013 lwpid_of (thread));
5014
5015 /* The LWP may have been stopped in an internal event that
5016 was not meant to be notified back to GDB (e.g., gdbserver
5017 breakpoint), so we should be reporting a stop event in
5018 this case too. */
5019
5020 /* If the thread already has a pending SIGSTOP, this is a
5021 no-op. Otherwise, something later will presumably resume
5022 the thread and this will cause it to cancel any pending
5023 operation, due to last_resume_kind == resume_stop. If
5024 the thread already has a pending status to report, we
5025 will still report it the next time we wait - see
5026 status_pending_p_callback. */
5027
5028 /* If we already have a pending signal to report, then
5029 there's no need to queue a SIGSTOP, as this means we're
5030 midway through moving the LWP out of the jumppad, and we
5031 will report the pending signal as soon as that is
5032 finished. */
5033 if (lwp->pending_signals_to_report == NULL)
5034 send_sigstop (lwp);
5035 }
5036
5037 /* For stop requests, we're done. */
5038 lwp->resume = NULL;
5039 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5040 return 0;
5041 }
5042
5043 /* If this thread which is about to be resumed has a pending status,
5044 then don't resume it - we can just report the pending status.
5045 Likewise if it is suspended, because e.g., another thread is
5046 stepping past a breakpoint. Make sure to queue any signals that
5047 would otherwise be sent. In all-stop mode, we make this decision
5048 based on whether *any* thread has a pending status. If there's a
5049 thread that needs the step-over-breakpoint dance, then don't
5050 resume any other thread but that particular one. */
5051 leave_pending = (lwp->suspended
5052 || lwp->status_pending_p
5053 || leave_all_stopped);
5054
5055 /* If we have a new signal, enqueue the signal. */
5056 if (lwp->resume->sig != 0)
5057 {
5058 siginfo_t info, *info_p;
5059
5060 /* If this is the same signal we were previously stopped by,
5061 make sure to queue its siginfo. */
5062 if (WIFSTOPPED (lwp->last_status)
5063 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5064 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5065 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5066 info_p = &info;
5067 else
5068 info_p = NULL;
5069
5070 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5071 }
5072
5073 if (!leave_pending)
5074 {
5075 if (debug_threads)
5076 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5077
5078 proceed_one_lwp (thread, NULL);
5079 }
5080 else
5081 {
5082 if (debug_threads)
5083 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5084 }
5085
5086 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5087 lwp->resume = NULL;
5088 return 0;
5089 }
5090
5091 static void
5092 linux_resume (struct thread_resume *resume_info, size_t n)
5093 {
5094 struct thread_info *need_step_over = NULL;
5095 int any_pending;
5096 int leave_all_stopped;
5097
5098 if (debug_threads)
5099 {
5100 debug_enter ();
5101 debug_printf ("linux_resume:\n");
5102 }
5103
5104 for_each_thread ([&] (thread_info *thread)
5105 {
5106 linux_set_resume_request (thread, resume_info, n);
5107 });
5108
5109 /* If there is a thread which would otherwise be resumed, which has
5110 a pending status, then don't resume any threads - we can just
5111 report the pending status. Make sure to queue any signals that
5112 would otherwise be sent. In non-stop mode, we'll apply this
5113 logic to each thread individually. We consume all pending events
5114 before considering to start a step-over (in all-stop). */
5115 any_pending = 0;
5116 if (!non_stop)
5117 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5118
5119 /* If there is a thread which would otherwise be resumed, which is
5120 stopped at a breakpoint that needs stepping over, then don't
5121 resume any threads - have it step over the breakpoint with all
5122 other threads stopped, then resume all threads again. Make sure
5123 to queue any signals that would otherwise be delivered or
5124 queued. */
5125 if (!any_pending && supports_breakpoints ())
5126 need_step_over
5127 = (struct thread_info *) find_inferior (&all_threads,
5128 need_step_over_p, NULL);
5129
5130 leave_all_stopped = (need_step_over != NULL || any_pending);
5131
5132 if (debug_threads)
5133 {
5134 if (need_step_over != NULL)
5135 debug_printf ("Not resuming all, need step over\n");
5136 else if (any_pending)
5137 debug_printf ("Not resuming, all-stop and found "
5138 "an LWP with pending status\n");
5139 else
5140 debug_printf ("Resuming, no pending status or step over needed\n");
5141 }
5142
5143 /* Even if we're leaving threads stopped, queue all signals we'd
5144 otherwise deliver. */
5145 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5146
5147 if (need_step_over)
5148 start_step_over (get_thread_lwp (need_step_over));
5149
5150 if (debug_threads)
5151 {
5152 debug_printf ("linux_resume done\n");
5153 debug_exit ();
5154 }
5155
5156 /* We may have events that were pending that can/should be sent to
5157 the client now. Trigger a linux_wait call. */
5158 if (target_is_async_p ())
5159 async_file_mark ();
5160 }
5161
5162 /* This function is called once per thread. We check the thread's
5163 last resume request, which will tell us whether to resume, step, or
5164 leave the thread stopped. Any signal the client requested to be
5165 delivered has already been enqueued at this point.
5166
5167 If any thread that GDB wants running is stopped at an internal
5168 breakpoint that needs stepping over, we start a step-over operation
5169 on that particular thread, and leave all others stopped. */
5170
5171 static int
5172 proceed_one_lwp (thread_info *thread, void *except)
5173 {
5174 struct lwp_info *lwp = get_thread_lwp (thread);
5175 int step;
5176
5177 if (lwp == except)
5178 return 0;
5179
5180 if (debug_threads)
5181 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5182
5183 if (!lwp->stopped)
5184 {
5185 if (debug_threads)
5186 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5187 return 0;
5188 }
5189
5190 if (thread->last_resume_kind == resume_stop
5191 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5192 {
5193 if (debug_threads)
5194 debug_printf (" client wants LWP %ld to remain stopped\n",
5195 lwpid_of (thread));
5196 return 0;
5197 }
5198
5199 if (lwp->status_pending_p)
5200 {
5201 if (debug_threads)
5202 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5203 lwpid_of (thread));
5204 return 0;
5205 }
5206
5207 gdb_assert (lwp->suspended >= 0);
5208
5209 if (lwp->suspended)
5210 {
5211 if (debug_threads)
5212 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5213 return 0;
5214 }
5215
5216 if (thread->last_resume_kind == resume_stop
5217 && lwp->pending_signals_to_report == NULL
5218 && (lwp->collecting_fast_tracepoint
5219 == fast_tpoint_collect_result::not_collecting))
5220 {
5221 /* We haven't reported this LWP as stopped yet (otherwise, the
5222 last_status.kind check above would catch it, and we wouldn't
5223 reach here). This LWP may have been momentarily paused by a
5224 stop_all_lwps call while handling, for example, another LWP's
5225 step-over. In that case, the pending expected SIGSTOP signal
5226 that was queued at vCont;t handling time will have already
5227 been consumed by wait_for_sigstop, and so we need to requeue
5228 another one here. Note that if the LWP already has a SIGSTOP
5229 pending, this is a no-op. */
5230
5231 if (debug_threads)
5232 debug_printf ("Client wants LWP %ld to stop. "
5233 "Making sure it has a SIGSTOP pending\n",
5234 lwpid_of (thread));
5235
5236 send_sigstop (lwp);
5237 }
5238
5239 if (thread->last_resume_kind == resume_step)
5240 {
5241 if (debug_threads)
5242 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5243 lwpid_of (thread));
5244
5245 /* If resume_step is requested by GDB, install single-step
5246 breakpoints when the thread is about to be actually resumed if
5247 the single-step breakpoints weren't removed. */
5248 if (can_software_single_step ()
5249 && !has_single_step_breakpoints (thread))
5250 install_software_single_step_breakpoints (lwp);
5251
5252 step = maybe_hw_step (thread);
5253 }
5254 else if (lwp->bp_reinsert != 0)
5255 {
5256 if (debug_threads)
5257 debug_printf (" stepping LWP %ld, reinsert set\n",
5258 lwpid_of (thread));
5259
5260 step = maybe_hw_step (thread);
5261 }
5262 else
5263 step = 0;
5264
5265 linux_resume_one_lwp (lwp, step, 0, NULL);
5266 return 0;
5267 }
5268
5269 static int
5270 unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
5271 {
5272 struct lwp_info *lwp = get_thread_lwp (thread);
5273
5274 if (lwp == except)
5275 return 0;
5276
5277 lwp_suspended_decr (lwp);
5278
5279 return proceed_one_lwp (thread, except);
5280 }
5281
5282 /* When we finish a step-over, set threads running again. If there's
5283 another thread that may need a step-over, now's the time to start
5284 it. Eventually, we'll move all threads past their breakpoints. */
5285
5286 static void
5287 proceed_all_lwps (void)
5288 {
5289 struct thread_info *need_step_over;
5290
5291 /* If there is a thread which would otherwise be resumed, which is
5292 stopped at a breakpoint that needs stepping over, then don't
5293 resume any threads - have it step over the breakpoint with all
5294 other threads stopped, then resume all threads again. */
5295
5296 if (supports_breakpoints ())
5297 {
5298 need_step_over
5299 = (struct thread_info *) find_inferior (&all_threads,
5300 need_step_over_p, NULL);
5301
5302 if (need_step_over != NULL)
5303 {
5304 if (debug_threads)
5305 debug_printf ("proceed_all_lwps: found "
5306 "thread %ld needing a step-over\n",
5307 lwpid_of (need_step_over));
5308
5309 start_step_over (get_thread_lwp (need_step_over));
5310 return;
5311 }
5312 }
5313
5314 if (debug_threads)
5315 debug_printf ("Proceeding, no step-over needed\n");
5316
5317 find_inferior (&all_threads, proceed_one_lwp, NULL);
5318 }
5319
5320 /* Stopped LWPs that the client wanted to be running, that don't have
5321 pending statuses, are set to run again, except for EXCEPT, if not
5322 NULL. This undoes a stop_all_lwps call. */
5323
5324 static void
5325 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5326 {
5327 if (debug_threads)
5328 {
5329 debug_enter ();
5330 if (except)
5331 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5332 lwpid_of (get_lwp_thread (except)));
5333 else
5334 debug_printf ("unstopping all lwps\n");
5335 }
5336
5337 if (unsuspend)
5338 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5339 else
5340 find_inferior (&all_threads, proceed_one_lwp, except);
5341
5342 if (debug_threads)
5343 {
5344 debug_printf ("unstop_all_lwps done\n");
5345 debug_exit ();
5346 }
5347 }
5348
5349
5350 #ifdef HAVE_LINUX_REGSETS
5351
5352 #define use_linux_regsets 1
5353
5354 /* Returns true if REGSET has been disabled. */
5355
5356 static int
5357 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5358 {
5359 return (info->disabled_regsets != NULL
5360 && info->disabled_regsets[regset - info->regsets]);
5361 }
5362
5363 /* Disable REGSET. */
5364
5365 static void
5366 disable_regset (struct regsets_info *info, struct regset_info *regset)
5367 {
5368 int dr_offset;
5369
5370 dr_offset = regset - info->regsets;
5371 if (info->disabled_regsets == NULL)
5372 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5373 info->disabled_regsets[dr_offset] = 1;
5374 }
5375
5376 static int
5377 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5378 struct regcache *regcache)
5379 {
5380 struct regset_info *regset;
5381 int saw_general_regs = 0;
5382 int pid;
5383 struct iovec iov;
5384
5385 pid = lwpid_of (current_thread);
5386 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5387 {
5388 void *buf, *data;
5389 int nt_type, res;
5390
5391 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5392 continue;
5393
5394 buf = xmalloc (regset->size);
5395
5396 nt_type = regset->nt_type;
5397 if (nt_type)
5398 {
5399 iov.iov_base = buf;
5400 iov.iov_len = regset->size;
5401 data = (void *) &iov;
5402 }
5403 else
5404 data = buf;
5405
5406 #ifndef __sparc__
5407 res = ptrace (regset->get_request, pid,
5408 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5409 #else
5410 res = ptrace (regset->get_request, pid, data, nt_type);
5411 #endif
5412 if (res < 0)
5413 {
5414 if (errno == EIO)
5415 {
5416 /* If we get EIO on a regset, do not try it again for
5417 this process mode. */
5418 disable_regset (regsets_info, regset);
5419 }
5420 else if (errno == ENODATA)
5421 {
5422 /* ENODATA may be returned if the regset is currently
5423 not "active". This can happen in normal operation,
5424 so suppress the warning in this case. */
5425 }
5426 else if (errno == ESRCH)
5427 {
5428 /* At this point, ESRCH should mean the process is
5429 already gone, in which case we simply ignore attempts
5430 to read its registers. */
5431 }
5432 else
5433 {
5434 char s[256];
5435 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5436 pid);
5437 perror (s);
5438 }
5439 }
5440 else
5441 {
5442 if (regset->type == GENERAL_REGS)
5443 saw_general_regs = 1;
5444 regset->store_function (regcache, buf);
5445 }
5446 free (buf);
5447 }
5448 if (saw_general_regs)
5449 return 0;
5450 else
5451 return 1;
5452 }
5453
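/* Illustrative sketch (not part of linux-low.c): the iovec-based
   transfer used above. Regsets with a nonzero nt_type go through
   PTRACE_GETREGSET, whose third argument is the ELF note type and
   whose fourth is a struct iovec the kernel fills in, updating
   iov_len to the number of bytes actually written. Shown for
   NT_PRSTATUS on a modern glibc. */

#include <elf.h>		/* NT_PRSTATUS */
#include <stdio.h>
#include <sys/procfs.h>		/* elf_gregset_t */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int
toy_fetch_gregs (pid_t pid, elf_gregset_t *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);

  if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) < 0)
    {
      perror ("PTRACE_GETREGSET");
      return -1;
    }
  return 0;			/* iov.iov_len holds the bytes read */
}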
5454 static int
5455 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5456 struct regcache *regcache)
5457 {
5458 struct regset_info *regset;
5459 int saw_general_regs = 0;
5460 int pid;
5461 struct iovec iov;
5462
5463 pid = lwpid_of (current_thread);
5464 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5465 {
5466 void *buf, *data;
5467 int nt_type, res;
5468
5469 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5470 || regset->fill_function == NULL)
5471 continue;
5472
5473 buf = xmalloc (regset->size);
5474
5475 /* First fill the buffer with the current register set contents,
5476 in case there are any items in the kernel's regset that are
5477 not in gdbserver's regcache. */
5478
5479 nt_type = regset->nt_type;
5480 if (nt_type)
5481 {
5482 iov.iov_base = buf;
5483 iov.iov_len = regset->size;
5484 data = (void *) &iov;
5485 }
5486 else
5487 data = buf;
5488
5489 #ifndef __sparc__
5490 res = ptrace (regset->get_request, pid,
5491 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5492 #else
5493 res = ptrace (regset->get_request, pid, data, nt_type);
5494 #endif
5495
5496 if (res == 0)
5497 {
5498 /* Then overlay our cached registers on that. */
5499 regset->fill_function (regcache, buf);
5500
5501 /* Only now do we write the register set. */
5502 #ifndef __sparc__
5503 res = ptrace (regset->set_request, pid,
5504 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5505 #else
5506 res = ptrace (regset->set_request, pid, data, nt_type);
5507 #endif
5508 }
5509
5510 if (res < 0)
5511 {
5512 if (errno == EIO)
5513 {
5514 /* If we get EIO on a regset, do not try it again for
5515 this process mode. */
5516 disable_regset (regsets_info, regset);
5517 }
5518 else if (errno == ESRCH)
5519 {
5520 /* At this point, ESRCH should mean the process is
5521 already gone, in which case we simply ignore attempts
5522 to change its registers. See also the related
5523 comment in linux_resume_one_lwp. */
5524 free (buf);
5525 return 0;
5526 }
5527 else
5528 {
5529 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5530 }
5531 }
5532 else if (regset->type == GENERAL_REGS)
5533 saw_general_regs = 1;
5534 free (buf);
5535 }
5536 if (saw_general_regs)
5537 return 0;
5538 else
5539 return 1;
5540 }
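
/* Illustrative sketch, not gdbserver code: the same read-modify-write
   pattern as regsets_store_inferior_registers above, expressed with
   the NT_PRSTATUS general-purpose regset and an explicit iovec.  PID
   is assumed to be a stopped, ptrace-attached thread; the modify step
   is elided.  Needs <sys/ptrace.h>, <sys/uio.h>, <sys/user.h> and
   <elf.h>.

static int
example_store_gregs (pid_t pid)
{
  struct user_regs_struct regs;
  struct iovec iov;

  iov.iov_base = &regs;
  iov.iov_len = sizeof (regs);

  // Fetch the full regset first so kernel-side fields we do not
  // track keep their current values.
  if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) < 0)
    return -1;

  // ... overlay the cached register values on REGS here ...

  // Only now write the whole set back.
  return ptrace (PTRACE_SETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
*/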
5541
5542 #else /* !HAVE_LINUX_REGSETS */
5543
5544 #define use_linux_regsets 0
5545 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5546 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5547
5548 #endif
5549
5550 /* Return 1 if register REGNO is supported by one of the regset ptrace
5551 calls or 0 if it has to be transferred individually. */
5552
5553 static int
5554 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5555 {
5556 unsigned char mask = 1 << (regno % 8);
5557 size_t index = regno / 8;
5558
5559 return (use_linux_regsets
5560 && (regs_info->regset_bitmap == NULL
5561 || (regs_info->regset_bitmap[index] & mask) != 0));
5562 }
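
/* Worked example for the bitmap test above: for REGNO == 10 the byte
   index is 10 / 8 == 1 and the mask is 1 << (10 % 8) == 0x04, so
   register 10 is handled by a regset iff bit 2 of regset_bitmap[1]
   is set (or the bitmap is NULL, meaning "all registers").  */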
5563
5564 #ifdef HAVE_LINUX_USRREGS
5565
5566 static int
5567 register_addr (const struct usrregs_info *usrregs, int regnum)
5568 {
5569 int addr;
5570
5571 if (regnum < 0 || regnum >= usrregs->num_regs)
5572 error ("Invalid register number %d.", regnum);
5573
5574 addr = usrregs->regmap[regnum];
5575
5576 return addr;
5577 }
5578
5579 /* Fetch one register. */
5580 static void
5581 fetch_register (const struct usrregs_info *usrregs,
5582 struct regcache *regcache, int regno)
5583 {
5584 CORE_ADDR regaddr;
5585 int i, size;
5586 char *buf;
5587 int pid;
5588
5589 if (regno >= usrregs->num_regs)
5590 return;
5591 if ((*the_low_target.cannot_fetch_register) (regno))
5592 return;
5593
5594 regaddr = register_addr (usrregs, regno);
5595 if (regaddr == -1)
5596 return;
5597
5598 size = ((register_size (regcache->tdesc, regno)
5599 + sizeof (PTRACE_XFER_TYPE) - 1)
5600 & -sizeof (PTRACE_XFER_TYPE));
5601 buf = (char *) alloca (size);
5602
5603 pid = lwpid_of (current_thread);
5604 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5605 {
5606 errno = 0;
5607 *(PTRACE_XFER_TYPE *) (buf + i) =
5608 ptrace (PTRACE_PEEKUSER, pid,
5609 /* Coerce to a uintptr_t first to avoid potential gcc warning
5610 of coercing an 8 byte integer to a 4 byte pointer. */
5611 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5612 regaddr += sizeof (PTRACE_XFER_TYPE);
5613 if (errno != 0)
5614 error ("reading register %d: %s", regno, strerror (errno));
5615 }
5616
5617 if (the_low_target.supply_ptrace_register)
5618 the_low_target.supply_ptrace_register (regcache, regno, buf);
5619 else
5620 supply_register (regcache, regno, buf);
5621 }
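
/* Worked example of the size rounding in fetch_register above,
   assuming a 64-bit host where sizeof (PTRACE_XFER_TYPE) == 8: a
   6-byte register yields

     (6 + 8 - 1) & -8  ==  13 & ~7  ==  8,

   i.e. one full transfer word, so the PTRACE_PEEKUSER loop always
   moves whole words.  */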
5622
5623 /* Store one register. */
5624 static void
5625 store_register (const struct usrregs_info *usrregs,
5626 struct regcache *regcache, int regno)
5627 {
5628 CORE_ADDR regaddr;
5629 int i, size;
5630 char *buf;
5631 int pid;
5632
5633 if (regno >= usrregs->num_regs)
5634 return;
5635 if ((*the_low_target.cannot_store_register) (regno))
5636 return;
5637
5638 regaddr = register_addr (usrregs, regno);
5639 if (regaddr == -1)
5640 return;
5641
5642 size = ((register_size (regcache->tdesc, regno)
5643 + sizeof (PTRACE_XFER_TYPE) - 1)
5644 & -sizeof (PTRACE_XFER_TYPE));
5645 buf = (char *) alloca (size);
5646 memset (buf, 0, size);
5647
5648 if (the_low_target.collect_ptrace_register)
5649 the_low_target.collect_ptrace_register (regcache, regno, buf);
5650 else
5651 collect_register (regcache, regno, buf);
5652
5653 pid = lwpid_of (current_thread);
5654 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5655 {
5656 errno = 0;
5657 ptrace (PTRACE_POKEUSER, pid,
5658 /* Coerce to a uintptr_t first to avoid potential gcc warning
5659 about coercing an 8 byte integer to a 4 byte pointer. */
5660 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5661 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5662 if (errno != 0)
5663 {
5664 /* At this point, ESRCH should mean the process is
5665 already gone, in which case we simply ignore attempts
5666 to change its registers. See also the related
5667 comment in linux_resume_one_lwp. */
5668 if (errno == ESRCH)
5669 return;
5670
5671 if ((*the_low_target.cannot_store_register) (regno) == 0)
5672 error ("writing register %d: %s", regno, strerror (errno));
5673 }
5674 regaddr += sizeof (PTRACE_XFER_TYPE);
5675 }
5676 }
5677
5678 /* Fetch all registers, or just one, from the child process.
5679 If REGNO is -1, do this for all registers, skipping any that are
5680 assumed to have been retrieved by regsets_fetch_inferior_registers,
5681 unless ALL is non-zero.
5682 Otherwise, REGNO specifies which register (so we can save time). */
5683 static void
5684 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5685 struct regcache *regcache, int regno, int all)
5686 {
5687 struct usrregs_info *usr = regs_info->usrregs;
5688
5689 if (regno == -1)
5690 {
5691 for (regno = 0; regno < usr->num_regs; regno++)
5692 if (all || !linux_register_in_regsets (regs_info, regno))
5693 fetch_register (usr, regcache, regno);
5694 }
5695 else
5696 fetch_register (usr, regcache, regno);
5697 }
5698
5699 /* Store our register values back into the inferior.
5700 If REGNO is -1, do this for all registers, skipping any that are
5701 assumed to have been saved by regsets_store_inferior_registers,
5702 unless ALL is non-zero.
5703 Otherwise, REGNO specifies which register (so we can save time). */
5704 static void
5705 usr_store_inferior_registers (const struct regs_info *regs_info,
5706 struct regcache *regcache, int regno, int all)
5707 {
5708 struct usrregs_info *usr = regs_info->usrregs;
5709
5710 if (regno == -1)
5711 {
5712 for (regno = 0; regno < usr->num_regs; regno++)
5713 if (all || !linux_register_in_regsets (regs_info, regno))
5714 store_register (usr, regcache, regno);
5715 }
5716 else
5717 store_register (usr, regcache, regno);
5718 }
5719
5720 #else /* !HAVE_LINUX_USRREGS */
5721
5722 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5723 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5724
5725 #endif
5726
5727
5728 static void
5729 linux_fetch_registers (struct regcache *regcache, int regno)
5730 {
5731 int use_regsets;
5732 int all = 0;
5733 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5734
5735 if (regno == -1)
5736 {
5737 if (the_low_target.fetch_register != NULL
5738 && regs_info->usrregs != NULL)
5739 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5740 (*the_low_target.fetch_register) (regcache, regno);
5741
5742 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5743 if (regs_info->usrregs != NULL)
5744 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5745 }
5746 else
5747 {
5748 if (the_low_target.fetch_register != NULL
5749 && (*the_low_target.fetch_register) (regcache, regno))
5750 return;
5751
5752 use_regsets = linux_register_in_regsets (regs_info, regno);
5753 if (use_regsets)
5754 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5755 regcache);
5756 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5757 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5758 }
5759 }
5760
5761 static void
5762 linux_store_registers (struct regcache *regcache, int regno)
5763 {
5764 int use_regsets;
5765 int all = 0;
5766 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5767
5768 if (regno == -1)
5769 {
5770 all = regsets_store_inferior_registers (regs_info->regsets_info,
5771 regcache);
5772 if (regs_info->usrregs != NULL)
5773 usr_store_inferior_registers (regs_info, regcache, regno, all);
5774 }
5775 else
5776 {
5777 use_regsets = linux_register_in_regsets (regs_info, regno);
5778 if (use_regsets)
5779 all = regsets_store_inferior_registers (regs_info->regsets_info,
5780 regcache);
5781 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5782 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5783 }
5784 }
5785
5786
5787 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5788 to debugger memory starting at MYADDR. */
5789
5790 static int
5791 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5792 {
5793 int pid = lwpid_of (current_thread);
5794 PTRACE_XFER_TYPE *buffer;
5795 CORE_ADDR addr;
5796 int count;
5797 char filename[64];
5798 int i;
5799 int ret;
5800 int fd;
5801
5802 /* Try using /proc. Don't bother for one word. */
5803 if (len >= 3 * sizeof (long))
5804 {
5805 int bytes;
5806
5807 /* We could keep this file open and cache it - possibly one per
5808 thread. That requires some juggling, but is even faster. */
5809 sprintf (filename, "/proc/%d/mem", pid);
5810 fd = open (filename, O_RDONLY | O_LARGEFILE);
5811 if (fd == -1)
5812 goto no_proc;
5813
5814 /* If pread64 is available, use it. It's faster if the kernel
5815 supports it (only one syscall), and it's 64-bit safe even on
5816 32-bit platforms (for instance, SPARC debugging a SPARC64
5817 application). */
5818 #ifdef HAVE_PREAD64
5819 bytes = pread64 (fd, myaddr, len, memaddr);
5820 #else
5821 bytes = -1;
5822 if (lseek (fd, memaddr, SEEK_SET) != -1)
5823 bytes = read (fd, myaddr, len);
5824 #endif
5825
5826 close (fd);
5827 if (bytes == len)
5828 return 0;
5829
5830       /* Some data was read; we'll try to get the rest with ptrace.  */
5831 if (bytes > 0)
5832 {
5833 memaddr += bytes;
5834 myaddr += bytes;
5835 len -= bytes;
5836 }
5837 }
5838
5839 no_proc:
5840 /* Round starting address down to longword boundary. */
5841 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5842 /* Round ending address up; get number of longwords that makes. */
5843 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5844 / sizeof (PTRACE_XFER_TYPE));
5845 /* Allocate buffer of that many longwords. */
5846 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5847
5848   /* Read all the longwords.  */
5849 errno = 0;
5850 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5851 {
5852 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5853 about coercing an 8 byte integer to a 4 byte pointer. */
5854 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5855 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5856 (PTRACE_TYPE_ARG4) 0);
5857 if (errno)
5858 break;
5859 }
5860 ret = errno;
5861
5862 /* Copy appropriate bytes out of the buffer. */
5863 if (i > 0)
5864 {
5865 i *= sizeof (PTRACE_XFER_TYPE);
5866 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5867 memcpy (myaddr,
5868 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5869 i < len ? i : len);
5870 }
5871
5872 return ret;
5873 }
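
/* Worked example of the ptrace alignment math above, assuming
   sizeof (PTRACE_XFER_TYPE) == 8: for MEMADDR == 0x1003 and
   LEN == 10, ADDR rounds down to 0x1000 and

     count == ((0x1003 + 10 - 0x1000) + 7) / 8 == 20 / 8 == 2,

   i.e. the words at 0x1000 and 0x1008, which together cover bytes
   0x1003..0x100c.  The memcpy then skips the leading three bytes of
   padding (MEMADDR & 7).  */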
5874
5875 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5876 memory at MEMADDR. On failure (cannot write to the inferior)
5877 returns the value of errno. Always succeeds if LEN is zero. */
5878
5879 static int
5880 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5881 {
5882 int i;
5883 /* Round starting address down to longword boundary. */
5884 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5885 /* Round ending address up; get number of longwords that makes. */
5886 int count
5887 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5888 / sizeof (PTRACE_XFER_TYPE);
5889
5890 /* Allocate buffer of that many longwords. */
5891 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5892
5893 int pid = lwpid_of (current_thread);
5894
5895 if (len == 0)
5896 {
5897 /* Zero length write always succeeds. */
5898 return 0;
5899 }
5900
5901 if (debug_threads)
5902 {
5903 /* Dump up to four bytes. */
5904 char str[4 * 2 + 1];
5905 char *p = str;
5906 int dump = len < 4 ? len : 4;
5907
5908 for (i = 0; i < dump; i++)
5909 {
5910 sprintf (p, "%02x", myaddr[i]);
5911 p += 2;
5912 }
5913 *p = '\0';
5914
5915 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5916 str, (long) memaddr, pid);
5917 }
5918
5919 /* Fill start and end extra bytes of buffer with existing memory data. */
5920
5921 errno = 0;
5922 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5923 about coercing an 8 byte integer to a 4 byte pointer. */
5924 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5925 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5926 (PTRACE_TYPE_ARG4) 0);
5927 if (errno)
5928 return errno;
5929
5930 if (count > 1)
5931 {
5932 errno = 0;
5933 buffer[count - 1]
5934 = ptrace (PTRACE_PEEKTEXT, pid,
5935 /* Coerce to a uintptr_t first to avoid potential gcc warning
5936 about coercing an 8 byte integer to a 4 byte pointer. */
5937 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5938 * sizeof (PTRACE_XFER_TYPE)),
5939 (PTRACE_TYPE_ARG4) 0);
5940 if (errno)
5941 return errno;
5942 }
5943
5944 /* Copy data to be written over corresponding part of buffer. */
5945
5946 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5947 myaddr, len);
5948
5949 /* Write the entire buffer. */
5950
5951 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5952 {
5953 errno = 0;
5954 ptrace (PTRACE_POKETEXT, pid,
5955 /* Coerce to a uintptr_t first to avoid potential gcc warning
5956 about coercing an 8 byte integer to a 4 byte pointer. */
5957 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5958 (PTRACE_TYPE_ARG4) buffer[i]);
5959 if (errno)
5960 return errno;
5961 }
5962
5963 return 0;
5964 }
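
/* Worked example of why linux_write_memory peeks the edge words
   first (sizeof (PTRACE_XFER_TYPE) == 8 assumed): writing 3 bytes at
   MEMADDR == 0x1006 touches the words at 0x1000 and 0x1008, but only
   bytes 0x1006..0x1008 may change.  Reading both edge words
   beforehand preserves the other 13 bytes when the buffer is poked
   back.  */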
5965
5966 static void
5967 linux_look_up_symbols (void)
5968 {
5969 #ifdef USE_THREAD_DB
5970 struct process_info *proc = current_process ();
5971
5972 if (proc->priv->thread_db != NULL)
5973 return;
5974
5975 thread_db_init ();
5976 #endif
5977 }
5978
5979 static void
5980 linux_request_interrupt (void)
5981 {
5982 /* Send a SIGINT to the process group. This acts just like the user
5983 typed a ^C on the controlling terminal. */
5984 kill (-signal_pid, SIGINT);
5985 }
5986
5987 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5988 to debugger memory starting at MYADDR. */
5989
5990 static int
5991 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5992 {
5993 char filename[PATH_MAX];
5994 int fd, n;
5995 int pid = lwpid_of (current_thread);
5996
5997 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5998
5999 fd = open (filename, O_RDONLY);
6000 if (fd < 0)
6001 return -1;
6002
6003 if (offset != (CORE_ADDR) 0
6004 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6005 n = -1;
6006 else
6007 n = read (fd, myaddr, len);
6008
6009 close (fd);
6010
6011 return n;
6012 }
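
/* Illustrative sketch, not gdbserver code: walking a buffer returned
   by linux_read_auxv to find AT_ENTRY, assuming an ELF64 inferior
   and that BUF holds LEN whole Elf64_auxv_t entries (from <elf.h>).

static unsigned long
example_auxv_entry_point (const unsigned char *buf, int len)
{
  const Elf64_auxv_t *av = (const Elf64_auxv_t *) buf;
  int i, n = len / sizeof (Elf64_auxv_t);

  for (i = 0; i < n; i++)
    if (av[i].a_type == AT_ENTRY)
      return av[i].a_un.a_val;

  return 0;
}
*/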
6013
6014 /* These breakpoint- and watchpoint-related wrapper functions simply
6015 pass on the function call if the target has registered a
6016 corresponding function. */
6017
6018 static int
6019 linux_supports_z_point_type (char z_type)
6020 {
6021 return (the_low_target.supports_z_point_type != NULL
6022 && the_low_target.supports_z_point_type (z_type));
6023 }
6024
6025 static int
6026 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6027 int size, struct raw_breakpoint *bp)
6028 {
6029 if (type == raw_bkpt_type_sw)
6030 return insert_memory_breakpoint (bp);
6031 else if (the_low_target.insert_point != NULL)
6032 return the_low_target.insert_point (type, addr, size, bp);
6033 else
6034 /* Unsupported (see target.h). */
6035 return 1;
6036 }
6037
6038 static int
6039 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6040 int size, struct raw_breakpoint *bp)
6041 {
6042 if (type == raw_bkpt_type_sw)
6043 return remove_memory_breakpoint (bp);
6044 else if (the_low_target.remove_point != NULL)
6045 return the_low_target.remove_point (type, addr, size, bp);
6046 else
6047 /* Unsupported (see target.h). */
6048 return 1;
6049 }
6050
6051 /* Implement the to_stopped_by_sw_breakpoint target_ops
6052 method. */
6053
6054 static int
6055 linux_stopped_by_sw_breakpoint (void)
6056 {
6057 struct lwp_info *lwp = get_thread_lwp (current_thread);
6058
6059 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6060 }
6061
6062 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6063 method. */
6064
6065 static int
6066 linux_supports_stopped_by_sw_breakpoint (void)
6067 {
6068 return USE_SIGTRAP_SIGINFO;
6069 }
6070
6071 /* Implement the to_stopped_by_hw_breakpoint target_ops
6072 method. */
6073
6074 static int
6075 linux_stopped_by_hw_breakpoint (void)
6076 {
6077 struct lwp_info *lwp = get_thread_lwp (current_thread);
6078
6079 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6080 }
6081
6082 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6083 method. */
6084
6085 static int
6086 linux_supports_stopped_by_hw_breakpoint (void)
6087 {
6088 return USE_SIGTRAP_SIGINFO;
6089 }
6090
6091 /* Implement the supports_hardware_single_step target_ops method. */
6092
6093 static int
6094 linux_supports_hardware_single_step (void)
6095 {
6096 return can_hardware_single_step ();
6097 }
6098
6099 static int
6100 linux_supports_software_single_step (void)
6101 {
6102 return can_software_single_step ();
6103 }
6104
6105 static int
6106 linux_stopped_by_watchpoint (void)
6107 {
6108 struct lwp_info *lwp = get_thread_lwp (current_thread);
6109
6110 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6111 }
6112
6113 static CORE_ADDR
6114 linux_stopped_data_address (void)
6115 {
6116 struct lwp_info *lwp = get_thread_lwp (current_thread);
6117
6118 return lwp->stopped_data_address;
6119 }
6120
6121 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6122 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6123 && defined(PT_TEXT_END_ADDR)
6124
6125 /* This is only used for targets that define PT_TEXT_ADDR,
6126    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
6127    target presumably has other ways of acquiring this information,
6128    such as loadmaps.  */
6129
6130 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6131 to tell gdb about. */
6132
6133 static int
6134 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6135 {
6136 unsigned long text, text_end, data;
6137 int pid = lwpid_of (current_thread);
6138
6139 errno = 0;
6140
6141 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6142 (PTRACE_TYPE_ARG4) 0);
6143 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6144 (PTRACE_TYPE_ARG4) 0);
6145 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6146 (PTRACE_TYPE_ARG4) 0);
6147
6148 if (errno == 0)
6149 {
6150 /* Both text and data offsets produced at compile-time (and so
6151 used by gdb) are relative to the beginning of the program,
6152 with the data segment immediately following the text segment.
6153 However, the actual runtime layout in memory may put the data
6154 somewhere else, so when we send gdb a data base-address, we
6155 use the real data base address and subtract the compile-time
6156 data base-address from it (which is just the length of the
6157 text segment). BSS immediately follows data in both
6158 cases. */
6159 *text_p = text;
6160 *data_p = data - (text_end - text);
6161
6162 return 1;
6163 }
6164 return 0;
6165 }
6166 #endif
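
/* Worked example of the offset computation in linux_read_offsets
   (made-up addresses): with compile-time text at 0 and a 0x4000-byte
   text segment, gdb's data addresses start at 0x4000.  If the
   runtime layout is text == 0x10000, text_end == 0x14000 and
   data == 0x30000, we report *data_p == 0x30000 - 0x4000 == 0x2c000,
   so that gdb's 0x4000-relative data addresses relocate onto the
   real 0x30000 base.  */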
6167
6168 static int
6169 linux_qxfer_osdata (const char *annex,
6170 unsigned char *readbuf, unsigned const char *writebuf,
6171 CORE_ADDR offset, int len)
6172 {
6173 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6174 }
6175
6176 /* Convert a native/host siginfo object, into/from the siginfo in the
6177 layout of the inferiors' architecture. */
6178
6179 static void
6180 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6181 {
6182 int done = 0;
6183
6184 if (the_low_target.siginfo_fixup != NULL)
6185 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6186
6187 /* If there was no callback, or the callback didn't do anything,
6188 then just do a straight memcpy. */
6189 if (!done)
6190 {
6191 if (direction == 1)
6192 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6193 else
6194 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6195 }
6196 }
6197
6198 static int
6199 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6200 unsigned const char *writebuf, CORE_ADDR offset, int len)
6201 {
6202 int pid;
6203 siginfo_t siginfo;
6204 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6205
6206 if (current_thread == NULL)
6207 return -1;
6208
6209 pid = lwpid_of (current_thread);
6210
6211 if (debug_threads)
6212 debug_printf ("%s siginfo for lwp %d.\n",
6213 readbuf != NULL ? "Reading" : "Writing",
6214 pid);
6215
6216 if (offset >= sizeof (siginfo))
6217 return -1;
6218
6219 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6220 return -1;
6221
6222 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6223 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6224 inferior with a 64-bit GDBSERVER should look the same as debugging it
6225 with a 32-bit GDBSERVER, we need to convert it. */
6226 siginfo_fixup (&siginfo, inf_siginfo, 0);
6227
6228 if (offset + len > sizeof (siginfo))
6229 len = sizeof (siginfo) - offset;
6230
6231 if (readbuf != NULL)
6232 memcpy (readbuf, inf_siginfo + offset, len);
6233 else
6234 {
6235 memcpy (inf_siginfo + offset, writebuf, len);
6236
6237 /* Convert back to ptrace layout before flushing it out. */
6238 siginfo_fixup (&siginfo, inf_siginfo, 1);
6239
6240 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6241 return -1;
6242 }
6243
6244 return len;
6245 }
6246
6247 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6248    it lets us notice when children change state; and it serves as the
6249    handler for the sigsuspend in my_waitpid.  */
6250
6251 static void
6252 sigchld_handler (int signo)
6253 {
6254 int old_errno = errno;
6255
6256 if (debug_threads)
6257 {
6258 do
6259 {
6260 /* fprintf is not async-signal-safe, so call write
6261 directly. */
6262 if (write (2, "sigchld_handler\n",
6263 sizeof ("sigchld_handler\n") - 1) < 0)
6264 break; /* just ignore */
6265 } while (0);
6266 }
6267
6268 if (target_is_async_p ())
6269 async_file_mark (); /* trigger a linux_wait */
6270
6271 errno = old_errno;
6272 }
6273
6274 static int
6275 linux_supports_non_stop (void)
6276 {
6277 return 1;
6278 }
6279
6280 static int
6281 linux_async (int enable)
6282 {
6283 int previous = target_is_async_p ();
6284
6285 if (debug_threads)
6286 debug_printf ("linux_async (%d), previous=%d\n",
6287 enable, previous);
6288
6289 if (previous != enable)
6290 {
6291 sigset_t mask;
6292 sigemptyset (&mask);
6293 sigaddset (&mask, SIGCHLD);
6294
6295 sigprocmask (SIG_BLOCK, &mask, NULL);
6296
6297 if (enable)
6298 {
6299 if (pipe (linux_event_pipe) == -1)
6300 {
6301 linux_event_pipe[0] = -1;
6302 linux_event_pipe[1] = -1;
6303 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6304
6305 warning ("creating event pipe failed.");
6306 return previous;
6307 }
6308
6309 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6310 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6311
6312 /* Register the event loop handler. */
6313 add_file_handler (linux_event_pipe[0],
6314 handle_target_event, NULL);
6315
6316 /* Always trigger a linux_wait. */
6317 async_file_mark ();
6318 }
6319 else
6320 {
6321 delete_file_handler (linux_event_pipe[0]);
6322
6323 close (linux_event_pipe[0]);
6324 close (linux_event_pipe[1]);
6325 linux_event_pipe[0] = -1;
6326 linux_event_pipe[1] = -1;
6327 }
6328
6329 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6330 }
6331
6332 return previous;
6333 }
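
/* Illustrative sketch, not gdbserver code: the event pipe set up in
   linux_async is an instance of the classic self-pipe trick.  The
   signal handler does nothing but a write - the only
   async-signal-safe part - and the event loop wakes up on the pipe's
   read end.  Assumes a pipe with both ends set O_NONBLOCK, as above.

static int example_event_pipe[2];

static void
example_mark_event (void)
{
  int old_errno = errno;

  if (write (example_event_pipe[1], "+", 1) < 0)
    {
      // Pipe full or closed; nothing safe to do from a handler.
    }

  errno = old_errno;
}
*/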
6334
6335 static int
6336 linux_start_non_stop (int nonstop)
6337 {
6338 /* Register or unregister from event-loop accordingly. */
6339 linux_async (nonstop);
6340
6341 if (target_is_async_p () != (nonstop != 0))
6342 return -1;
6343
6344 return 0;
6345 }
6346
6347 static int
6348 linux_supports_multi_process (void)
6349 {
6350 return 1;
6351 }
6352
6353 /* Check if fork events are supported. */
6354
6355 static int
6356 linux_supports_fork_events (void)
6357 {
6358 return linux_supports_tracefork ();
6359 }
6360
6361 /* Check if vfork events are supported. */
6362
6363 static int
6364 linux_supports_vfork_events (void)
6365 {
6366 return linux_supports_tracefork ();
6367 }
6368
6369 /* Check if exec events are supported. */
6370
6371 static int
6372 linux_supports_exec_events (void)
6373 {
6374 return linux_supports_traceexec ();
6375 }
6376
6377 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6378 ptrace flags for all inferiors. This is in case the new GDB connection
6379 doesn't support the same set of events that the previous one did. */
6380
6381 static void
6382 linux_handle_new_gdb_connection (void)
6383 {
6384 /* Request that all the lwps reset their ptrace options. */
6385 for_each_thread ([] (thread_info *thread)
6386 {
6387 struct lwp_info *lwp = get_thread_lwp (thread);
6388
6389 if (!lwp->stopped)
6390 {
6391 /* Stop the lwp so we can modify its ptrace options. */
6392 lwp->must_set_ptrace_flags = 1;
6393 linux_stop_lwp (lwp);
6394 }
6395 else
6396 {
6397 /* Already stopped; go ahead and set the ptrace options. */
6398 struct process_info *proc = find_process_pid (pid_of (thread));
6399 int options = linux_low_ptrace_options (proc->attached);
6400
6401 linux_enable_event_reporting (lwpid_of (thread), options);
6402 lwp->must_set_ptrace_flags = 0;
6403 }
6404 });
6405 }
6406
6407 static int
6408 linux_supports_disable_randomization (void)
6409 {
6410 #ifdef HAVE_PERSONALITY
6411 return 1;
6412 #else
6413 return 0;
6414 #endif
6415 }
6416
6417 static int
6418 linux_supports_agent (void)
6419 {
6420 return 1;
6421 }
6422
6423 static int
6424 linux_supports_range_stepping (void)
6425 {
6426 if (can_software_single_step ())
6427 return 1;
6428 if (*the_low_target.supports_range_stepping == NULL)
6429 return 0;
6430
6431 return (*the_low_target.supports_range_stepping) ();
6432 }
6433
6434 /* Enumerate spufs IDs for process PID. */
6435 static int
6436 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6437 {
6438 int pos = 0;
6439 int written = 0;
6440 char path[128];
6441 DIR *dir;
6442 struct dirent *entry;
6443
6444 sprintf (path, "/proc/%ld/fd", pid);
6445 dir = opendir (path);
6446 if (!dir)
6447 return -1;
6448
6449 rewinddir (dir);
6450 while ((entry = readdir (dir)) != NULL)
6451 {
6452 struct stat st;
6453 struct statfs stfs;
6454 int fd;
6455
6456 fd = atoi (entry->d_name);
6457 if (!fd)
6458 continue;
6459
6460 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6461 if (stat (path, &st) != 0)
6462 continue;
6463 if (!S_ISDIR (st.st_mode))
6464 continue;
6465
6466 if (statfs (path, &stfs) != 0)
6467 continue;
6468 if (stfs.f_type != SPUFS_MAGIC)
6469 continue;
6470
6471 if (pos >= offset && pos + 4 <= offset + len)
6472 {
6473 *(unsigned int *)(buf + pos - offset) = fd;
6474 written += 4;
6475 }
6476 pos += 4;
6477 }
6478
6479 closedir (dir);
6480 return written;
6481 }
6482
6483 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6484 object type, using the /proc file system. */
6485 static int
6486 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6487 unsigned const char *writebuf,
6488 CORE_ADDR offset, int len)
6489 {
6490 long pid = lwpid_of (current_thread);
6491 char buf[128];
6492 int fd = 0;
6493 int ret = 0;
6494
6495 if (!writebuf && !readbuf)
6496 return -1;
6497
6498 if (!*annex)
6499 {
6500 if (!readbuf)
6501 return -1;
6502 else
6503 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6504 }
6505
6506 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6507   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6508 if (fd <= 0)
6509 return -1;
6510
6511 if (offset != 0
6512 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6513 {
6514 close (fd);
6515 return 0;
6516 }
6517
6518 if (writebuf)
6519 ret = write (fd, writebuf, (size_t) len);
6520 else
6521 ret = read (fd, readbuf, (size_t) len);
6522
6523 close (fd);
6524 return ret;
6525 }
6526
6527 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6528 struct target_loadseg
6529 {
6530 /* Core address to which the segment is mapped. */
6531 Elf32_Addr addr;
6532 /* VMA recorded in the program header. */
6533 Elf32_Addr p_vaddr;
6534 /* Size of this segment in memory. */
6535 Elf32_Word p_memsz;
6536 };
6537
6538 # if defined PT_GETDSBT
6539 struct target_loadmap
6540 {
6541 /* Protocol version number, must be zero. */
6542 Elf32_Word version;
6543 /* Pointer to the DSBT table, its size, and the DSBT index. */
6544 unsigned *dsbt_table;
6545 unsigned dsbt_size, dsbt_index;
6546 /* Number of segments in this map. */
6547 Elf32_Word nsegs;
6548 /* The actual memory map. */
6549 struct target_loadseg segs[/*nsegs*/];
6550 };
6551 # define LINUX_LOADMAP PT_GETDSBT
6552 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6553 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6554 # else
6555 struct target_loadmap
6556 {
6557 /* Protocol version number, must be zero. */
6558 Elf32_Half version;
6559 /* Number of segments in this map. */
6560 Elf32_Half nsegs;
6561 /* The actual memory map. */
6562 struct target_loadseg segs[/*nsegs*/];
6563 };
6564 # define LINUX_LOADMAP PTRACE_GETFDPIC
6565 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6566 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6567 # endif
6568
6569 static int
6570 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6571 unsigned char *myaddr, unsigned int len)
6572 {
6573 int pid = lwpid_of (current_thread);
6574 int addr = -1;
6575 struct target_loadmap *data = NULL;
6576 unsigned int actual_length, copy_length;
6577
6578 if (strcmp (annex, "exec") == 0)
6579 addr = (int) LINUX_LOADMAP_EXEC;
6580 else if (strcmp (annex, "interp") == 0)
6581 addr = (int) LINUX_LOADMAP_INTERP;
6582 else
6583 return -1;
6584
6585 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6586 return -1;
6587
6588 if (data == NULL)
6589 return -1;
6590
6591 actual_length = sizeof (struct target_loadmap)
6592 + sizeof (struct target_loadseg) * data->nsegs;
6593
6594 if (offset < 0 || offset > actual_length)
6595 return -1;
6596
6597 copy_length = actual_length - offset < len ? actual_length - offset : len;
6598 memcpy (myaddr, (char *) data + offset, copy_length);
6599 return copy_length;
6600 }
6601 #else
6602 # define linux_read_loadmap NULL
6603 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6604
6605 static void
6606 linux_process_qsupported (char **features, int count)
6607 {
6608 if (the_low_target.process_qsupported != NULL)
6609 the_low_target.process_qsupported (features, count);
6610 }
6611
6612 static int
6613 linux_supports_catch_syscall (void)
6614 {
6615 return (the_low_target.get_syscall_trapinfo != NULL
6616 && linux_supports_tracesysgood ());
6617 }
6618
6619 static int
6620 linux_get_ipa_tdesc_idx (void)
6621 {
6622 if (the_low_target.get_ipa_tdesc_idx == NULL)
6623 return 0;
6624
6625 return (*the_low_target.get_ipa_tdesc_idx) ();
6626 }
6627
6628 static int
6629 linux_supports_tracepoints (void)
6630 {
6631 if (*the_low_target.supports_tracepoints == NULL)
6632 return 0;
6633
6634 return (*the_low_target.supports_tracepoints) ();
6635 }
6636
6637 static CORE_ADDR
6638 linux_read_pc (struct regcache *regcache)
6639 {
6640 if (the_low_target.get_pc == NULL)
6641 return 0;
6642
6643 return (*the_low_target.get_pc) (regcache);
6644 }
6645
6646 static void
6647 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6648 {
6649 gdb_assert (the_low_target.set_pc != NULL);
6650
6651 (*the_low_target.set_pc) (regcache, pc);
6652 }
6653
6654 static int
6655 linux_thread_stopped (struct thread_info *thread)
6656 {
6657 return get_thread_lwp (thread)->stopped;
6658 }
6659
6660 /* This exposes stop-all-threads functionality to other modules. */
6661
6662 static void
6663 linux_pause_all (int freeze)
6664 {
6665 stop_all_lwps (freeze, NULL);
6666 }
6667
6668 /* This exposes unstop-all-threads functionality to other gdbserver
6669 modules. */
6670
6671 static void
6672 linux_unpause_all (int unfreeze)
6673 {
6674 unstop_all_lwps (unfreeze, NULL);
6675 }
6676
6677 static int
6678 linux_prepare_to_access_memory (void)
6679 {
6680 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6681 running LWP. */
6682 if (non_stop)
6683 linux_pause_all (1);
6684 return 0;
6685 }
6686
6687 static void
6688 linux_done_accessing_memory (void)
6689 {
6690 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6691 running LWP. */
6692 if (non_stop)
6693 linux_unpause_all (1);
6694 }
6695
6696 static int
6697 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6698 CORE_ADDR collector,
6699 CORE_ADDR lockaddr,
6700 ULONGEST orig_size,
6701 CORE_ADDR *jump_entry,
6702 CORE_ADDR *trampoline,
6703 ULONGEST *trampoline_size,
6704 unsigned char *jjump_pad_insn,
6705 ULONGEST *jjump_pad_insn_size,
6706 CORE_ADDR *adjusted_insn_addr,
6707 CORE_ADDR *adjusted_insn_addr_end,
6708 char *err)
6709 {
6710 return (*the_low_target.install_fast_tracepoint_jump_pad)
6711 (tpoint, tpaddr, collector, lockaddr, orig_size,
6712 jump_entry, trampoline, trampoline_size,
6713 jjump_pad_insn, jjump_pad_insn_size,
6714 adjusted_insn_addr, adjusted_insn_addr_end,
6715 err);
6716 }
6717
6718 static struct emit_ops *
6719 linux_emit_ops (void)
6720 {
6721 if (the_low_target.emit_ops != NULL)
6722 return (*the_low_target.emit_ops) ();
6723 else
6724 return NULL;
6725 }
6726
6727 static int
6728 linux_get_min_fast_tracepoint_insn_len (void)
6729 {
6730 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6731 }
6732
6733 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6734
6735 static int
6736 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6737 CORE_ADDR *phdr_memaddr, int *num_phdr)
6738 {
6739 char filename[PATH_MAX];
6740 int fd;
6741 const int auxv_size = is_elf64
6742 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6743 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6744
6745 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6746
6747 fd = open (filename, O_RDONLY);
6748 if (fd < 0)
6749 return 1;
6750
6751 *phdr_memaddr = 0;
6752 *num_phdr = 0;
6753 while (read (fd, buf, auxv_size) == auxv_size
6754 && (*phdr_memaddr == 0 || *num_phdr == 0))
6755 {
6756 if (is_elf64)
6757 {
6758 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6759
6760 switch (aux->a_type)
6761 {
6762 case AT_PHDR:
6763 *phdr_memaddr = aux->a_un.a_val;
6764 break;
6765 case AT_PHNUM:
6766 *num_phdr = aux->a_un.a_val;
6767 break;
6768 }
6769 }
6770 else
6771 {
6772 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6773
6774 switch (aux->a_type)
6775 {
6776 case AT_PHDR:
6777 *phdr_memaddr = aux->a_un.a_val;
6778 break;
6779 case AT_PHNUM:
6780 *num_phdr = aux->a_un.a_val;
6781 break;
6782 }
6783 }
6784 }
6785
6786 close (fd);
6787
6788 if (*phdr_memaddr == 0 || *num_phdr == 0)
6789 {
6790 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6791 "phdr_memaddr = %ld, phdr_num = %d",
6792 (long) *phdr_memaddr, *num_phdr);
6793 return 2;
6794 }
6795
6796 return 0;
6797 }
6798
6799 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6800
6801 static CORE_ADDR
6802 get_dynamic (const int pid, const int is_elf64)
6803 {
6804 CORE_ADDR phdr_memaddr, relocation;
6805 int num_phdr, i;
6806 unsigned char *phdr_buf;
6807 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6808
6809 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6810 return 0;
6811
6812 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6813 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6814
6815 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6816 return 0;
6817
6818 /* Compute relocation: it is expected to be 0 for "regular" executables,
6819 non-zero for PIE ones. */
6820 relocation = -1;
6821 for (i = 0; relocation == -1 && i < num_phdr; i++)
6822 if (is_elf64)
6823 {
6824 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6825
6826 if (p->p_type == PT_PHDR)
6827 relocation = phdr_memaddr - p->p_vaddr;
6828 }
6829 else
6830 {
6831 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6832
6833 if (p->p_type == PT_PHDR)
6834 relocation = phdr_memaddr - p->p_vaddr;
6835 }
6836
6837 if (relocation == -1)
6838 {
6839       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6840 	 real-world executables, including PIE executables, always have
6841 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6842 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6843 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
6844 
6845 	 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6846 
6847 	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6848
6849 return 0;
6850 }
6851
6852 for (i = 0; i < num_phdr; i++)
6853 {
6854 if (is_elf64)
6855 {
6856 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6857
6858 if (p->p_type == PT_DYNAMIC)
6859 return p->p_vaddr + relocation;
6860 }
6861 else
6862 {
6863 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6864
6865 if (p->p_type == PT_DYNAMIC)
6866 return p->p_vaddr + relocation;
6867 }
6868 }
6869
6870 return 0;
6871 }
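
/* Worked example of the relocation computed in get_dynamic (made-up
   PIE addresses): if the program headers were mapped at
   phdr_memaddr == 0x555555554040 and the PT_PHDR entry records
   p_vaddr == 0x40, then relocation == 0x555555554000.  A PT_DYNAMIC
   entry with p_vaddr == 0x2d80 therefore yields a runtime &_DYNAMIC
   of 0x555555556d80.  */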
6872
6873 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6874 can be 0 if the inferior does not yet have the library list initialized.
6875 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6876 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6877
6878 static CORE_ADDR
6879 get_r_debug (const int pid, const int is_elf64)
6880 {
6881 CORE_ADDR dynamic_memaddr;
6882 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6883 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6884 CORE_ADDR map = -1;
6885
6886 dynamic_memaddr = get_dynamic (pid, is_elf64);
6887 if (dynamic_memaddr == 0)
6888 return map;
6889
6890 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6891 {
6892 if (is_elf64)
6893 {
6894 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6895 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6896 union
6897 {
6898 Elf64_Xword map;
6899 unsigned char buf[sizeof (Elf64_Xword)];
6900 }
6901 rld_map;
6902 #endif
6903 #ifdef DT_MIPS_RLD_MAP
6904 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6905 {
6906 if (linux_read_memory (dyn->d_un.d_val,
6907 rld_map.buf, sizeof (rld_map.buf)) == 0)
6908 return rld_map.map;
6909 else
6910 break;
6911 }
6912 #endif /* DT_MIPS_RLD_MAP */
6913 #ifdef DT_MIPS_RLD_MAP_REL
6914 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6915 {
6916 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6917 rld_map.buf, sizeof (rld_map.buf)) == 0)
6918 return rld_map.map;
6919 else
6920 break;
6921 }
6922 #endif /* DT_MIPS_RLD_MAP_REL */
6923
6924 if (dyn->d_tag == DT_DEBUG && map == -1)
6925 map = dyn->d_un.d_val;
6926
6927 if (dyn->d_tag == DT_NULL)
6928 break;
6929 }
6930 else
6931 {
6932 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6933 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6934 union
6935 {
6936 Elf32_Word map;
6937 unsigned char buf[sizeof (Elf32_Word)];
6938 }
6939 rld_map;
6940 #endif
6941 #ifdef DT_MIPS_RLD_MAP
6942 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6943 {
6944 if (linux_read_memory (dyn->d_un.d_val,
6945 rld_map.buf, sizeof (rld_map.buf)) == 0)
6946 return rld_map.map;
6947 else
6948 break;
6949 }
6950 #endif /* DT_MIPS_RLD_MAP */
6951 #ifdef DT_MIPS_RLD_MAP_REL
6952 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6953 {
6954 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6955 rld_map.buf, sizeof (rld_map.buf)) == 0)
6956 return rld_map.map;
6957 else
6958 break;
6959 }
6960 #endif /* DT_MIPS_RLD_MAP_REL */
6961
6962 if (dyn->d_tag == DT_DEBUG && map == -1)
6963 map = dyn->d_un.d_val;
6964
6965 if (dyn->d_tag == DT_NULL)
6966 break;
6967 }
6968
6969 dynamic_memaddr += dyn_size;
6970 }
6971
6972 return map;
6973 }
6974
6975 /* Read one pointer from MEMADDR in the inferior. */
6976
6977 static int
6978 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6979 {
6980 int ret;
6981
6982 /* Go through a union so this works on either big or little endian
6983 hosts, when the inferior's pointer size is smaller than the size
6984 of CORE_ADDR. It is assumed the inferior's endianness is the
6985      same as the superior's.  */
6986 union
6987 {
6988 CORE_ADDR core_addr;
6989 unsigned int ui;
6990 unsigned char uc;
6991 } addr;
6992
6993 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6994 if (ret == 0)
6995 {
6996 if (ptr_size == sizeof (CORE_ADDR))
6997 *ptr = addr.core_addr;
6998 else if (ptr_size == sizeof (unsigned int))
6999 *ptr = addr.ui;
7000 else
7001 gdb_assert_not_reached ("unhandled pointer size");
7002 }
7003 return ret;
7004 }
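
/* Worked example for the union in read_one_ptr: with a 32-bit
   inferior (PTR_SIZE == 4), linux_read_memory fills the first four
   bytes of the union, and reading them back through the equally
   sized UI member reinterprets exactly those bytes - correct on both
   big- and little-endian hosts, which a plain read through the
   8-byte CORE_ADDR member would not be.  */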
7005
7006 struct link_map_offsets
7007 {
7008 /* Offset and size of r_debug.r_version. */
7009 int r_version_offset;
7010
7011 /* Offset and size of r_debug.r_map. */
7012 int r_map_offset;
7013
7014 /* Offset to l_addr field in struct link_map. */
7015 int l_addr_offset;
7016
7017 /* Offset to l_name field in struct link_map. */
7018 int l_name_offset;
7019
7020 /* Offset to l_ld field in struct link_map. */
7021 int l_ld_offset;
7022
7023 /* Offset to l_next field in struct link_map. */
7024 int l_next_offset;
7025
7026 /* Offset to l_prev field in struct link_map. */
7027 int l_prev_offset;
7028 };
7029
7030 /* Construct qXfer:libraries-svr4:read reply. */
7031
7032 static int
7033 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7034 unsigned const char *writebuf,
7035 CORE_ADDR offset, int len)
7036 {
7037 char *document;
7038 unsigned document_len;
7039 struct process_info_private *const priv = current_process ()->priv;
7040 char filename[PATH_MAX];
7041 int pid, is_elf64;
7042
7043 static const struct link_map_offsets lmo_32bit_offsets =
7044 {
7045 0, /* r_version offset. */
7046 4, /* r_debug.r_map offset. */
7047 0, /* l_addr offset in link_map. */
7048 4, /* l_name offset in link_map. */
7049 8, /* l_ld offset in link_map. */
7050 12, /* l_next offset in link_map. */
7051 16 /* l_prev offset in link_map. */
7052 };
7053
7054 static const struct link_map_offsets lmo_64bit_offsets =
7055 {
7056 0, /* r_version offset. */
7057 8, /* r_debug.r_map offset. */
7058 0, /* l_addr offset in link_map. */
7059 8, /* l_name offset in link_map. */
7060 16, /* l_ld offset in link_map. */
7061 24, /* l_next offset in link_map. */
7062 32 /* l_prev offset in link_map. */
7063 };
7064 const struct link_map_offsets *lmo;
7065 unsigned int machine;
7066 int ptr_size;
7067 CORE_ADDR lm_addr = 0, lm_prev = 0;
7068 int allocated = 1024;
7069 char *p;
7070 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7071 int header_done = 0;
7072
7073 if (writebuf != NULL)
7074 return -2;
7075 if (readbuf == NULL)
7076 return -1;
7077
7078 pid = lwpid_of (current_thread);
7079 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7080 is_elf64 = elf_64_file_p (filename, &machine);
7081 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7082 ptr_size = is_elf64 ? 8 : 4;
7083
7084 while (annex[0] != '\0')
7085 {
7086 const char *sep;
7087 CORE_ADDR *addrp;
7088 int len;
7089
7090 sep = strchr (annex, '=');
7091 if (sep == NULL)
7092 break;
7093
7094 len = sep - annex;
7095 if (len == 5 && startswith (annex, "start"))
7096 addrp = &lm_addr;
7097 else if (len == 4 && startswith (annex, "prev"))
7098 addrp = &lm_prev;
7099 else
7100 {
7101 annex = strchr (sep, ';');
7102 if (annex == NULL)
7103 break;
7104 annex++;
7105 continue;
7106 }
7107
7108 annex = decode_address_to_semicolon (addrp, sep + 1);
7109 }
7110
7111 if (lm_addr == 0)
7112 {
7113 int r_version = 0;
7114
7115 if (priv->r_debug == 0)
7116 priv->r_debug = get_r_debug (pid, is_elf64);
7117
7118       /* We failed to find DT_DEBUG.  That situation will not change
7119 	 for this inferior - do not retry it.  Report it to GDB as E01;
7120 	 see GDB's solib-svr4.c for the reasons.  */
7121 if (priv->r_debug == (CORE_ADDR) -1)
7122 return -1;
7123
7124 if (priv->r_debug != 0)
7125 {
7126 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7127 (unsigned char *) &r_version,
7128 sizeof (r_version)) != 0
7129 || r_version != 1)
7130 {
7131 warning ("unexpected r_debug version %d", r_version);
7132 }
7133 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7134 &lm_addr, ptr_size) != 0)
7135 {
7136 warning ("unable to read r_map from 0x%lx",
7137 (long) priv->r_debug + lmo->r_map_offset);
7138 }
7139 }
7140 }
7141
7142 document = (char *) xmalloc (allocated);
7143 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7144 p = document + strlen (document);
7145
7146 while (lm_addr
7147 && read_one_ptr (lm_addr + lmo->l_name_offset,
7148 &l_name, ptr_size) == 0
7149 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7150 &l_addr, ptr_size) == 0
7151 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7152 &l_ld, ptr_size) == 0
7153 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7154 &l_prev, ptr_size) == 0
7155 && read_one_ptr (lm_addr + lmo->l_next_offset,
7156 &l_next, ptr_size) == 0)
7157 {
7158 unsigned char libname[PATH_MAX];
7159
7160 if (lm_prev != l_prev)
7161 {
7162 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7163 (long) lm_prev, (long) l_prev);
7164 break;
7165 }
7166
7167       /* Ignore the first entry even if it has a valid name, as the first
7168 	 entry corresponds to the main executable.  The first entry should
7169 	 not be skipped if the dynamic loader was loaded late by a static
7170 	 executable (see the solib-svr4.c parameter ignore_first).  But in
7171 	 that case the main executable has no PT_DYNAMIC present, and this
7172 	 function has already exited above due to a failed get_r_debug.  */
7173 if (lm_prev == 0)
7174 {
7175 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7176 p = p + strlen (p);
7177 }
7178 else
7179 {
7180 /* Not checking for error because reading may stop before
7181 we've got PATH_MAX worth of characters. */
7182 libname[0] = '\0';
7183 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7184 libname[sizeof (libname) - 1] = '\0';
7185 if (libname[0] != '\0')
7186 {
7187 /* 6x the size for xml_escape_text below. */
7188 size_t len = 6 * strlen ((char *) libname);
7189
7190 if (!header_done)
7191 {
7192 /* Terminate `<library-list-svr4'. */
7193 *p++ = '>';
7194 header_done = 1;
7195 }
7196
7197 while (allocated < p - document + len + 200)
7198 {
7199 /* Expand to guarantee sufficient storage. */
7200 uintptr_t document_len = p - document;
7201
7202 document = (char *) xrealloc (document, 2 * allocated);
7203 allocated *= 2;
7204 p = document + document_len;
7205 }
7206
7207 std::string name = xml_escape_text ((char *) libname);
7208 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7209 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7210 name.c_str (), (unsigned long) lm_addr,
7211 (unsigned long) l_addr, (unsigned long) l_ld);
7212 }
7213 }
7214
7215 lm_prev = lm_addr;
7216 lm_addr = l_next;
7217 }
7218
7219 if (!header_done)
7220 {
7221 /* Empty list; terminate `<library-list-svr4'. */
7222 strcpy (p, "/>");
7223 }
7224 else
7225 strcpy (p, "</library-list-svr4>");
7226
7227 document_len = strlen (document);
7228 if (offset < document_len)
7229 document_len -= offset;
7230 else
7231 document_len = 0;
7232 if (len > document_len)
7233 len = document_len;
7234
7235 memcpy (readbuf, document + offset, len);
7236 xfree (document);
7237
7238 return len;
7239 }
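
/* A hypothetical reply produced by the function above (addresses
   invented for illustration):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fc2000"
            l_addr="0x7ffff7dde000" l_ld="0x7ffff7fb9ba0"/>
   </library-list-svr4>  */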
7240
7241 #ifdef HAVE_LINUX_BTRACE
7242
7243 /* See to_disable_btrace target method. */
7244
7245 static int
7246 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7247 {
7248 enum btrace_error err;
7249
7250 err = linux_disable_btrace (tinfo);
7251 return (err == BTRACE_ERR_NONE ? 0 : -1);
7252 }
7253
7254 /* Encode an Intel Processor Trace configuration. */
7255
7256 static void
7257 linux_low_encode_pt_config (struct buffer *buffer,
7258 const struct btrace_data_pt_config *config)
7259 {
7260 buffer_grow_str (buffer, "<pt-config>\n");
7261
7262 switch (config->cpu.vendor)
7263 {
7264 case CV_INTEL:
7265 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7266 "model=\"%u\" stepping=\"%u\"/>\n",
7267 config->cpu.family, config->cpu.model,
7268 config->cpu.stepping);
7269 break;
7270
7271 default:
7272 break;
7273 }
7274
7275 buffer_grow_str (buffer, "</pt-config>\n");
7276 }
7277
7278 /* Encode a raw buffer. */
7279
7280 static void
7281 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7282 unsigned int size)
7283 {
7284 if (size == 0)
7285 return;
7286
7287 /* We use hex encoding - see common/rsp-low.h. */
7288 buffer_grow_str (buffer, "<raw>\n");
7289
7290 while (size-- > 0)
7291 {
7292 char elem[2];
7293
7294 elem[0] = tohex ((*data >> 4) & 0xf);
7295 elem[1] = tohex (*data++ & 0xf);
7296
7297 buffer_grow (buffer, elem, 2);
7298 }
7299
7300 buffer_grow_str (buffer, "</raw>\n");
7301 }
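
/* Worked example for the hex encoding above: the byte 0x4d is split
   into its nibbles, tohex (0x4) == '4' and tohex (0xd) == 'd', so it
   is emitted as the two characters "4d".  */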
7302
7303 /* See to_read_btrace target method. */
7304
7305 static int
7306 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7307 enum btrace_read_type type)
7308 {
7309 struct btrace_data btrace;
7310 struct btrace_block *block;
7311 enum btrace_error err;
7312 int i;
7313
7314 btrace_data_init (&btrace);
7315
7316 err = linux_read_btrace (&btrace, tinfo, type);
7317 if (err != BTRACE_ERR_NONE)
7318 {
7319 if (err == BTRACE_ERR_OVERFLOW)
7320 buffer_grow_str0 (buffer, "E.Overflow.");
7321 else
7322 buffer_grow_str0 (buffer, "E.Generic Error.");
7323
7324 goto err;
7325 }
7326
7327 switch (btrace.format)
7328 {
7329 case BTRACE_FORMAT_NONE:
7330 buffer_grow_str0 (buffer, "E.No Trace.");
7331 goto err;
7332
7333 case BTRACE_FORMAT_BTS:
7334 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7335 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7336
7337 for (i = 0;
7338 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7339 i++)
7340 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7341 paddress (block->begin), paddress (block->end));
7342
7343 buffer_grow_str0 (buffer, "</btrace>\n");
7344 break;
7345
7346 case BTRACE_FORMAT_PT:
7347 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7348 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7349 buffer_grow_str (buffer, "<pt>\n");
7350
7351 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7352
7353 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7354 btrace.variant.pt.size);
7355
7356 buffer_grow_str (buffer, "</pt>\n");
7357 buffer_grow_str0 (buffer, "</btrace>\n");
7358 break;
7359
7360 default:
7361 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7362 goto err;
7363 }
7364
7365 btrace_data_fini (&btrace);
7366 return 0;
7367
7368 err:
7369 btrace_data_fini (&btrace);
7370 return -1;
7371 }
7372
7373 /* See to_btrace_conf target method. */
7374
7375 static int
7376 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7377 struct buffer *buffer)
7378 {
7379 const struct btrace_config *conf;
7380
7381 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7382 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7383
7384 conf = linux_btrace_conf (tinfo);
7385 if (conf != NULL)
7386 {
7387 switch (conf->format)
7388 {
7389 case BTRACE_FORMAT_NONE:
7390 break;
7391
7392 case BTRACE_FORMAT_BTS:
7393 buffer_xml_printf (buffer, "<bts");
7394 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7395 buffer_xml_printf (buffer, " />\n");
7396 break;
7397
7398 case BTRACE_FORMAT_PT:
7399 buffer_xml_printf (buffer, "<pt");
7400 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7401 buffer_xml_printf (buffer, "/>\n");
7402 break;
7403 }
7404 }
7405
7406 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7407 return 0;
7408 }
7409 #endif /* HAVE_LINUX_BTRACE */
7410
7411 /* See nat/linux-nat.h. */
7412
7413 ptid_t
7414 current_lwp_ptid (void)
7415 {
7416 return ptid_of (current_thread);
7417 }
7418
7419 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7420
7421 static int
7422 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7423 {
7424 if (the_low_target.breakpoint_kind_from_pc != NULL)
7425 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7426 else
7427 return default_breakpoint_kind_from_pc (pcptr);
7428 }
7429
7430 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7431
7432 static const gdb_byte *
7433 linux_sw_breakpoint_from_kind (int kind, int *size)
7434 {
7435 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7436
7437 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7438 }
7439
7440 /* Implementation of the target_ops method
7441 "breakpoint_kind_from_current_state". */
7442
7443 static int
7444 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7445 {
7446 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7447 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7448 else
7449 return linux_breakpoint_kind_from_pc (pcptr);
7450 }
7451
7452 /* Default implementation of linux_target_ops method "set_pc" for
7453 32-bit pc register which is literally named "pc". */
7454
7455 void
7456 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7457 {
7458 uint32_t newpc = pc;
7459
7460 supply_register_by_name (regcache, "pc", &newpc);
7461 }
7462
7463 /* Default implementation of linux_target_ops method "get_pc" for
7464 32-bit pc register which is literally named "pc". */
7465
7466 CORE_ADDR
7467 linux_get_pc_32bit (struct regcache *regcache)
7468 {
7469 uint32_t pc;
7470
7471 collect_register_by_name (regcache, "pc", &pc);
7472 if (debug_threads)
7473 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7474 return pc;
7475 }
7476
7477 /* Default implementation of linux_target_ops method "set_pc" for
7478 64-bit pc register which is literally named "pc". */
7479
7480 void
7481 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7482 {
7483 uint64_t newpc = pc;
7484
7485 supply_register_by_name (regcache, "pc", &newpc);
7486 }
7487
7488 /* Default implementation of linux_target_ops method "get_pc" for
7489 64-bit pc register which is literally named "pc". */
7490
7491 CORE_ADDR
7492 linux_get_pc_64bit (struct regcache *regcache)
7493 {
7494 uint64_t pc;
7495
7496 collect_register_by_name (regcache, "pc", &pc);
7497 if (debug_threads)
7498 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7499 return pc;
7500 }
7501
7502
7503 static struct target_ops linux_target_ops = {
7504 linux_create_inferior,
7505 linux_post_create_inferior,
7506 linux_attach,
7507 linux_kill,
7508 linux_detach,
7509 linux_mourn,
7510 linux_join,
7511 linux_thread_alive,
7512 linux_resume,
7513 linux_wait,
7514 linux_fetch_registers,
7515 linux_store_registers,
7516 linux_prepare_to_access_memory,
7517 linux_done_accessing_memory,
7518 linux_read_memory,
7519 linux_write_memory,
7520 linux_look_up_symbols,
7521 linux_request_interrupt,
7522 linux_read_auxv,
7523 linux_supports_z_point_type,
7524 linux_insert_point,
7525 linux_remove_point,
7526 linux_stopped_by_sw_breakpoint,
7527 linux_supports_stopped_by_sw_breakpoint,
7528 linux_stopped_by_hw_breakpoint,
7529 linux_supports_stopped_by_hw_breakpoint,
7530 linux_supports_hardware_single_step,
7531 linux_stopped_by_watchpoint,
7532 linux_stopped_data_address,
7533 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7534 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7535 && defined(PT_TEXT_END_ADDR)
7536 linux_read_offsets,
7537 #else
7538 NULL,
7539 #endif
7540 #ifdef USE_THREAD_DB
7541 thread_db_get_tls_address,
7542 #else
7543 NULL,
7544 #endif
7545 linux_qxfer_spu,
7546 hostio_last_error_from_errno,
7547 linux_qxfer_osdata,
7548 linux_xfer_siginfo,
7549 linux_supports_non_stop,
7550 linux_async,
7551 linux_start_non_stop,
7552 linux_supports_multi_process,
7553 linux_supports_fork_events,
7554 linux_supports_vfork_events,
7555 linux_supports_exec_events,
7556 linux_handle_new_gdb_connection,
7557 #ifdef USE_THREAD_DB
7558 thread_db_handle_monitor_command,
7559 #else
7560 NULL,
7561 #endif
7562 linux_common_core_of_thread,
7563 linux_read_loadmap,
7564 linux_process_qsupported,
7565 linux_supports_tracepoints,
7566 linux_read_pc,
7567 linux_write_pc,
7568 linux_thread_stopped,
7569 NULL,
7570 linux_pause_all,
7571 linux_unpause_all,
7572 linux_stabilize_threads,
7573 linux_install_fast_tracepoint_jump_pad,
7574 linux_emit_ops,
7575 linux_supports_disable_randomization,
7576 linux_get_min_fast_tracepoint_insn_len,
7577 linux_qxfer_libraries_svr4,
7578 linux_supports_agent,
7579 #ifdef HAVE_LINUX_BTRACE
7580 linux_supports_btrace,
7581 linux_enable_btrace,
7582 linux_low_disable_btrace,
7583 linux_low_read_btrace,
7584 linux_low_btrace_conf,
7585 #else
7586 NULL,
7587 NULL,
7588 NULL,
7589 NULL,
7590 NULL,
7591 #endif
7592 linux_supports_range_stepping,
7593 linux_proc_pid_to_exec_file,
7594 linux_mntns_open_cloexec,
7595 linux_mntns_unlink,
7596 linux_mntns_readlink,
7597 linux_breakpoint_kind_from_pc,
7598 linux_sw_breakpoint_from_kind,
7599 linux_proc_tid_get_name,
7600 linux_breakpoint_kind_from_current_state,
7601 linux_supports_software_single_step,
7602 linux_supports_catch_syscall,
7603 linux_get_ipa_tdesc_idx,
7604 #if USE_THREAD_DB
7605 thread_db_thread_handle,
7606 #else
7607 NULL,
7608 #endif
7609 };
7610
7611 #ifdef HAVE_LINUX_REGSETS
7612 void
7613 initialize_regsets_info (struct regsets_info *info)
7614 {
7615 for (info->num_regsets = 0;
7616 info->regsets[info->num_regsets].size >= 0;
7617 info->num_regsets++)
7618 ;
7619 }
7620 #endif
7621
7622 void
7623 initialize_low (void)
7624 {
7625 struct sigaction sigchld_action;
7626
7627 memset (&sigchld_action, 0, sizeof (sigchld_action));
7628 set_target_ops (&linux_target_ops);
7629
7630 linux_ptrace_init_warnings ();
7631
7632 sigchld_action.sa_handler = sigchld_handler;
7633 sigemptyset (&sigchld_action.sa_mask);
7634 sigchld_action.sa_flags = SA_RESTART;
7635 sigaction (SIGCHLD, &sigchld_action, NULL);
7636
7637 initialize_low_arch ();
7638
7639 linux_check_ptrace_features ();
7640 }