/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
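
/* For reference: W_STOPCODE builds a wait status that WIFSTOPPED
   recognizes, with the signal number in the second byte and 0x7f in
   the low byte.  E.g. on hosts where SIGSTOP is 19 (such as x86),
   W_STOPCODE (SIGSTOP) == (19 << 8) | 0x7f == 0x137f, and WSTOPSIG
   recovers the 19.  */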

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;  /* Entry type */
  union
    {
      uint32_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;  /* Entry type */
  union
    {
      uint64_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
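
/* A minimal usage sketch of the pair above (hypothetical PID 1234),
   for illustration only:

     int status;

     add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
     if (pull_pid_from_list (&stopped_pids, 1234, &status))
       gdb_assert (WIFSTOPPED (status));

   pull_pid_from_list unlinks and frees the matching entry, so each
   recorded stop is consumed at most once.  */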

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
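
/* A note on the test above (based on how the rest of this file uses
   the pipe): the pipe only exists while async mode is on; its read
   end is registered with the event loop, and writing a byte to the
   write end wakes the loop up when a SIGCHLD arrives.  Comparing the
   read end against -1 is therefore a reliable async-mode check.  */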

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF header, 0 if it is 32-bit, and
   -1 if HEADER is not an ELF header at all.  Store the machine in
   *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
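
/* For example, on a typical 64-bit x86 GNU/Linux system,
   elf_64_file_p ("/bin/ls", &machine) would return 1 and set machine
   to EM_X86_64, return 0 for a 32-bit executable, and return -1 if
   the file cannot be opened.  */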

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = xmalloc (sizeof (struct target_desc));
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
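
/* Note for callers of handle_extended_wait: a zero return means
   "report this event to the higher layers" (the fork, vfork and
   vfork-done paths), while a nonzero return means the event was
   consumed here, as in the clone path, where the new LWP is simply
   added to our list.  */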

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
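
/* Worked example for the decr_pc_after_break logic above, assuming
   x86 where decr_pc_after_break is 1: with a breakpoint byte (0xcc)
   planted at 0x1000, the SIGTRAP reports PC == 0x1001,
   sw_breakpoint_pc is 0x1000, and the PC is rewound so that the
   client sees the breakpoint address itself.  */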

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the arch_setup target_ops method.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
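
/* A note on the errno returned above: it feeds the diagnostics in
   the callers via linux_ptrace_attach_fail_reason_string.  E.g.
   ESRCH means the thread is already gone, while EPERM typically
   indicates insufficient privileges or a ptrace hardening
   restriction.  */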

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
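
/* The __WCLONE retry above matters for non-leader threads: a plain
   waitpid only collects children whose exit signal is SIGCHLD, and
   clone threads are typically created with a different (or no) exit
   signal, so they must be waited on with __WCLONE.  */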

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver
   to the thread when detaching; otherwise, it would be
   suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
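
/* For example, if the thread last stopped for a SIGUSR1 that GDB has
   in "pass" state, get_detach_signal returns the host SIGUSR1
   number, and linux_detach_one_lwp below hands it to PTRACE_DETACH
   so that the signal is delivered when the thread resumes; a
   suppressed signal yields 0 and is silently dropped instead.  */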

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
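
/* A minimal usage sketch (hypothetical callback), for illustration
   only:

     static int
     count_lwps_cb (struct lwp_info *lwp, void *data)
     {
       ++*(int *) data;
       return 0;   // Zero means "keep iterating".
     }

     int n = 0;
     iterate_over_lwps (minus_one_ptid, count_lwps_cb, &n);

   A nonzero callback return stops the walk and makes
   iterate_over_lwps return that LWP.  */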

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving way to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_thread));
              current_thread = saved_thread;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_thread, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
1884
1885 /* Enqueue one signal in the "signals to report later when out of the
1886 jump pad" list. */
1887
1888 static void
1889 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1890 {
1891 struct pending_signals *p_sig;
1892 struct thread_info *thread = get_lwp_thread (lwp);
1893
1894 if (debug_threads)
1895 debug_printf ("Deferring signal %d for LWP %ld.\n",
1896 WSTOPSIG (*wstat), lwpid_of (thread));
1897
1898 if (debug_threads)
1899 {
1900 struct pending_signals *sig;
1901
1902 for (sig = lwp->pending_signals_to_report;
1903 sig != NULL;
1904 sig = sig->prev)
1905 debug_printf (" Already queued %d\n",
1906 sig->signal);
1907
1908 debug_printf (" (no more currently queued signals)\n");
1909 }
1910
1911 /* Don't enqueue non-RT signals if they are already in the deferred
1912 queue. (SIGSTOP being the easiest signal to see ending up here
1913 twice.) */
1914 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1915 {
1916 struct pending_signals *sig;
1917
1918 for (sig = lwp->pending_signals_to_report;
1919 sig != NULL;
1920 sig = sig->prev)
1921 {
1922 if (sig->signal == WSTOPSIG (*wstat))
1923 {
1924 if (debug_threads)
1925 debug_printf ("Not requeuing already queued non-RT signal %d"
1926 " for LWP %ld\n",
1927 sig->signal,
1928 lwpid_of (thread));
1929 return;
1930 }
1931 }
1932 }
1933
1934 p_sig = xmalloc (sizeof (*p_sig));
1935 p_sig->prev = lwp->pending_signals_to_report;
1936 p_sig->signal = WSTOPSIG (*wstat);
1937 memset (&p_sig->info, 0, sizeof (siginfo_t));
1938 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1939 &p_sig->info);
1940
1941 lwp->pending_signals_to_report = p_sig;
1942 }
1943
1944 /* Dequeue one signal from the "signals to report later when out of
1945 the jump pad" list. */
1946
1947 static int
1948 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1949 {
1950 struct thread_info *thread = get_lwp_thread (lwp);
1951
1952 if (lwp->pending_signals_to_report != NULL)
1953 {
1954 struct pending_signals **p_sig;
1955
1956 p_sig = &lwp->pending_signals_to_report;
1957 while ((*p_sig)->prev != NULL)
1958 p_sig = &(*p_sig)->prev;
1959
1960 *wstat = W_STOPCODE ((*p_sig)->signal);
1961 if ((*p_sig)->info.si_signo != 0)
1962 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1963 &(*p_sig)->info);
1964 free (*p_sig);
1965 *p_sig = NULL;
1966
1967 if (debug_threads)
1968 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1969 WSTOPSIG (*wstat), lwpid_of (thread));
1970
1971 if (debug_threads)
1972 {
1973 struct pending_signals *sig;
1974
1975 for (sig = lwp->pending_signals_to_report;
1976 sig != NULL;
1977 sig = sig->prev)
1978 debug_printf (" Still queued %d\n",
1979 sig->signal);
1980
1981 debug_printf (" (no more queued signals)\n");
1982 }
1983
1984 return 1;
1985 }
1986
1987 return 0;
1988 }
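
/* Illustrative sketch, not compiled: the two functions above push
   deferred signals LIFO through the `prev' links, and drain them FIFO
   by walking to the oldest entry.  The same pattern in isolation,
   with a hypothetical `deferred_sig' type standing in for
   struct pending_signals:  */
#if 0
struct deferred_sig { struct deferred_sig *prev; int signal; };

static struct deferred_sig *queue;	/* Newest entry first.  */

static void
push_sig (int sig)
{
  struct deferred_sig *d = xmalloc (sizeof (*d));

  d->prev = queue;
  d->signal = sig;
  queue = d;
}

static int
pop_oldest_sig (int *wstat)
{
  struct deferred_sig **d = &queue;

  if (queue == NULL)
    return 0;

  /* Walk to the oldest (tail) entry, so signals come out in
     arrival order.  */
  while ((*d)->prev != NULL)
    d = &(*d)->prev;

  /* Synthesize a stop status, as dequeue_one_deferred_signal does.  */
  *wstat = W_STOPCODE ((*d)->signal);
  free (*d);
  *d = NULL;
  return 1;
}
#endif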
1989
1990 /* Fetch the possibly triggered data watchpoint info and store it in
1991 CHILD.
1992
1993 On some archs, like x86, that use debug registers to set
1994 watchpoints, the way to know which watched address trapped is
1995 to check a register that selects which address to watch. The
1996 problem is that, between setting the watchpoint and reading
1997 back which data address trapped, the user may change the set
1998 of watchpoints, and, as a consequence, GDB changes the debug
1999 registers in the inferior. To avoid reading back a stale
2000 stopped-data-address when that happens, we cache in CHILD the
2001 fact that a watchpoint trapped, and the corresponding data
2002 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
2003 changes the debug registers meanwhile, we can rely on the cached data. */
2004
2005 static int
2006 check_stopped_by_watchpoint (struct lwp_info *child)
2007 {
2008 if (the_low_target.stopped_by_watchpoint != NULL)
2009 {
2010 struct thread_info *saved_thread;
2011
2012 saved_thread = current_thread;
2013 current_thread = get_lwp_thread (child);
2014
2015 if (the_low_target.stopped_by_watchpoint ())
2016 {
2017 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2018
2019 if (the_low_target.stopped_data_address != NULL)
2020 child->stopped_data_address
2021 = the_low_target.stopped_data_address ();
2022 else
2023 child->stopped_data_address = 0;
2024 }
2025
2026 current_thread = saved_thread;
2027 }
2028
2029 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2030 }
2031
2032 /* Return the ptrace options that we want to try to enable. */
2033
2034 static int
2035 linux_low_ptrace_options (int attached)
2036 {
2037 int options = 0;
2038
2039 if (!attached)
2040 options |= PTRACE_O_EXITKILL;
2041
2042 if (report_fork_events)
2043 options |= PTRACE_O_TRACEFORK;
2044
2045 if (report_vfork_events)
2046 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2047
2048 return options;
2049 }
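
/* Illustrative sketch, not compiled: the options computed above are
   handed to linux_enable_event_reporting, which ultimately amounts to
   a PTRACE_SETOPTIONS request on a stopped tracee, roughly like the
   hypothetical helper below:  */
#if 0
static void
apply_ptrace_options (pid_t pid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}
#endif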
2050
2051 /* Do low-level handling of the event, and check if we should go on
2052 and pass it to caller code. Return the affected lwp if we
2053 should, or NULL otherwise. */
2054
2055 static struct lwp_info *
2056 linux_low_filter_event (int lwpid, int wstat)
2057 {
2058 struct lwp_info *child;
2059 struct thread_info *thread;
2060 int have_stop_pc = 0;
2061
2062 child = find_lwp_pid (pid_to_ptid (lwpid));
2063
2064 /* If we didn't find a process, one of two things presumably happened:
2065 - A process we started and then detached from has exited. Ignore it.
2066 - A process we are controlling has forked and the new child's stop
2067 was reported to us by the kernel. Save its PID. */
2068 if (child == NULL && WIFSTOPPED (wstat))
2069 {
2070 add_to_pid_list (&stopped_pids, lwpid, wstat);
2071 return NULL;
2072 }
2073 else if (child == NULL)
2074 return NULL;
2075
2076 thread = get_lwp_thread (child);
2077
2078 child->stopped = 1;
2079
2080 child->last_status = wstat;
2081
2082 /* Check if the thread has exited. */
2083 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2084 {
2085 if (debug_threads)
2086 debug_printf ("LLFE: %d exited.\n", lwpid);
2087 if (num_lwps (pid_of (thread)) > 1)
2088 {
2089
2090 /* If there is at least one more LWP, then the exit signal was
2091 not the end of the debugged application and should be
2092 ignored. */
2093 delete_lwp (child);
2094 return NULL;
2095 }
2096 else
2097 {
2098 /* This was the last lwp in the process. Since events are
2099 serialized to GDB core, we can't report this one right
2100 now. But GDB core and the other target layers will want
2101 to be notified about the exit code/signal, so leave the
2102 status pending for the next time we're able to report
2103 it. */
2104 mark_lwp_dead (child, wstat);
2105 return child;
2106 }
2107 }
2108
2109 gdb_assert (WIFSTOPPED (wstat));
2110
2111 if (WIFSTOPPED (wstat))
2112 {
2113 struct process_info *proc;
2114
2115 /* Architecture-specific setup after inferior is running. */
2116 proc = find_process_pid (pid_of (thread));
2117 if (proc->tdesc == NULL)
2118 {
2119 if (proc->attached)
2120 {
2121 struct thread_info *saved_thread;
2122
2123 /* This needs to happen after we have attached to the
2124 inferior and it is stopped for the first time, but
2125 before we access any inferior registers. */
2126 saved_thread = current_thread;
2127 current_thread = thread;
2128
2129 the_low_target.arch_setup ();
2130
2131 current_thread = saved_thread;
2132 }
2133 else
2134 {
2135 /* The process is started, but GDBserver will do
2136 architecture-specific setup after the program stops at
2137 the first instruction. */
2138 child->status_pending_p = 1;
2139 child->status_pending = wstat;
2140 return child;
2141 }
2142 }
2143 }
2144
2145 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2146 {
2147 struct process_info *proc = find_process_pid (pid_of (thread));
2148 int options = linux_low_ptrace_options (proc->attached);
2149
2150 linux_enable_event_reporting (lwpid, options);
2151 child->must_set_ptrace_flags = 0;
2152 }
2153
2154 /* Be careful not to overwrite stop_pc until
2155 check_stopped_by_breakpoint is called. */
2156 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2157 && linux_is_extended_waitstatus (wstat))
2158 {
2159 child->stop_pc = get_pc (child);
2160 if (handle_extended_wait (child, wstat))
2161 {
2162 /* The event has been handled, so just return without
2163 reporting it. */
2164 return NULL;
2165 }
2166 }
2167
2168 /* Check first whether this was a SW/HW breakpoint before checking
2169 watchpoints, because at least s390 can't tell the data address of
2170 hardware watchpoint hits, and returns stopped-by-watchpoint as
2171 long as there's a watchpoint set. */
2172 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2173 {
2174 if (check_stopped_by_breakpoint (child))
2175 have_stop_pc = 1;
2176 }
2177
2178 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2179 or hardware watchpoint. Check which is which if we got
2180 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2181 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2182 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2183 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2184 check_stopped_by_watchpoint (child);
2185
2186 if (!have_stop_pc)
2187 child->stop_pc = get_pc (child);
2188
2189 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2190 && child->stop_expected)
2191 {
2192 if (debug_threads)
2193 debug_printf ("Expected stop.\n");
2194 child->stop_expected = 0;
2195
2196 if (thread->last_resume_kind == resume_stop)
2197 {
2198 /* We want to report the stop to the core. Treat the
2199 SIGSTOP as a normal event. */
2200 if (debug_threads)
2201 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2202 target_pid_to_str (ptid_of (thread)));
2203 }
2204 else if (stopping_threads != NOT_STOPPING_THREADS)
2205 {
2206 /* Stopping threads. We don't want this SIGSTOP to end up
2207 pending. */
2208 if (debug_threads)
2209 debug_printf ("LLW: SIGSTOP caught for %s "
2210 "while stopping threads.\n",
2211 target_pid_to_str (ptid_of (thread)));
2212 return NULL;
2213 }
2214 else
2215 {
2216 /* This is a delayed SIGSTOP. Filter out the event. */
2217 if (debug_threads)
2218 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2219 child->stepping ? "step" : "continue",
2220 target_pid_to_str (ptid_of (thread)));
2221
2222 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2223 return NULL;
2224 }
2225 }
2226
2227 child->status_pending_p = 1;
2228 child->status_pending = wstat;
2229 return child;
2230 }
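
/* Illustrative sketch, not compiled: the wait-status tests used
   throughout the filter above, gathered in one place for reference:  */
#if 0
static void
describe_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    fprintf (stderr, "exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    fprintf (stderr, "killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    fprintf (stderr, "stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif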
2231
2232 /* Resume LWPs that are currently stopped without any pending status
2233 to report, but are resumed from the core's perspective. */
2234
2235 static void
2236 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2237 {
2238 struct thread_info *thread = (struct thread_info *) entry;
2239 struct lwp_info *lp = get_thread_lwp (thread);
2240
2241 if (lp->stopped
2242 && !lp->status_pending_p
2243 && thread->last_resume_kind != resume_stop
2244 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2245 {
2246 int step = thread->last_resume_kind == resume_step;
2247
2248 if (debug_threads)
2249 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2250 target_pid_to_str (ptid_of (thread)),
2251 paddress (lp->stop_pc),
2252 step);
2253
2254 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2255 }
2256 }
2257
2258 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2259 match FILTER_PTID (leaving others pending). The PTIDs can be:
2260 minus_one_ptid, to specify any child; a pid PTID, specifying all
2261 lwps of a thread group; or a PTID representing a single lwp. Store
2262 the stop status through the status pointer WSTAT. OPTIONS is
2263 passed to the waitpid call. Return 0 if no event was found and
2264 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2265 were found. Return the PID of the stopped child otherwise. */
2266
2267 static int
2268 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2269 int *wstatp, int options)
2270 {
2271 struct thread_info *event_thread;
2272 struct lwp_info *event_child, *requested_child;
2273 sigset_t block_mask, prev_mask;
2274
2275 retry:
2276 /* N.B. event_thread points to the thread_info struct that contains
2277 event_child. Keep them in sync. */
2278 event_thread = NULL;
2279 event_child = NULL;
2280 requested_child = NULL;
2281
2282 /* Check for a lwp with a pending status. */
2283
2284 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2285 {
2286 event_thread = (struct thread_info *)
2287 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2288 if (event_thread != NULL)
2289 event_child = get_thread_lwp (event_thread);
2290 if (debug_threads && event_thread)
2291 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2292 }
2293 else if (!ptid_equal (filter_ptid, null_ptid))
2294 {
2295 requested_child = find_lwp_pid (filter_ptid);
2296
2297 if (stopping_threads == NOT_STOPPING_THREADS
2298 && requested_child->status_pending_p
2299 && requested_child->collecting_fast_tracepoint)
2300 {
2301 enqueue_one_deferred_signal (requested_child,
2302 &requested_child->status_pending);
2303 requested_child->status_pending_p = 0;
2304 requested_child->status_pending = 0;
2305 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2306 }
2307
2308 if (requested_child->suspended
2309 && requested_child->status_pending_p)
2310 {
2311 internal_error (__FILE__, __LINE__,
2312 "requesting an event out of a"
2313 " suspended child?");
2314 }
2315
2316 if (requested_child->status_pending_p)
2317 {
2318 event_child = requested_child;
2319 event_thread = get_lwp_thread (event_child);
2320 }
2321 }
2322
2323 if (event_child != NULL)
2324 {
2325 if (debug_threads)
2326 debug_printf ("Got an event from pending child %ld (%04x)\n",
2327 lwpid_of (event_thread), event_child->status_pending);
2328 *wstatp = event_child->status_pending;
2329 event_child->status_pending_p = 0;
2330 event_child->status_pending = 0;
2331 current_thread = event_thread;
2332 return lwpid_of (event_thread);
2333 }
2334
2335 /* But if we don't find a pending event, we'll have to wait.
2336
2337 We only enter this loop if no process has a pending wait status.
2338 Thus any action taken in response to a wait status inside this
2339 loop is responding as soon as we detect the status, not after any
2340 pending events. */
2341
2342 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2343 all signals while here. */
2344 sigfillset (&block_mask);
2345 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2346
2347 /* Always pull all events out of the kernel. We'll randomly select
2348 an event LWP out of all that have events, to prevent
2349 starvation. */
2350 while (event_child == NULL)
2351 {
2352 pid_t ret = 0;
2353
2354 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2355 quirks:
2356
2357 - If the thread group leader exits while other threads in the
2358 thread group still exist, waitpid(TGID, ...) hangs. That
2359 waitpid won't return an exit status until the other threads
2360 in the group are reaped.
2361
2362 - When a non-leader thread execs, that thread just vanishes
2363 without reporting an exit (so we'd hang if we waited for it
2364 explicitly in that case). The exec event is reported to
2365 the TGID pid (although we don't currently enable exec
2366 events). */
2367 errno = 0;
2368 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2369
2370 if (debug_threads)
2371 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2372 ret, errno ? strerror (errno) : "ERRNO-OK");
2373
2374 if (ret > 0)
2375 {
2376 if (debug_threads)
2377 {
2378 debug_printf ("LLW: waitpid %ld received %s\n",
2379 (long) ret, status_to_str (*wstatp));
2380 }
2381
2382 /* Filter all events. IOW, leave all events pending. We'll
2383 randomly select an event LWP out of all that have events
2384 below. */
2385 linux_low_filter_event (ret, *wstatp);
2386 /* Retry until nothing comes out of waitpid. A single
2387 SIGCHLD can indicate more than one child stopped. */
2388 continue;
2389 }
2390
2391 /* Now that we've pulled all events out of the kernel, resume
2392 LWPs that don't have an interesting event to report. */
2393 if (stopping_threads == NOT_STOPPING_THREADS)
2394 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2395
2396 /* ... and find an LWP with a status to report to the core, if
2397 any. */
2398 event_thread = (struct thread_info *)
2399 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2400 if (event_thread != NULL)
2401 {
2402 event_child = get_thread_lwp (event_thread);
2403 *wstatp = event_child->status_pending;
2404 event_child->status_pending_p = 0;
2405 event_child->status_pending = 0;
2406 break;
2407 }
2408
2409 /* Check for zombie thread group leaders. Those can't be reaped
2410 until all other threads in the thread group are. */
2411 check_zombie_leaders ();
2412
2413 /* If there are no resumed children left in the set of LWPs we
2414 want to wait for, bail. We can't just block in
2415 waitpid/sigsuspend, because lwps might have been left stopped
2416 in trace-stop state, and we'd be stuck forever waiting for
2417 their status to change (which would only happen if we resumed
2418 them). Even if WNOHANG is set, this return code is preferred
2419 over 0 (below), as it is more detailed. */
2420 if ((find_inferior (&all_threads,
2421 not_stopped_callback,
2422 &wait_ptid) == NULL))
2423 {
2424 if (debug_threads)
2425 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2426 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2427 return -1;
2428 }
2429
2430 /* No interesting event to report to the caller. */
2431 if ((options & WNOHANG))
2432 {
2433 if (debug_threads)
2434 debug_printf ("WNOHANG set, no event found\n");
2435
2436 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2437 return 0;
2438 }
2439
2440 /* Block until we get an event reported with SIGCHLD. */
2441 if (debug_threads)
2442 debug_printf ("sigsuspend'ing\n");
2443
2444 sigsuspend (&prev_mask);
2445 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2446 goto retry;
2447 }
2448
2449 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2450
2451 current_thread = event_thread;
2452
2453 /* Check for thread exit. */
2454 if (! WIFSTOPPED (*wstatp))
2455 {
2456 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2457
2458 if (debug_threads)
2459 debug_printf ("LWP %d is the last lwp of process. "
2460 "Process %ld exiting.\n",
2461 pid_of (event_thread), lwpid_of (event_thread));
2462 return lwpid_of (event_thread);
2463 }
2464
2465 return lwpid_of (event_thread);
2466 }
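
/* Illustrative sketch, not compiled: the block/drain/suspend core of
   the loop above, stripped of the LWP bookkeeping.  The hypothetical
   process_event and nothing_left_to_wait_for helpers stand in for
   linux_low_filter_event and the pending-status scans.  Blocking
   SIGCHLD before the WNOHANG drain is what makes sigsuspend
   race-free: a child that stops after the last waitpid still wakes
   the sigsuspend.  */
#if 0
static void
wait_race_free (void)
{
  sigset_t block_mask, prev_mask;
  int wstat;
  pid_t pid;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      /* Drain every event; a single SIGCHLD may cover several.  */
      while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
	process_event (pid, wstat);

      if (nothing_left_to_wait_for ())
	break;

      /* Atomically unblock signals and sleep until SIGCHLD.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
}
#endif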
2467
2468 /* Wait for an event from child(ren) PTID. PTIDs can be:
2469 minus_one_ptid, to specify any child; a pid PTID, specifying all
2470 lwps of a thread group; or a PTID representing a single lwp. Store
2471 the stop status through the status pointer WSTAT. OPTIONS is
2472 passed to the waitpid call. Return 0 if no event was found and
2473 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2474 were found. Return the PID of the stopped child otherwise. */
2475
2476 static int
2477 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2478 {
2479 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2480 }
2481
2482 /* Count the LWPs that have had events. */
2483
2484 static int
2485 count_events_callback (struct inferior_list_entry *entry, void *data)
2486 {
2487 struct thread_info *thread = (struct thread_info *) entry;
2488 struct lwp_info *lp = get_thread_lwp (thread);
2489 int *count = data;
2490
2491 gdb_assert (count != NULL);
2492
2493 /* Count only resumed LWPs that have an event pending. */
2494 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2495 && lp->status_pending_p)
2496 (*count)++;
2497
2498 return 0;
2499 }
2500
2501 /* Select the LWP (if any) that is currently being single-stepped. */
2502
2503 static int
2504 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2505 {
2506 struct thread_info *thread = (struct thread_info *) entry;
2507 struct lwp_info *lp = get_thread_lwp (thread);
2508
2509 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2510 && thread->last_resume_kind == resume_step
2511 && lp->status_pending_p)
2512 return 1;
2513 else
2514 return 0;
2515 }
2516
2517 /* Select the Nth LWP that has had an event. */
2518
2519 static int
2520 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2521 {
2522 struct thread_info *thread = (struct thread_info *) entry;
2523 struct lwp_info *lp = get_thread_lwp (thread);
2524 int *selector = data;
2525
2526 gdb_assert (selector != NULL);
2527
2528 /* Select only resumed LWPs that have an event pending. */
2529 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2530 && lp->status_pending_p)
2531 if ((*selector)-- == 0)
2532 return 1;
2533
2534 return 0;
2535 }
2536
2537 /* Select one LWP out of those that have events pending. */
2538
2539 static void
2540 select_event_lwp (struct lwp_info **orig_lp)
2541 {
2542 int num_events = 0;
2543 int random_selector;
2544 struct thread_info *event_thread = NULL;
2545
2546 /* In all-stop, give preference to the LWP that is being
2547 single-stepped. There will be at most one, and it's the LWP that
2548 the core is most interested in. If we didn't do this, then we'd
2549 have to handle pending step SIGTRAPs somehow in case the core
2550 later continues the previously-stepped thread, otherwise we'd
2551 report the pending SIGTRAP, and the core, not having stepped the
2552 thread, wouldn't understand what the trap was for, and therefore
2553 would report it to the user as a random signal. */
2554 if (!non_stop)
2555 {
2556 event_thread
2557 = (struct thread_info *) find_inferior (&all_threads,
2558 select_singlestep_lwp_callback,
2559 NULL);
2560 if (event_thread != NULL)
2561 {
2562 if (debug_threads)
2563 debug_printf ("SEL: Select single-step %s\n",
2564 target_pid_to_str (ptid_of (event_thread)));
2565 }
2566 }
2567 if (event_thread == NULL)
2568 {
2569 /* No single-stepping LWP. Select one at random, out of those
2570 which have had events. */
2571
2572 /* First see how many events we have. */
2573 find_inferior (&all_threads, count_events_callback, &num_events);
2574 gdb_assert (num_events > 0);
2575
2576 /* Now randomly pick a LWP out of those that have had
2577 events. */
2578 random_selector = (int)
2579 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2580
2581 if (debug_threads && num_events > 1)
2582 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2583 num_events, random_selector);
2584
2585 event_thread
2586 = (struct thread_info *) find_inferior (&all_threads,
2587 select_event_lwp_callback,
2588 &random_selector);
2589 }
2590
2591 if (event_thread != NULL)
2592 {
2593 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2594
2595 /* Switch the event LWP. */
2596 *orig_lp = event_lp;
2597 }
2598 }
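
/* Illustrative sketch, not compiled: the randomization above in
   isolation.  Dividing by (RAND_MAX + 1.0) rather than RAND_MAX keeps
   the result strictly below NUM_EVENTS:  */
#if 0
static int
pick_random_index (int num_events)
{
  /* Yields an index in [0, num_events), roughly uniform.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif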
2599
2600 /* Decrement the suspend count of an LWP. */
2601
2602 static int
2603 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2604 {
2605 struct thread_info *thread = (struct thread_info *) entry;
2606 struct lwp_info *lwp = get_thread_lwp (thread);
2607
2608 /* Ignore EXCEPT. */
2609 if (lwp == except)
2610 return 0;
2611
2612 lwp->suspended--;
2613
2614 gdb_assert (lwp->suspended >= 0);
2615 return 0;
2616 }
2617
2618 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2619 non-NULL. */
2620
2621 static void
2622 unsuspend_all_lwps (struct lwp_info *except)
2623 {
2624 find_inferior (&all_threads, unsuspend_one_lwp, except);
2625 }
2626
2627 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2628 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2629 void *data);
2630 static int lwp_running (struct inferior_list_entry *entry, void *data);
2631 static ptid_t linux_wait_1 (ptid_t ptid,
2632 struct target_waitstatus *ourstatus,
2633 int target_options);
2634
2635 /* Stabilize threads (move out of jump pads).
2636
2637 If a thread is midway collecting a fast tracepoint, we need to
2638 finish the collection and move it out of the jump pad before
2639 reporting the signal.
2640
2641 This avoids recursion while collecting (when a signal arrives
2642 midway, and the signal handler itself collects), which would trash
2643 the trace buffer. In case the user set a breakpoint in a signal
2644 handler, this avoids the backtrace showing the jump pad, etc..
2645 Most importantly, there are certain things we can't do safely if
2646 threads are stopped in a jump pad (or in one of its callees). For
2647 example:
2648
2649 - starting a new trace run. A thread still collecting the
2650 previous run, could trash the trace buffer when resumed. The trace
2651 buffer control structures would have been reset but the thread had
2652 no way to tell. The thread could even be midway through memcpy'ing
2653 to the buffer, which would mean that when resumed, it would clobber the
2654 trace buffer that had been set for a new run.
2655
2656 - we can't rewrite/reuse the jump pads for new tracepoints
2657 safely. Say you do tstart while a thread is stopped midway through
2658 collecting. When the thread is later resumed, it finishes the
2659 collection, and returns to the jump pad, to execute the original
2660 instruction that was under the tracepoint jump at the time the
2661 older run had been started. If the jump pad had been rewritten
2662 since for something else in the new run, the thread would now
2663 execute the wrong / random instructions. */
2664
2665 static void
2666 linux_stabilize_threads (void)
2667 {
2668 struct thread_info *saved_thread;
2669 struct thread_info *thread_stuck;
2670
2671 thread_stuck
2672 = (struct thread_info *) find_inferior (&all_threads,
2673 stuck_in_jump_pad_callback,
2674 NULL);
2675 if (thread_stuck != NULL)
2676 {
2677 if (debug_threads)
2678 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2679 lwpid_of (thread_stuck));
2680 return;
2681 }
2682
2683 saved_thread = current_thread;
2684
2685 stabilizing_threads = 1;
2686
2687 /* Kick 'em all. */
2688 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2689
2690 /* Loop until all are stopped out of the jump pads. */
2691 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2692 {
2693 struct target_waitstatus ourstatus;
2694 struct lwp_info *lwp;
2695 int wstat;
2696
2697 /* Note that we go through the full wait event loop. While
2698 moving threads out of jump pad, we need to be able to step
2699 over internal breakpoints and such. */
2700 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2701
2702 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2703 {
2704 lwp = get_thread_lwp (current_thread);
2705
2706 /* Lock it. */
2707 lwp->suspended++;
2708
2709 if (ourstatus.value.sig != GDB_SIGNAL_0
2710 || current_thread->last_resume_kind == resume_stop)
2711 {
2712 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2713 enqueue_one_deferred_signal (lwp, &wstat);
2714 }
2715 }
2716 }
2717
2718 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2719
2720 stabilizing_threads = 0;
2721
2722 current_thread = saved_thread;
2723
2724 if (debug_threads)
2725 {
2726 thread_stuck
2727 = (struct thread_info *) find_inferior (&all_threads,
2728 stuck_in_jump_pad_callback,
2729 NULL);
2730 if (thread_stuck != NULL)
2731 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2732 lwpid_of (thread_stuck));
2733 }
2734 }
2735
2736 static void async_file_mark (void);
2737
2738 /* Convenience function that is called when the kernel reports an
2739 event that is not passed out to GDB. */
2740
2741 static ptid_t
2742 ignore_event (struct target_waitstatus *ourstatus)
2743 {
2744 /* If we got an event, there may still be others, as a single
2745 SIGCHLD can indicate more than one child stopped. This forces
2746 another target_wait call. */
2747 async_file_mark ();
2748
2749 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2750 return null_ptid;
2751 }
2752
2753 /* Return non-zero if WAITSTATUS reflects an extended linux
2754 event. Otherwise, return zero. */
2755
2756 static int
2757 extended_event_reported (const struct target_waitstatus *waitstatus)
2758 {
2759 if (waitstatus == NULL)
2760 return 0;
2761
2762 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2763 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2764 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2765 }
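
/* Illustrative sketch, not compiled: with PTRACE_O_TRACEFORK and
   friends enabled (see linux_low_ptrace_options above), the kernel
   encodes the extended event in the upper bits of the stop status,
   roughly:  */
#if 0
static int
ptrace_event_of (int wstat)
{
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    return wstat >> 16;		/* PTRACE_EVENT_FORK, _VFORK, ...  */
  return 0;			/* Plain stop, no extended event.  */
}
#endif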
2766
2767 /* Wait for a process event, and return its status. */
2768
2769 static ptid_t
2770 linux_wait_1 (ptid_t ptid,
2771 struct target_waitstatus *ourstatus, int target_options)
2772 {
2773 int w;
2774 struct lwp_info *event_child;
2775 int options;
2776 int pid;
2777 int step_over_finished;
2778 int bp_explains_trap;
2779 int maybe_internal_trap;
2780 int report_to_gdb;
2781 int trace_event;
2782 int in_step_range;
2783
2784 if (debug_threads)
2785 {
2786 debug_enter ();
2787 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2788 }
2789
2790 /* Translate generic target options into linux options. */
2791 options = __WALL;
2792 if (target_options & TARGET_WNOHANG)
2793 options |= WNOHANG;
2794
2795 bp_explains_trap = 0;
2796 trace_event = 0;
2797 in_step_range = 0;
2798 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2799
2800 if (ptid_equal (step_over_bkpt, null_ptid))
2801 pid = linux_wait_for_event (ptid, &w, options);
2802 else
2803 {
2804 if (debug_threads)
2805 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2806 target_pid_to_str (step_over_bkpt));
2807 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2808 }
2809
2810 if (pid == 0)
2811 {
2812 gdb_assert (target_options & TARGET_WNOHANG);
2813
2814 if (debug_threads)
2815 {
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_IGNORE\n");
2818 debug_exit ();
2819 }
2820
2821 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2822 return null_ptid;
2823 }
2824 else if (pid == -1)
2825 {
2826 if (debug_threads)
2827 {
2828 debug_printf ("linux_wait_1 ret = null_ptid, "
2829 "TARGET_WAITKIND_NO_RESUMED\n");
2830 debug_exit ();
2831 }
2832
2833 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2834 return null_ptid;
2835 }
2836
2837 event_child = get_thread_lwp (current_thread);
2838
2839 /* linux_wait_for_event only returns an exit status for the last
2840 child of a process. Report it. */
2841 if (WIFEXITED (w) || WIFSIGNALED (w))
2842 {
2843 if (WIFEXITED (w))
2844 {
2845 ourstatus->kind = TARGET_WAITKIND_EXITED;
2846 ourstatus->value.integer = WEXITSTATUS (w);
2847
2848 if (debug_threads)
2849 {
2850 debug_printf ("linux_wait_1 ret = %s, exited with "
2851 "retcode %d\n",
2852 target_pid_to_str (ptid_of (current_thread)),
2853 WEXITSTATUS (w));
2854 debug_exit ();
2855 }
2856 }
2857 else
2858 {
2859 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2860 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2861
2862 if (debug_threads)
2863 {
2864 debug_printf ("linux_wait_1 ret = %s, terminated with "
2865 "signal %d\n",
2866 target_pid_to_str (ptid_of (current_thread)),
2867 WTERMSIG (w));
2868 debug_exit ();
2869 }
2870 }
2871
2872 return ptid_of (current_thread);
2873 }
2874
2875 /* If step-over executes a breakpoint instruction, it means a
2876 gdb/gdbserver breakpoint had been planted on top of a permanent
2877 breakpoint. The PC has been adjusted by
2878 check_stopped_by_breakpoint to point at the breakpoint address.
2879 Advance the PC manually past the breakpoint, otherwise the
2880 program would keep trapping the permanent breakpoint forever. */
2881 if (!ptid_equal (step_over_bkpt, null_ptid)
2882 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2883 {
2884 unsigned int increment_pc = the_low_target.breakpoint_len;
2885
2886 if (debug_threads)
2887 {
2888 debug_printf ("step-over for %s executed software breakpoint\n",
2889 target_pid_to_str (ptid_of (current_thread)));
2890 }
2891
2892 if (increment_pc != 0)
2893 {
2894 struct regcache *regcache
2895 = get_thread_regcache (current_thread, 1);
2896
2897 event_child->stop_pc += increment_pc;
2898 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2899
2900 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2901 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2902 }
2903 }
2904
2905 /* If this event was not handled before, and is not a SIGTRAP, we
2906 report it. SIGILL and SIGSEGV are also treated as traps in case
2907 a breakpoint is inserted at the current PC. If this target does
2908 not support internal breakpoints at all, we also report the
2909 SIGTRAP without further processing; it's of no concern to us. */
2910 maybe_internal_trap
2911 = (supports_breakpoints ()
2912 && (WSTOPSIG (w) == SIGTRAP
2913 || ((WSTOPSIG (w) == SIGILL
2914 || WSTOPSIG (w) == SIGSEGV)
2915 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2916
2917 if (maybe_internal_trap)
2918 {
2919 /* Handle anything that requires bookkeeping before deciding to
2920 report the event or continue waiting. */
2921
2922 /* First check if we can explain the SIGTRAP with an internal
2923 breakpoint, or if we should possibly report the event to GDB.
2924 Do this before anything that may remove or insert a
2925 breakpoint. */
2926 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2927
2928 /* We have a SIGTRAP, possibly a step-over dance has just
2929 finished. If so, tweak the state machine accordingly,
2930 reinsert breakpoints and delete any reinsert (software
2931 single-step) breakpoints. */
2932 step_over_finished = finish_step_over (event_child);
2933
2934 /* Now invoke the callbacks of any internal breakpoints there. */
2935 check_breakpoints (event_child->stop_pc);
2936
2937 /* Handle tracepoint data collecting. This may overflow the
2938 trace buffer, and cause a tracing stop, removing
2939 breakpoints. */
2940 trace_event = handle_tracepoints (event_child);
2941
2942 if (bp_explains_trap)
2943 {
2944 /* If we stepped or ran into an internal breakpoint, we've
2945 already handled it. So next time we resume (from this
2946 PC), we should step over it. */
2947 if (debug_threads)
2948 debug_printf ("Hit a gdbserver breakpoint.\n");
2949
2950 if (breakpoint_here (event_child->stop_pc))
2951 event_child->need_step_over = 1;
2952 }
2953 }
2954 else
2955 {
2956 /* We have some other signal, possibly a step-over dance was in
2957 progress, and it should be cancelled too. */
2958 step_over_finished = finish_step_over (event_child);
2959 }
2960
2961 /* We have all the data we need. Either report the event to GDB, or
2962 resume threads and keep waiting for more. */
2963
2964 /* If we're collecting a fast tracepoint, finish the collection and
2965 move out of the jump pad before delivering a signal. See
2966 linux_stabilize_threads. */
2967
2968 if (WIFSTOPPED (w)
2969 && WSTOPSIG (w) != SIGTRAP
2970 && supports_fast_tracepoints ()
2971 && agent_loaded_p ())
2972 {
2973 if (debug_threads)
2974 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2975 "to defer or adjust it.\n",
2976 WSTOPSIG (w), lwpid_of (current_thread));
2977
2978 /* Allow debugging the jump pad itself. */
2979 if (current_thread->last_resume_kind != resume_step
2980 && maybe_move_out_of_jump_pad (event_child, &w))
2981 {
2982 enqueue_one_deferred_signal (event_child, &w);
2983
2984 if (debug_threads)
2985 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2986 WSTOPSIG (w), lwpid_of (current_thread));
2987
2988 linux_resume_one_lwp (event_child, 0, 0, NULL);
2989
2990 return ignore_event (ourstatus);
2991 }
2992 }
2993
2994 if (event_child->collecting_fast_tracepoint)
2995 {
2996 if (debug_threads)
2997 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2998 "Check if we're already there.\n",
2999 lwpid_of (current_thread),
3000 event_child->collecting_fast_tracepoint);
3001
3002 trace_event = 1;
3003
3004 event_child->collecting_fast_tracepoint
3005 = linux_fast_tracepoint_collecting (event_child, NULL);
3006
3007 if (event_child->collecting_fast_tracepoint != 1)
3008 {
3009 /* No longer need this breakpoint. */
3010 if (event_child->exit_jump_pad_bkpt != NULL)
3011 {
3012 if (debug_threads)
3013 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3014 "stopping all threads momentarily.\n");
3015
3016 /* Other running threads could hit this breakpoint.
3017 We don't handle moribund locations like GDB does,
3018 instead we always pause all threads when removing
3019 breakpoints, so that any step-over or
3020 decr_pc_after_break adjustment is always taken
3021 care of while the breakpoint is still
3022 inserted. */
3023 stop_all_lwps (1, event_child);
3024
3025 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3026 event_child->exit_jump_pad_bkpt = NULL;
3027
3028 unstop_all_lwps (1, event_child);
3029
3030 gdb_assert (event_child->suspended >= 0);
3031 }
3032 }
3033
3034 if (event_child->collecting_fast_tracepoint == 0)
3035 {
3036 if (debug_threads)
3037 debug_printf ("fast tracepoint finished "
3038 "collecting successfully.\n");
3039
3040 /* We may have a deferred signal to report. */
3041 if (dequeue_one_deferred_signal (event_child, &w))
3042 {
3043 if (debug_threads)
3044 debug_printf ("dequeued one signal.\n");
3045 }
3046 else
3047 {
3048 if (debug_threads)
3049 debug_printf ("no deferred signals.\n");
3050
3051 if (stabilizing_threads)
3052 {
3053 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3054 ourstatus->value.sig = GDB_SIGNAL_0;
3055
3056 if (debug_threads)
3057 {
3058 debug_printf ("linux_wait_1 ret = %s, stopped "
3059 "while stabilizing threads\n",
3060 target_pid_to_str (ptid_of (current_thread)));
3061 debug_exit ();
3062 }
3063
3064 return ptid_of (current_thread);
3065 }
3066 }
3067 }
3068 }
3069
3070 /* Check whether GDB would be interested in this event. */
3071
3072 /* If GDB is not interested in this signal, don't stop other
3073 threads, and don't report it to GDB. Just resume the inferior
3074 right away. We do this for threading-related signals as well as
3075 any that GDB specifically requested we ignore. But never ignore
3076 SIGSTOP if we sent it ourselves, and do not ignore signals when
3077 stepping - they may require special handling to skip the signal
3078 handler. Also never ignore signals that could be caused by a
3079 breakpoint. */
3080 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3081 thread library? */
3082 if (WIFSTOPPED (w)
3083 && current_thread->last_resume_kind != resume_step
3084 && (
3085 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3086 (current_process ()->priv->thread_db != NULL
3087 && (WSTOPSIG (w) == __SIGRTMIN
3088 || WSTOPSIG (w) == __SIGRTMIN + 1))
3089 ||
3090 #endif
3091 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3092 && !(WSTOPSIG (w) == SIGSTOP
3093 && current_thread->last_resume_kind == resume_stop)
3094 && !linux_wstatus_maybe_breakpoint (w))))
3095 {
3096 siginfo_t info, *info_p;
3097
3098 if (debug_threads)
3099 debug_printf ("Ignored signal %d for LWP %ld.\n",
3100 WSTOPSIG (w), lwpid_of (current_thread));
3101
3102 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3103 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3104 info_p = &info;
3105 else
3106 info_p = NULL;
3107 linux_resume_one_lwp (event_child, event_child->stepping,
3108 WSTOPSIG (w), info_p);
3109 return ignore_event (ourstatus);
3110 }
3111
3112 /* Note that all addresses are always "out of the step range" when
3113 there's no range to begin with. */
3114 in_step_range = lwp_in_step_range (event_child);
3115
3116 /* If GDB wanted this thread to single step, and the thread is out
3117 of the step range, we always want to report the SIGTRAP, and let
3118 GDB handle it. Watchpoints should always be reported. So should
3119 signals we can't explain. A SIGTRAP we can't explain could be a
3120 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3121 we do, we'll be able to handle GDB breakpoints on top of internal
3122 breakpoints, by handling the internal breakpoint and still
3123 reporting the event to GDB. If we don't, we're out of luck; GDB
3124 won't see the breakpoint hit. */
3125 report_to_gdb = (!maybe_internal_trap
3126 || (current_thread->last_resume_kind == resume_step
3127 && !in_step_range)
3128 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3129 || (!step_over_finished && !in_step_range
3130 && !bp_explains_trap && !trace_event)
3131 || (gdb_breakpoint_here (event_child->stop_pc)
3132 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3133 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3134 || extended_event_reported (&event_child->waitstatus));
3135
3136 run_breakpoint_commands (event_child->stop_pc);
3137
3138 /* We found no reason GDB would want us to stop. We either hit one
3139 of our own breakpoints, or finished an internal step GDB
3140 shouldn't know about. */
3141 if (!report_to_gdb)
3142 {
3143 if (debug_threads)
3144 {
3145 if (bp_explains_trap)
3146 debug_printf ("Hit a gdbserver breakpoint.\n");
3147 if (step_over_finished)
3148 debug_printf ("Step-over finished.\n");
3149 if (trace_event)
3150 debug_printf ("Tracepoint event.\n");
3151 if (lwp_in_step_range (event_child))
3152 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3153 paddress (event_child->stop_pc),
3154 paddress (event_child->step_range_start),
3155 paddress (event_child->step_range_end));
3156 }
3157
3158 /* We're not reporting this breakpoint to GDB, so apply the
3159 decr_pc_after_break adjustment to the inferior's regcache
3160 ourselves. */
3161
3162 if (the_low_target.set_pc != NULL)
3163 {
3164 struct regcache *regcache
3165 = get_thread_regcache (current_thread, 1);
3166 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3167 }
3168
3169 /* We may have finished stepping over a breakpoint. If so,
3170 we've stopped and suspended all LWPs momentarily except the
3171 stepping one. This is where we resume them all again. We're
3172 going to keep waiting, so use proceed, which handles stepping
3173 over the next breakpoint. */
3174 if (debug_threads)
3175 debug_printf ("proceeding all threads.\n");
3176
3177 if (step_over_finished)
3178 unsuspend_all_lwps (event_child);
3179
3180 proceed_all_lwps ();
3181 return ignore_event (ourstatus);
3182 }
3183
3184 if (debug_threads)
3185 {
3186 if (extended_event_reported (&event_child->waitstatus))
3187 {
3188 char *str;
3189
3190 str = target_waitstatus_to_string (&event_child->waitstatus);
3191 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3192 lwpid_of (get_lwp_thread (event_child)), str);
3193 xfree (str);
3194 }
3195 if (current_thread->last_resume_kind == resume_step)
3196 {
3197 if (event_child->step_range_start == event_child->step_range_end)
3198 debug_printf ("GDB wanted to single-step, reporting event.\n");
3199 else if (!lwp_in_step_range (event_child))
3200 debug_printf ("Out of step range, reporting event.\n");
3201 }
3202 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3203 debug_printf ("Stopped by watchpoint.\n");
3204 else if (gdb_breakpoint_here (event_child->stop_pc))
3205 debug_printf ("Stopped by GDB breakpoint.\n");
3206 debug_printf ("Hit a non-gdbserver trap event.\n");
3208 }
3209
3210 /* Alright, we're going to report a stop. */
3211
3212 if (!stabilizing_threads)
3213 {
3214 /* In all-stop, stop all threads. */
3215 if (!non_stop)
3216 stop_all_lwps (0, NULL);
3217
3218 /* If we're not waiting for a specific LWP, choose an event LWP
3219 from among those that have had events. Giving equal priority
3220 to all LWPs that have had events helps prevent
3221 starvation. */
3222 if (ptid_equal (ptid, minus_one_ptid))
3223 {
3224 event_child->status_pending_p = 1;
3225 event_child->status_pending = w;
3226
3227 select_event_lwp (&event_child);
3228
3229 /* current_thread and event_child must stay in sync. */
3230 current_thread = get_lwp_thread (event_child);
3231
3232 event_child->status_pending_p = 0;
3233 w = event_child->status_pending;
3234 }
3235
3236 if (step_over_finished)
3237 {
3238 if (!non_stop)
3239 {
3240 /* If we were doing a step-over, all other threads but
3241 the stepping one had been paused in start_step_over,
3242 with their suspend counts incremented. We don't want
3243 to do a full unstop/unpause, because we're in
3244 all-stop mode (so we want threads stopped), but we
3245 still need to unsuspend the other threads, to
3246 decrement their `suspended' count back. */
3247 unsuspend_all_lwps (event_child);
3248 }
3249 else
3250 {
3251 /* If we just finished a step-over, then all threads had
3252 been momentarily paused. In all-stop, that's fine,
3253 we want threads stopped by now anyway. In non-stop,
3254 we need to re-resume threads that GDB wanted to be
3255 running. */
3256 unstop_all_lwps (1, event_child);
3257 }
3258 }
3259
3260 /* Stabilize threads (move out of jump pads). */
3261 if (!non_stop)
3262 stabilize_threads ();
3263 }
3264 else
3265 {
3266 /* If we just finished a step-over, then all threads had been
3267 momentarily paused. In all-stop, that's fine, we want
3268 threads stopped by now anyway. In non-stop, we need to
3269 re-resume threads that GDB wanted to be running. */
3270 if (step_over_finished)
3271 unstop_all_lwps (1, event_child);
3272 }
3273
3274 if (extended_event_reported (&event_child->waitstatus))
3275 {
3276 /* If the reported event is a fork, vfork or vfork-done, let GDB know. */
3277 ourstatus->kind = event_child->waitstatus.kind;
3278 ourstatus->value = event_child->waitstatus.value;
3279
3280 /* Clear the event lwp's waitstatus since we handled it already. */
3281 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3282 }
3283 else
3284 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3285
3286 /* Now that we've selected our final event LWP, un-adjust its PC if
3287 it was a software breakpoint, and the client doesn't know we can
3288 adjust the breakpoint ourselves. */
3289 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3290 && !swbreak_feature)
3291 {
3292 int decr_pc = the_low_target.decr_pc_after_break;
3293
3294 if (decr_pc != 0)
3295 {
3296 struct regcache *regcache
3297 = get_thread_regcache (current_thread, 1);
3298 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3299 }
3300 }
3301
3302 if (current_thread->last_resume_kind == resume_stop
3303 && WSTOPSIG (w) == SIGSTOP)
3304 {
3305 /* A thread that has been requested to stop by GDB with vCont;t,
3306 and it stopped cleanly, so report as SIG0. The use of
3307 SIGSTOP is an implementation detail. */
3308 ourstatus->value.sig = GDB_SIGNAL_0;
3309 }
3310 else if (current_thread->last_resume_kind == resume_stop
3311 && WSTOPSIG (w) != SIGSTOP)
3312 {
3313 /* A thread that has been requested to stop by GDB with vCont;t,
3314 but it stopped for other reasons. */
3315 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3316 }
3317 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3318 {
3319 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3320 }
3321
3322 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3323
3324 if (debug_threads)
3325 {
3326 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3327 target_pid_to_str (ptid_of (current_thread)),
3328 ourstatus->kind, ourstatus->value.sig);
3329 debug_exit ();
3330 }
3331
3332 return ptid_of (current_thread);
3333 }
3334
3335 /* Get rid of any pending event in the pipe. */
3336 static void
3337 async_file_flush (void)
3338 {
3339 int ret;
3340 char buf;
3341
3342 do
3343 ret = read (linux_event_pipe[0], &buf, 1);
3344 while (ret >= 0 || (ret == -1 && errno == EINTR));
3345 }
3346
3347 /* Put something in the pipe, so the event loop wakes up. */
3348 static void
3349 async_file_mark (void)
3350 {
3351 int ret;
3352
3353 async_file_flush ();
3354
3355 do
3356 ret = write (linux_event_pipe[1], "+", 1);
3357 while (ret == 0 || (ret == -1 && errno == EINTR));
3358
3359 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3360 be awakened anyway. */
3361 }
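
/* Illustrative sketch, not compiled: async_file_flush and
   async_file_mark above implement the classic self-pipe trick.  They
   rely on both pipe ends being non-blocking, which a hypothetical
   setup routine would arrange like so:  */
#if 0
static int
setup_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* Non-blocking ends let the flush/mark loops above run without
     ever sleeping in read or write.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;

  /* fds[0] would then be registered with the event loop.  */
  return 0;
}
#endif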
3362
3363 static ptid_t
3364 linux_wait (ptid_t ptid,
3365 struct target_waitstatus *ourstatus, int target_options)
3366 {
3367 ptid_t event_ptid;
3368
3369 /* Flush the async file first. */
3370 if (target_is_async_p ())
3371 async_file_flush ();
3372
3373 do
3374 {
3375 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3376 }
3377 while ((target_options & TARGET_WNOHANG) == 0
3378 && ptid_equal (event_ptid, null_ptid)
3379 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3380
3381 /* If at least one stop was reported, there may be more. A single
3382 SIGCHLD can signal more than one child stop. */
3383 if (target_is_async_p ()
3384 && (target_options & TARGET_WNOHANG) != 0
3385 && !ptid_equal (event_ptid, null_ptid))
3386 async_file_mark ();
3387
3388 return event_ptid;
3389 }
3390
3391 /* Send a signal to an LWP. */
3392
3393 static int
3394 kill_lwp (unsigned long lwpid, int signo)
3395 {
3396 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3397 fails, then we are not using nptl threads and we should be using kill. */
3398
3399 #ifdef __NR_tkill
3400 {
3401 static int tkill_failed;
3402
3403 if (!tkill_failed)
3404 {
3405 int ret;
3406
3407 errno = 0;
3408 ret = syscall (__NR_tkill, lwpid, signo);
3409 if (errno != ENOSYS)
3410 return ret;
3411 tkill_failed = 1;
3412 }
3413 }
3414 #endif
3415
3416 return kill (lwpid, signo);
3417 }
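
/* Note for the fallback above: tkill addresses exactly one LWP, while
   kill addresses a whole thread group and may be delivered to any
   thread in it.  The kill fallback is only correct on pre-NPTL
   systems, where each thread has its own PID in the kill sense.  */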
3418
3419 void
3420 linux_stop_lwp (struct lwp_info *lwp)
3421 {
3422 send_sigstop (lwp);
3423 }
3424
3425 static void
3426 send_sigstop (struct lwp_info *lwp)
3427 {
3428 int pid;
3429
3430 pid = lwpid_of (get_lwp_thread (lwp));
3431
3432 /* If we already have a pending stop signal for this process, don't
3433 send another. */
3434 if (lwp->stop_expected)
3435 {
3436 if (debug_threads)
3437 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3438
3439 return;
3440 }
3441
3442 if (debug_threads)
3443 debug_printf ("Sending sigstop to lwp %d\n", pid);
3444
3445 lwp->stop_expected = 1;
3446 kill_lwp (pid, SIGSTOP);
3447 }
3448
3449 static int
3450 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3451 {
3452 struct thread_info *thread = (struct thread_info *) entry;
3453 struct lwp_info *lwp = get_thread_lwp (thread);
3454
3455 /* Ignore EXCEPT. */
3456 if (lwp == except)
3457 return 0;
3458
3459 if (lwp->stopped)
3460 return 0;
3461
3462 send_sigstop (lwp);
3463 return 0;
3464 }
3465
3466 /* Increment the suspend count of an LWP, and stop it, if not stopped
3467 yet. */
3468 static int
3469 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3470 void *except)
3471 {
3472 struct thread_info *thread = (struct thread_info *) entry;
3473 struct lwp_info *lwp = get_thread_lwp (thread);
3474
3475 /* Ignore EXCEPT. */
3476 if (lwp == except)
3477 return 0;
3478
3479 lwp->suspended++;
3480
3481 return send_sigstop_callback (entry, except);
3482 }
3483
3484 static void
3485 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3486 {
3487 /* It's dead, really. */
3488 lwp->dead = 1;
3489
3490 /* Store the exit status for later. */
3491 lwp->status_pending_p = 1;
3492 lwp->status_pending = wstat;
3493
3494 /* Prevent trying to stop it. */
3495 lwp->stopped = 1;
3496
3497 /* No further stops are expected from a dead lwp. */
3498 lwp->stop_expected = 0;
3499 }
3500
3501 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3502
3503 static void
3504 wait_for_sigstop (void)
3505 {
3506 struct thread_info *saved_thread;
3507 ptid_t saved_tid;
3508 int wstat;
3509 int ret;
3510
3511 saved_thread = current_thread;
3512 if (saved_thread != NULL)
3513 saved_tid = saved_thread->entry.id;
3514 else
3515 saved_tid = null_ptid; /* avoid bogus unused warning */
3516
3517 if (debug_threads)
3518 debug_printf ("wait_for_sigstop: pulling events\n");
3519
3520 /* Passing NULL_PTID as filter indicates we want all events to be
3521 left pending. Eventually this returns when there are no
3522 unwaited-for children left. */
3523 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3524 &wstat, __WALL);
3525 gdb_assert (ret == -1);
3526
3527 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3528 current_thread = saved_thread;
3529 else
3530 {
3531 if (debug_threads)
3532 debug_printf ("Previously current thread died.\n");
3533
3534 if (non_stop)
3535 {
3536 /* We can't change the current inferior behind GDB's back,
3537 otherwise, a subsequent command may apply to the wrong
3538 process. */
3539 current_thread = NULL;
3540 }
3541 else
3542 {
3543 /* Set a valid thread as current. */
3544 set_desired_thread (0);
3545 }
3546 }
3547 }
3548
3549 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3550 move it out, because we need to report the stop event to GDB. For
3551 example, if the user puts a breakpoint in the jump pad, it's
3552 because she wants to debug it. */
3553
3554 static int
3555 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3556 {
3557 struct thread_info *thread = (struct thread_info *) entry;
3558 struct lwp_info *lwp = get_thread_lwp (thread);
3559
3560 gdb_assert (lwp->suspended == 0);
3561 gdb_assert (lwp->stopped);
3562
3563 /* Allow debugging the jump pad, gdb_collect, etc. */
3564 return (supports_fast_tracepoints ()
3565 && agent_loaded_p ()
3566 && (gdb_breakpoint_here (lwp->stop_pc)
3567 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3568 || thread->last_resume_kind == resume_step)
3569 && linux_fast_tracepoint_collecting (lwp, NULL));
3570 }
3571
3572 static void
3573 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3574 {
3575 struct thread_info *thread = (struct thread_info *) entry;
3576 struct lwp_info *lwp = get_thread_lwp (thread);
3577 int *wstat;
3578
3579 gdb_assert (lwp->suspended == 0);
3580 gdb_assert (lwp->stopped);
3581
3582 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3583
3584 /* Allow debugging the jump pad, gdb_collect, etc. */
3585 if (!gdb_breakpoint_here (lwp->stop_pc)
3586 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3587 && thread->last_resume_kind != resume_step
3588 && maybe_move_out_of_jump_pad (lwp, wstat))
3589 {
3590 if (debug_threads)
3591 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3592 lwpid_of (thread));
3593
3594 if (wstat)
3595 {
3596 lwp->status_pending_p = 0;
3597 enqueue_one_deferred_signal (lwp, wstat);
3598
3599 if (debug_threads)
3600 debug_printf ("Signal %d for LWP %ld deferred "
3601 "(in jump pad)\n",
3602 WSTOPSIG (*wstat), lwpid_of (thread));
3603 }
3604
3605 linux_resume_one_lwp (lwp, 0, 0, NULL);
3606 }
3607 else
3608 lwp->suspended++;
3609 }
3610
3611 static int
3612 lwp_running (struct inferior_list_entry *entry, void *data)
3613 {
3614 struct thread_info *thread = (struct thread_info *) entry;
3615 struct lwp_info *lwp = get_thread_lwp (thread);
3616
3617 if (lwp->dead)
3618 return 0;
3619 if (lwp->stopped)
3620 return 0;
3621 return 1;
3622 }
3623
3624 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3625 If SUSPEND, then also increase the suspend count of every LWP,
3626 except EXCEPT. */
3627
3628 static void
3629 stop_all_lwps (int suspend, struct lwp_info *except)
3630 {
3631 /* Should not be called recursively. */
3632 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3633
3634 if (debug_threads)
3635 {
3636 debug_enter ();
3637 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3638 suspend ? "stop-and-suspend" : "stop",
3639 except != NULL
3640 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3641 : "none");
3642 }
3643
3644 stopping_threads = (suspend
3645 ? STOPPING_AND_SUSPENDING_THREADS
3646 : STOPPING_THREADS);
3647
3648 if (suspend)
3649 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3650 else
3651 find_inferior (&all_threads, send_sigstop_callback, except);
3652 wait_for_sigstop ();
3653 stopping_threads = NOT_STOPPING_THREADS;
3654
3655 if (debug_threads)
3656 {
3657 debug_printf ("stop_all_lwps done, setting stopping_threads "
3658 "back to !stopping\n");
3659 debug_exit ();
3660 }
3661 }
3662
3663 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3664 SIGNAL is nonzero, give it that signal. */
3665
3666 static void
3667 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3668 int step, int signal, siginfo_t *info)
3669 {
3670 struct thread_info *thread = get_lwp_thread (lwp);
3671 struct thread_info *saved_thread;
3672 int fast_tp_collecting;
3673 struct process_info *proc = get_thread_process (thread);
3674
3675 /* Note that target description may not be initialised
3676 (proc->tdesc == NULL) at this point because the program hasn't
3677 stopped at the first instruction yet. It means GDBserver skips
3678 the extra traps from the wrapper program (see option --wrapper).
3679 Code in this function that requires register access should be
3680 guarded by a check that proc->tdesc is not NULL, or equivalent. */
3681
3682 if (lwp->stopped == 0)
3683 return;
3684
3685 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3686
3687 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3688
3689 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3690 user used the "jump" command, or "set $pc = foo"). */
3691 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3692 {
3693 /* Collecting 'while-stepping' actions doesn't make sense
3694 anymore. */
3695 release_while_stepping_state_list (thread);
3696 }
3697
3698 /* If we have pending signals or status, and a new signal, enqueue the
3699 signal. Also enqueue the signal if we are waiting to reinsert a
3700 breakpoint; it will be picked up again below. */
3701 if (signal != 0
3702 && (lwp->status_pending_p
3703 || lwp->pending_signals != NULL
3704 || lwp->bp_reinsert != 0
3705 || fast_tp_collecting))
3706 {
3707 struct pending_signals *p_sig;
3708 p_sig = xmalloc (sizeof (*p_sig));
3709 p_sig->prev = lwp->pending_signals;
3710 p_sig->signal = signal;
3711 if (info == NULL)
3712 memset (&p_sig->info, 0, sizeof (siginfo_t));
3713 else
3714 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3715 lwp->pending_signals = p_sig;
3716 }
3717
3718 if (lwp->status_pending_p)
3719 {
3720 if (debug_threads)
3721 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3722 " has pending status\n",
3723 lwpid_of (thread), step ? "step" : "continue", signal,
3724 lwp->stop_expected ? "expected" : "not expected");
3725 return;
3726 }
3727
3728 saved_thread = current_thread;
3729 current_thread = thread;
3730
3731 if (debug_threads)
3732 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3733 lwpid_of (thread), step ? "step" : "continue", signal,
3734 lwp->stop_expected ? "expected" : "not expected");
3735
3736 /* This bit needs some thinking about. If we get a signal that
3737 we must report while a single-step reinsert is still pending,
3738 we often end up resuming the thread. It might be better to
3739 (ew) allow a stack of pending events; then we could be sure that
3740 the reinsert happened right away and not lose any signals.
3741
3742 Making this stack would also shrink the window in which breakpoints are
3743 uninserted (see comment in linux_wait_for_lwp) but not enough for
3744 complete correctness, so it won't solve that problem. It may be
3745 worthwhile just to solve this one, however. */
3746 if (lwp->bp_reinsert != 0)
3747 {
3748 if (debug_threads)
3749 debug_printf (" pending reinsert at 0x%s\n",
3750 paddress (lwp->bp_reinsert));
3751
3752 if (can_hardware_single_step ())
3753 {
3754 if (fast_tp_collecting == 0)
3755 {
3756 if (step == 0)
3757 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3758 if (lwp->suspended)
3759 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3760 lwp->suspended);
3761 }
3762
3763 step = 1;
3764 }
3765
3766 /* Postpone any pending signal. It was enqueued above. */
3767 signal = 0;
3768 }
3769
3770 if (fast_tp_collecting == 1)
3771 {
3772 if (debug_threads)
3773 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3774 " (exit-jump-pad-bkpt)\n",
3775 lwpid_of (thread));
3776
3777 /* Postpone any pending signal. It was enqueued above. */
3778 signal = 0;
3779 }
3780 else if (fast_tp_collecting == 2)
3781 {
3782 if (debug_threads)
3783 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3784 " single-stepping\n",
3785 lwpid_of (thread));
3786
3787 if (can_hardware_single_step ())
3788 step = 1;
3789 else
3790 {
3791 internal_error (__FILE__, __LINE__,
3792 "moving out of jump pad single-stepping"
3793 " not implemented on this target");
3794 }
3795
3796 /* Postpone any pending signal. It was enqueued above. */
3797 signal = 0;
3798 }
3799
3800 /* If we have while-stepping actions in this thread, set it stepping.
3801 If we have a signal to deliver, it may or may not be set to
3802 SIG_IGN, we don't know. Assume so, and allow collecting
3803 while-stepping into a signal handler. A possible smart thing to
3804 do would be to set an internal breakpoint at the signal return
3805 address, continue, and carry on catching this while-stepping
3806 action only when that breakpoint is hit. A future
3807 enhancement. */
3808 if (thread->while_stepping != NULL
3809 && can_hardware_single_step ())
3810 {
3811 if (debug_threads)
3812 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3813 lwpid_of (thread));
3814 step = 1;
3815 }
3816
3817 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3818 {
3819 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3820
3821 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3822
3823 if (debug_threads)
3824 {
3825 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3826 (long) lwp->stop_pc);
3827 }
3828 }
3829
3830 /* If we have pending signals, consume one unless we are trying to
3831 reinsert a breakpoint or we're trying to finish a fast tracepoint
3832 collect. */
3833 if (lwp->pending_signals != NULL
3834 && lwp->bp_reinsert == 0
3835 && fast_tp_collecting == 0)
3836 {
3837 struct pending_signals **p_sig;
3838
3839 p_sig = &lwp->pending_signals;
3840 while ((*p_sig)->prev != NULL)
3841 p_sig = &(*p_sig)->prev;
3842
3843 signal = (*p_sig)->signal;
3844 if ((*p_sig)->info.si_signo != 0)
3845 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3846 &(*p_sig)->info);
3847
3848 free (*p_sig);
3849 *p_sig = NULL;
3850 }
3851
3852 if (the_low_target.prepare_to_resume != NULL)
3853 the_low_target.prepare_to_resume (lwp);
3854
3855 regcache_invalidate_thread (thread);
3856 errno = 0;
3857 lwp->stepping = step;
3858 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3859 (PTRACE_TYPE_ARG3) 0,
3860 /* Coerce to a uintptr_t first to avoid potential gcc warning
3861 of coercing an 8 byte integer to a 4 byte pointer. */
3862 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3863
3864 current_thread = saved_thread;
3865 if (errno)
3866 perror_with_name ("resuming thread");
3867
3868 /* Successfully resumed. Clear state that no longer makes sense,
3869 and mark the LWP as running. Must not do this before resuming
3870 otherwise if that fails other code will be confused. E.g., we'd
3871 later try to stop the LWP and hang forever waiting for a stop
3872 status. Note that we must not throw after this is cleared,
3873 otherwise handle_zombie_lwp_error would get confused. */
3874 lwp->stopped = 0;
3875 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3876 }
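
/* The pending_signals list consumed above is pushed at the head (each
new entry's `prev' links to the previous head) but drained from the
tail, so signals are delivered first-in first-out. A minimal
illustrative walker over that shape (hypothetical helper, not used
elsewhere):  */

static int
example_count_pending_signals (struct lwp_info *lwp)
{
  struct pending_signals *p;
  int count = 0;

  /* Follow the `prev' links from newest to oldest.  */
  for (p = lwp->pending_signals; p != NULL; p = p->prev)
    count++;

  return count;
}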
3877
3878 /* Called when we try to resume a stopped LWP and that errors out. If
3879 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3880 or about to become one), discard the error, clear any pending status
3881 the LWP may have, and return true (we'll collect the exit status
3882 soon enough). Otherwise, return false. */
3883
3884 static int
3885 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3886 {
3887 struct thread_info *thread = get_lwp_thread (lp);
3888
3889 /* If we get an error after resuming the LWP successfully, we'd
3890 confuse !T state for the LWP being gone. */
3891 gdb_assert (lp->stopped);
3892
3893 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3894 because even if ptrace failed with ESRCH, the tracee may be "not
3895 yet fully dead", but already refusing ptrace requests. In that
3896 case the tracee has 'R (Running)' state for a little bit
3897 (observed in Linux 3.18). See also the note on ESRCH in the
3898 ptrace(2) man page. Instead, check whether the LWP has any state
3899 other than ptrace-stopped. */
3900
3901 /* Don't assume anything if /proc/PID/status can't be read. */
3902 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3903 {
3904 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3905 lp->status_pending_p = 0;
3906 return 1;
3907 }
3908 return 0;
3909 }
3910
3911 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3912 disappears while we try to resume it. */
3913
3914 static void
3915 linux_resume_one_lwp (struct lwp_info *lwp,
3916 int step, int signal, siginfo_t *info)
3917 {
3918 TRY
3919 {
3920 linux_resume_one_lwp_throw (lwp, step, signal, info);
3921 }
3922 CATCH (ex, RETURN_MASK_ERROR)
3923 {
3924 if (!check_ptrace_stopped_lwp_gone (lwp))
3925 throw_exception (ex);
3926 }
3927 END_CATCH
3928 }
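
/* The TRY/CATCH wrapper above implements a narrow swallow-the-race
pattern: only errors for which check_ptrace_stopped_lwp_gone confirms
the LWP has left ptrace-stop are discarded; any other error is
re-thrown to the caller unchanged.  */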
3929
3930 struct thread_resume_array
3931 {
3932 struct thread_resume *resume;
3933 size_t n;
3934 };
3935
3936 /* This function is called once per thread via find_inferior.
3937 ARG is a pointer to a thread_resume_array struct.
3938 We look up which resume request in ARG applies to the thread
3939 specified by ENTRY, and mark the thread with a pointer to it.
3940
3941 This algorithm is O(threads * resume elements), but the number of
3942 resume elements is small (and will remain small at least until GDB
3943 supports thread suspension). */
3944
3945 static int
3946 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3947 {
3948 struct thread_info *thread = (struct thread_info *) entry;
3949 struct lwp_info *lwp = get_thread_lwp (thread);
3950 int ndx;
3951 struct thread_resume_array *r;
3952
3953 r = arg;
3954
3955 for (ndx = 0; ndx < r->n; ndx++)
3956 {
3957 ptid_t ptid = r->resume[ndx].thread;
3958 if (ptid_equal (ptid, minus_one_ptid)
3959 || ptid_equal (ptid, entry->id)
3960 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3961 of PID'. */
3962 || (ptid_get_pid (ptid) == pid_of (thread)
3963 && (ptid_is_pid (ptid)
3964 || ptid_get_lwp (ptid) == -1)))
3965 {
3966 if (r->resume[ndx].kind == resume_stop
3967 && thread->last_resume_kind == resume_stop)
3968 {
3969 if (debug_threads)
3970 debug_printf ("already %s LWP %ld at GDB's request\n",
3971 (thread->last_status.kind
3972 == TARGET_WAITKIND_STOPPED)
3973 ? "stopped"
3974 : "stopping",
3975 lwpid_of (thread));
3976
3977 continue;
3978 }
3979
3980 lwp->resume = &r->resume[ndx];
3981 thread->last_resume_kind = lwp->resume->kind;
3982
3983 lwp->step_range_start = lwp->resume->step_range_start;
3984 lwp->step_range_end = lwp->resume->step_range_end;
3985
3986 /* If we had a deferred signal to report, dequeue one now.
3987 This can happen if LWP gets more than one signal while
3988 trying to get out of a jump pad. */
3989 if (lwp->stopped
3990 && !lwp->status_pending_p
3991 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3992 {
3993 lwp->status_pending_p = 1;
3994
3995 if (debug_threads)
3996 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3997 "leaving status pending.\n",
3998 WSTOPSIG (lwp->status_pending),
3999 lwpid_of (thread));
4000 }
4001
4002 return 0;
4003 }
4004 }
4005
4006 /* No resume action for this thread. */
4007 lwp->resume = NULL;
4008
4009 return 0;
4010 }
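
/* For illustration, the ptid matching rule used above in isolation
(hypothetical helper, not used elsewhere): minus_one_ptid matches
everything, an exact ptid matches a single thread, and a bare 'pPID'
or 'pPID.-1' matches every thread of process PID.  */

static int
example_resume_ptid_matches (ptid_t ptid, struct thread_info *thread)
{
  return (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, thread->entry.id)
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)));
}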
4011
4012 /* find_inferior callback for linux_resume.
4013 Set *FLAG_P if this lwp has an interesting status pending. */
4014
4015 static int
4016 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4017 {
4018 struct thread_info *thread = (struct thread_info *) entry;
4019 struct lwp_info *lwp = get_thread_lwp (thread);
4020
4021 /* LWPs which will not be resumed are not interesting, because
4022 we might not wait for them next time through linux_wait. */
4023 if (lwp->resume == NULL)
4024 return 0;
4025
4026 if (thread_still_has_status_pending_p (thread))
4027 * (int *) flag_p = 1;
4028
4029 return 0;
4030 }
4031
4032 /* Return 1 if this lwp that GDB wants running is stopped at an
4033 internal breakpoint that we need to step over. It assumes that any
4034 required STOP_PC adjustment has already been propagated to the
4035 inferior's regcache. */
4036
4037 static int
4038 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4039 {
4040 struct thread_info *thread = (struct thread_info *) entry;
4041 struct lwp_info *lwp = get_thread_lwp (thread);
4042 struct thread_info *saved_thread;
4043 CORE_ADDR pc;
4044 struct process_info *proc = get_thread_process (thread);
4045
4046 /* GDBserver is skipping the extra traps from the wrapper program;
4047 there is no need to step over. */
4048 if (proc->tdesc == NULL)
4049 return 0;
4050
4051 /* LWPs which will not be resumed are not interesting, because we
4052 might not wait for them next time through linux_wait. */
4053
4054 if (!lwp->stopped)
4055 {
4056 if (debug_threads)
4057 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4058 lwpid_of (thread));
4059 return 0;
4060 }
4061
4062 if (thread->last_resume_kind == resume_stop)
4063 {
4064 if (debug_threads)
4065 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4066 " stopped\n",
4067 lwpid_of (thread));
4068 return 0;
4069 }
4070
4071 gdb_assert (lwp->suspended >= 0);
4072
4073 if (lwp->suspended)
4074 {
4075 if (debug_threads)
4076 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4077 lwpid_of (thread));
4078 return 0;
4079 }
4080
4081 if (!lwp->need_step_over)
4082 {
4083 if (debug_threads)
4084 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4085 }
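
/* Note that there is no early return here: even when the flag is
clear, we fall through and re-check for a breakpoint at PC below.  */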
4086
4087 if (lwp->status_pending_p)
4088 {
4089 if (debug_threads)
4090 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4091 " status.\n",
4092 lwpid_of (thread));
4093 return 0;
4094 }
4095
4096 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4097 or we have. */
4098 pc = get_pc (lwp);
4099
4100 /* If the PC has changed since we stopped, then don't do anything,
4101 and let the breakpoint/tracepoint be hit. This happens if, for
4102 instance, GDB handled the decr_pc_after_break subtraction itself,
4103 GDB is OOL stepping this thread, or the user has issued a "jump"
4104 command, or poked thread's registers herself. */
4105 if (pc != lwp->stop_pc)
4106 {
4107 if (debug_threads)
4108 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4109 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4110 lwpid_of (thread),
4111 paddress (lwp->stop_pc), paddress (pc));
4112
4113 lwp->need_step_over = 0;
4114 return 0;
4115 }
4116
4117 saved_thread = current_thread;
4118 current_thread = thread;
4119
4120 /* We can only step over breakpoints we know about. */
4121 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4122 {
4123 /* Don't step over a breakpoint that GDB expects to hit,
4124 though. If the condition is being evaluated on the target's side
4125 and it evaluates to false, step over this breakpoint as well. */
4126 if (gdb_breakpoint_here (pc)
4127 && gdb_condition_true_at_breakpoint (pc)
4128 && gdb_no_commands_at_breakpoint (pc))
4129 {
4130 if (debug_threads)
4131 debug_printf ("Need step over [LWP %ld]? yes, but found"
4132 " GDB breakpoint at 0x%s; skipping step over\n",
4133 lwpid_of (thread), paddress (pc));
4134
4135 current_thread = saved_thread;
4136 return 0;
4137 }
4138 else
4139 {
4140 if (debug_threads)
4141 debug_printf ("Need step over [LWP %ld]? yes, "
4142 "found breakpoint at 0x%s\n",
4143 lwpid_of (thread), paddress (pc));
4144
4145 /* We've found an lwp that needs stepping over --- return 1 so
4146 that find_inferior stops looking. */
4147 current_thread = saved_thread;
4148
4149 /* If the step over is cancelled, this is set again. */
4150 lwp->need_step_over = 0;
4151 return 1;
4152 }
4153 }
4154
4155 current_thread = saved_thread;
4156
4157 if (debug_threads)
4158 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4159 " at 0x%s\n",
4160 lwpid_of (thread), paddress (pc));
4161
4162 return 0;
4163 }
4164
4165 /* Start a step-over operation on LWP. When LWP stopped at a
4166 breakpoint, to make progress, we need to remove the breakpoint out
4167 of the way. If we let other threads run while we do that, they may
4168 pass by the breakpoint location and miss hitting it. To avoid
4169 that, a step-over momentarily stops all threads while LWP is
4170 single-stepped while the breakpoint is temporarily uninserted from
4171 the inferior. When the single-step finishes, we reinsert the
4172 breakpoint, and let all threads that are supposed to be running,
4173 run again.
4174
4175 On targets that don't support hardware single-step, we don't
4176 currently support full software single-stepping. Instead, we only
4177 support stepping over the thread event breakpoint, by asking the
4178 low target where to place a reinsert breakpoint. Since this
4179 routine assumes the breakpoint being stepped over is a thread event
4180 breakpoint, it usually assumes the return address of the current
4181 function is a good enough place to set the reinsert breakpoint. */
4182
4183 static int
4184 start_step_over (struct lwp_info *lwp)
4185 {
4186 struct thread_info *thread = get_lwp_thread (lwp);
4187 struct thread_info *saved_thread;
4188 CORE_ADDR pc;
4189 int step;
4190
4191 if (debug_threads)
4192 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4193 lwpid_of (thread));
4194
4195 stop_all_lwps (1, lwp);
4196 gdb_assert (lwp->suspended == 0);
4197
4198 if (debug_threads)
4199 debug_printf ("Done stopping all threads for step-over.\n");
4200
4201 /* Note, we should always reach here with an already adjusted PC,
4202 either by GDB (if we're resuming due to GDB's request), or by our
4203 caller, if we just finished handling an internal breakpoint GDB
4204 shouldn't care about. */
4205 pc = get_pc (lwp);
4206
4207 saved_thread = current_thread;
4208 current_thread = thread;
4209
4210 lwp->bp_reinsert = pc;
4211 uninsert_breakpoints_at (pc);
4212 uninsert_fast_tracepoint_jumps_at (pc);
4213
4214 if (can_hardware_single_step ())
4215 {
4216 step = 1;
4217 }
4218 else
4219 {
4220 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4221 set_reinsert_breakpoint (raddr);
4222 step = 0;
4223 }
4224
4225 current_thread = saved_thread;
4226
4227 linux_resume_one_lwp (lwp, step, 0, NULL);
4228
4229 /* Require next event from this LWP. */
4230 step_over_bkpt = thread->entry.id;
4231 return 1;
4232 }
4233
4234 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4235 start_step_over, if still there, and delete any reinsert
4236 breakpoints we've set, on non hardware single-step targets. */
4237
4238 static int
4239 finish_step_over (struct lwp_info *lwp)
4240 {
4241 if (lwp->bp_reinsert != 0)
4242 {
4243 if (debug_threads)
4244 debug_printf ("Finished step over.\n");
4245
4246 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4247 may be no breakpoint to reinsert there by now. */
4248 reinsert_breakpoints_at (lwp->bp_reinsert);
4249 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4250
4251 lwp->bp_reinsert = 0;
4252
4253 /* Delete any software-single-step reinsert breakpoints. No
4254 longer needed. We don't have to worry about other threads
4255 hitting this trap, and later not being able to explain it,
4256 because we were stepping over a breakpoint, and we hold all
4257 threads but LWP stopped while doing that. */
4258 if (!can_hardware_single_step ())
4259 delete_reinsert_breakpoints ();
4260
4261 step_over_bkpt = null_ptid;
4262 return 1;
4263 }
4264 else
4265 return 0;
4266 }
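
/* Schematically, the two halves above combine like this (a simplified
sketch; the real driver is the linux_wait_1 event loop):

     if (start_step_over (lwp))     // stop world, uninsert bp, resume LWP
       {
         ... wait for LWP to report its single-step trap ...
         finish_step_over (lwp);    // reinsert the breakpoint
         unstop_all_lwps (1, lwp);  // unsuspend and resume the others
       }
*/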
4267
4268 /* This function is called once per thread. We check the thread's resume
4269 request, which will tell us whether to resume, step, or leave the thread
4270 stopped; and what signal, if any, it should be sent.
4271
4272 For threads which we aren't explicitly told otherwise, we preserve
4273 the stepping flag; this is used for stepping over gdbserver-placed
4274 breakpoints.
4275
4276 If pending_flags was set in any thread, we queue any needed
4277 signals, since we won't actually resume. We already have a pending
4278 event to report, so we don't need to preserve any step requests;
4279 they should be re-issued if necessary. */
4280
4281 static int
4282 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4283 {
4284 struct thread_info *thread = (struct thread_info *) entry;
4285 struct lwp_info *lwp = get_thread_lwp (thread);
4286 int step;
4287 int leave_all_stopped = * (int *) arg;
4288 int leave_pending;
4289
4290 if (lwp->resume == NULL)
4291 return 0;
4292
4293 if (lwp->resume->kind == resume_stop)
4294 {
4295 if (debug_threads)
4296 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4297
4298 if (!lwp->stopped)
4299 {
4300 if (debug_threads)
4301 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4302
4303 /* Stop the thread, and wait for the event asynchronously,
4304 through the event loop. */
4305 send_sigstop (lwp);
4306 }
4307 else
4308 {
4309 if (debug_threads)
4310 debug_printf ("already stopped LWP %ld\n",
4311 lwpid_of (thread));
4312
4313 /* The LWP may have been stopped in an internal event that
4314 was not meant to be notified back to GDB (e.g., gdbserver
4315 breakpoint), so we should be reporting a stop event in
4316 this case too. */
4317
4318 /* If the thread already has a pending SIGSTOP, this is a
4319 no-op. Otherwise, something later will presumably resume
4320 the thread and this will cause it to cancel any pending
4321 operation, due to last_resume_kind == resume_stop. If
4322 the thread already has a pending status to report, we
4323 will still report it the next time we wait - see
4324 status_pending_p_callback. */
4325
4326 /* If we already have a pending signal to report, then
4327 there's no need to queue a SIGSTOP, as this means we're
4328 midway through moving the LWP out of the jumppad, and we
4329 will report the pending signal as soon as that is
4330 finished. */
4331 if (lwp->pending_signals_to_report == NULL)
4332 send_sigstop (lwp);
4333 }
4334
4335 /* For stop requests, we're done. */
4336 lwp->resume = NULL;
4337 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4338 return 0;
4339 }
4340
4341 /* If this thread, which is about to be resumed, has a pending status,
4342 then don't resume any threads - we can just report the pending
4343 status. Make sure to queue any signals that would otherwise be
4344 sent. In all-stop mode, we make this decision based on whether
4345 *any* thread has a pending status. If there's a thread that needs
4346 the step-over-breakpoint dance, then don't resume any other thread
4347 but that particular one. */
4348 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4349
4350 if (!leave_pending)
4351 {
4352 if (debug_threads)
4353 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4354
4355 step = (lwp->resume->kind == resume_step);
4356 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4357 }
4358 else
4359 {
4360 if (debug_threads)
4361 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4362
4363 /* If we have a new signal, enqueue the signal. */
4364 if (lwp->resume->sig != 0)
4365 {
4366 struct pending_signals *p_sig;
4367 p_sig = xmalloc (sizeof (*p_sig));
4368 p_sig->prev = lwp->pending_signals;
4369 p_sig->signal = lwp->resume->sig;
4370 memset (&p_sig->info, 0, sizeof (siginfo_t));
4371
4372 /* If this is the same signal we were previously stopped by,
4373 make sure to queue its siginfo. We can ignore the return
4374 value of ptrace; if it fails, we'll skip
4375 PTRACE_SETSIGINFO. */
4376 if (WIFSTOPPED (lwp->last_status)
4377 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4378 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4379 &p_sig->info);
4380
4381 lwp->pending_signals = p_sig;
4382 }
4383 }
4384
4385 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4386 lwp->resume = NULL;
4387 return 0;
4388 }
4389
4390 static void
4391 linux_resume (struct thread_resume *resume_info, size_t n)
4392 {
4393 struct thread_resume_array array = { resume_info, n };
4394 struct thread_info *need_step_over = NULL;
4395 int any_pending;
4396 int leave_all_stopped;
4397
4398 if (debug_threads)
4399 {
4400 debug_enter ();
4401 debug_printf ("linux_resume:\n");
4402 }
4403
4404 find_inferior (&all_threads, linux_set_resume_request, &array);
4405
4406 /* If there is a thread which would otherwise be resumed, which has
4407 a pending status, then don't resume any threads - we can just
4408 report the pending status. Make sure to queue any signals that
4409 would otherwise be sent. In non-stop mode, we'll apply this
4410 logic to each thread individually. We consume all pending events
4411 before considering starting a step-over (in all-stop). */
4412 any_pending = 0;
4413 if (!non_stop)
4414 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4415
4416 /* If there is a thread which would otherwise be resumed, which is
4417 stopped at a breakpoint that needs stepping over, then don't
4418 resume any threads - have it step over the breakpoint with all
4419 other threads stopped, then resume all threads again. Make sure
4420 to queue any signals that would otherwise be delivered or
4421 queued. */
4422 if (!any_pending && supports_breakpoints ())
4423 need_step_over
4424 = (struct thread_info *) find_inferior (&all_threads,
4425 need_step_over_p, NULL);
4426
4427 leave_all_stopped = (need_step_over != NULL || any_pending);
4428
4429 if (debug_threads)
4430 {
4431 if (need_step_over != NULL)
4432 debug_printf ("Not resuming all, need step over\n");
4433 else if (any_pending)
4434 debug_printf ("Not resuming, all-stop and found "
4435 "an LWP with pending status\n");
4436 else
4437 debug_printf ("Resuming, no pending status or step over needed\n");
4438 }
4439
4440 /* Even if we're leaving threads stopped, queue all signals we'd
4441 otherwise deliver. */
4442 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4443
4444 if (need_step_over)
4445 start_step_over (get_thread_lwp (need_step_over));
4446
4447 if (debug_threads)
4448 {
4449 debug_printf ("linux_resume done\n");
4450 debug_exit ();
4451 }
4452 }
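
/* Schematically, linux_resume above proceeds in this order:

     1. linux_set_resume_request - match GDB's resume actions to LWPs
     2. resume_status_pending_p  - all-stop: is an event already pending?
     3. need_step_over_p         - is some LWP parked on a breakpoint?
     4. linux_resume_one_thread  - resume each LWP, or just queue signals
     5. start_step_over          - if 3 found one, step it alone first  */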
4453
4454 /* This function is called once per thread. We check the thread's
4455 last resume request, which will tell us whether to resume, step, or
4456 leave the thread stopped. Any signal the client requested to be
4457 delivered has already been enqueued at this point.
4458
4459 If any thread that GDB wants running is stopped at an internal
4460 breakpoint that needs stepping over, we start a step-over operation
4461 on that particular thread, and leave all others stopped. */
4462
4463 static int
4464 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4465 {
4466 struct thread_info *thread = (struct thread_info *) entry;
4467 struct lwp_info *lwp = get_thread_lwp (thread);
4468 int step;
4469
4470 if (lwp == except)
4471 return 0;
4472
4473 if (debug_threads)
4474 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4475
4476 if (!lwp->stopped)
4477 {
4478 if (debug_threads)
4479 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4480 return 0;
4481 }
4482
4483 if (thread->last_resume_kind == resume_stop
4484 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4485 {
4486 if (debug_threads)
4487 debug_printf (" client wants LWP %ld to remain stopped\n",
4488 lwpid_of (thread));
4489 return 0;
4490 }
4491
4492 if (lwp->status_pending_p)
4493 {
4494 if (debug_threads)
4495 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4496 lwpid_of (thread));
4497 return 0;
4498 }
4499
4500 gdb_assert (lwp->suspended >= 0);
4501
4502 if (lwp->suspended)
4503 {
4504 if (debug_threads)
4505 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4506 return 0;
4507 }
4508
4509 if (thread->last_resume_kind == resume_stop
4510 && lwp->pending_signals_to_report == NULL
4511 && lwp->collecting_fast_tracepoint == 0)
4512 {
4513 /* We haven't reported this LWP as stopped yet (otherwise, the
4514 last_status.kind check above would catch it, and we wouldn't
4515 reach here). This LWP may have been momentarily paused by a
4516 stop_all_lwps call while handling, for example, another LWP's
4517 step-over. In that case, the pending expected SIGSTOP signal
4518 that was queued at vCont;t handling time will have already
4519 been consumed by wait_for_sigstop, and so we need to requeue
4520 another one here. Note that if the LWP already has a SIGSTOP
4521 pending, this is a no-op. */
4522
4523 if (debug_threads)
4524 debug_printf ("Client wants LWP %ld to stop. "
4525 "Making sure it has a SIGSTOP pending\n",
4526 lwpid_of (thread));
4527
4528 send_sigstop (lwp);
4529 }
4530
4531 step = thread->last_resume_kind == resume_step;
4532 linux_resume_one_lwp (lwp, step, 0, NULL);
4533 return 0;
4534 }
4535
4536 static int
4537 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4538 {
4539 struct thread_info *thread = (struct thread_info *) entry;
4540 struct lwp_info *lwp = get_thread_lwp (thread);
4541
4542 if (lwp == except)
4543 return 0;
4544
4545 lwp->suspended--;
4546 gdb_assert (lwp->suspended >= 0);
4547
4548 return proceed_one_lwp (entry, except);
4549 }
4550
4551 /* When we finish a step-over, set threads running again. If there's
4552 another thread that may need a step-over, now's the time to start
4553 it. Eventually, we'll move all threads past their breakpoints. */
4554
4555 static void
4556 proceed_all_lwps (void)
4557 {
4558 struct thread_info *need_step_over;
4559
4560 /* If there is a thread which would otherwise be resumed, which is
4561 stopped at a breakpoint that needs stepping over, then don't
4562 resume any threads - have it step over the breakpoint with all
4563 other threads stopped, then resume all threads again. */
4564
4565 if (supports_breakpoints ())
4566 {
4567 need_step_over
4568 = (struct thread_info *) find_inferior (&all_threads,
4569 need_step_over_p, NULL);
4570
4571 if (need_step_over != NULL)
4572 {
4573 if (debug_threads)
4574 debug_printf ("proceed_all_lwps: found "
4575 "thread %ld needing a step-over\n",
4576 lwpid_of (need_step_over));
4577
4578 start_step_over (get_thread_lwp (need_step_over));
4579 return;
4580 }
4581 }
4582
4583 if (debug_threads)
4584 debug_printf ("Proceeding, no step-over needed\n");
4585
4586 find_inferior (&all_threads, proceed_one_lwp, NULL);
4587 }
4588
4589 /* Stopped LWPs that the client wanted to be running, that don't have
4590 pending statuses, are set to run again, except for EXCEPT, if not
4591 NULL. This undoes a stop_all_lwps call. */
4592
4593 static void
4594 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4595 {
4596 if (debug_threads)
4597 {
4598 debug_enter ();
4599 if (except)
4600 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4601 lwpid_of (get_lwp_thread (except)));
4602 else
4603 debug_printf ("unstopping all lwps\n");
4604 }
4605
4606 if (unsuspend)
4607 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4608 else
4609 find_inferior (&all_threads, proceed_one_lwp, except);
4610
4611 if (debug_threads)
4612 {
4613 debug_printf ("unstop_all_lwps done\n");
4614 debug_exit ();
4615 }
4616 }
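
/* A minimal sketch of pairing the two primitives (hypothetical helper,
not called anywhere; the real pairing is spread between
start_step_over and the linux_wait_1 event loop). The suspend flag
passed to stop_all_lwps must match the unsuspend flag passed to
unstop_all_lwps, or suspend counts become unbalanced.  */

static void
example_stop_world_around (struct lwp_info *lwp)
{
  stop_all_lwps (1, lwp);	/* Stop and suspend everyone but LWP.  */

  /* ... operate while the rest of the world is held stopped ...  */

  unstop_all_lwps (1, lwp);	/* Unsuspend and set them running.  */
}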
4617
4618
4619 #ifdef HAVE_LINUX_REGSETS
4620
4621 #define use_linux_regsets 1
4622
4623 /* Returns true if REGSET has been disabled. */
4624
4625 static int
4626 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4627 {
4628 return (info->disabled_regsets != NULL
4629 && info->disabled_regsets[regset - info->regsets]);
4630 }
4631
4632 /* Disable REGSET. */
4633
4634 static void
4635 disable_regset (struct regsets_info *info, struct regset_info *regset)
4636 {
4637 int dr_offset;
4638
4639 dr_offset = regset - info->regsets;
4640 if (info->disabled_regsets == NULL)
4641 info->disabled_regsets = xcalloc (1, info->num_regsets);
4642 info->disabled_regsets[dr_offset] = 1;
4643 }
4644
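/* Fetch the registers covered by regsets from the inferior into
REGCACHE. Returns 0 if a general-register regset was among those
fetched, and 1 otherwise; the caller uses the result to decide
whether to fall back to PTRACE_PEEKUSER transfers for the rest.  */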
4645 static int
4646 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4647 struct regcache *regcache)
4648 {
4649 struct regset_info *regset;
4650 int saw_general_regs = 0;
4651 int pid;
4652 struct iovec iov;
4653
4654 pid = lwpid_of (current_thread);
4655 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4656 {
4657 void *buf, *data;
4658 int nt_type, res;
4659
4660 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4661 continue;
4662
4663 buf = xmalloc (regset->size);
4664
4665 nt_type = regset->nt_type;
4666 if (nt_type)
4667 {
4668 iov.iov_base = buf;
4669 iov.iov_len = regset->size;
4670 data = (void *) &iov;
4671 }
4672 else
4673 data = buf;
4674
4675 #ifndef __sparc__
4676 res = ptrace (regset->get_request, pid,
4677 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4678 #else
4679 res = ptrace (regset->get_request, pid, data, nt_type);
4680 #endif
4681 if (res < 0)
4682 {
4683 if (errno == EIO)
4684 {
4685 /* If we get EIO on a regset, do not try it again for
4686 this process mode. */
4687 disable_regset (regsets_info, regset);
4688 }
4689 else if (errno == ENODATA)
4690 {
4691 /* ENODATA may be returned if the regset is currently
4692 not "active". This can happen in normal operation,
4693 so suppress the warning in this case. */
4694 }
4695 else
4696 {
4697 char s[256];
4698 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4699 pid);
4700 perror (s);
4701 }
4702 }
4703 else
4704 {
4705 if (regset->type == GENERAL_REGS)
4706 saw_general_regs = 1;
4707 regset->store_function (regcache, buf);
4708 }
4709 free (buf);
4710 }
4711 if (saw_general_regs)
4712 return 0;
4713 else
4714 return 1;
4715 }
4716
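/* Write REGCACHE back to the inferior via regsets, using a
read-modify-write cycle so that kernel regset fields gdbserver does
not cache are preserved. Same return convention as
regsets_fetch_inferior_registers above.  */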
4717 static int
4718 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4719 struct regcache *regcache)
4720 {
4721 struct regset_info *regset;
4722 int saw_general_regs = 0;
4723 int pid;
4724 struct iovec iov;
4725
4726 pid = lwpid_of (current_thread);
4727 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4728 {
4729 void *buf, *data;
4730 int nt_type, res;
4731
4732 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4733 || regset->fill_function == NULL)
4734 continue;
4735
4736 buf = xmalloc (regset->size);
4737
4738 /* First fill the buffer with the current register set contents,
4739 in case there are any items in the kernel's regset that are
4740 not in gdbserver's regcache. */
4741
4742 nt_type = regset->nt_type;
4743 if (nt_type)
4744 {
4745 iov.iov_base = buf;
4746 iov.iov_len = regset->size;
4747 data = (void *) &iov;
4748 }
4749 else
4750 data = buf;
4751
4752 #ifndef __sparc__
4753 res = ptrace (regset->get_request, pid,
4754 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4755 #else
4756 res = ptrace (regset->get_request, pid, data, nt_type);
4757 #endif
4758
4759 if (res == 0)
4760 {
4761 /* Then overlay our cached registers on that. */
4762 regset->fill_function (regcache, buf);
4763
4764 /* Only now do we write the register set. */
4765 #ifndef __sparc__
4766 res = ptrace (regset->set_request, pid,
4767 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4768 #else
4769 res = ptrace (regset->set_request, pid, data, nt_type);
4770 #endif
4771 }
4772
4773 if (res < 0)
4774 {
4775 if (errno == EIO)
4776 {
4777 /* If we get EIO on a regset, do not try it again for
4778 this process mode. */
4779 disable_regset (regsets_info, regset);
4780 }
4781 else if (errno == ESRCH)
4782 {
4783 /* At this point, ESRCH should mean the process is
4784 already gone, in which case we simply ignore attempts
4785 to change its registers. See also the related
4786 comment in linux_resume_one_lwp. */
4787 free (buf);
4788 return 0;
4789 }
4790 else
4791 {
4792 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4793 }
4794 }
4795 else if (regset->type == GENERAL_REGS)
4796 saw_general_regs = 1;
4797 free (buf);
4798 }
4799 if (saw_general_regs)
4800 return 0;
4801 else
4802 return 1;
4803 }
4804
4805 #else /* !HAVE_LINUX_REGSETS */
4806
4807 #define use_linux_regsets 0
4808 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4809 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4810
4811 #endif
4812
4813 /* Return 1 if register REGNO is supported by one of the regset ptrace
4814 calls or 0 if it has to be transferred individually. */
4815
4816 static int
4817 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4818 {
4819 unsigned char mask = 1 << (regno % 8);
4820 size_t index = regno / 8;
4821
4822 return (use_linux_regsets
4823 && (regs_info->regset_bitmap == NULL
4824 || (regs_info->regset_bitmap[index] & mask) != 0));
4825 }
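
/* For example, with an 8-register target and regset_bitmap = { 0x0f }
(hypothetical values), registers 0..3 return 1 here and are
transferred via regsets, while registers 4..7 return 0 and are
transferred one at a time with PTRACE_PEEKUSER/PTRACE_POKEUSER. A
NULL bitmap means every register is covered by some regset.  */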
4826
4827 #ifdef HAVE_LINUX_USRREGS
4828
4829 int
4830 register_addr (const struct usrregs_info *usrregs, int regnum)
4831 {
4832 int addr;
4833
4834 if (regnum < 0 || regnum >= usrregs->num_regs)
4835 error ("Invalid register number %d.", regnum);
4836
4837 addr = usrregs->regmap[regnum];
4838
4839 return addr;
4840 }
4841
4842 /* Fetch one register. */
4843 static void
4844 fetch_register (const struct usrregs_info *usrregs,
4845 struct regcache *regcache, int regno)
4846 {
4847 CORE_ADDR regaddr;
4848 int i, size;
4849 char *buf;
4850 int pid;
4851
4852 if (regno >= usrregs->num_regs)
4853 return;
4854 if ((*the_low_target.cannot_fetch_register) (regno))
4855 return;
4856
4857 regaddr = register_addr (usrregs, regno);
4858 if (regaddr == -1)
4859 return;
4860
4861 size = ((register_size (regcache->tdesc, regno)
4862 + sizeof (PTRACE_XFER_TYPE) - 1)
4863 & -sizeof (PTRACE_XFER_TYPE));
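
/* E.g., with an 8-byte PTRACE_XFER_TYPE, a 4-byte register rounds up
to size = 8 and the PEEKUSER loop below runs exactly once
(illustrative sizes; both depend on the target).  */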
4864 buf = alloca (size);
4865
4866 pid = lwpid_of (current_thread);
4867 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4868 {
4869 errno = 0;
4870 *(PTRACE_XFER_TYPE *) (buf + i) =
4871 ptrace (PTRACE_PEEKUSER, pid,
4872 /* Coerce to a uintptr_t first to avoid potential gcc warning
4873 of coercing an 8 byte integer to a 4 byte pointer. */
4874 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4875 regaddr += sizeof (PTRACE_XFER_TYPE);
4876 if (errno != 0)
4877 error ("reading register %d: %s", regno, strerror (errno));
4878 }
4879
4880 if (the_low_target.supply_ptrace_register)
4881 the_low_target.supply_ptrace_register (regcache, regno, buf);
4882 else
4883 supply_register (regcache, regno, buf);
4884 }
4885
4886 /* Store one register. */
4887 static void
4888 store_register (const struct usrregs_info *usrregs,
4889 struct regcache *regcache, int regno)
4890 {
4891 CORE_ADDR regaddr;
4892 int i, size;
4893 char *buf;
4894 int pid;
4895
4896 if (regno >= usrregs->num_regs)
4897 return;
4898 if ((*the_low_target.cannot_store_register) (regno))
4899 return;
4900
4901 regaddr = register_addr (usrregs, regno);
4902 if (regaddr == -1)
4903 return;
4904
4905 size = ((register_size (regcache->tdesc, regno)
4906 + sizeof (PTRACE_XFER_TYPE) - 1)
4907 & -sizeof (PTRACE_XFER_TYPE));
4908 buf = alloca (size);
4909 memset (buf, 0, size);
4910
4911 if (the_low_target.collect_ptrace_register)
4912 the_low_target.collect_ptrace_register (regcache, regno, buf);
4913 else
4914 collect_register (regcache, regno, buf);
4915
4916 pid = lwpid_of (current_thread);
4917 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4918 {
4919 errno = 0;
4920 ptrace (PTRACE_POKEUSER, pid,
4921 /* Coerce to a uintptr_t first to avoid potential gcc warning
4922 about coercing an 8 byte integer to a 4 byte pointer. */
4923 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4924 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4925 if (errno != 0)
4926 {
4927 /* At this point, ESRCH should mean the process is
4928 already gone, in which case we simply ignore attempts
4929 to change its registers. See also the related
4930 comment in linux_resume_one_lwp. */
4931 if (errno == ESRCH)
4932 return;
4933
4934 if ((*the_low_target.cannot_store_register) (regno) == 0)
4935 error ("writing register %d: %s", regno, strerror (errno));
4936 }
4937 regaddr += sizeof (PTRACE_XFER_TYPE);
4938 }
4939 }
4940
4941 /* Fetch all registers, or just one, from the child process.
4942 If REGNO is -1, do this for all registers, skipping any that are
4943 assumed to have been retrieved by regsets_fetch_inferior_registers,
4944 unless ALL is non-zero.
4945 Otherwise, REGNO specifies which register (so we can save time). */
4946 static void
4947 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4948 struct regcache *regcache, int regno, int all)
4949 {
4950 struct usrregs_info *usr = regs_info->usrregs;
4951
4952 if (regno == -1)
4953 {
4954 for (regno = 0; regno < usr->num_regs; regno++)
4955 if (all || !linux_register_in_regsets (regs_info, regno))
4956 fetch_register (usr, regcache, regno);
4957 }
4958 else
4959 fetch_register (usr, regcache, regno);
4960 }
4961
4962 /* Store our register values back into the inferior.
4963 If REGNO is -1, do this for all registers, skipping any that are
4964 assumed to have been saved by regsets_store_inferior_registers,
4965 unless ALL is non-zero.
4966 Otherwise, REGNO specifies which register (so we can save time). */
4967 static void
4968 usr_store_inferior_registers (const struct regs_info *regs_info,
4969 struct regcache *regcache, int regno, int all)
4970 {
4971 struct usrregs_info *usr = regs_info->usrregs;
4972
4973 if (regno == -1)
4974 {
4975 for (regno = 0; regno < usr->num_regs; regno++)
4976 if (all || !linux_register_in_regsets (regs_info, regno))
4977 store_register (usr, regcache, regno);
4978 }
4979 else
4980 store_register (usr, regcache, regno);
4981 }
4982
4983 #else /* !HAVE_LINUX_USRREGS */
4984
4985 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4986 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4987
4988 #endif
4989
4990
4991 void
4992 linux_fetch_registers (struct regcache *regcache, int regno)
4993 {
4994 int use_regsets;
4995 int all = 0;
4996 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4997
4998 if (regno == -1)
4999 {
5000 if (the_low_target.fetch_register != NULL
5001 && regs_info->usrregs != NULL)
5002 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5003 (*the_low_target.fetch_register) (regcache, regno);
5004
5005 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5006 if (regs_info->usrregs != NULL)
5007 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5008 }
5009 else
5010 {
5011 if (the_low_target.fetch_register != NULL
5012 && (*the_low_target.fetch_register) (regcache, regno))
5013 return;
5014
5015 use_regsets = linux_register_in_regsets (regs_info, regno);
5016 if (use_regsets)
5017 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5018 regcache);
5019 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5020 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5021 }
5022 }
5023
5024 void
5025 linux_store_registers (struct regcache *regcache, int regno)
5026 {
5027 int use_regsets;
5028 int all = 0;
5029 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5030
5031 if (regno == -1)
5032 {
5033 all = regsets_store_inferior_registers (regs_info->regsets_info,
5034 regcache);
5035 if (regs_info->usrregs != NULL)
5036 usr_store_inferior_registers (regs_info, regcache, regno, all);
5037 }
5038 else
5039 {
5040 use_regsets = linux_register_in_regsets (regs_info, regno);
5041 if (use_regsets)
5042 all = regsets_store_inferior_registers (regs_info->regsets_info,
5043 regcache);
5044 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5045 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5046 }
5047 }
5048
5049
5050 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5051 to debugger memory starting at MYADDR. */
5052
5053 static int
5054 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5055 {
5056 int pid = lwpid_of (current_thread);
5057 register PTRACE_XFER_TYPE *buffer;
5058 register CORE_ADDR addr;
5059 register int count;
5060 char filename[64];
5061 register int i;
5062 int ret;
5063 int fd;
5064
5065 /* Try using /proc. Don't bother for one word. */
5066 if (len >= 3 * sizeof (long))
5067 {
5068 int bytes;
5069
5070 /* We could keep this file open and cache it - possibly one per
5071 thread. That requires some juggling, but is even faster. */
5072 sprintf (filename, "/proc/%d/mem", pid);
5073 fd = open (filename, O_RDONLY | O_LARGEFILE);
5074 if (fd == -1)
5075 goto no_proc;
5076
5077 /* If pread64 is available, use it. It's faster if the kernel
5078 supports it (only one syscall), and it's 64-bit safe even on
5079 32-bit platforms (for instance, SPARC debugging a SPARC64
5080 application). */
5081 #ifdef HAVE_PREAD64
5082 bytes = pread64 (fd, myaddr, len, memaddr);
5083 #else
5084 bytes = -1;
5085 if (lseek (fd, memaddr, SEEK_SET) != -1)
5086 bytes = read (fd, myaddr, len);
5087 #endif
5088
5089 close (fd);
5090 if (bytes == len)
5091 return 0;
5092
5093 /* Some data was read, we'll try to get the rest with ptrace. */
5094 if (bytes > 0)
5095 {
5096 memaddr += bytes;
5097 myaddr += bytes;
5098 len -= bytes;
5099 }
5100 }
5101
5102 no_proc:
5103 /* Round starting address down to longword boundary. */
5104 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5105 /* Round ending address up; get number of longwords that makes. */
5106 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5107 / sizeof (PTRACE_XFER_TYPE));
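
/* Worked example, assuming an 8-byte PTRACE_XFER_TYPE: memaddr =
0x1005 with len = 3 gives addr = 0x1000 and count = 1, while len = 6
gives count = 2, since that transfer straddles a longword boundary.  */
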
5108 /* Allocate buffer of that many longwords. */
5109 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5110
5111 /* Read all the longwords */
5112 errno = 0;
5113 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5114 {
5115 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5116 about coercing an 8 byte integer to a 4 byte pointer. */
5117 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5118 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5119 (PTRACE_TYPE_ARG4) 0);
5120 if (errno)
5121 break;
5122 }
5123 ret = errno;
5124
5125 /* Copy appropriate bytes out of the buffer. */
5126 if (i > 0)
5127 {
5128 i *= sizeof (PTRACE_XFER_TYPE);
5129 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5130 memcpy (myaddr,
5131 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5132 i < len ? i : len);
5133 }
5134
5135 return ret;
5136 }
5137
5138 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5139 memory at MEMADDR. On failure (cannot write to the inferior)
5140 returns the value of errno. Always succeeds if LEN is zero. */
5141
5142 static int
5143 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5144 {
5145 register int i;
5146 /* Round starting address down to longword boundary. */
5147 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5148 /* Round ending address up; get number of longwords that makes. */
5149 register int count
5150 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5151 / sizeof (PTRACE_XFER_TYPE);
5152
5153 /* Allocate buffer of that many longwords. */
5154 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5155 alloca (count * sizeof (PTRACE_XFER_TYPE));
5156
5157 int pid = lwpid_of (current_thread);
5158
5159 if (len == 0)
5160 {
5161 /* Zero length write always succeeds. */
5162 return 0;
5163 }
5164
5165 if (debug_threads)
5166 {
5167 /* Dump up to four bytes. */
5168 unsigned int val = * (unsigned int *) myaddr;
5169 if (len == 1)
5170 val = val & 0xff;
5171 else if (len == 2)
5172 val = val & 0xffff;
5173 else if (len == 3)
5174 val = val & 0xffffff;
5175 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5176 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5177 }
5178
5179 /* Fill start and end extra bytes of buffer with existing memory data. */
5180
5181 errno = 0;
5182 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5183 about coercing an 8 byte integer to a 4 byte pointer. */
5184 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5185 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5186 (PTRACE_TYPE_ARG4) 0);
5187 if (errno)
5188 return errno;
5189
5190 if (count > 1)
5191 {
5192 errno = 0;
5193 buffer[count - 1]
5194 = ptrace (PTRACE_PEEKTEXT, pid,
5195 /* Coerce to a uintptr_t first to avoid potential gcc warning
5196 about coercing an 8 byte integer to a 4 byte pointer. */
5197 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5198 * sizeof (PTRACE_XFER_TYPE)),
5199 (PTRACE_TYPE_ARG4) 0);
5200 if (errno)
5201 return errno;
5202 }
5203
5204 /* Copy data to be written over corresponding part of buffer. */
5205
5206 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5207 myaddr, len);
5208
5209 /* Write the entire buffer. */
5210
5211 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5212 {
5213 errno = 0;
5214 ptrace (PTRACE_POKETEXT, pid,
5215 /* Coerce to a uintptr_t first to avoid potential gcc warning
5216 about coercing an 8 byte integer to a 4 byte pointer. */
5217 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5218 (PTRACE_TYPE_ARG4) buffer[i]);
5219 if (errno)
5220 return errno;
5221 }
5222
5223 return 0;
5224 }
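
/* A minimal sketch of using the routine above to plant a software
breakpoint byte (0xcc is the x86 `int3' opcode; the value and helper
are purely illustrative, and not used elsewhere). The edge-word
pre-reads above are what make such an unaligned one-byte write
safe.  */

static int
example_poke_int3 (CORE_ADDR addr)
{
  static const unsigned char int3 = 0xcc;

  /* Returns 0 on success, or an errno value on failure.  */
  return linux_write_memory (addr, &int3, 1);
}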
5225
5226 static void
5227 linux_look_up_symbols (void)
5228 {
5229 #ifdef USE_THREAD_DB
5230 struct process_info *proc = current_process ();
5231
5232 if (proc->priv->thread_db != NULL)
5233 return;
5234
5235 /* If the kernel supports tracing clones, then we don't need to
5236 use the magic thread event breakpoint to learn about
5237 threads. */
5238 thread_db_init (!linux_supports_traceclone ());
5239 #endif
5240 }
5241
5242 static void
5243 linux_request_interrupt (void)
5244 {
5245 extern unsigned long signal_pid;
5246
5247 /* Send a SIGINT to the process group. This acts just like the user
5248 typed a ^C on the controlling terminal. */
5249 kill (-signal_pid, SIGINT);
5250 }
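
/* Note the negated pid above: kill(2) with a negative first argument
signals every member of the process group, mirroring what the
terminal driver does for ^C:

     kill (-signal_pid, SIGINT);   // the whole process group
     kill (signal_pid, SIGINT);    // only the group leader itself
*/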
5251
5252 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5253 to debugger memory starting at MYADDR. */
5254
5255 static int
5256 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5257 {
5258 char filename[PATH_MAX];
5259 int fd, n;
5260 int pid = lwpid_of (current_thread);
5261
5262 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5263
5264 fd = open (filename, O_RDONLY);
5265 if (fd < 0)
5266 return -1;
5267
5268 if (offset != (CORE_ADDR) 0
5269 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5270 n = -1;
5271 else
5272 n = read (fd, myaddr, len);
5273
5274 close (fd);
5275
5276 return n;
5277 }
5278
5279 /* These breakpoint and watchpoint related wrapper functions simply
5280 pass on the function call if the target has registered a
5281 corresponding function. */
5282
5283 static int
5284 linux_supports_z_point_type (char z_type)
5285 {
5286 return (the_low_target.supports_z_point_type != NULL
5287 && the_low_target.supports_z_point_type (z_type));
5288 }
5289
5290 static int
5291 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5292 int size, struct raw_breakpoint *bp)
5293 {
5294 if (type == raw_bkpt_type_sw)
5295 return insert_memory_breakpoint (bp);
5296 else if (the_low_target.insert_point != NULL)
5297 return the_low_target.insert_point (type, addr, size, bp);
5298 else
5299 /* Unsupported (see target.h). */
5300 return 1;
5301 }
5302
5303 static int
5304 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5305 int size, struct raw_breakpoint *bp)
5306 {
5307 if (type == raw_bkpt_type_sw)
5308 return remove_memory_breakpoint (bp);
5309 else if (the_low_target.remove_point != NULL)
5310 return the_low_target.remove_point (type, addr, size, bp);
5311 else
5312 /* Unsupported (see target.h). */
5313 return 1;
5314 }
5315
5316 /* Implement the to_stopped_by_sw_breakpoint target_ops
5317 method. */
5318
5319 static int
5320 linux_stopped_by_sw_breakpoint (void)
5321 {
5322 struct lwp_info *lwp = get_thread_lwp (current_thread);
5323
5324 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5325 }
5326
5327 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5328 method. */
5329
5330 static int
5331 linux_supports_stopped_by_sw_breakpoint (void)
5332 {
5333 return USE_SIGTRAP_SIGINFO;
5334 }
5335
5336 /* Implement the to_stopped_by_hw_breakpoint target_ops
5337 method. */
5338
5339 static int
5340 linux_stopped_by_hw_breakpoint (void)
5341 {
5342 struct lwp_info *lwp = get_thread_lwp (current_thread);
5343
5344 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5345 }
5346
5347 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5348 method. */
5349
5350 static int
5351 linux_supports_stopped_by_hw_breakpoint (void)
5352 {
5353 return USE_SIGTRAP_SIGINFO;
5354 }
5355
5356 /* Implement the supports_conditional_breakpoints target_ops
5357 method. */
5358
5359 static int
5360 linux_supports_conditional_breakpoints (void)
5361 {
5362 /* GDBserver needs to step over the breakpoint if the condition is
5363 false. GDBserver software single step is too simple, so disable
5364 conditional breakpoints if the target doesn't have hardware single
5365 step. */
5366 return can_hardware_single_step ();
5367 }
5368
5369 static int
5370 linux_stopped_by_watchpoint (void)
5371 {
5372 struct lwp_info *lwp = get_thread_lwp (current_thread);
5373
5374 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5375 }
5376
5377 static CORE_ADDR
5378 linux_stopped_data_address (void)
5379 {
5380 struct lwp_info *lwp = get_thread_lwp (current_thread);
5381
5382 return lwp->stopped_data_address;
5383 }
5384
5385 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5386 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5387 && defined(PT_TEXT_END_ADDR)
5388
5389 /* This is only used for targets that define PT_TEXT_ADDR,
5390 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined,
5391 the target presumably has a different way of acquiring this
5392 information, such as loadmaps. */
5393
5394 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5395 to tell gdb about. */
5396
5397 static int
5398 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5399 {
5400 unsigned long text, text_end, data;
5401 int pid = lwpid_of (current_thread);
5402
5403 errno = 0;
5404
5405 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5406 (PTRACE_TYPE_ARG4) 0);
5407 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5408 (PTRACE_TYPE_ARG4) 0);
5409 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5410 (PTRACE_TYPE_ARG4) 0);
5411
5412 if (errno == 0)
5413 {
5414 /* Both text and data offsets produced at compile-time (and so
5415 used by gdb) are relative to the beginning of the program,
5416 with the data segment immediately following the text segment.
5417 However, the actual runtime layout in memory may put the data
5418 somewhere else, so when we send gdb a data base-address, we
5419 use the real data base address and subtract the compile-time
5420 data base-address from it (which is just the length of the
5421 text segment). BSS immediately follows data in both
5422 cases. */
5423 *text_p = text;
5424 *data_p = data - (text_end - text);
5425
5426 return 1;
5427 }
5428 return 0;
5429 }
5430 #endif
5431
5432 static int
5433 linux_qxfer_osdata (const char *annex,
5434 unsigned char *readbuf, unsigned const char *writebuf,
5435 CORE_ADDR offset, int len)
5436 {
5437 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5438 }
5439
5440 /* Convert a native/host siginfo object, into/from the siginfo in the
5441 layout of the inferior's architecture: DIRECTION 1 converts from the inferior layout to native, 0 from native to the inferior layout. */
5442
5443 static void
5444 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5445 {
5446 int done = 0;
5447
5448 if (the_low_target.siginfo_fixup != NULL)
5449 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5450
5451 /* If there was no callback, or the callback didn't do anything,
5452 then just do a straight memcpy. */
5453 if (!done)
5454 {
5455 if (direction == 1)
5456 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5457 else
5458 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5459 }
5460 }
5461
5462 static int
5463 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5464 unsigned const char *writebuf, CORE_ADDR offset, int len)
5465 {
5466 int pid;
5467 siginfo_t siginfo;
5468 char inf_siginfo[sizeof (siginfo_t)];
5469
5470 if (current_thread == NULL)
5471 return -1;
5472
5473 pid = lwpid_of (current_thread);
5474
5475 if (debug_threads)
5476 debug_printf ("%s siginfo for lwp %d.\n",
5477 readbuf != NULL ? "Reading" : "Writing",
5478 pid);
5479
5480 if (offset >= sizeof (siginfo))
5481 return -1;
5482
5483 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5484 return -1;
5485
5486 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5487 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5488 inferior with a 64-bit GDBSERVER should look the same as debugging it
5489 with a 32-bit GDBSERVER, we need to convert it. */
5490 siginfo_fixup (&siginfo, inf_siginfo, 0);
5491
5492 if (offset + len > sizeof (siginfo))
5493 len = sizeof (siginfo) - offset;
5494
5495 if (readbuf != NULL)
5496 memcpy (readbuf, inf_siginfo + offset, len);
5497 else
5498 {
5499 memcpy (inf_siginfo + offset, writebuf, len);
5500
5501 /* Convert back to ptrace layout before flushing it out. */
5502 siginfo_fixup (&siginfo, inf_siginfo, 1);
5503
5504 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5505 return -1;
5506 }
5507
5508 return len;
5509 }
5510
5511 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5512 it marks the event pipe so we notice when children change state;
5513 and it acts as the handler for the sigsuspend in my_waitpid. */
5514
5515 static void
5516 sigchld_handler (int signo)
5517 {
5518 int old_errno = errno;
5519
5520 if (debug_threads)
5521 {
5522 do
5523 {
5524 /* fprintf is not async-signal-safe, so call write
5525 directly. */
5526 if (write (2, "sigchld_handler\n",
5527 sizeof ("sigchld_handler\n") - 1) < 0)
5528 break; /* just ignore */
5529 } while (0);
5530 }
5531
5532 if (target_is_async_p ())
5533 async_file_mark (); /* trigger a linux_wait */
5534
5535 errno = old_errno;
5536 }
5537
5538 static int
5539 linux_supports_non_stop (void)
5540 {
5541 return 1;
5542 }
5543
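/* Enable or disable async (non-blocking) mode. Async mode is driven
   by a self-pipe: sigchld_handler marks the pipe's write end, and the
   event loop watches its read end through add_file_handler. SIGCHLD
   is blocked while the pipe and handler are (un)registered so the
   signal handler cannot observe a half-initialized state. Returns
   the previous setting. */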
5544 static int
5545 linux_async (int enable)
5546 {
5547 int previous = target_is_async_p ();
5548
5549 if (debug_threads)
5550 debug_printf ("linux_async (%d), previous=%d\n",
5551 enable, previous);
5552
5553 if (previous != enable)
5554 {
5555 sigset_t mask;
5556 sigemptyset (&mask);
5557 sigaddset (&mask, SIGCHLD);
5558
5559 sigprocmask (SIG_BLOCK, &mask, NULL);
5560
5561 if (enable)
5562 {
5563 if (pipe (linux_event_pipe) == -1)
5564 {
5565 linux_event_pipe[0] = -1;
5566 linux_event_pipe[1] = -1;
5567 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5568
5569 warning ("creating event pipe failed.");
5570 return previous;
5571 }
5572
5573 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5574 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5575
5576 /* Register the event loop handler. */
5577 add_file_handler (linux_event_pipe[0],
5578 handle_target_event, NULL);
5579
5580 /* Always trigger a linux_wait. */
5581 async_file_mark ();
5582 }
5583 else
5584 {
5585 delete_file_handler (linux_event_pipe[0]);
5586
5587 close (linux_event_pipe[0]);
5588 close (linux_event_pipe[1]);
5589 linux_event_pipe[0] = -1;
5590 linux_event_pipe[1] = -1;
5591 }
5592
5593 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5594 }
5595
5596 return previous;
5597 }
5598
5599 static int
5600 linux_start_non_stop (int nonstop)
5601 {
5602 /* Register or unregister from event-loop accordingly. */
5603 linux_async (nonstop);
5604
5605 if (target_is_async_p () != (nonstop != 0))
5606 return -1;
5607
5608 return 0;
5609 }
5610
5611 static int
5612 linux_supports_multi_process (void)
5613 {
5614 return 1;
5615 }
5616
5617 /* Check if fork events are supported. */
5618
5619 static int
5620 linux_supports_fork_events (void)
5621 {
5622 return linux_supports_tracefork ();
5623 }
5624
5625 /* Check if vfork events are supported. */
5626
5627 static int
5628 linux_supports_vfork_events (void)
5629 {
5630 return linux_supports_tracefork ();
5631 }
5632
5633 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5634 options for the specified lwp. */
5635
5636 static int
5637 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5638 void *args)
5639 {
5640 struct thread_info *thread = (struct thread_info *) entry;
5641 struct lwp_info *lwp = get_thread_lwp (thread);
5642
5643 if (!lwp->stopped)
5644 {
5645 /* Stop the lwp so we can modify its ptrace options. */
5646 lwp->must_set_ptrace_flags = 1;
5647 linux_stop_lwp (lwp);
5648 }
5649 else
5650 {
5651 /* Already stopped; go ahead and set the ptrace options. */
5652 struct process_info *proc = find_process_pid (pid_of (thread));
5653 int options = linux_low_ptrace_options (proc->attached);
5654
5655 linux_enable_event_reporting (lwpid_of (thread), options);
5656 lwp->must_set_ptrace_flags = 0;
5657 }
5658
5659 return 0;
5660 }
5661
5662 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5663 ptrace flags for all inferiors. This is in case the new GDB connection
5664 doesn't support the same set of events that the previous one did. */
5665
5666 static void
5667 linux_handle_new_gdb_connection (void)
5668 {
5669 pid_t pid;
5670
5671 /* Request that all the lwps reset their ptrace options. */
5672 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5673 }
5674
5675 static int
5676 linux_supports_disable_randomization (void)
5677 {
5678 #ifdef HAVE_PERSONALITY
5679 return 1;
5680 #else
5681 return 0;
5682 #endif
5683 }
5684
5685 static int
5686 linux_supports_agent (void)
5687 {
5688 return 1;
5689 }
5690
5691 static int
5692 linux_supports_range_stepping (void)
5693 {
5694 if (*the_low_target.supports_range_stepping == NULL)
5695 return 0;
5696
5697 return (*the_low_target.supports_range_stepping) ();
5698 }
5699
5700 /* Enumerate spufs IDs for process PID. */
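/* Each ID is the number of an open file descriptor in /proc/PID/fd
   that refers to a directory on an spufs mount. IDs are packed into
   BUF as 4-byte values; OFFSET and LEN select a byte window into the
   full array, and the number of bytes actually stored is returned. */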
5701 static int
5702 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5703 {
5704 int pos = 0;
5705 int written = 0;
5706 char path[128];
5707 DIR *dir;
5708 struct dirent *entry;
5709
5710 sprintf (path, "/proc/%ld/fd", pid);
5711 dir = opendir (path);
5712 if (!dir)
5713 return -1;
5714
5715 rewinddir (dir);
5716 while ((entry = readdir (dir)) != NULL)
5717 {
5718 struct stat st;
5719 struct statfs stfs;
5720 int fd;
5721
5722 fd = atoi (entry->d_name);
5723 if (!fd)
5724 continue;
5725
5726 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5727 if (stat (path, &st) != 0)
5728 continue;
5729 if (!S_ISDIR (st.st_mode))
5730 continue;
5731
5732 if (statfs (path, &stfs) != 0)
5733 continue;
5734 if (stfs.f_type != SPUFS_MAGIC)
5735 continue;
5736
5737 if (pos >= offset && pos + 4 <= offset + len)
5738 {
5739 *(unsigned int *)(buf + pos - offset) = fd;
5740 written += 4;
5741 }
5742 pos += 4;
5743 }
5744
5745 closedir (dir);
5746 return written;
5747 }
5748
5749 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5750 object type, using the /proc file system. */
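/* An empty ANNEX requests the list of SPU context IDs; otherwise
   ANNEX names an entry under /proc/PID/fd (presumably of the form
   "<id>/<file>") that is read or written directly. */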
5751 static int
5752 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5753 unsigned const char *writebuf,
5754 CORE_ADDR offset, int len)
5755 {
5756 long pid = lwpid_of (current_thread);
5757 char buf[128];
5758 int fd = 0;
5759 int ret = 0;
5760
5761 if (!writebuf && !readbuf)
5762 return -1;
5763
5764 if (!*annex)
5765 {
5766 if (!readbuf)
5767 return -1;
5768 else
5769 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5770 }
5771
5772 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5773 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5774 if (fd <= 0)
5775 return -1;
5776
5777 if (offset != 0
5778 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5779 {
5780 close (fd);
5781 return 0;
5782 }
5783
5784 if (writebuf)
5785 ret = write (fd, writebuf, (size_t) len);
5786 else
5787 ret = read (fd, readbuf, (size_t) len);
5788
5789 close (fd);
5790 return ret;
5791 }
5792
5793 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5794 struct target_loadseg
5795 {
5796 /* Core address to which the segment is mapped. */
5797 Elf32_Addr addr;
5798 /* VMA recorded in the program header. */
5799 Elf32_Addr p_vaddr;
5800 /* Size of this segment in memory. */
5801 Elf32_Word p_memsz;
5802 };
5803
5804 # if defined PT_GETDSBT
5805 struct target_loadmap
5806 {
5807 /* Protocol version number, must be zero. */
5808 Elf32_Word version;
5809 /* Pointer to the DSBT table, its size, and the DSBT index. */
5810 unsigned *dsbt_table;
5811 unsigned dsbt_size, dsbt_index;
5812 /* Number of segments in this map. */
5813 Elf32_Word nsegs;
5814 /* The actual memory map. */
5815 struct target_loadseg segs[/*nsegs*/];
5816 };
5817 # define LINUX_LOADMAP PT_GETDSBT
5818 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5819 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5820 # else
5821 struct target_loadmap
5822 {
5823 /* Protocol version number, must be zero. */
5824 Elf32_Half version;
5825 /* Number of segments in this map. */
5826 Elf32_Half nsegs;
5827 /* The actual memory map. */
5828 struct target_loadseg segs[/*nsegs*/];
5829 };
5830 # define LINUX_LOADMAP PTRACE_GETFDPIC
5831 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5832 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5833 # endif
5834
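/* Read the FDPIC/DSBT load map of either the executable (ANNEX
   "exec") or the dynamic linker (ANNEX "interp") via ptrace, copying
   up to LEN bytes of it, starting at OFFSET, into MYADDR. Returns
   the number of bytes copied, or -1 on error. */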
5835 static int
5836 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5837 unsigned char *myaddr, unsigned int len)
5838 {
5839 int pid = lwpid_of (current_thread);
5840 int addr = -1;
5841 struct target_loadmap *data = NULL;
5842 unsigned int actual_length, copy_length;
5843
5844 if (strcmp (annex, "exec") == 0)
5845 addr = (int) LINUX_LOADMAP_EXEC;
5846 else if (strcmp (annex, "interp") == 0)
5847 addr = (int) LINUX_LOADMAP_INTERP;
5848 else
5849 return -1;
5850
5851 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5852 return -1;
5853
5854 if (data == NULL)
5855 return -1;
5856
5857 actual_length = sizeof (struct target_loadmap)
5858 + sizeof (struct target_loadseg) * data->nsegs;
5859
5860 if (offset < 0 || offset > actual_length)
5861 return -1;
5862
5863 copy_length = actual_length - offset < len ? actual_length - offset : len;
5864 memcpy (myaddr, (char *) data + offset, copy_length);
5865 return copy_length;
5866 }
5867 #else
5868 # define linux_read_loadmap NULL
5869 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5870
5871 static void
5872 linux_process_qsupported (const char *query)
5873 {
5874 if (the_low_target.process_qsupported != NULL)
5875 the_low_target.process_qsupported (query);
5876 }
5877
5878 static int
5879 linux_supports_tracepoints (void)
5880 {
5881 if (*the_low_target.supports_tracepoints == NULL)
5882 return 0;
5883
5884 return (*the_low_target.supports_tracepoints) ();
5885 }
5886
5887 static CORE_ADDR
5888 linux_read_pc (struct regcache *regcache)
5889 {
5890 if (the_low_target.get_pc == NULL)
5891 return 0;
5892
5893 return (*the_low_target.get_pc) (regcache);
5894 }
5895
5896 static void
5897 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5898 {
5899 gdb_assert (the_low_target.set_pc != NULL);
5900
5901 (*the_low_target.set_pc) (regcache, pc);
5902 }
5903
5904 static int
5905 linux_thread_stopped (struct thread_info *thread)
5906 {
5907 return get_thread_lwp (thread)->stopped;
5908 }
5909
5910 /* This exposes stop-all-threads functionality to other modules. */
5911
5912 static void
5913 linux_pause_all (int freeze)
5914 {
5915 stop_all_lwps (freeze, NULL);
5916 }
5917
5918 /* This exposes unstop-all-threads functionality to other gdbserver
5919 modules. */
5920
5921 static void
5922 linux_unpause_all (int unfreeze)
5923 {
5924 unstop_all_lwps (unfreeze, NULL);
5925 }
5926
5927 static int
5928 linux_prepare_to_access_memory (void)
5929 {
5930 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5931 running LWP. */
5932 if (non_stop)
5933 linux_pause_all (1);
5934 return 0;
5935 }
5936
5937 static void
5938 linux_done_accessing_memory (void)
5939 {
5940 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5941 running LWP. */
5942 if (non_stop)
5943 linux_unpause_all (1);
5944 }
5945
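/* Thin wrapper that forwards a fast-tracepoint jump-pad request to
   the architecture-specific low target, which does all the work. */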
5946 static int
5947 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5948 CORE_ADDR collector,
5949 CORE_ADDR lockaddr,
5950 ULONGEST orig_size,
5951 CORE_ADDR *jump_entry,
5952 CORE_ADDR *trampoline,
5953 ULONGEST *trampoline_size,
5954 unsigned char *jjump_pad_insn,
5955 ULONGEST *jjump_pad_insn_size,
5956 CORE_ADDR *adjusted_insn_addr,
5957 CORE_ADDR *adjusted_insn_addr_end,
5958 char *err)
5959 {
5960 return (*the_low_target.install_fast_tracepoint_jump_pad)
5961 (tpoint, tpaddr, collector, lockaddr, orig_size,
5962 jump_entry, trampoline, trampoline_size,
5963 jjump_pad_insn, jjump_pad_insn_size,
5964 adjusted_insn_addr, adjusted_insn_addr_end,
5965 err);
5966 }
5967
5968 static struct emit_ops *
5969 linux_emit_ops (void)
5970 {
5971 if (the_low_target.emit_ops != NULL)
5972 return (*the_low_target.emit_ops) ();
5973 else
5974 return NULL;
5975 }
5976
5977 static int
5978 linux_get_min_fast_tracepoint_insn_len (void)
5979 {
5980 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5981 }
5982
5983 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5984
5985 static int
5986 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5987 CORE_ADDR *phdr_memaddr, int *num_phdr)
5988 {
5989 char filename[PATH_MAX];
5990 int fd;
5991 const int auxv_size = is_elf64
5992 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5993 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5994
5995 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5996
5997 fd = open (filename, O_RDONLY);
5998 if (fd < 0)
5999 return 1;
6000
6001 *phdr_memaddr = 0;
6002 *num_phdr = 0;
6003 while (read (fd, buf, auxv_size) == auxv_size
6004 && (*phdr_memaddr == 0 || *num_phdr == 0))
6005 {
6006 if (is_elf64)
6007 {
6008 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6009
6010 switch (aux->a_type)
6011 {
6012 case AT_PHDR:
6013 *phdr_memaddr = aux->a_un.a_val;
6014 break;
6015 case AT_PHNUM:
6016 *num_phdr = aux->a_un.a_val;
6017 break;
6018 }
6019 }
6020 else
6021 {
6022 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6023
6024 switch (aux->a_type)
6025 {
6026 case AT_PHDR:
6027 *phdr_memaddr = aux->a_un.a_val;
6028 break;
6029 case AT_PHNUM:
6030 *num_phdr = aux->a_un.a_val;
6031 break;
6032 }
6033 }
6034 }
6035
6036 close (fd);
6037
6038 if (*phdr_memaddr == 0 || *num_phdr == 0)
6039 {
6040 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6041 "phdr_memaddr = %ld, phdr_num = %d",
6042 (long) *phdr_memaddr, *num_phdr);
6043 return 2;
6044 }
6045
6046 return 0;
6047 }
6048
6049 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6050
6051 static CORE_ADDR
6052 get_dynamic (const int pid, const int is_elf64)
6053 {
6054 CORE_ADDR phdr_memaddr, relocation;
6055 int num_phdr, i;
6056 unsigned char *phdr_buf;
6057 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6058
6059 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6060 return 0;
6061
6062 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6063 phdr_buf = alloca (num_phdr * phdr_size);
6064
6065 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6066 return 0;
6067
6068 /* Compute relocation: it is expected to be 0 for "regular" executables,
6069 non-zero for PIE ones. */
6070 relocation = -1;
6071 for (i = 0; relocation == -1 && i < num_phdr; i++)
6072 if (is_elf64)
6073 {
6074 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6075
6076 if (p->p_type == PT_PHDR)
6077 relocation = phdr_memaddr - p->p_vaddr;
6078 }
6079 else
6080 {
6081 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6082
6083 if (p->p_type == PT_PHDR)
6084 relocation = phdr_memaddr - p->p_vaddr;
6085 }
6086
6087 if (relocation == -1)
6088 {
6089 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6090 all real-world executables, including PIE executables, always have
6091 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6092 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6093 provides DT_DEBUG anyway (fpc binaries are statically linked).
6094
6095 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6096
6097 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6098
6099 return 0;
6100 }
6101
6102 for (i = 0; i < num_phdr; i++)
6103 {
6104 if (is_elf64)
6105 {
6106 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6107
6108 if (p->p_type == PT_DYNAMIC)
6109 return p->p_vaddr + relocation;
6110 }
6111 else
6112 {
6113 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6114
6115 if (p->p_type == PT_DYNAMIC)
6116 return p->p_vaddr + relocation;
6117 }
6118 }
6119
6120 return 0;
6121 }
6122
6123 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6124 can be 0 if the inferior does not yet have the library list initialized.
6125 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6126 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6127
6128 static CORE_ADDR
6129 get_r_debug (const int pid, const int is_elf64)
6130 {
6131 CORE_ADDR dynamic_memaddr;
6132 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6133 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6134 CORE_ADDR map = -1;
6135
6136 dynamic_memaddr = get_dynamic (pid, is_elf64);
6137 if (dynamic_memaddr == 0)
6138 return map;
6139
6140 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6141 {
6142 if (is_elf64)
6143 {
6144 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6145 #ifdef DT_MIPS_RLD_MAP
6146 union
6147 {
6148 Elf64_Xword map;
6149 unsigned char buf[sizeof (Elf64_Xword)];
6150 }
6151 rld_map;
6152
6153 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6154 {
6155 if (linux_read_memory (dyn->d_un.d_val,
6156 rld_map.buf, sizeof (rld_map.buf)) == 0)
6157 return rld_map.map;
6158 else
6159 break;
6160 }
6161 #endif /* DT_MIPS_RLD_MAP */
6162
6163 if (dyn->d_tag == DT_DEBUG && map == -1)
6164 map = dyn->d_un.d_val;
6165
6166 if (dyn->d_tag == DT_NULL)
6167 break;
6168 }
6169 else
6170 {
6171 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6172 #ifdef DT_MIPS_RLD_MAP
6173 union
6174 {
6175 Elf32_Word map;
6176 unsigned char buf[sizeof (Elf32_Word)];
6177 }
6178 rld_map;
6179
6180 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6181 {
6182 if (linux_read_memory (dyn->d_un.d_val,
6183 rld_map.buf, sizeof (rld_map.buf)) == 0)
6184 return rld_map.map;
6185 else
6186 break;
6187 }
6188 #endif /* DT_MIPS_RLD_MAP */
6189
6190 if (dyn->d_tag == DT_DEBUG && map == -1)
6191 map = dyn->d_un.d_val;
6192
6193 if (dyn->d_tag == DT_NULL)
6194 break;
6195 }
6196
6197 dynamic_memaddr += dyn_size;
6198 }
6199
6200 return map;
6201 }
6202
6203 /* Read one pointer from MEMADDR in the inferior. */
6204
6205 static int
6206 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6207 {
6208 int ret;
6209
6210 /* Go through a union so this works on either big or little endian
6211 hosts, when the inferior's pointer size is smaller than the size
6212 of CORE_ADDR. It is assumed that the inferior's endianness is
6213 the same as the superior's. */
6214 union
6215 {
6216 CORE_ADDR core_addr;
6217 unsigned int ui;
6218 unsigned char uc;
6219 } addr;
6220
6221 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6222 if (ret == 0)
6223 {
6224 if (ptr_size == sizeof (CORE_ADDR))
6225 *ptr = addr.core_addr;
6226 else if (ptr_size == sizeof (unsigned int))
6227 *ptr = addr.ui;
6228 else
6229 gdb_assert_not_reached ("unhandled pointer size");
6230 }
6231 return ret;
6232 }
6233
6234 struct link_map_offsets
6235 {
6236 /* Offset of r_debug.r_version. */
6237 int r_version_offset;
6238
6239 /* Offset of r_debug.r_map. */
6240 int r_map_offset;
6241
6242 /* Offset to l_addr field in struct link_map. */
6243 int l_addr_offset;
6244
6245 /* Offset to l_name field in struct link_map. */
6246 int l_name_offset;
6247
6248 /* Offset to l_ld field in struct link_map. */
6249 int l_ld_offset;
6250
6251 /* Offset to l_next field in struct link_map. */
6252 int l_next_offset;
6253
6254 /* Offset to l_prev field in struct link_map. */
6255 int l_prev_offset;
6256 };
6257
6258 /* Construct qXfer:libraries-svr4:read reply. */
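/* The annex is a semicolon-separated list of KEY=HEXVALUE pairs. The
   recognized keys are "start" (the link_map address to resume the
   list walk from) and "prev" (the expected l_prev value of that
   entry); unrecognized pairs are skipped. */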
6259
6260 static int
6261 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6262 unsigned const char *writebuf,
6263 CORE_ADDR offset, int len)
6264 {
6265 char *document;
6266 unsigned document_len;
6267 struct process_info_private *const priv = current_process ()->priv;
6268 char filename[PATH_MAX];
6269 int pid, is_elf64;
6270
6271 static const struct link_map_offsets lmo_32bit_offsets =
6272 {
6273 0, /* r_version offset. */
6274 4, /* r_debug.r_map offset. */
6275 0, /* l_addr offset in link_map. */
6276 4, /* l_name offset in link_map. */
6277 8, /* l_ld offset in link_map. */
6278 12, /* l_next offset in link_map. */
6279 16 /* l_prev offset in link_map. */
6280 };
6281
6282 static const struct link_map_offsets lmo_64bit_offsets =
6283 {
6284 0, /* r_version offset. */
6285 8, /* r_debug.r_map offset. */
6286 0, /* l_addr offset in link_map. */
6287 8, /* l_name offset in link_map. */
6288 16, /* l_ld offset in link_map. */
6289 24, /* l_next offset in link_map. */
6290 32 /* l_prev offset in link_map. */
6291 };
6292 const struct link_map_offsets *lmo;
6293 unsigned int machine;
6294 int ptr_size;
6295 CORE_ADDR lm_addr = 0, lm_prev = 0;
6296 int allocated = 1024;
6297 char *p;
6298 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6299 int header_done = 0;
6300
6301 if (writebuf != NULL)
6302 return -2;
6303 if (readbuf == NULL)
6304 return -1;
6305
6306 pid = lwpid_of (current_thread);
6307 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6308 is_elf64 = elf_64_file_p (filename, &machine);
6309 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6310 ptr_size = is_elf64 ? 8 : 4;
6311
6312 while (annex[0] != '\0')
6313 {
6314 const char *sep;
6315 CORE_ADDR *addrp;
6316 int len;
6317
6318 sep = strchr (annex, '=');
6319 if (sep == NULL)
6320 break;
6321
6322 len = sep - annex;
6323 if (len == 5 && startswith (annex, "start"))
6324 addrp = &lm_addr;
6325 else if (len == 4 && startswith (annex, "prev"))
6326 addrp = &lm_prev;
6327 else
6328 {
6329 annex = strchr (sep, ';');
6330 if (annex == NULL)
6331 break;
6332 annex++;
6333 continue;
6334 }
6335
6336 annex = decode_address_to_semicolon (addrp, sep + 1);
6337 }
6338
6339 if (lm_addr == 0)
6340 {
6341 int r_version = 0;
6342
6343 if (priv->r_debug == 0)
6344 priv->r_debug = get_r_debug (pid, is_elf64);
6345
6346 /* We failed to find DT_DEBUG. This situation will not change
6347 for this inferior, so do not retry. Report it to GDB as E01;
6348 see GDB's solib-svr4.c for the reasons. */
6349 if (priv->r_debug == (CORE_ADDR) -1)
6350 return -1;
6351
6352 if (priv->r_debug != 0)
6353 {
6354 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6355 (unsigned char *) &r_version,
6356 sizeof (r_version)) != 0
6357 || r_version != 1)
6358 {
6359 warning ("unexpected r_debug version %d", r_version);
6360 }
6361 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6362 &lm_addr, ptr_size) != 0)
6363 {
6364 warning ("unable to read r_map from 0x%lx",
6365 (long) priv->r_debug + lmo->r_map_offset);
6366 }
6367 }
6368 }
6369
6370 document = xmalloc (allocated);
6371 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6372 p = document + strlen (document);
6373
6374 while (lm_addr
6375 && read_one_ptr (lm_addr + lmo->l_name_offset,
6376 &l_name, ptr_size) == 0
6377 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6378 &l_addr, ptr_size) == 0
6379 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6380 &l_ld, ptr_size) == 0
6381 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6382 &l_prev, ptr_size) == 0
6383 && read_one_ptr (lm_addr + lmo->l_next_offset,
6384 &l_next, ptr_size) == 0)
6385 {
6386 unsigned char libname[PATH_MAX];
6387
6388 if (lm_prev != l_prev)
6389 {
6390 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6391 (long) lm_prev, (long) l_prev);
6392 break;
6393 }
6394
6395 /* Ignore the first entry even if it has a valid name, as the first
6396 entry corresponds to the main executable. The first entry should
6397 not be skipped if the dynamic loader was loaded late by a static
6398 executable (see the solib-svr4.c parameter ignore_first), but in
6399 that case the main executable has no PT_DYNAMIC, and this function
6400 has already exited above due to a failed get_r_debug. */
6401 if (lm_prev == 0)
6402 {
6403 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6404 p = p + strlen (p);
6405 }
6406 else
6407 {
6408 /* Not checking for error because reading may stop before
6409 we've got PATH_MAX worth of characters. */
6410 libname[0] = '\0';
6411 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6412 libname[sizeof (libname) - 1] = '\0';
6413 if (libname[0] != '\0')
6414 {
6415 /* 6x the size for xml_escape_text below. */
6416 size_t len = 6 * strlen ((char *) libname);
6417 char *name;
6418
6419 if (!header_done)
6420 {
6421 /* Terminate `<library-list-svr4'. */
6422 *p++ = '>';
6423 header_done = 1;
6424 }
6425
6426 while (allocated < p - document + len + 200)
6427 {
6428 /* Expand to guarantee sufficient storage. */
6429 uintptr_t document_len = p - document;
6430
6431 document = xrealloc (document, 2 * allocated);
6432 allocated *= 2;
6433 p = document + document_len;
6434 }
6435
6436 name = xml_escape_text ((char *) libname);
6437 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6438 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6439 name, (unsigned long) lm_addr,
6440 (unsigned long) l_addr, (unsigned long) l_ld);
6441 free (name);
6442 }
6443 }
6444
6445 lm_prev = lm_addr;
6446 lm_addr = l_next;
6447 }
6448
6449 if (!header_done)
6450 {
6451 /* Empty list; terminate `<library-list-svr4'. */
6452 strcpy (p, "/>");
6453 }
6454 else
6455 strcpy (p, "</library-list-svr4>");
6456
6457 document_len = strlen (document);
6458 if (offset < document_len)
6459 document_len -= offset;
6460 else
6461 document_len = 0;
6462 if (len > document_len)
6463 len = document_len;
6464
6465 memcpy (readbuf, document + offset, len);
6466 xfree (document);
6467
6468 return len;
6469 }
6470
6471 #ifdef HAVE_LINUX_BTRACE
6472
6473 /* See to_enable_btrace target method. */
6474
6475 static struct btrace_target_info *
6476 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6477 {
6478 struct btrace_target_info *tinfo;
6479
6480 tinfo = linux_enable_btrace (ptid, conf);
6481
6482 if (tinfo != NULL && tinfo->ptr_bits == 0)
6483 {
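/* The low-level enable code did not determine the pointer size;
   infer it from the size of the first register in the target
   description. */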
6484 struct thread_info *thread = find_thread_ptid (ptid);
6485 struct regcache *regcache = get_thread_regcache (thread, 0);
6486
6487 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6488 }
6489
6490 return tinfo;
6491 }
6492
6493 /* See to_disable_btrace target method. */
6494
6495 static int
6496 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6497 {
6498 enum btrace_error err;
6499
6500 err = linux_disable_btrace (tinfo);
6501 return (err == BTRACE_ERR_NONE ? 0 : -1);
6502 }
6503
6504 /* Encode an Intel(R) Processor Trace configuration. */
6505
6506 static void
6507 linux_low_encode_pt_config (struct buffer *buffer,
6508 const struct btrace_data_pt_config *config)
6509 {
6510 buffer_grow_str (buffer, "<pt-config>\n");
6511
6512 switch (config->cpu.vendor)
6513 {
6514 case CV_INTEL:
6515 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6516 "model=\"%u\" stepping=\"%u\"/>\n",
6517 config->cpu.family, config->cpu.model,
6518 config->cpu.stepping);
6519 break;
6520
6521 default:
6522 break;
6523 }
6524
6525 buffer_grow_str (buffer, "</pt-config>\n");
6526 }
6527
6528 /* Encode a raw buffer. */
6529
6530 static void
6531 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6532 unsigned int size)
6533 {
6534 if (size == 0)
6535 return;
6536
6537 /* We use hex encoding - see common/rsp-low.h. */
6538 buffer_grow_str (buffer, "<raw>\n");
6539
6540 while (size-- > 0)
6541 {
6542 char elem[2];
6543
6544 elem[0] = tohex ((*data >> 4) & 0xf);
6545 elem[1] = tohex (*data++ & 0xf);
6546
6547 buffer_grow (buffer, elem, 2);
6548 }
6549
6550 buffer_grow_str (buffer, "</raw>\n");
6551 }
6552
6553 /* See to_read_btrace target method. */
6554
6555 static int
6556 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6557 int type)
6558 {
6559 struct btrace_data btrace;
6560 struct btrace_block *block;
6561 enum btrace_error err;
6562 int i;
6563
6564 btrace_data_init (&btrace);
6565
6566 err = linux_read_btrace (&btrace, tinfo, type);
6567 if (err != BTRACE_ERR_NONE)
6568 {
6569 if (err == BTRACE_ERR_OVERFLOW)
6570 buffer_grow_str0 (buffer, "E.Overflow.");
6571 else
6572 buffer_grow_str0 (buffer, "E.Generic Error.");
6573
6574 goto err;
6575 }
6576
6577 switch (btrace.format)
6578 {
6579 case BTRACE_FORMAT_NONE:
6580 buffer_grow_str0 (buffer, "E.No Trace.");
6581 goto err;
6582
6583 case BTRACE_FORMAT_BTS:
6584 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6585 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6586
6587 for (i = 0;
6588 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6589 i++)
6590 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6591 paddress (block->begin), paddress (block->end));
6592
6593 buffer_grow_str0 (buffer, "</btrace>\n");
6594 break;
6595
6596 case BTRACE_FORMAT_PT:
6597 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6598 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6599 buffer_grow_str (buffer, "<pt>\n");
6600
6601 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6602
6603 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6604 btrace.variant.pt.size);
6605
6606 buffer_grow_str (buffer, "</pt>\n");
6607 buffer_grow_str0 (buffer, "</btrace>\n");
6608 break;
6609
6610 default:
6611 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6612 goto err;
6613 }
6614
6615 btrace_data_fini (&btrace);
6616 return 0;
6617
6618 err:
6619 btrace_data_fini (&btrace);
6620 return -1;
6621 }
6622
6623 /* See to_btrace_conf target method. */
6624
6625 static int
6626 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6627 struct buffer *buffer)
6628 {
6629 const struct btrace_config *conf;
6630
6631 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6632 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6633
6634 conf = linux_btrace_conf (tinfo);
6635 if (conf != NULL)
6636 {
6637 switch (conf->format)
6638 {
6639 case BTRACE_FORMAT_NONE:
6640 break;
6641
6642 case BTRACE_FORMAT_BTS:
6643 buffer_xml_printf (buffer, "<bts");
6644 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6645 buffer_xml_printf (buffer, " />\n");
6646 break;
6647
6648 case BTRACE_FORMAT_PT:
6649 buffer_xml_printf (buffer, "<pt");
6650 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6651 buffer_xml_printf (buffer, "/>\n");
6652 break;
6653 }
6654 }
6655
6656 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6657 return 0;
6658 }
6659 #endif /* HAVE_LINUX_BTRACE */
6660
6661 /* See nat/linux-nat.h. */
6662
6663 ptid_t
6664 current_lwp_ptid (void)
6665 {
6666 return ptid_of (current_thread);
6667 }
6668
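/* These are positional initializers and must match the order of the
   function-pointer fields in struct target_ops (see target.h); a
   NULL entry marks an operation this target does not provide. */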
6669 static struct target_ops linux_target_ops = {
6670 linux_create_inferior,
6671 linux_arch_setup,
6672 linux_attach,
6673 linux_kill,
6674 linux_detach,
6675 linux_mourn,
6676 linux_join,
6677 linux_thread_alive,
6678 linux_resume,
6679 linux_wait,
6680 linux_fetch_registers,
6681 linux_store_registers,
6682 linux_prepare_to_access_memory,
6683 linux_done_accessing_memory,
6684 linux_read_memory,
6685 linux_write_memory,
6686 linux_look_up_symbols,
6687 linux_request_interrupt,
6688 linux_read_auxv,
6689 linux_supports_z_point_type,
6690 linux_insert_point,
6691 linux_remove_point,
6692 linux_stopped_by_sw_breakpoint,
6693 linux_supports_stopped_by_sw_breakpoint,
6694 linux_stopped_by_hw_breakpoint,
6695 linux_supports_stopped_by_hw_breakpoint,
6696 linux_supports_conditional_breakpoints,
6697 linux_stopped_by_watchpoint,
6698 linux_stopped_data_address,
6699 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6700 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6701 && defined(PT_TEXT_END_ADDR)
6702 linux_read_offsets,
6703 #else
6704 NULL,
6705 #endif
6706 #ifdef USE_THREAD_DB
6707 thread_db_get_tls_address,
6708 #else
6709 NULL,
6710 #endif
6711 linux_qxfer_spu,
6712 hostio_last_error_from_errno,
6713 linux_qxfer_osdata,
6714 linux_xfer_siginfo,
6715 linux_supports_non_stop,
6716 linux_async,
6717 linux_start_non_stop,
6718 linux_supports_multi_process,
6719 linux_supports_fork_events,
6720 linux_supports_vfork_events,
6721 linux_handle_new_gdb_connection,
6722 #ifdef USE_THREAD_DB
6723 thread_db_handle_monitor_command,
6724 #else
6725 NULL,
6726 #endif
6727 linux_common_core_of_thread,
6728 linux_read_loadmap,
6729 linux_process_qsupported,
6730 linux_supports_tracepoints,
6731 linux_read_pc,
6732 linux_write_pc,
6733 linux_thread_stopped,
6734 NULL,
6735 linux_pause_all,
6736 linux_unpause_all,
6737 linux_stabilize_threads,
6738 linux_install_fast_tracepoint_jump_pad,
6739 linux_emit_ops,
6740 linux_supports_disable_randomization,
6741 linux_get_min_fast_tracepoint_insn_len,
6742 linux_qxfer_libraries_svr4,
6743 linux_supports_agent,
6744 #ifdef HAVE_LINUX_BTRACE
6745 linux_supports_btrace,
6746 linux_low_enable_btrace,
6747 linux_low_disable_btrace,
6748 linux_low_read_btrace,
6749 linux_low_btrace_conf,
6750 #else
6751 NULL,
6752 NULL,
6753 NULL,
6754 NULL,
6755 NULL,
6756 #endif
6757 linux_supports_range_stepping,
6758 linux_proc_pid_to_exec_file,
6759 linux_mntns_open_cloexec,
6760 linux_mntns_unlink,
6761 linux_mntns_readlink,
6762 };
6763
6764 static void
6765 linux_init_signals (void)
6766 {
6767 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6768 to find what the cancel signal actually is. */
6769 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6770 signal (__SIGRTMIN+1, SIG_IGN);
6771 #endif
6772 }
6773
6774 #ifdef HAVE_LINUX_REGSETS
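/* Count the regsets in INFO: the regsets array is terminated by an
   entry whose size field is negative. */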
6775 void
6776 initialize_regsets_info (struct regsets_info *info)
6777 {
6778 for (info->num_regsets = 0;
6779 info->regsets[info->num_regsets].size >= 0;
6780 info->num_regsets++)
6781 ;
6782 }
6783 #endif
6784
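/* One-time initialization of the Linux low target: install the
   target vector and breakpoint data, set up signal handling
   (including the SIGCHLD handler that drives async mode), initialize
   the architecture-specific pieces, and probe the kernel's ptrace
   features. */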
6785 void
6786 initialize_low (void)
6787 {
6788 struct sigaction sigchld_action;
6789 memset (&sigchld_action, 0, sizeof (sigchld_action));
6790 set_target_ops (&linux_target_ops);
6791 set_breakpoint_data (the_low_target.breakpoint,
6792 the_low_target.breakpoint_len);
6793 linux_init_signals ();
6794 linux_ptrace_init_warnings ();
6795
6796 sigchld_action.sa_handler = sigchld_handler;
6797 sigemptyset (&sigchld_action.sa_mask);
6798 sigchld_action.sa_flags = SA_RESTART;
6799 sigaction (SIGCHLD, &sigchld_action, NULL);
6800
6801 initialize_low_arch ();
6802
6803 linux_check_ptrace_features ();
6804 }