/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

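/* Compose a waitpid-style status for a process stopped by signal SIG:
   WIFSTOPPED is true of the result, and WSTOPSIG recovers SIG.
   Provided here for systems whose <sys/wait.h> lacks it.  */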
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

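/* A signal, and the siginfo that was reported with it, queued for
   later delivery to an LWP.  */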
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

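/* Remove LWP's thread from the thread list and free the LWP and its
   architecture-specific data.  */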
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = XNEW (struct target_desc);
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

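/* Allocate and initialize a new lwp_info for PTID, give the low
   target a chance to attach its per-thread data, and add the
   corresponding thread to the thread list.  */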
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the arch_setup target_ops method.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

1019
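/* Bookkeeping for second_thread_of_pid_p: the process id to match,
   and the number of matching threads seen so far.  */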
struct counter
{
  int pid;
  int count;
};

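/* Callback for find_inferior.  Counts threads whose pid matches
   COUNTER->pid, and returns nonzero, ending the walk, as soon as a
   second one is seen.  */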
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

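/* Return nonzero if PID's thread is the only thread of that process
   remaining in our list.  */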
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

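/* Kill process PID and all of its LWPs, then mourn it.  Returns -1
   if PID is not a process we know about, else 0.  */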
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it would be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

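/* Callback for `find_inferior'.  Detach from one LWP of process
   *ARGS: discard any pending SIGSTOP, flush its registers, and detach
   with the signal it last stopped for, so that signal isn't lost.  */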
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets
     really messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

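/* Discard all state associated with PROCESS after it has exited or
   been detached: its LWPs, thread-db state, and per-process private
   data.  */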
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

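/* Wait for process PID to exit or be killed by a signal, reaping its
   wait statuses until waitpid reports no more children.  */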
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

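/* Callback for find_inferior.  Returns nonzero if ENTRY's lwp id
   matches the lwp field of the ptid in DATA, falling back to the pid
   field when the lwp field is zero.  */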
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

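/* Return the LWP matching PTID, or NULL if no such LWP is known.  */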
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
                    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
                      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
                      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_thread));
              current_thread = saved_thread;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_thread, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

1938 /* Enqueue one signal in the "signals to report later when out of the
1939 jump pad" list. */
1940
1941 static void
1942 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1943 {
1944 struct pending_signals *p_sig;
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
1947 if (debug_threads)
1948 debug_printf ("Deferring signal %d for LWP %ld.\n",
1949 WSTOPSIG (*wstat), lwpid_of (thread));
1950
1951 if (debug_threads)
1952 {
1953 struct pending_signals *sig;
1954
1955 for (sig = lwp->pending_signals_to_report;
1956 sig != NULL;
1957 sig = sig->prev)
1958 debug_printf (" Already queued %d\n",
1959 sig->signal);
1960
1961 debug_printf (" (no more currently queued signals)\n");
1962 }
1963
1964 /* Don't enqueue non-RT signals if they are already in the deferred
1965 queue. (SIGSTOP is the easiest signal to see ending up here
1966 twice.) */
1967 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1968 {
1969 struct pending_signals *sig;
1970
1971 for (sig = lwp->pending_signals_to_report;
1972 sig != NULL;
1973 sig = sig->prev)
1974 {
1975 if (sig->signal == WSTOPSIG (*wstat))
1976 {
1977 if (debug_threads)
1978 debug_printf ("Not requeuing already queued non-RT signal %d"
1979 " for LWP %ld\n",
1980 sig->signal,
1981 lwpid_of (thread));
1982 return;
1983 }
1984 }
1985 }
1986
1987 p_sig = XCNEW (struct pending_signals);
1988 p_sig->prev = lwp->pending_signals_to_report;
1989 p_sig->signal = WSTOPSIG (*wstat);
1990
1991 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1992 &p_sig->info);
1993
1994 lwp->pending_signals_to_report = p_sig;
1995 }
1996
1997 /* Dequeue one signal from the "signals to report later when out of
1998 the jump pad" list. */
1999
2000 static int
2001 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2002 {
2003 struct thread_info *thread = get_lwp_thread (lwp);
2004
2005 if (lwp->pending_signals_to_report != NULL)
2006 {
2007 struct pending_signals **p_sig;
2008
2009 p_sig = &lwp->pending_signals_to_report;
2010 while ((*p_sig)->prev != NULL)
2011 p_sig = &(*p_sig)->prev;
2012
2013 *wstat = W_STOPCODE ((*p_sig)->signal);
2014 if ((*p_sig)->info.si_signo != 0)
2015 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2016 &(*p_sig)->info);
2017 free (*p_sig);
2018 *p_sig = NULL;
2019
2020 if (debug_threads)
2021 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2022 WSTOPSIG (*wstat), lwpid_of (thread));
2023
2024 if (debug_threads)
2025 {
2026 struct pending_signals *sig;
2027
2028 for (sig = lwp->pending_signals_to_report;
2029 sig != NULL;
2030 sig = sig->prev)
2031 debug_printf (" Still queued %d\n",
2032 sig->signal);
2033
2034 debug_printf (" (no more queued signals)\n");
2035 }
2036
2037 return 1;
2038 }
2039
2040 return 0;
2041 }
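
/* Illustrative sketch only (kept out of the build with #if 0): the
   deferral round trip implemented by the two functions above.  The
   driving function is hypothetical; the calls are the real ones from
   this file.  */
#if 0
static void
example_defer_and_replay (struct lwp_info *lwp)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* While the LWP is still inside the jump pad, park the signal
     instead of reporting it.  */
  enqueue_one_deferred_signal (lwp, &wstat);

  /* Once the LWP is out of the jump pad, replay the oldest deferred
     signal.  dequeue_one_deferred_signal also restores the cached
     siginfo via PTRACE_SETSIGINFO, so the inferior sees the original
     signal, not a synthetic one.  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    linux_resume_one_lwp (lwp, 0, WSTOPSIG (wstat), NULL);
}
#endif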
2042
2043 /* Fetch the possibly triggered data watchpoint info and store it in
2044 CHILD.
2045
2046 On some archs, like x86, that use debug registers to set
2047 watchpoints, the way to know which watched address trapped is to
2048 check the register that is used to select which address to watch.
2049 The problem is that between setting the watchpoint and reading
2050 back which data address trapped, the user may change the set of
2051 watchpoints, and, as a consequence, GDB changes the debug
2052 registers in the inferior. To avoid reading back a stale
2053 stopped-data-address when that happens, we cache in CHILD the fact
2054 that a watchpoint trapped, and the corresponding data address, as
2055 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2056 registers meanwhile, we have the cached data we can rely on. */
2057
2058 static int
2059 check_stopped_by_watchpoint (struct lwp_info *child)
2060 {
2061 if (the_low_target.stopped_by_watchpoint != NULL)
2062 {
2063 struct thread_info *saved_thread;
2064
2065 saved_thread = current_thread;
2066 current_thread = get_lwp_thread (child);
2067
2068 if (the_low_target.stopped_by_watchpoint ())
2069 {
2070 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2071
2072 if (the_low_target.stopped_data_address != NULL)
2073 child->stopped_data_address
2074 = the_low_target.stopped_data_address ();
2075 else
2076 child->stopped_data_address = 0;
2077 }
2078
2079 current_thread = saved_thread;
2080 }
2081
2082 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2083 }
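
/* Illustrative sketch only (kept out of the build with #if 0): the
   two low-target hooks check_stopped_by_watchpoint consults.  The
   example_* bodies are hypothetical stand-ins for an arch backend.  */
#if 0
static int
example_stopped_by_watchpoint (void)
{
  /* A real backend would inspect the current thread's debug status
     (e.g. DR6 on x86) and return nonzero iff a data watchpoint
     fired.  */
  return 0;
}

static CORE_ADDR
example_stopped_data_address (void)
{
  /* A real backend would return the watched address that trapped,
     or 0 if it cannot be determined.  */
  return 0;
}
#endif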
2084
2085 /* Return the ptrace options that we want to try to enable. */
2086
2087 static int
2088 linux_low_ptrace_options (int attached)
2089 {
2090 int options = 0;
2091
2092 if (!attached)
2093 options |= PTRACE_O_EXITKILL;
2094
2095 if (report_fork_events)
2096 options |= PTRACE_O_TRACEFORK;
2097
2098 if (report_vfork_events)
2099 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2100
2101 return options;
2102 }
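
/* Illustrative sketch only (kept out of the build with #if 0): how
   the option mask computed above reaches the kernel.  In this file
   the actual call is made via linux_enable_event_reporting; the raw
   equivalent would look like this hypothetical helper.  */
#if 0
static void
example_enable_options (int lwpid)
{
  /* For a process we launched ourselves (!attached), this includes
     PTRACE_O_EXITKILL, so the inferior dies with gdbserver, plus
     fork/vfork tracing if the client requested those events.  */
  int options = linux_low_ptrace_options (/* attached */ 0);

  ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (uintptr_t) options);
}
#endif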
2103
2104 /* Do low-level handling of the event, and check if we should go on
2105 and pass it to caller code. Return the affected lwp if we are, or
2106 NULL otherwise. */
2107
2108 static struct lwp_info *
2109 linux_low_filter_event (int lwpid, int wstat)
2110 {
2111 struct lwp_info *child;
2112 struct thread_info *thread;
2113 int have_stop_pc = 0;
2114
2115 child = find_lwp_pid (pid_to_ptid (lwpid));
2116
2117 /* If we didn't find a process, one of two things presumably happened:
2118 - A process we started and then detached from has exited. Ignore it.
2119 - A process we are controlling has forked and the new child's stop
2120 was reported to us by the kernel. Save its PID. */
2121 if (child == NULL && WIFSTOPPED (wstat))
2122 {
2123 add_to_pid_list (&stopped_pids, lwpid, wstat);
2124 return NULL;
2125 }
2126 else if (child == NULL)
2127 return NULL;
2128
2129 thread = get_lwp_thread (child);
2130
2131 child->stopped = 1;
2132
2133 child->last_status = wstat;
2134
2135 /* Check if the thread has exited. */
2136 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2137 {
2138 if (debug_threads)
2139 debug_printf ("LLFE: %d exited.\n", lwpid);
2140 if (num_lwps (pid_of (thread)) > 1)
2141 {
2142
2143 /* If there is at least one more LWP, then this exit event was
2144 not the end of the debugged application and should be
2145 ignored. */
2146 delete_lwp (child);
2147 return NULL;
2148 }
2149 else
2150 {
2151 /* This was the last lwp in the process. Since events are
2152 serialized to GDB core, and we can't report this one
2153 right now, but GDB core and the other target layers will
2154 want to be notified about the exit code/signal, leave the
2155 status pending for the next time we're able to report
2156 it. */
2157 mark_lwp_dead (child, wstat);
2158 return child;
2159 }
2160 }
2161
2162 gdb_assert (WIFSTOPPED (wstat));
2163
2164 if (WIFSTOPPED (wstat))
2165 {
2166 struct process_info *proc;
2167
2168 /* Architecture-specific setup after inferior is running. */
2169 proc = find_process_pid (pid_of (thread));
2170 if (proc->tdesc == NULL)
2171 {
2172 if (proc->attached)
2173 {
2174 struct thread_info *saved_thread;
2175
2176 /* This needs to happen after we have attached to the
2177 inferior and it is stopped for the first time, but
2178 before we access any inferior registers. */
2179 saved_thread = current_thread;
2180 current_thread = thread;
2181
2182 the_low_target.arch_setup ();
2183
2184 current_thread = saved_thread;
2185 }
2186 else
2187 {
2188 /* The process is started, but GDBserver will do
2189 architecture-specific setup after the program stops at
2190 the first instruction. */
2191 child->status_pending_p = 1;
2192 child->status_pending = wstat;
2193 return child;
2194 }
2195 }
2196 }
2197
2198 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2199 {
2200 struct process_info *proc = find_process_pid (pid_of (thread));
2201 int options = linux_low_ptrace_options (proc->attached);
2202
2203 linux_enable_event_reporting (lwpid, options);
2204 child->must_set_ptrace_flags = 0;
2205 }
2206
2207 /* Be careful to not overwrite stop_pc until
2208 check_stopped_by_breakpoint is called. */
2209 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2210 && linux_is_extended_waitstatus (wstat))
2211 {
2212 child->stop_pc = get_pc (child);
2213 if (handle_extended_wait (child, wstat))
2214 {
2215 /* The event has been handled, so just return without
2216 reporting it. */
2217 return NULL;
2218 }
2219 }
2220
2221 /* Check first whether this was a SW/HW breakpoint before checking
2222 watchpoints, because at least s390 can't tell the data address of
2223 hardware watchpoint hits, and returns stopped-by-watchpoint as
2224 long as there's a watchpoint set. */
2225 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2226 {
2227 if (check_stopped_by_breakpoint (child))
2228 have_stop_pc = 1;
2229 }
2230
2231 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2232 or hardware watchpoint. Check which is which if we got
2233 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2234 stepped an instruction that triggered a watchpoint. In that
2235 case, on some architectures (such as x86), instead of
2236 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2237 the debug registers separately. */
2238 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2239 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2240 check_stopped_by_watchpoint (child);
2241
2242 if (!have_stop_pc)
2243 child->stop_pc = get_pc (child);
2244
2245 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2246 && child->stop_expected)
2247 {
2248 if (debug_threads)
2249 debug_printf ("Expected stop.\n");
2250 child->stop_expected = 0;
2251
2252 if (thread->last_resume_kind == resume_stop)
2253 {
2254 /* We want to report the stop to the core. Treat the
2255 SIGSTOP as a normal event. */
2256 if (debug_threads)
2257 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2258 target_pid_to_str (ptid_of (thread)));
2259 }
2260 else if (stopping_threads != NOT_STOPPING_THREADS)
2261 {
2262 /* Stopping threads. We don't want this SIGSTOP to end up
2263 pending. */
2264 if (debug_threads)
2265 debug_printf ("LLW: SIGSTOP caught for %s "
2266 "while stopping threads.\n",
2267 target_pid_to_str (ptid_of (thread)));
2268 return NULL;
2269 }
2270 else
2271 {
2272 /* This is a delayed SIGSTOP. Filter out the event. */
2273 if (debug_threads)
2274 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2275 child->stepping ? "step" : "continue",
2276 target_pid_to_str (ptid_of (thread)));
2277
2278 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2279 return NULL;
2280 }
2281 }
2282
2283 child->status_pending_p = 1;
2284 child->status_pending = wstat;
2285 return child;
2286 }
2287
2288 /* Resume LWPs that are currently stopped without any pending status
2289 to report, but are resumed from the core's perspective. */
2290
2291 static void
2292 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2293 {
2294 struct thread_info *thread = (struct thread_info *) entry;
2295 struct lwp_info *lp = get_thread_lwp (thread);
2296
2297 if (lp->stopped
2298 && !lp->suspended
2299 && !lp->status_pending_p
2300 && thread->last_resume_kind != resume_stop
2301 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2302 {
2303 int step = thread->last_resume_kind == resume_step;
2304
2305 if (debug_threads)
2306 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2307 target_pid_to_str (ptid_of (thread)),
2308 paddress (lp->stop_pc),
2309 step);
2310
2311 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2312 }
2313 }
2314
2315 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2316 match FILTER_PTID (leaving others pending). The PTIDs can be:
2317 minus_one_ptid, to specify any child; a pid PTID, specifying all
2318 lwps of a thread group; or a PTID representing a single lwp. Store
2319 the stop status through the status pointer WSTAT. OPTIONS is
2320 passed to the waitpid call. Return 0 if no event was found and
2321 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2322 were found. Return the PID of the stopped child otherwise. */
2323
2324 static int
2325 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2326 int *wstatp, int options)
2327 {
2328 struct thread_info *event_thread;
2329 struct lwp_info *event_child, *requested_child;
2330 sigset_t block_mask, prev_mask;
2331
2332 retry:
2333 /* N.B. event_thread points to the thread_info struct that contains
2334 event_child. Keep them in sync. */
2335 event_thread = NULL;
2336 event_child = NULL;
2337 requested_child = NULL;
2338
2339 /* Check for a lwp with a pending status. */
2340
2341 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2342 {
2343 event_thread = (struct thread_info *)
2344 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2345 if (event_thread != NULL)
2346 event_child = get_thread_lwp (event_thread);
2347 if (debug_threads && event_thread)
2348 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2349 }
2350 else if (!ptid_equal (filter_ptid, null_ptid))
2351 {
2352 requested_child = find_lwp_pid (filter_ptid);
2353
2354 if (stopping_threads == NOT_STOPPING_THREADS
2355 && requested_child->status_pending_p
2356 && requested_child->collecting_fast_tracepoint)
2357 {
2358 enqueue_one_deferred_signal (requested_child,
2359 &requested_child->status_pending);
2360 requested_child->status_pending_p = 0;
2361 requested_child->status_pending = 0;
2362 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2363 }
2364
2365 if (requested_child->suspended
2366 && requested_child->status_pending_p)
2367 {
2368 internal_error (__FILE__, __LINE__,
2369 "requesting an event out of a"
2370 " suspended child?");
2371 }
2372
2373 if (requested_child->status_pending_p)
2374 {
2375 event_child = requested_child;
2376 event_thread = get_lwp_thread (event_child);
2377 }
2378 }
2379
2380 if (event_child != NULL)
2381 {
2382 if (debug_threads)
2383 debug_printf ("Got an event from pending child %ld (%04x)\n",
2384 lwpid_of (event_thread), event_child->status_pending);
2385 *wstatp = event_child->status_pending;
2386 event_child->status_pending_p = 0;
2387 event_child->status_pending = 0;
2388 current_thread = event_thread;
2389 return lwpid_of (event_thread);
2390 }
2391
2392 /* But if we don't find a pending event, we'll have to wait.
2393
2394 We only enter this loop if no process has a pending wait status.
2395 Thus any action taken in response to a wait status inside this
2396 loop is responding as soon as we detect the status, not after any
2397 pending events. */
2398
2399 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2400 all signals while here. */
2401 sigfillset (&block_mask);
2402 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2403
2404 /* Always pull all events out of the kernel. We'll randomly select
2405 an event LWP out of all that have events, to prevent
2406 starvation. */
2407 while (event_child == NULL)
2408 {
2409 pid_t ret = 0;
2410
2411 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2412 quirks:
2413
2414 - If the thread group leader exits while other threads in the
2415 thread group still exist, waitpid(TGID, ...) hangs. That
2416 waitpid won't return an exit status until the other threads
2417 in the group are reaped.
2418
2419 - When a non-leader thread execs, that thread just vanishes
2420 without reporting an exit (so we'd hang if we waited for it
2421 explicitly in that case). The exec event is reported to
2422 the TGID pid (although we don't currently enable exec
2423 events). */
2424 errno = 0;
2425 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2426
2427 if (debug_threads)
2428 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2429 ret, errno ? strerror (errno) : "ERRNO-OK");
2430
2431 if (ret > 0)
2432 {
2433 if (debug_threads)
2434 {
2435 debug_printf ("LLW: waitpid %ld received %s\n",
2436 (long) ret, status_to_str (*wstatp));
2437 }
2438
2439 /* Filter all events. IOW, leave all events pending. We'll
2440 randomly select an event LWP out of all that have events
2441 below. */
2442 linux_low_filter_event (ret, *wstatp);
2443 /* Retry until nothing comes out of waitpid. A single
2444 SIGCHLD can indicate more than one child stopped. */
2445 continue;
2446 }
2447
2448 /* Now that we've pulled all events out of the kernel, resume
2449 LWPs that don't have an interesting event to report. */
2450 if (stopping_threads == NOT_STOPPING_THREADS)
2451 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2452
2453 /* ... and find an LWP with a status to report to the core, if
2454 any. */
2455 event_thread = (struct thread_info *)
2456 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2457 if (event_thread != NULL)
2458 {
2459 event_child = get_thread_lwp (event_thread);
2460 *wstatp = event_child->status_pending;
2461 event_child->status_pending_p = 0;
2462 event_child->status_pending = 0;
2463 break;
2464 }
2465
2466 /* Check for zombie thread group leaders. Those can't be reaped
2467 until all other threads in the thread group are. */
2468 check_zombie_leaders ();
2469
2470 /* If there are no resumed children left in the set of LWPs we
2471 want to wait for, bail. We can't just block in
2472 waitpid/sigsuspend, because lwps might have been left stopped
2473 in trace-stop state, and we'd be stuck forever waiting for
2474 their status to change (which would only happen if we resumed
2475 them). Even if WNOHANG is set, this return code is preferred
2476 over 0 (below), as it is more detailed. */
2477 if ((find_inferior (&all_threads,
2478 not_stopped_callback,
2479 &wait_ptid) == NULL))
2480 {
2481 if (debug_threads)
2482 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2483 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2484 return -1;
2485 }
2486
2487 /* No interesting event to report to the caller. */
2488 if ((options & WNOHANG))
2489 {
2490 if (debug_threads)
2491 debug_printf ("WNOHANG set, no event found\n");
2492
2493 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2494 return 0;
2495 }
2496
2497 /* Block until we get an event reported with SIGCHLD. */
2498 if (debug_threads)
2499 debug_printf ("sigsuspend'ing\n");
2500
2501 sigsuspend (&prev_mask);
2502 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2503 goto retry;
2504 }
2505
2506 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2507
2508 current_thread = event_thread;
2509
2510 /* Check for thread exit. */
2511 if (! WIFSTOPPED (*wstatp))
2512 {
2513 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2514
2515 if (debug_threads)
2516 debug_printf ("LWP %d is the last lwp of process. "
2517 "Process %ld exiting.\n",
2518 pid_of (event_thread), lwpid_of (event_thread));
2519 return lwpid_of (event_thread);
2520 }
2521
2522 return lwpid_of (event_thread);
2523 }
2524
2525 /* Wait for an event from child(ren) PTID. PTIDs can be:
2526 minus_one_ptid, to specify any child; a pid PTID, specifying all
2527 lwps of a thread group; or a PTID representing a single lwp. Store
2528 the stop status through the status pointer WSTAT. OPTIONS is
2529 passed to the waitpid call. Return 0 if no event was found and
2530 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2531 were found. Return the PID of the stopped child otherwise. */
2532
2533 static int
2534 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2535 {
2536 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2537 }
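
/* Illustrative sketch only (kept out of the build with #if 0): the
   two calling patterns the comment above describes, wrapped in a
   hypothetical driver function.  */
#if 0
static void
example_wait_patterns (void)
{
  int wstat;
  int pid;

  /* Blocking: wait for the next event from any child.  */
  pid = linux_wait_for_event (minus_one_ptid, &wstat, __WALL);

  /* Polling: with WNOHANG, 0 means "no event yet"; -1 means no
     unwaited-for (resumed) children were left.  */
  pid = linux_wait_for_event (minus_one_ptid, &wstat, __WALL | WNOHANG);
}
#endif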
2538
2539 /* Count the LWPs that have had events. */
2540
2541 static int
2542 count_events_callback (struct inferior_list_entry *entry, void *data)
2543 {
2544 struct thread_info *thread = (struct thread_info *) entry;
2545 struct lwp_info *lp = get_thread_lwp (thread);
2546 int *count = data;
2547
2548 gdb_assert (count != NULL);
2549
2550 /* Count only resumed LWPs that have an event pending. */
2551 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2552 && lp->status_pending_p)
2553 (*count)++;
2554
2555 return 0;
2556 }
2557
2558 /* Select the LWP (if any) that is currently being single-stepped. */
2559
2560 static int
2561 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2562 {
2563 struct thread_info *thread = (struct thread_info *) entry;
2564 struct lwp_info *lp = get_thread_lwp (thread);
2565
2566 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2567 && thread->last_resume_kind == resume_step
2568 && lp->status_pending_p)
2569 return 1;
2570 else
2571 return 0;
2572 }
2573
2574 /* Select the Nth LWP that has had an event. */
2575
2576 static int
2577 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2578 {
2579 struct thread_info *thread = (struct thread_info *) entry;
2580 struct lwp_info *lp = get_thread_lwp (thread);
2581 int *selector = data;
2582
2583 gdb_assert (selector != NULL);
2584
2585 /* Select only resumed LWPs that have an event pending. */
2586 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2587 && lp->status_pending_p)
2588 if ((*selector)-- == 0)
2589 return 1;
2590
2591 return 0;
2592 }
2593
2594 /* Select one LWP out of those that have events pending. */
2595
2596 static void
2597 select_event_lwp (struct lwp_info **orig_lp)
2598 {
2599 int num_events = 0;
2600 int random_selector;
2601 struct thread_info *event_thread = NULL;
2602
2603 /* In all-stop, give preference to the LWP that is being
2604 single-stepped. There will be at most one, and it's the LWP that
2605 the core is most interested in. If we didn't do this, then we'd
2606 have to handle pending step SIGTRAPs somehow in case the core
2607 later continues the previously-stepped thread, otherwise we'd
2608 report the pending SIGTRAP, and the core, not having stepped the
2609 thread, wouldn't understand what the trap was for, and therefore
2610 would report it to the user as a random signal. */
2611 if (!non_stop)
2612 {
2613 event_thread
2614 = (struct thread_info *) find_inferior (&all_threads,
2615 select_singlestep_lwp_callback,
2616 NULL);
2617 if (event_thread != NULL)
2618 {
2619 if (debug_threads)
2620 debug_printf ("SEL: Select single-step %s\n",
2621 target_pid_to_str (ptid_of (event_thread)));
2622 }
2623 }
2624 if (event_thread == NULL)
2625 {
2626 /* No single-stepping LWP. Select one at random, out of those
2627 which have had events. */
2628
2629 /* First see how many events we have. */
2630 find_inferior (&all_threads, count_events_callback, &num_events);
2631 gdb_assert (num_events > 0);
2632
2633 /* Now randomly pick a LWP out of those that have had
2634 events. */
2635 random_selector = (int)
2636 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2637
2638 if (debug_threads && num_events > 1)
2639 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2640 num_events, random_selector);
2641
2642 event_thread
2643 = (struct thread_info *) find_inferior (&all_threads,
2644 select_event_lwp_callback,
2645 &random_selector);
2646 }
2647
2648 if (event_thread != NULL)
2649 {
2650 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2651
2652 /* Switch the event LWP. */
2653 *orig_lp = event_lp;
2654 }
2655 }
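
/* Worked example (kept out of the build with #if 0): the scaling in
   select_event_lwp maps rand ()'s range [0, RAND_MAX] uniformly onto
   {0, ..., num_events - 1}; e.g. with three pending-event LWPs each
   is chosen with probability 1/3, which is what prevents
   starvation.  */
#if 0
static void
example_random_selector (void)
{
  int num_events = 3;
  int random_selector
    = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  gdb_assert (random_selector >= 0 && random_selector < num_events);
}
#endif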
2656
2657 /* Decrement the suspend count of an LWP. */
2658
2659 static int
2660 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2661 {
2662 struct thread_info *thread = (struct thread_info *) entry;
2663 struct lwp_info *lwp = get_thread_lwp (thread);
2664
2665 /* Ignore EXCEPT. */
2666 if (lwp == except)
2667 return 0;
2668
2669 lwp_suspended_decr (lwp);
2670 return 0;
2671 }
2672
2673 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2674 non-NULL. */
2675
2676 static void
2677 unsuspend_all_lwps (struct lwp_info *except)
2678 {
2679 find_inferior (&all_threads, unsuspend_one_lwp, except);
2680 }
2681
2682 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2683 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2684 void *data);
2685 static int lwp_running (struct inferior_list_entry *entry, void *data);
2686 static ptid_t linux_wait_1 (ptid_t ptid,
2687 struct target_waitstatus *ourstatus,
2688 int target_options);
2689
2690 /* Stabilize threads (move out of jump pads).
2691
2692 If a thread is midway collecting a fast tracepoint, we need to
2693 finish the collection and move it out of the jump pad before
2694 reporting the signal.
2695
2696 This avoids recursion while collecting (when a signal arrives
2697 midway, and the signal handler itself collects), which would trash
2698 the trace buffer. If the user sets a breakpoint in a signal
2699 handler, this also avoids the backtrace showing the jump pad.
2700 Most importantly, there are certain things we can't do safely if
2701 threads are stopped in a jump pad (or in its callees). For
2702 example:
2703
2704 - starting a new trace run. A thread still collecting the
2705 previous run could trash the trace buffer when resumed. The trace
2706 buffer control structures would have been reset, but the thread
2707 would have no way to tell. The thread could even be midway through
2708 memcpy'ing to the buffer, so when resumed it would clobber the
2709 trace buffer that had been set up for the new run.
2710
2711 - we can't rewrite/reuse the jump pads for new tracepoints
2712 safely. Say you do tstart while a thread is stopped midway through
2713 collecting. When the thread is later resumed, it finishes the
2714 collection and returns to the jump pad, to execute the original
2715 instruction that was under the tracepoint jump at the time the
2716 older run was started. If the jump pad has been rewritten in the
2717 meantime for something else in the new run, the thread would now
2718 execute wrong/random instructions. */
2719
2720 static void
2721 linux_stabilize_threads (void)
2722 {
2723 struct thread_info *saved_thread;
2724 struct thread_info *thread_stuck;
2725
2726 thread_stuck
2727 = (struct thread_info *) find_inferior (&all_threads,
2728 stuck_in_jump_pad_callback,
2729 NULL);
2730 if (thread_stuck != NULL)
2731 {
2732 if (debug_threads)
2733 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2734 lwpid_of (thread_stuck));
2735 return;
2736 }
2737
2738 saved_thread = current_thread;
2739
2740 stabilizing_threads = 1;
2741
2742 /* Kick 'em all. */
2743 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2744
2745 /* Loop until all are stopped out of the jump pads. */
2746 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2747 {
2748 struct target_waitstatus ourstatus;
2749 struct lwp_info *lwp;
2750 int wstat;
2751
2752 /* Note that we go through the full wait event loop. While
2753 moving threads out of the jump pad, we need to be able to step
2754 over internal breakpoints and such. */
2755 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2756
2757 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2758 {
2759 lwp = get_thread_lwp (current_thread);
2760
2761 /* Lock it. */
2762 lwp_suspended_inc (lwp);
2763
2764 if (ourstatus.value.sig != GDB_SIGNAL_0
2765 || current_thread->last_resume_kind == resume_stop)
2766 {
2767 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2768 enqueue_one_deferred_signal (lwp, &wstat);
2769 }
2770 }
2771 }
2772
2773 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2774
2775 stabilizing_threads = 0;
2776
2777 current_thread = saved_thread;
2778
2779 if (debug_threads)
2780 {
2781 thread_stuck
2782 = (struct thread_info *) find_inferior (&all_threads,
2783 stuck_in_jump_pad_callback,
2784 NULL);
2785 if (thread_stuck != NULL)
2786 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2787 lwpid_of (thread_stuck));
2788 }
2789 }
2790
2791 static void async_file_mark (void);
2792
2793 /* Convenience function that is called when the kernel reports an
2794 event that is not passed out to GDB. */
2795
2796 static ptid_t
2797 ignore_event (struct target_waitstatus *ourstatus)
2798 {
2799 /* If we got an event, there may still be others, as a single
2800 SIGCHLD can indicate more than one child stopped. This forces
2801 another target_wait call. */
2802 async_file_mark ();
2803
2804 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2805 return null_ptid;
2806 }
2807
2808 /* Wait for an event from the inferior, and return its status. */
2809
2810 static ptid_t
2811 linux_wait_1 (ptid_t ptid,
2812 struct target_waitstatus *ourstatus, int target_options)
2813 {
2814 int w;
2815 struct lwp_info *event_child;
2816 int options;
2817 int pid;
2818 int step_over_finished;
2819 int bp_explains_trap;
2820 int maybe_internal_trap;
2821 int report_to_gdb;
2822 int trace_event;
2823 int in_step_range;
2824
2825 if (debug_threads)
2826 {
2827 debug_enter ();
2828 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2829 }
2830
2831 /* Translate generic target options into linux options. */
2832 options = __WALL;
2833 if (target_options & TARGET_WNOHANG)
2834 options |= WNOHANG;
2835
2836 bp_explains_trap = 0;
2837 trace_event = 0;
2838 in_step_range = 0;
2839 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2840
2841 if (ptid_equal (step_over_bkpt, null_ptid))
2842 pid = linux_wait_for_event (ptid, &w, options);
2843 else
2844 {
2845 if (debug_threads)
2846 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2847 target_pid_to_str (step_over_bkpt));
2848 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2849 }
2850
2851 if (pid == 0)
2852 {
2853 gdb_assert (target_options & TARGET_WNOHANG);
2854
2855 if (debug_threads)
2856 {
2857 debug_printf ("linux_wait_1 ret = null_ptid, "
2858 "TARGET_WAITKIND_IGNORE\n");
2859 debug_exit ();
2860 }
2861
2862 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2863 return null_ptid;
2864 }
2865 else if (pid == -1)
2866 {
2867 if (debug_threads)
2868 {
2869 debug_printf ("linux_wait_1 ret = null_ptid, "
2870 "TARGET_WAITKIND_NO_RESUMED\n");
2871 debug_exit ();
2872 }
2873
2874 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2875 return null_ptid;
2876 }
2877
2878 event_child = get_thread_lwp (current_thread);
2879
2880 /* linux_wait_for_event only returns an exit status for the last
2881 child of a process. Report it. */
2882 if (WIFEXITED (w) || WIFSIGNALED (w))
2883 {
2884 if (WIFEXITED (w))
2885 {
2886 ourstatus->kind = TARGET_WAITKIND_EXITED;
2887 ourstatus->value.integer = WEXITSTATUS (w);
2888
2889 if (debug_threads)
2890 {
2891 debug_printf ("linux_wait_1 ret = %s, exited with "
2892 "retcode %d\n",
2893 target_pid_to_str (ptid_of (current_thread)),
2894 WEXITSTATUS (w));
2895 debug_exit ();
2896 }
2897 }
2898 else
2899 {
2900 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2901 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2902
2903 if (debug_threads)
2904 {
2905 debug_printf ("linux_wait_1 ret = %s, terminated with "
2906 "signal %d\n",
2907 target_pid_to_str (ptid_of (current_thread)),
2908 WTERMSIG (w));
2909 debug_exit ();
2910 }
2911 }
2912
2913 return ptid_of (current_thread);
2914 }
2915
2916 /* If step-over executes a breakpoint instruction, it means a
2917 gdb/gdbserver breakpoint had been planted on top of a permanent
2918 breakpoint. The PC has been adjusted by
2919 check_stopped_by_breakpoint to point at the breakpoint address.
2920 Advance the PC manually past the breakpoint, otherwise the
2921 program would keep trapping the permanent breakpoint forever. */
2922 if (!ptid_equal (step_over_bkpt, null_ptid)
2923 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2924 {
2925 unsigned int increment_pc = the_low_target.breakpoint_len;
2926
2927 if (debug_threads)
2928 {
2929 debug_printf ("step-over for %s executed software breakpoint\n",
2930 target_pid_to_str (ptid_of (current_thread)));
2931 }
2932
2933 if (increment_pc != 0)
2934 {
2935 struct regcache *regcache
2936 = get_thread_regcache (current_thread, 1);
2937
2938 event_child->stop_pc += increment_pc;
2939 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2940
2941 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2942 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2943 }
2944 }
2945
2946 /* If this event was not handled before, and is not a SIGTRAP, we
2947 report it. SIGILL and SIGSEGV are also treated as traps in case
2948 a breakpoint is inserted at the current PC. If this target does
2949 not support internal breakpoints at all, we also report the
2950 SIGTRAP without further processing; it's of no concern to us. */
2951 maybe_internal_trap
2952 = (supports_breakpoints ()
2953 && (WSTOPSIG (w) == SIGTRAP
2954 || ((WSTOPSIG (w) == SIGILL
2955 || WSTOPSIG (w) == SIGSEGV)
2956 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2957
2958 if (maybe_internal_trap)
2959 {
2960 /* Handle anything that requires bookkeeping before deciding to
2961 report the event or continue waiting. */
2962
2963 /* First check if we can explain the SIGTRAP with an internal
2964 breakpoint, or if we should possibly report the event to GDB.
2965 Do this before anything that may remove or insert a
2966 breakpoint. */
2967 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2968
2969 /* We have a SIGTRAP, possibly a step-over dance has just
2970 finished. If so, tweak the state machine accordingly,
2971 reinsert breakpoints and delete any reinsert (software
2972 single-step) breakpoints. */
2973 step_over_finished = finish_step_over (event_child);
2974
2975 /* Now invoke the callbacks of any internal breakpoints there. */
2976 check_breakpoints (event_child->stop_pc);
2977
2978 /* Handle tracepoint data collecting. This may overflow the
2979 trace buffer, and cause a tracing stop, removing
2980 breakpoints. */
2981 trace_event = handle_tracepoints (event_child);
2982
2983 if (bp_explains_trap)
2984 {
2985 /* If we stepped or ran into an internal breakpoint, we've
2986 already handled it. So next time we resume (from this
2987 PC), we should step over it. */
2988 if (debug_threads)
2989 debug_printf ("Hit a gdbserver breakpoint.\n");
2990
2991 if (breakpoint_here (event_child->stop_pc))
2992 event_child->need_step_over = 1;
2993 }
2994 }
2995 else
2996 {
2997 /* We have some other signal, possibly a step-over dance was in
2998 progress, and it should be cancelled too. */
2999 step_over_finished = finish_step_over (event_child);
3000 }
3001
3002 /* We have all the data we need. Either report the event to GDB, or
3003 resume threads and keep waiting for more. */
3004
3005 /* If we're collecting a fast tracepoint, finish the collection and
3006 move out of the jump pad before delivering a signal. See
3007 linux_stabilize_threads. */
3008
3009 if (WIFSTOPPED (w)
3010 && WSTOPSIG (w) != SIGTRAP
3011 && supports_fast_tracepoints ()
3012 && agent_loaded_p ())
3013 {
3014 if (debug_threads)
3015 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3016 "to defer or adjust it.\n",
3017 WSTOPSIG (w), lwpid_of (current_thread));
3018
3019 /* Allow debugging the jump pad itself. */
3020 if (current_thread->last_resume_kind != resume_step
3021 && maybe_move_out_of_jump_pad (event_child, &w))
3022 {
3023 enqueue_one_deferred_signal (event_child, &w);
3024
3025 if (debug_threads)
3026 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3027 WSTOPSIG (w), lwpid_of (current_thread));
3028
3029 linux_resume_one_lwp (event_child, 0, 0, NULL);
3030
3031 return ignore_event (ourstatus);
3032 }
3033 }
3034
3035 if (event_child->collecting_fast_tracepoint)
3036 {
3037 if (debug_threads)
3038 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3039 "Check if we're already there.\n",
3040 lwpid_of (current_thread),
3041 event_child->collecting_fast_tracepoint);
3042
3043 trace_event = 1;
3044
3045 event_child->collecting_fast_tracepoint
3046 = linux_fast_tracepoint_collecting (event_child, NULL);
3047
3048 if (event_child->collecting_fast_tracepoint != 1)
3049 {
3050 /* No longer need this breakpoint. */
3051 if (event_child->exit_jump_pad_bkpt != NULL)
3052 {
3053 if (debug_threads)
3054 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3055 "stopping all threads momentarily.\n");
3056
3057 /* Other running threads could hit this breakpoint.
3058 We don't handle moribund locations like GDB does,
3059 instead we always pause all threads when removing
3060 breakpoints, so that any step-over or
3061 decr_pc_after_break adjustment is always taken
3062 care of while the breakpoint is still
3063 inserted. */
3064 stop_all_lwps (1, event_child);
3065
3066 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3067 event_child->exit_jump_pad_bkpt = NULL;
3068
3069 unstop_all_lwps (1, event_child);
3070
3071 gdb_assert (event_child->suspended >= 0);
3072 }
3073 }
3074
3075 if (event_child->collecting_fast_tracepoint == 0)
3076 {
3077 if (debug_threads)
3078 debug_printf ("fast tracepoint finished "
3079 "collecting successfully.\n");
3080
3081 /* We may have a deferred signal to report. */
3082 if (dequeue_one_deferred_signal (event_child, &w))
3083 {
3084 if (debug_threads)
3085 debug_printf ("dequeued one signal.\n");
3086 }
3087 else
3088 {
3089 if (debug_threads)
3090 debug_printf ("no deferred signals.\n");
3091
3092 if (stabilizing_threads)
3093 {
3094 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3095 ourstatus->value.sig = GDB_SIGNAL_0;
3096
3097 if (debug_threads)
3098 {
3099 debug_printf ("linux_wait_1 ret = %s, stopped "
3100 "while stabilizing threads\n",
3101 target_pid_to_str (ptid_of (current_thread)));
3102 debug_exit ();
3103 }
3104
3105 return ptid_of (current_thread);
3106 }
3107 }
3108 }
3109 }
3110
3111 /* Check whether GDB would be interested in this event. */
3112
3113 /* If GDB is not interested in this signal, don't stop other
3114 threads, and don't report it to GDB. Just resume the inferior
3115 right away. We do this for threading-related signals as well as
3116 any that GDB specifically requested we ignore. But never ignore
3117 SIGSTOP if we sent it ourselves, and do not ignore signals when
3118 stepping - they may require special handling to skip the signal
3119 handler. Also never ignore signals that could be caused by a
3120 breakpoint. */
3121 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3122 thread library? */
3123 if (WIFSTOPPED (w)
3124 && current_thread->last_resume_kind != resume_step
3125 && (
3126 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3127 (current_process ()->priv->thread_db != NULL
3128 && (WSTOPSIG (w) == __SIGRTMIN
3129 || WSTOPSIG (w) == __SIGRTMIN + 1))
3130 ||
3131 #endif
3132 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3133 && !(WSTOPSIG (w) == SIGSTOP
3134 && current_thread->last_resume_kind == resume_stop)
3135 && !linux_wstatus_maybe_breakpoint (w))))
3136 {
3137 siginfo_t info, *info_p;
3138
3139 if (debug_threads)
3140 debug_printf ("Ignored signal %d for LWP %ld.\n",
3141 WSTOPSIG (w), lwpid_of (current_thread));
3142
3143 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3144 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3145 info_p = &info;
3146 else
3147 info_p = NULL;
3148
3149 if (step_over_finished)
3150 {
3151 /* We cancelled this thread's step-over above. We still
3152 need to unsuspend all other LWPs, and set them back
3153 running again while the signal handler runs. */
3154 unsuspend_all_lwps (event_child);
3155
3156 /* Enqueue the pending signal info so that proceed_all_lwps
3157 doesn't lose it. */
3158 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3159
3160 proceed_all_lwps ();
3161 }
3162 else
3163 {
3164 linux_resume_one_lwp (event_child, event_child->stepping,
3165 WSTOPSIG (w), info_p);
3166 }
3167 return ignore_event (ourstatus);
3168 }
3169
3170 /* Note that all addresses are always "out of the step range" when
3171 there's no range to begin with. */
3172 in_step_range = lwp_in_step_range (event_child);
3173
3174 /* If GDB wanted this thread to single step, and the thread is out
3175 of the step range, we always want to report the SIGTRAP, and let
3176 GDB handle it. Watchpoints should always be reported. So should
3177 signals we can't explain. A SIGTRAP we can't explain could be a
3178 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3179 we do, we'll be able to handle GDB breakpoints on top of internal
3180 breakpoints, by handling the internal breakpoint and still
3181 reporting the event to GDB. If we don't, we're out of luck; GDB
3182 won't see the breakpoint hit. If we see a single-step event but
3183 the thread should be continuing, don't pass the trap to gdb.
3184 That indicates that we had previously finished a single-step but
3185 left the single-step pending -- see
3186 complete_ongoing_step_over. */
3187 report_to_gdb = (!maybe_internal_trap
3188 || (current_thread->last_resume_kind == resume_step
3189 && !in_step_range)
3190 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3191 || (!in_step_range
3192 && !bp_explains_trap
3193 && !trace_event
3194 && !step_over_finished
3195 && !(current_thread->last_resume_kind == resume_continue
3196 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3197 || (gdb_breakpoint_here (event_child->stop_pc)
3198 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3199 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3200 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3201
3202 run_breakpoint_commands (event_child->stop_pc);
3203
3204 /* We found no reason GDB would want us to stop. We either hit one
3205 of our own breakpoints, or finished an internal step GDB
3206 shouldn't know about. */
3207 if (!report_to_gdb)
3208 {
3209 if (debug_threads)
3210 {
3211 if (bp_explains_trap)
3212 debug_printf ("Hit a gdbserver breakpoint.\n");
3213 if (step_over_finished)
3214 debug_printf ("Step-over finished.\n");
3215 if (trace_event)
3216 debug_printf ("Tracepoint event.\n");
3217 if (lwp_in_step_range (event_child))
3218 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3219 paddress (event_child->stop_pc),
3220 paddress (event_child->step_range_start),
3221 paddress (event_child->step_range_end));
3222 }
3223
3224 /* We're not reporting this breakpoint to GDB, so apply the
3225 decr_pc_after_break adjustment to the inferior's regcache
3226 ourselves. */
3227
3228 if (the_low_target.set_pc != NULL)
3229 {
3230 struct regcache *regcache
3231 = get_thread_regcache (current_thread, 1);
3232 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3233 }
3234
3235 /* We may have finished stepping over a breakpoint. If so,
3236 we've stopped and suspended all LWPs momentarily except the
3237 stepping one. This is where we resume them all again. We're
3238 going to keep waiting, so use proceed, which handles stepping
3239 over the next breakpoint. */
3240 if (debug_threads)
3241 debug_printf ("proceeding all threads.\n");
3242
3243 if (step_over_finished)
3244 unsuspend_all_lwps (event_child);
3245
3246 proceed_all_lwps ();
3247 return ignore_event (ourstatus);
3248 }
3249
3250 if (debug_threads)
3251 {
3252 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3253 {
3254 char *str;
3255
3256 str = target_waitstatus_to_string (&event_child->waitstatus);
3257 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3258 lwpid_of (get_lwp_thread (event_child)), str);
3259 xfree (str);
3260 }
3261 if (current_thread->last_resume_kind == resume_step)
3262 {
3263 if (event_child->step_range_start == event_child->step_range_end)
3264 debug_printf ("GDB wanted to single-step, reporting event.\n");
3265 else if (!lwp_in_step_range (event_child))
3266 debug_printf ("Out of step range, reporting event.\n");
3267 }
3268 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3269 debug_printf ("Stopped by watchpoint.\n");
3270 else if (gdb_breakpoint_here (event_child->stop_pc))
3271 debug_printf ("Stopped by GDB breakpoint.\n");
3272 /* We're already inside an if (debug_threads) block here. */
3273 debug_printf ("Hit a non-gdbserver trap event.\n");
3274 }
3275
3276 /* Alright, we're going to report a stop. */
3277
3278 if (!stabilizing_threads)
3279 {
3280 /* In all-stop, stop all threads. */
3281 if (!non_stop)
3282 stop_all_lwps (0, NULL);
3283
3284 /* If we're not waiting for a specific LWP, choose an event LWP
3285 from among those that have had events. Giving equal priority
3286 to all LWPs that have had events helps prevent
3287 starvation. */
3288 if (ptid_equal (ptid, minus_one_ptid))
3289 {
3290 event_child->status_pending_p = 1;
3291 event_child->status_pending = w;
3292
3293 select_event_lwp (&event_child);
3294
3295 /* current_thread and event_child must stay in sync. */
3296 current_thread = get_lwp_thread (event_child);
3297
3298 event_child->status_pending_p = 0;
3299 w = event_child->status_pending;
3300 }
3301
3302 if (step_over_finished)
3303 {
3304 if (!non_stop)
3305 {
3306 /* If we were doing a step-over, all other threads but
3307 the stepping one had been paused in start_step_over,
3308 with their suspend counts incremented. We don't want
3309 to do a full unstop/unpause, because we're in
3310 all-stop mode (so we want threads stopped), but we
3311 still need to unsuspend the other threads, to
3312 decrement their `suspended' count back. */
3313 unsuspend_all_lwps (event_child);
3314 }
3315 else
3316 {
3317 /* If we just finished a step-over, then all threads had
3318 been momentarily paused. In all-stop, that's fine,
3319 we want threads stopped by now anyway. In non-stop,
3320 we need to re-resume threads that GDB wanted to be
3321 running. */
3322 unstop_all_lwps (1, event_child);
3323 }
3324 }
3325
3326 /* Stabilize threads (move out of jump pads). */
3327 if (!non_stop)
3328 stabilize_threads ();
3329 }
3330 else
3331 {
3332 /* If we just finished a step-over, then all threads had been
3333 momentarily paused. In all-stop, that's fine, we want
3334 threads stopped by now anyway. In non-stop, we need to
3335 re-resume threads that GDB wanted to be running. */
3336 if (step_over_finished)
3337 unstop_all_lwps (1, event_child);
3338 }
3339
3340 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3341 {
3342 /* If the reported event is an exit, fork, vfork or exec, let
3343 GDB know. */
3344 *ourstatus = event_child->waitstatus;
3345 /* Clear the event lwp's waitstatus since we handled it already. */
3346 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3347 }
3348 else
3349 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3350
3351 /* Now that we've selected our final event LWP, un-adjust its PC if
3352 it was a software breakpoint, and the client doesn't know we can
3353 adjust the breakpoint ourselves. */
3354 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3355 && !swbreak_feature)
3356 {
3357 int decr_pc = the_low_target.decr_pc_after_break;
3358
3359 if (decr_pc != 0)
3360 {
3361 struct regcache *regcache
3362 = get_thread_regcache (current_thread, 1);
3363 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3364 }
3365 }
3366
3367 if (current_thread->last_resume_kind == resume_stop
3368 && WSTOPSIG (w) == SIGSTOP)
3369 {
3370 /* The thread was requested to stop by GDB with vCont;t and
3371 stopped cleanly, so report it as SIG0. The use of
3372 SIGSTOP is an implementation detail. */
3373 ourstatus->value.sig = GDB_SIGNAL_0;
3374 }
3375 else if (current_thread->last_resume_kind == resume_stop
3376 && WSTOPSIG (w) != SIGSTOP)
3377 {
3378 /* The thread was requested to stop by GDB with vCont;t,
3379 but it stopped for some other reason. */
3380 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3381 }
3382 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3383 {
3384 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3385 }
3386
3387 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3388
3389 if (debug_threads)
3390 {
3391 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3392 target_pid_to_str (ptid_of (current_thread)),
3393 ourstatus->kind, ourstatus->value.sig);
3394 debug_exit ();
3395 }
3396
3397 return ptid_of (current_thread);
3398 }
3399
3400 /* Get rid of any pending event in the pipe. */
3401 static void
3402 async_file_flush (void)
3403 {
3404 int ret;
3405 char buf;
3406
3407 do
3408 ret = read (linux_event_pipe[0], &buf, 1);
3409 while (ret >= 0 || (ret == -1 && errno == EINTR));
3410 }
3411
3412 /* Put something in the pipe, so the event loop wakes up. */
3413 static void
3414 async_file_mark (void)
3415 {
3416 int ret;
3417
3418 async_file_flush ();
3419
3420 do
3421 ret = write (linux_event_pipe[1], "+", 1);
3422 while (ret == 0 || (ret == -1 && errno == EINTR));
3423
3424 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3425 be awakened anyway. */
3426 }
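
/* Illustrative sketch only (kept out of the build with #if 0): the
   pipe setup the two helpers above assume.  In gdbserver the pipe is
   created when async mode is enabled; the essential point is that
   both ends are non-blocking, otherwise the flush loop above would
   hang once the pipe ran dry.  The helper and its error handling are
   a hypothetical simplification.  */
#if 0
static void
example_setup_event_pipe (void)
{
  if (pipe (linux_event_pipe) != 0)
    return;

  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
}
#endif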
3427
3428 static ptid_t
3429 linux_wait (ptid_t ptid,
3430 struct target_waitstatus *ourstatus, int target_options)
3431 {
3432 ptid_t event_ptid;
3433
3434 /* Flush the async file first. */
3435 if (target_is_async_p ())
3436 async_file_flush ();
3437
3438 do
3439 {
3440 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3441 }
3442 while ((target_options & TARGET_WNOHANG) == 0
3443 && ptid_equal (event_ptid, null_ptid)
3444 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3445
3446 /* If at least one stop was reported, there may be more. A single
3447 SIGCHLD can signal more than one child stop. */
3448 if (target_is_async_p ()
3449 && (target_options & TARGET_WNOHANG) != 0
3450 && !ptid_equal (event_ptid, null_ptid))
3451 async_file_mark ();
3452
3453 return event_ptid;
3454 }
3455
3456 /* Send a signal to an LWP. */
3457
3458 static int
3459 kill_lwp (unsigned long lwpid, int signo)
3460 {
3461 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3462 fails, then we are not using NPTL threads and we should be using kill. */
3463
3464 #ifdef __NR_tkill
3465 {
3466 static int tkill_failed;
3467
3468 if (!tkill_failed)
3469 {
3470 int ret;
3471
3472 errno = 0;
3473 ret = syscall (__NR_tkill, lwpid, signo);
3474 if (errno != ENOSYS)
3475 return ret;
3476 tkill_failed = 1;
3477 }
3478 }
3479 #endif
3480
3481 return kill (lwpid, signo);
3482 }
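
/* Illustrative note (kept out of the build with #if 0): kill (PID,
   SIG) is process-directed, so with NPTL the kernel may deliver it
   to any thread in the group, while tkill (TID, SIG) targets exactly
   one LWP.  The per-LWP SIGSTOP logic below relies on the latter
   behaviour when available.  The wrapper is hypothetical.  */
#if 0
static void
example_stop_one_lwp (unsigned long lwpid)
{
  kill_lwp (lwpid, SIGSTOP);	/* Stops exactly this LWP, not the
				   whole thread group.  */
}
#endif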
3483
3484 void
3485 linux_stop_lwp (struct lwp_info *lwp)
3486 {
3487 send_sigstop (lwp);
3488 }
3489
3490 static void
3491 send_sigstop (struct lwp_info *lwp)
3492 {
3493 int pid;
3494
3495 pid = lwpid_of (get_lwp_thread (lwp));
3496
3497 /* If we already have a pending stop signal for this process, don't
3498 send another. */
3499 if (lwp->stop_expected)
3500 {
3501 if (debug_threads)
3502 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3503
3504 return;
3505 }
3506
3507 if (debug_threads)
3508 debug_printf ("Sending sigstop to lwp %d\n", pid);
3509
3510 lwp->stop_expected = 1;
3511 kill_lwp (pid, SIGSTOP);
3512 }
3513
3514 static int
3515 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3516 {
3517 struct thread_info *thread = (struct thread_info *) entry;
3518 struct lwp_info *lwp = get_thread_lwp (thread);
3519
3520 /* Ignore EXCEPT. */
3521 if (lwp == except)
3522 return 0;
3523
3524 if (lwp->stopped)
3525 return 0;
3526
3527 send_sigstop (lwp);
3528 return 0;
3529 }
3530
3531 /* Increment the suspend count of an LWP, and stop it, if not stopped
3532 yet. */
3533 static int
3534 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3535 void *except)
3536 {
3537 struct thread_info *thread = (struct thread_info *) entry;
3538 struct lwp_info *lwp = get_thread_lwp (thread);
3539
3540 /* Ignore EXCEPT. */
3541 if (lwp == except)
3542 return 0;
3543
3544 lwp_suspended_inc (lwp);
3545
3546 return send_sigstop_callback (entry, except);
3547 }
3548
3549 static void
3550 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3551 {
3552 /* Store the exit status for later. */
3553 lwp->status_pending_p = 1;
3554 lwp->status_pending = wstat;
3555
3556 /* Store in waitstatus as well, as there's nothing else to process
3557 for this event. */
3558 if (WIFEXITED (wstat))
3559 {
3560 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3561 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3562 }
3563 else if (WIFSIGNALED (wstat))
3564 {
3565 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3566 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3567 }
3568
3569 /* Prevent trying to stop it. */
3570 lwp->stopped = 1;
3571
3572 /* No further stops are expected from a dead lwp. */
3573 lwp->stop_expected = 0;
3574 }
3575
3576 /* Return true if LWP has exited already, and has a pending exit event
3577 to report to GDB. */
3578
3579 static int
3580 lwp_is_marked_dead (struct lwp_info *lwp)
3581 {
3582 return (lwp->status_pending_p
3583 && (WIFEXITED (lwp->status_pending)
3584 || WIFSIGNALED (lwp->status_pending)));
3585 }
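
/* Worked example (kept out of the build with #if 0): mark_lwp_dead
   stores the raw wait status, so the standard macros can later tell
   a dead LWP's pending "status" apart from an ordinary stop.  */
#if 0
static void
example_wait_status_encoding (void)
{
  int wstat;

  wstat = 0x0100;		/* exit (1): high byte is the exit code.  */
  gdb_assert (WIFEXITED (wstat) && WEXITSTATUS (wstat) == 1);

  wstat = W_STOPCODE (SIGSTOP);	/* A stop: low byte is 0x7f.  */
  gdb_assert (WIFSTOPPED (wstat) && !WIFEXITED (wstat));
}
#endif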
3586
3587 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3588
3589 static void
3590 wait_for_sigstop (void)
3591 {
3592 struct thread_info *saved_thread;
3593 ptid_t saved_tid;
3594 int wstat;
3595 int ret;
3596
3597 saved_thread = current_thread;
3598 if (saved_thread != NULL)
3599 saved_tid = saved_thread->entry.id;
3600 else
3601 saved_tid = null_ptid; /* avoid bogus unused warning */
3602
3603 if (debug_threads)
3604 debug_printf ("wait_for_sigstop: pulling events\n");
3605
3606 /* Passing NULL_PTID as filter indicates we want all events to be
3607 left pending. Eventually this returns when there are no
3608 unwaited-for children left. */
3609 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3610 &wstat, __WALL);
3611 gdb_assert (ret == -1);
3612
3613 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3614 current_thread = saved_thread;
3615 else
3616 {
3617 if (debug_threads)
3618 debug_printf ("Previously current thread died.\n");
3619
3620 /* We can't change the current inferior behind GDB's back,
3621 otherwise, a subsequent command may apply to the wrong
3622 process. */
3623 current_thread = NULL;
3624 }
3625 }
3626
3627 /* Returns true if LWP ENTRY is stopped in a jump pad and we can't
3628 move it out because we need to report the stop event to GDB. For
3629 example, if the user puts a breakpoint in the jump pad, it's
3630 because she wants to debug it. */
3631
3632 static int
3633 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3634 {
3635 struct thread_info *thread = (struct thread_info *) entry;
3636 struct lwp_info *lwp = get_thread_lwp (thread);
3637
3638 if (lwp->suspended != 0)
3639 {
3640 internal_error (__FILE__, __LINE__,
3641 "LWP %ld is suspended, suspended=%d\n",
3642 lwpid_of (thread), lwp->suspended);
3643 }
3644 gdb_assert (lwp->stopped);
3645
3646 /* Allow debugging the jump pad, gdb_collect, etc. */
3647 return (supports_fast_tracepoints ()
3648 && agent_loaded_p ()
3649 && (gdb_breakpoint_here (lwp->stop_pc)
3650 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3651 || thread->last_resume_kind == resume_step)
3652 && linux_fast_tracepoint_collecting (lwp, NULL));
3653 }
3654
3655 static void
3656 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3657 {
3658 struct thread_info *thread = (struct thread_info *) entry;
3659 struct thread_info *saved_thread;
3660 struct lwp_info *lwp = get_thread_lwp (thread);
3661 int *wstat;
3662
3663 if (lwp->suspended != 0)
3664 {
3665 internal_error (__FILE__, __LINE__,
3666 "LWP %ld is suspended, suspended=%d\n",
3667 lwpid_of (thread), lwp->suspended);
3668 }
3669 gdb_assert (lwp->stopped);
3670
3671 /* For gdb_breakpoint_here. */
3672 saved_thread = current_thread;
3673 current_thread = thread;
3674
3675 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3676
3677 /* Allow debugging the jump pad, gdb_collect, etc. */
3678 if (!gdb_breakpoint_here (lwp->stop_pc)
3679 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3680 && thread->last_resume_kind != resume_step
3681 && maybe_move_out_of_jump_pad (lwp, wstat))
3682 {
3683 if (debug_threads)
3684 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3685 lwpid_of (thread));
3686
3687 if (wstat)
3688 {
3689 lwp->status_pending_p = 0;
3690 enqueue_one_deferred_signal (lwp, wstat);
3691
3692 if (debug_threads)
3693 debug_printf ("Signal %d for LWP %ld deferred "
3694 "(in jump pad)\n",
3695 WSTOPSIG (*wstat), lwpid_of (thread));
3696 }
3697
3698 linux_resume_one_lwp (lwp, 0, 0, NULL);
3699 }
3700 else
3701 lwp_suspended_inc (lwp);
3702
3703 current_thread = saved_thread;
3704 }
3705
3706 static int
3707 lwp_running (struct inferior_list_entry *entry, void *data)
3708 {
3709 struct thread_info *thread = (struct thread_info *) entry;
3710 struct lwp_info *lwp = get_thread_lwp (thread);
3711
3712 if (lwp_is_marked_dead (lwp))
3713 return 0;
3714 if (lwp->stopped)
3715 return 0;
3716 return 1;
3717 }
3718
3719 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3720 If SUSPEND, then also increase the suspend count of every LWP,
3721 except EXCEPT. */
3722
3723 static void
3724 stop_all_lwps (int suspend, struct lwp_info *except)
3725 {
3726 /* Should not be called recursively. */
3727 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3728
3729 if (debug_threads)
3730 {
3731 debug_enter ();
3732 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3733 suspend ? "stop-and-suspend" : "stop",
3734 except != NULL
3735 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3736 : "none");
3737 }
3738
3739 stopping_threads = (suspend
3740 ? STOPPING_AND_SUSPENDING_THREADS
3741 : STOPPING_THREADS);
3742
3743 if (suspend)
3744 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3745 else
3746 find_inferior (&all_threads, send_sigstop_callback, except);
3747 wait_for_sigstop ();
3748 stopping_threads = NOT_STOPPING_THREADS;
3749
3750 if (debug_threads)
3751 {
3752 debug_printf ("stop_all_lwps done, setting stopping_threads "
3753 "back to !stopping\n");
3754 debug_exit ();
3755 }
3756 }
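
/* Illustrative sketch (editor's example, not part of this file): the
   two ptrace-era primitives that the stop-all protocol above combines.
   tgkill delivers SIGSTOP to one specific thread, and waitpid with
   __WALL collects the resulting stop notification even for non-leader
   threads.  The helper name is hypothetical and error handling is
   elided. */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
stop_one_lwp (pid_t tgid, pid_t lwp)
{
  int status;

  if (syscall (SYS_tgkill, tgid, lwp, SIGSTOP) != 0)
    return -1;
  if (waitpid (lwp, &status, __WALL) != lwp)
    return -1;
  /* A well-behaved stop reports WIFSTOPPED with SIGSTOP; anything
     else is an event the caller must leave pending, as
     wait_for_sigstop does above.  */
  return (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP) ? 0 : -1;
}
#endif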
3757
3758 /* Enqueue one signal in the chain of signals which need to be
3759 delivered to this process on next resume. */
3760
3761 static void
3762 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3763 {
3764 struct pending_signals *p_sig = XNEW (struct pending_signals);
3765
3766 p_sig->prev = lwp->pending_signals;
3767 p_sig->signal = signal;
3768 if (info == NULL)
3769 memset (&p_sig->info, 0, sizeof (siginfo_t));
3770 else
3771 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3772 lwp->pending_signals = p_sig;
3773 }
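
/* Illustrative sketch (example only): the shape of the pending-signal
   queue.  New entries are pushed at the head via the "prev" links, and
   the consumer in linux_resume_one_lwp_throw walks to the tail, so
   signals are delivered in FIFO order even though the list grows at
   the front.  The names here are hypothetical stand-ins. */
#if 0
#include <stdlib.h>

struct pending { struct pending *prev; int signal; };

static void
push_signal (struct pending **head, int sig)
{
  struct pending *p = (struct pending *) malloc (sizeof *p);

  p->prev = *head;
  p->signal = sig;
  *head = p;
}

/* Remove and return the oldest entry.  Assumes a non-empty queue. */
static int
pop_oldest_signal (struct pending **head)
{
  struct pending **p = head;
  int sig;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif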
3774
3775 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3776 SIGNAL is nonzero, give it that signal. */
3777
3778 static void
3779 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3780 int step, int signal, siginfo_t *info)
3781 {
3782 struct thread_info *thread = get_lwp_thread (lwp);
3783 struct thread_info *saved_thread;
3784 int fast_tp_collecting;
3785 struct process_info *proc = get_thread_process (thread);
3786
3787 /* Note that the target description may not be initialized
3788 (proc->tdesc == NULL) at this point, because the program hasn't
3789 stopped at its first instruction yet; this happens while GDBserver
3790 is skipping the extra traps from the wrapper program (see option
3791 --wrapper). Code in this function that requires register access
3792 should therefore be guarded by a check on proc->tdesc. */
3793
3794 if (lwp->stopped == 0)
3795 return;
3796
3797 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3798
3799 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3800
3801 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3802 user used the "jump" command, or "set $pc = foo"). */
3803 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3804 {
3805 /* Collecting 'while-stepping' actions doesn't make sense
3806 anymore. */
3807 release_while_stepping_state_list (thread);
3808 }
3809
3810 /* If we have pending signals or status, and a new signal, enqueue the
3811 signal. Also enqueue the signal if we are waiting to reinsert a
3812 breakpoint; it will be picked up again below. */
3813 if (signal != 0
3814 && (lwp->status_pending_p
3815 || lwp->pending_signals != NULL
3816 || lwp->bp_reinsert != 0
3817 || fast_tp_collecting))
3818 enqueue_pending_signal (lwp, signal, info);
3829
3830 if (lwp->status_pending_p)
3831 {
3832 if (debug_threads)
3833 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3834 " has pending status\n",
3835 lwpid_of (thread), step ? "step" : "continue", signal,
3836 lwp->stop_expected ? "expected" : "not expected");
3837 return;
3838 }
3839
3840 saved_thread = current_thread;
3841 current_thread = thread;
3842
3843 if (debug_threads)
3844 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3845 lwpid_of (thread), step ? "step" : "continue", signal,
3846 lwp->stop_expected ? "expected" : "not expected");
3847
3848 /* This bit needs some thinking about. If we get a signal that
3849 we must report while a single-step reinsert is still pending,
3850 we often end up resuming the thread. It might be better to
3851 (ew) allow a stack of pending events; then we could be sure that
3852 the reinsert happened right away and not lose any signals.
3853
3854 Making this stack would also shrink the window in which breakpoints are
3855 uninserted (see comment in linux_wait_for_lwp) but not enough for
3856 complete correctness, so it won't solve that problem. It may be
3857 worthwhile just to solve this one, however. */
3858 if (lwp->bp_reinsert != 0)
3859 {
3860 if (debug_threads)
3861 debug_printf (" pending reinsert at 0x%s\n",
3862 paddress (lwp->bp_reinsert));
3863
3864 if (can_hardware_single_step ())
3865 {
3866 if (fast_tp_collecting == 0)
3867 {
3868 if (step == 0)
3869 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3870 if (lwp->suspended)
3871 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3872 lwp->suspended);
3873 }
3874
3875 step = 1;
3876 }
3877
3878 /* Postpone any pending signal. It was enqueued above. */
3879 signal = 0;
3880 }
3881
3882 if (fast_tp_collecting == 1)
3883 {
3884 if (debug_threads)
3885 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3886 " (exit-jump-pad-bkpt)\n",
3887 lwpid_of (thread));
3888
3889 /* Postpone any pending signal. It was enqueued above. */
3890 signal = 0;
3891 }
3892 else if (fast_tp_collecting == 2)
3893 {
3894 if (debug_threads)
3895 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3896 " single-stepping\n",
3897 lwpid_of (thread));
3898
3899 if (can_hardware_single_step ())
3900 step = 1;
3901 else
3902 {
3903 internal_error (__FILE__, __LINE__,
3904 "moving out of jump pad single-stepping"
3905 " not implemented on this target");
3906 }
3907
3908 /* Postpone any pending signal. It was enqueued above. */
3909 signal = 0;
3910 }
3911
3912 /* If we have while-stepping actions in this thread, set it stepping.
3913 If we have a signal to deliver, it may or may not be set to
3914 SIG_IGN; we don't know. Assume so, and allow collecting
3915 while-stepping into a signal handler. A possible smart thing to
3916 do would be to set an internal breakpoint at the signal return
3917 address, continue, and carry on catching this while-stepping
3918 action only when that breakpoint is hit. A future
3919 enhancement. */
3920 if (thread->while_stepping != NULL
3921 && can_hardware_single_step ())
3922 {
3923 if (debug_threads)
3924 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3925 lwpid_of (thread));
3926 step = 1;
3927 }
3928
3929 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3930 {
3931 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3932
3933 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3934
3935 if (debug_threads)
3936 {
3937 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3938 (long) lwp->stop_pc);
3939 }
3940 }
3941
3942 /* If we have pending signals, consume one unless we are trying to
3943 reinsert a breakpoint or we're trying to finish a fast tracepoint
3944 collect. */
3945 if (lwp->pending_signals != NULL
3946 && lwp->bp_reinsert == 0
3947 && fast_tp_collecting == 0)
3948 {
3949 struct pending_signals **p_sig;
3950
3951 p_sig = &lwp->pending_signals;
3952 while ((*p_sig)->prev != NULL)
3953 p_sig = &(*p_sig)->prev;
3954
3955 signal = (*p_sig)->signal;
3956 if ((*p_sig)->info.si_signo != 0)
3957 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3958 &(*p_sig)->info);
3959
3960 free (*p_sig);
3961 *p_sig = NULL;
3962 }
3963
3964 if (the_low_target.prepare_to_resume != NULL)
3965 the_low_target.prepare_to_resume (lwp);
3966
3967 regcache_invalidate_thread (thread);
3968 errno = 0;
3969 lwp->stepping = step;
3970 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3971 (PTRACE_TYPE_ARG3) 0,
3972 /* Coerce to a uintptr_t first to avoid potential gcc warning
3973 of coercing an 8 byte integer to a 4 byte pointer. */
3974 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3975
3976 current_thread = saved_thread;
3977 if (errno)
3978 perror_with_name ("resuming thread");
3979
3980 /* Successfully resumed. Clear state that no longer makes sense,
3981 and mark the LWP as running. Must not do this before resuming
3982 otherwise if that fails other code will be confused. E.g., we'd
3983 later try to stop the LWP and hang forever waiting for a stop
3984 status. Note that we must not throw after this is cleared,
3985 otherwise handle_zombie_lwp_error would get confused. */
3986 lwp->stopped = 0;
3987 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3988 }
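
/* Illustrative sketch (example only): the resume above boiled down to
   its two ptrace calls.  The siginfo is restored first, if we have
   one, and then the tracee is continued or single-stepped, delivering
   SIG on the way out (zero means "no signal").  Error handling is
   reduced to the return value; the helper name is hypothetical. */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <stdint.h>

static long
resume_with_signal (pid_t lwp, int step, int sig, siginfo_t *info)
{
  if (info != NULL
      && ptrace (PTRACE_SETSIGINFO, lwp, (void *) 0, info) != 0)
    return -1;
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwp,
                 (void *) 0, (void *) (uintptr_t) sig);
}
#endif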
3989
3990 /* Called when we try to resume a stopped LWP and that errors out. If
3991 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3992 or about to become), discard the error, clear any pending status
3993 the LWP may have, and return true (we'll collect the exit status
3994 soon enough). Otherwise, return false. */
3995
3996 static int
3997 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3998 {
3999 struct thread_info *thread = get_lwp_thread (lp);
4000
4001 /* If we get an error after resuming the LWP successfully, we'd
4002 confuse !T state for the LWP being gone. */
4003 gdb_assert (lp->stopped);
4004
4005 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4006 because even if ptrace failed with ESRCH, the tracee may be "not
4007 yet fully dead", but already refusing ptrace requests. In that
4008 case the tracee has 'R (Running)' state for a little bit
4009 (observed in Linux 3.18). See also the note on ESRCH in the
4010 ptrace(2) man page. Instead, check whether the LWP has any state
4011 other than ptrace-stopped. */
4012
4013 /* Don't assume anything if /proc/PID/status can't be read. */
4014 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4015 {
4016 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4017 lp->status_pending_p = 0;
4018 return 1;
4019 }
4020 return 0;
4021 }
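
/* Illustrative sketch (example only): roughly what the trace-stop
   probe used above does.  The State: line of /proc/PID/status is
   't (tracing stop)' for a ptrace-stopped thread on recent kernels
   (older kernels report 'T' for both job-control and trace stops).
   The real helper lives in nat/linux-procfs.c; this version elides
   its diagnostics. */
#if 0
#include <stdio.h>

static int
lwp_in_trace_stop (int pid)
{
  char path[64], line[256];
  char state = '\0';
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof line, f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      break;
  fclose (f);
  return state == 't';
}
#endif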
4022
4023 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4024 disappears while we try to resume it. */
4025
4026 static void
4027 linux_resume_one_lwp (struct lwp_info *lwp,
4028 int step, int signal, siginfo_t *info)
4029 {
4030 TRY
4031 {
4032 linux_resume_one_lwp_throw (lwp, step, signal, info);
4033 }
4034 CATCH (ex, RETURN_MASK_ERROR)
4035 {
4036 if (!check_ptrace_stopped_lwp_gone (lwp))
4037 throw_exception (ex);
4038 }
4039 END_CATCH
4040 }
4041
4042 struct thread_resume_array
4043 {
4044 struct thread_resume *resume;
4045 size_t n;
4046 };
4047
4048 /* This function is called once per thread via find_inferior.
4049 ARG is a pointer to a thread_resume_array struct.
4050 We look up the thread specified by ENTRY in ARG, and mark the thread
4051 with a pointer to the appropriate resume request.
4052
4053 This algorithm is O(threads * resume elements), but the number of
4054 resume elements is small (and will remain small at least until GDB
4055 supports thread suspension). */
4056
4057 static int
4058 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4059 {
4060 struct thread_info *thread = (struct thread_info *) entry;
4061 struct lwp_info *lwp = get_thread_lwp (thread);
4062 int ndx;
4063 struct thread_resume_array *r;
4064
4065 r = (struct thread_resume_array *) arg;
4066
4067 for (ndx = 0; ndx < r->n; ndx++)
4068 {
4069 ptid_t ptid = r->resume[ndx].thread;
4070 if (ptid_equal (ptid, minus_one_ptid)
4071 || ptid_equal (ptid, entry->id)
4072 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4073 of PID'. */
4074 || (ptid_get_pid (ptid) == pid_of (thread)
4075 && (ptid_is_pid (ptid)
4076 || ptid_get_lwp (ptid) == -1)))
4077 {
4078 if (r->resume[ndx].kind == resume_stop
4079 && thread->last_resume_kind == resume_stop)
4080 {
4081 if (debug_threads)
4082 debug_printf ("already %s LWP %ld at GDB's request\n",
4083 (thread->last_status.kind
4084 == TARGET_WAITKIND_STOPPED)
4085 ? "stopped"
4086 : "stopping",
4087 lwpid_of (thread));
4088
4089 continue;
4090 }
4091
4092 lwp->resume = &r->resume[ndx];
4093 thread->last_resume_kind = lwp->resume->kind;
4094
4095 lwp->step_range_start = lwp->resume->step_range_start;
4096 lwp->step_range_end = lwp->resume->step_range_end;
4097
4098 /* If we had a deferred signal to report, dequeue one now.
4099 This can happen if LWP gets more than one signal while
4100 trying to get out of a jump pad. */
4101 if (lwp->stopped
4102 && !lwp->status_pending_p
4103 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4104 {
4105 lwp->status_pending_p = 1;
4106
4107 if (debug_threads)
4108 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4109 "leaving status pending.\n",
4110 WSTOPSIG (lwp->status_pending),
4111 lwpid_of (thread));
4112 }
4113
4114 return 0;
4115 }
4116 }
4117
4118 /* No resume action for this thread. */
4119 lwp->resume = NULL;
4120
4121 return 0;
4122 }
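
/* Illustrative restatement (example only) of the matching rule used in
   the loop above: a resume request PTID applies to a thread if it is
   the -1 wildcard, names the thread exactly, or names the thread's
   process as either 'pPID' or 'pPID.-1'.  The helper name is
   hypothetical. */
#if 0
static int
resume_request_matches (ptid_t ptid, struct thread_info *thread)
{
  return (ptid_equal (ptid, minus_one_ptid)
          || ptid_equal (ptid, thread->entry.id)
          || (ptid_get_pid (ptid) == pid_of (thread)
              && (ptid_is_pid (ptid)
                  || ptid_get_lwp (ptid) == -1)));
}
#endif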
4123
4124 /* find_inferior callback for linux_resume.
4125 Set *FLAG_P if this lwp has an interesting status pending. */
4126
4127 static int
4128 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4129 {
4130 struct thread_info *thread = (struct thread_info *) entry;
4131 struct lwp_info *lwp = get_thread_lwp (thread);
4132
4133 /* LWPs which will not be resumed are not interesting, because
4134 we might not wait for them next time through linux_wait. */
4135 if (lwp->resume == NULL)
4136 return 0;
4137
4138 if (thread_still_has_status_pending_p (thread))
4139 * (int *) flag_p = 1;
4140
4141 return 0;
4142 }
4143
4144 /* Return 1 if this lwp that GDB wants running is stopped at an
4145 internal breakpoint that we need to step over. It assumes that any
4146 required STOP_PC adjustment has already been propagated to the
4147 inferior's regcache. */
4148
4149 static int
4150 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4151 {
4152 struct thread_info *thread = (struct thread_info *) entry;
4153 struct lwp_info *lwp = get_thread_lwp (thread);
4154 struct thread_info *saved_thread;
4155 CORE_ADDR pc;
4156 struct process_info *proc = get_thread_process (thread);
4157
4158 /* GDBserver is skipping the extra traps from the wrapper program,
4159 so there is no need to step over. */
4160 if (proc->tdesc == NULL)
4161 return 0;
4162
4163 /* LWPs which will not be resumed are not interesting, because we
4164 might not wait for them next time through linux_wait. */
4165
4166 if (!lwp->stopped)
4167 {
4168 if (debug_threads)
4169 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4170 lwpid_of (thread));
4171 return 0;
4172 }
4173
4174 if (thread->last_resume_kind == resume_stop)
4175 {
4176 if (debug_threads)
4177 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4178 " stopped\n",
4179 lwpid_of (thread));
4180 return 0;
4181 }
4182
4183 gdb_assert (lwp->suspended >= 0);
4184
4185 if (lwp->suspended)
4186 {
4187 if (debug_threads)
4188 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4189 lwpid_of (thread));
4190 return 0;
4191 }
4192
4193 if (!lwp->need_step_over)
4194 {
4195 if (debug_threads)
4196 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4197 }
4198
4199 if (lwp->status_pending_p)
4200 {
4201 if (debug_threads)
4202 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4203 " status.\n",
4204 lwpid_of (thread));
4205 return 0;
4206 }
4207
4208 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4209 or we have. */
4210 pc = get_pc (lwp);
4211
4212 /* If the PC has changed since we stopped, then don't do anything,
4213 and let the breakpoint/tracepoint be hit. This happens if, for
4214 instance, GDB handled the decr_pc_after_break subtraction itself,
4215 GDB is OOL stepping this thread, or the user has issued a "jump"
4216 command, or poked thread's registers herself. */
4217 if (pc != lwp->stop_pc)
4218 {
4219 if (debug_threads)
4220 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4221 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4222 lwpid_of (thread),
4223 paddress (lwp->stop_pc), paddress (pc));
4224
4225 lwp->need_step_over = 0;
4226 return 0;
4227 }
4228
4229 saved_thread = current_thread;
4230 current_thread = thread;
4231
4232 /* We can only step over breakpoints we know about. */
4233 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4234 {
4235 /* Don't step over a breakpoint that GDB expects to hit,
4236 though. If the condition is being evaluated on the target's side
4237 and it evaluates to false, step over this breakpoint as well. */
4238 if (gdb_breakpoint_here (pc)
4239 && gdb_condition_true_at_breakpoint (pc)
4240 && gdb_no_commands_at_breakpoint (pc))
4241 {
4242 if (debug_threads)
4243 debug_printf ("Need step over [LWP %ld]? yes, but found"
4244 " GDB breakpoint at 0x%s; skipping step over\n",
4245 lwpid_of (thread), paddress (pc));
4246
4247 current_thread = saved_thread;
4248 return 0;
4249 }
4250 else
4251 {
4252 if (debug_threads)
4253 debug_printf ("Need step over [LWP %ld]? yes, "
4254 "found breakpoint at 0x%s\n",
4255 lwpid_of (thread), paddress (pc));
4256
4257 /* We've found an lwp that needs stepping over --- return 1 so
4258 that find_inferior stops looking. */
4259 current_thread = saved_thread;
4260
4261 /* If the step over is cancelled, this is set again. */
4262 lwp->need_step_over = 0;
4263 return 1;
4264 }
4265 }
4266
4267 current_thread = saved_thread;
4268
4269 if (debug_threads)
4270 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4271 " at 0x%s\n",
4272 lwpid_of (thread), paddress (pc));
4273
4274 return 0;
4275 }
4276
4277 /* Start a step-over operation on LWP. When LWP is stopped at a
4278 breakpoint, we need to move the breakpoint out of the way to make
4279 progress. If we let other threads run while we do that, they may
4280 pass by the breakpoint location and miss hitting it. To avoid
4281 that, a step-over momentarily stops all threads while LWP is
4282 single-stepped with the breakpoint temporarily uninserted from
4283 the inferior. When the single-step finishes, we reinsert the
4284 breakpoint and let all threads that are supposed to be running
4285 run again.
4286
4287 On targets that don't support hardware single-step, we don't
4288 currently support full software single-stepping. Instead, we only
4289 support stepping over the thread event breakpoint, by asking the
4290 low target where to place a reinsert breakpoint. Since this
4291 routine assumes the breakpoint being stepped over is a thread event
4292 breakpoint, it usually assumes the return address of the current
4293 function is a good enough place to set the reinsert breakpoint. */
4294
4295 static int
4296 start_step_over (struct lwp_info *lwp)
4297 {
4298 struct thread_info *thread = get_lwp_thread (lwp);
4299 struct thread_info *saved_thread;
4300 CORE_ADDR pc;
4301 int step;
4302
4303 if (debug_threads)
4304 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4305 lwpid_of (thread));
4306
4307 stop_all_lwps (1, lwp);
4308
4309 if (lwp->suspended != 0)
4310 {
4311 internal_error (__FILE__, __LINE__,
4312 "LWP %ld suspended=%d\n", lwpid_of (thread),
4313 lwp->suspended);
4314 }
4315
4316 if (debug_threads)
4317 debug_printf ("Done stopping all threads for step-over.\n");
4318
4319 /* Note, we should always reach here with an already adjusted PC,
4320 either by GDB (if we're resuming due to GDB's request), or by our
4321 caller, if we just finished handling an internal breakpoint GDB
4322 shouldn't care about. */
4323 pc = get_pc (lwp);
4324
4325 saved_thread = current_thread;
4326 current_thread = thread;
4327
4328 lwp->bp_reinsert = pc;
4329 uninsert_breakpoints_at (pc);
4330 uninsert_fast_tracepoint_jumps_at (pc);
4331
4332 if (can_hardware_single_step ())
4333 {
4334 step = 1;
4335 }
4336 else
4337 {
4338 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4339 set_reinsert_breakpoint (raddr);
4340 step = 0;
4341 }
4342
4343 current_thread = saved_thread;
4344
4345 linux_resume_one_lwp (lwp, step, 0, NULL);
4346
4347 /* Require next event from this LWP. */
4348 step_over_bkpt = thread->entry.id;
4349 return 1;
4350 }
4351
4352 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4353 start_step_over, if still there, and delete any reinsert
4354 breakpoints we've set, on non hardware single-step targets. */
4355
4356 static int
4357 finish_step_over (struct lwp_info *lwp)
4358 {
4359 if (lwp->bp_reinsert != 0)
4360 {
4361 if (debug_threads)
4362 debug_printf ("Finished step over.\n");
4363
4364 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4365 may be no breakpoint to reinsert there by now. */
4366 reinsert_breakpoints_at (lwp->bp_reinsert);
4367 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4368
4369 lwp->bp_reinsert = 0;
4370
4371 /* Delete any software-single-step reinsert breakpoints. No
4372 longer needed. We don't have to worry about other threads
4373 hitting this trap, and later not being able to explain it,
4374 because we were stepping over a breakpoint, and we hold all
4375 threads but LWP stopped while doing that. */
4376 if (!can_hardware_single_step ())
4377 delete_reinsert_breakpoints ();
4378
4379 step_over_bkpt = null_ptid;
4380 return 1;
4381 }
4382 else
4383 return 0;
4384 }
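
/* Illustrative condensation (example only) of the step-over dance that
   start_step_over and finish_step_over implement between them; the
   wait for the single-step to finish happens in the event loop in
   between the two halves. */
#if 0
stop_all_lwps (1, lwp);                  /* freeze every other thread */
lwp->bp_reinsert = pc;
uninsert_breakpoints_at (pc);            /* move the trap out of the way */
linux_resume_one_lwp (lwp, 1, 0, NULL);  /* hardware single-step LWP */
/* ... the event loop waits for the step to complete ... */
reinsert_breakpoints_at (lwp->bp_reinsert);  /* put the trap back */
lwp->bp_reinsert = 0;
unstop_all_lwps (1, lwp);                /* let the others run again */
#endif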
4385
4386 /* If there's a step over in progress, wait until all threads stop
4387 (that is, until the stepping thread finishes its step), and
4388 unsuspend all lwps. The stepping thread ends with its status
4389 pending, which is processed later when we get back to processing
4390 events. */
4391
4392 static void
4393 complete_ongoing_step_over (void)
4394 {
4395 if (!ptid_equal (step_over_bkpt, null_ptid))
4396 {
4397 struct lwp_info *lwp;
4398 int wstat;
4399 int ret;
4400
4401 if (debug_threads)
4402 debug_printf ("detach: step over in progress, finish it first\n");
4403
4404 /* Passing NULL_PTID as filter indicates we want all events to
4405 be left pending. Eventually this returns when there are no
4406 unwaited-for children left. */
4407 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4408 &wstat, __WALL);
4409 gdb_assert (ret == -1);
4410
4411 lwp = find_lwp_pid (step_over_bkpt);
4412 if (lwp != NULL)
4413 finish_step_over (lwp);
4414 step_over_bkpt = null_ptid;
4415 unsuspend_all_lwps (lwp);
4416 }
4417 }
4418
4419 /* This function is called once per thread. We check the thread's resume
4420 request, which will tell us whether to resume, step, or leave the thread
4421 stopped; and what signal, if any, it should be sent.
4422
4423 For threads which we aren't explicitly told otherwise, we preserve
4424 the stepping flag; this is used for stepping over gdbserver-placed
4425 breakpoints.
4426
4427 If the pending-status flag was set in any thread, we queue any needed
4428 signals, since we won't actually resume. We already have a pending
4429 event to report, so we don't need to preserve any step requests;
4430 they should be re-issued if necessary. */
4431
4432 static int
4433 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4434 {
4435 struct thread_info *thread = (struct thread_info *) entry;
4436 struct lwp_info *lwp = get_thread_lwp (thread);
4437 int step;
4438 int leave_all_stopped = * (int *) arg;
4439 int leave_pending;
4440
4441 if (lwp->resume == NULL)
4442 return 0;
4443
4444 if (lwp->resume->kind == resume_stop)
4445 {
4446 if (debug_threads)
4447 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4448
4449 if (!lwp->stopped)
4450 {
4451 if (debug_threads)
4452 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4453
4454 /* Stop the thread, and wait for the event asynchronously,
4455 through the event loop. */
4456 send_sigstop (lwp);
4457 }
4458 else
4459 {
4460 if (debug_threads)
4461 debug_printf ("already stopped LWP %ld\n",
4462 lwpid_of (thread));
4463
4464 /* The LWP may have been stopped in an internal event that
4465 was not meant to be notified back to GDB (e.g., gdbserver
4466 breakpoint), so we should be reporting a stop event in
4467 this case too. */
4468
4469 /* If the thread already has a pending SIGSTOP, this is a
4470 no-op. Otherwise, something later will presumably resume
4471 the thread and this will cause it to cancel any pending
4472 operation, due to last_resume_kind == resume_stop. If
4473 the thread already has a pending status to report, we
4474 will still report it the next time we wait - see
4475 status_pending_p_callback. */
4476
4477 /* If we already have a pending signal to report, then
4478 there's no need to queue a SIGSTOP, as this means we're
4479 midway through moving the LWP out of the jumppad, and we
4480 will report the pending signal as soon as that is
4481 finished. */
4482 if (lwp->pending_signals_to_report == NULL)
4483 send_sigstop (lwp);
4484 }
4485
4486 /* For stop requests, we're done. */
4487 lwp->resume = NULL;
4488 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4489 return 0;
4490 }
4491
4492 /* If this thread which is about to be resumed has a pending status,
4493 then don't resume it - we can just report the pending status.
4494 Likewise if it is suspended, because e.g., another thread is
4495 stepping past a breakpoint. Make sure to queue any signals that
4496 would otherwise be sent. In all-stop mode, we make this decision
4497 based on whether *any* thread has a pending status. If there's a
4498 thread that needs the step-over-breakpoint dance, then don't
4499 resume any other thread but that particular one. */
4500 leave_pending = (lwp->suspended
4501 || lwp->status_pending_p
4502 || leave_all_stopped);
4503
4504 if (!leave_pending)
4505 {
4506 if (debug_threads)
4507 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4508
4509 step = (lwp->resume->kind == resume_step);
4510 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4511 }
4512 else
4513 {
4514 if (debug_threads)
4515 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4516
4517 /* If we have a new signal, enqueue the signal. */
4518 if (lwp->resume->sig != 0)
4519 {
4520 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4521
4522 p_sig->prev = lwp->pending_signals;
4523 p_sig->signal = lwp->resume->sig;
4524
4525 /* If this is the same signal we were previously stopped by,
4526 make sure to queue its siginfo. We can ignore the return
4527 value of ptrace; if it fails, we'll skip
4528 PTRACE_SETSIGINFO. */
4529 if (WIFSTOPPED (lwp->last_status)
4530 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4531 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4532 &p_sig->info);
4533
4534 lwp->pending_signals = p_sig;
4535 }
4536 }
4537
4538 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4539 lwp->resume = NULL;
4540 return 0;
4541 }
4542
4543 static void
4544 linux_resume (struct thread_resume *resume_info, size_t n)
4545 {
4546 struct thread_resume_array array = { resume_info, n };
4547 struct thread_info *need_step_over = NULL;
4548 int any_pending;
4549 int leave_all_stopped;
4550
4551 if (debug_threads)
4552 {
4553 debug_enter ();
4554 debug_printf ("linux_resume:\n");
4555 }
4556
4557 find_inferior (&all_threads, linux_set_resume_request, &array);
4558
4559 /* If there is a thread which would otherwise be resumed, which has
4560 a pending status, then don't resume any threads - we can just
4561 report the pending status. Make sure to queue any signals that
4562 would otherwise be sent. In non-stop mode, we'll apply this
4563 logic to each thread individually. We consume all pending events
4564 before considering whether to start a step-over (in all-stop). */
4565 any_pending = 0;
4566 if (!non_stop)
4567 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4568
4569 /* If there is a thread which would otherwise be resumed, which is
4570 stopped at a breakpoint that needs stepping over, then don't
4571 resume any threads - have it step over the breakpoint with all
4572 other threads stopped, then resume all threads again. Make sure
4573 to queue any signals that would otherwise be delivered or
4574 queued. */
4575 if (!any_pending && supports_breakpoints ())
4576 need_step_over
4577 = (struct thread_info *) find_inferior (&all_threads,
4578 need_step_over_p, NULL);
4579
4580 leave_all_stopped = (need_step_over != NULL || any_pending);
4581
4582 if (debug_threads)
4583 {
4584 if (need_step_over != NULL)
4585 debug_printf ("Not resuming all, need step over\n");
4586 else if (any_pending)
4587 debug_printf ("Not resuming, all-stop and found "
4588 "an LWP with pending status\n");
4589 else
4590 debug_printf ("Resuming, no pending status or step over needed\n");
4591 }
4592
4593 /* Even if we're leaving threads stopped, queue all signals we'd
4594 otherwise deliver. */
4595 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4596
4597 if (need_step_over)
4598 start_step_over (get_thread_lwp (need_step_over));
4599
4600 if (debug_threads)
4601 {
4602 debug_printf ("linux_resume done\n");
4603 debug_exit ();
4604 }
4605 }
4606
4607 /* This function is called once per thread. We check the thread's
4608 last resume request, which will tell us whether to resume, step, or
4609 leave the thread stopped. Any signal the client requested to be
4610 delivered has already been enqueued at this point.
4611
4612 If any thread that GDB wants running is stopped at an internal
4613 breakpoint that needs stepping over, we start a step-over operation
4614 on that particular thread, and leave all others stopped. */
4615
4616 static int
4617 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4618 {
4619 struct thread_info *thread = (struct thread_info *) entry;
4620 struct lwp_info *lwp = get_thread_lwp (thread);
4621 int step;
4622
4623 if (lwp == except)
4624 return 0;
4625
4626 if (debug_threads)
4627 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4628
4629 if (!lwp->stopped)
4630 {
4631 if (debug_threads)
4632 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4633 return 0;
4634 }
4635
4636 if (thread->last_resume_kind == resume_stop
4637 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4638 {
4639 if (debug_threads)
4640 debug_printf (" client wants LWP %ld to remain stopped\n",
4641 lwpid_of (thread));
4642 return 0;
4643 }
4644
4645 if (lwp->status_pending_p)
4646 {
4647 if (debug_threads)
4648 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4649 lwpid_of (thread));
4650 return 0;
4651 }
4652
4653 gdb_assert (lwp->suspended >= 0);
4654
4655 if (lwp->suspended)
4656 {
4657 if (debug_threads)
4658 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4659 return 0;
4660 }
4661
4662 if (thread->last_resume_kind == resume_stop
4663 && lwp->pending_signals_to_report == NULL
4664 && lwp->collecting_fast_tracepoint == 0)
4665 {
4666 /* We haven't reported this LWP as stopped yet (otherwise, the
4667 last_status.kind check above would catch it, and we wouldn't
4668 reach here). This LWP may have been momentarily paused by a
4669 stop_all_lwps call while handling, for example, another LWP's
4670 step-over. In that case, the pending expected SIGSTOP signal
4671 that was queued at vCont;t handling time will have already
4672 been consumed by wait_for_sigstop, and so we need to requeue
4673 another one here. Note that if the LWP already has a SIGSTOP
4674 pending, this is a no-op. */
4675
4676 if (debug_threads)
4677 debug_printf ("Client wants LWP %ld to stop. "
4678 "Making sure it has a SIGSTOP pending\n",
4679 lwpid_of (thread));
4680
4681 send_sigstop (lwp);
4682 }
4683
4684 if (thread->last_resume_kind == resume_step)
4685 {
4686 if (debug_threads)
4687 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4688 lwpid_of (thread));
4689 step = 1;
4690 }
4691 else if (lwp->bp_reinsert != 0)
4692 {
4693 if (debug_threads)
4694 debug_printf (" stepping LWP %ld, reinsert set\n",
4695 lwpid_of (thread));
4696 step = 1;
4697 }
4698 else
4699 step = 0;
4700
4701 linux_resume_one_lwp (lwp, step, 0, NULL);
4702 return 0;
4703 }
4704
4705 static int
4706 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4707 {
4708 struct thread_info *thread = (struct thread_info *) entry;
4709 struct lwp_info *lwp = get_thread_lwp (thread);
4710
4711 if (lwp == except)
4712 return 0;
4713
4714 lwp_suspended_decr (lwp);
4715
4716 return proceed_one_lwp (entry, except);
4717 }
4718
4719 /* When we finish a step-over, set threads running again. If there's
4720 another thread that may need a step-over, now's the time to start
4721 it. Eventually, we'll move all threads past their breakpoints. */
4722
4723 static void
4724 proceed_all_lwps (void)
4725 {
4726 struct thread_info *need_step_over;
4727
4728 /* If there is a thread which would otherwise be resumed, which is
4729 stopped at a breakpoint that needs stepping over, then don't
4730 resume any threads - have it step over the breakpoint with all
4731 other threads stopped, then resume all threads again. */
4732
4733 if (supports_breakpoints ())
4734 {
4735 need_step_over
4736 = (struct thread_info *) find_inferior (&all_threads,
4737 need_step_over_p, NULL);
4738
4739 if (need_step_over != NULL)
4740 {
4741 if (debug_threads)
4742 debug_printf ("proceed_all_lwps: found "
4743 "thread %ld needing a step-over\n",
4744 lwpid_of (need_step_over));
4745
4746 start_step_over (get_thread_lwp (need_step_over));
4747 return;
4748 }
4749 }
4750
4751 if (debug_threads)
4752 debug_printf ("Proceeding, no step-over needed\n");
4753
4754 find_inferior (&all_threads, proceed_one_lwp, NULL);
4755 }
4756
4757 /* Stopped LWPs that the client wanted to be running and that don't
4758 have pending statuses are set to run again, except for EXCEPT, if
4759 not NULL. This undoes a stop_all_lwps call. */
4760
4761 static void
4762 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4763 {
4764 if (debug_threads)
4765 {
4766 debug_enter ();
4767 if (except)
4768 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4769 lwpid_of (get_lwp_thread (except)));
4770 else
4771 debug_printf ("unstopping all lwps\n");
4772 }
4773
4774 if (unsuspend)
4775 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4776 else
4777 find_inferior (&all_threads, proceed_one_lwp, except);
4778
4779 if (debug_threads)
4780 {
4781 debug_printf ("unstop_all_lwps done\n");
4782 debug_exit ();
4783 }
4784 }
4785
4786
4787 #ifdef HAVE_LINUX_REGSETS
4788
4789 #define use_linux_regsets 1
4790
4791 /* Returns true if REGSET has been disabled. */
4792
4793 static int
4794 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4795 {
4796 return (info->disabled_regsets != NULL
4797 && info->disabled_regsets[regset - info->regsets]);
4798 }
4799
4800 /* Disable REGSET. */
4801
4802 static void
4803 disable_regset (struct regsets_info *info, struct regset_info *regset)
4804 {
4805 int dr_offset;
4806
4807 dr_offset = regset - info->regsets;
4808 if (info->disabled_regsets == NULL)
4809 info->disabled_regsets = XCNEWVEC (char, info->num_regsets);
4810 info->disabled_regsets[dr_offset] = 1;
4811 }
4812
4813 static int
4814 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4815 struct regcache *regcache)
4816 {
4817 struct regset_info *regset;
4818 int saw_general_regs = 0;
4819 int pid;
4820 struct iovec iov;
4821
4822 pid = lwpid_of (current_thread);
4823 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4824 {
4825 void *buf, *data;
4826 int nt_type, res;
4827
4828 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4829 continue;
4830
4831 buf = xmalloc (regset->size);
4832
4833 nt_type = regset->nt_type;
4834 if (nt_type)
4835 {
4836 iov.iov_base = buf;
4837 iov.iov_len = regset->size;
4838 data = (void *) &iov;
4839 }
4840 else
4841 data = buf;
4842
4843 #ifndef __sparc__
4844 res = ptrace (regset->get_request, pid,
4845 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4846 #else
4847 res = ptrace (regset->get_request, pid, data, nt_type);
4848 #endif
4849 if (res < 0)
4850 {
4851 if (errno == EIO)
4852 {
4853 /* If we get EIO on a regset, do not try it again for
4854 this process mode. */
4855 disable_regset (regsets_info, regset);
4856 }
4857 else if (errno == ENODATA)
4858 {
4859 /* ENODATA may be returned if the regset is currently
4860 not "active". This can happen in normal operation,
4861 so suppress the warning in this case. */
4862 }
4863 else
4864 {
4865 char s[256];
4866 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4867 pid);
4868 perror (s);
4869 }
4870 }
4871 else
4872 {
4873 if (regset->type == GENERAL_REGS)
4874 saw_general_regs = 1;
4875 regset->store_function (regcache, buf);
4876 }
4877 free (buf);
4878 }
4879 if (saw_general_regs)
4880 return 0;
4881 else
4882 return 1;
4883 }
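
/* Illustrative sketch (example only): the iovec-based transfer above,
   reduced to one standalone call.  PTRACE_GETREGSET with NT_PRSTATUS
   fetches the general registers; on success the kernel updates iov_len
   to the number of bytes it actually wrote.  struct user_regs_struct
   is the x86 layout, an assumption of this sketch. */
#if 0
#include <elf.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof *regs;
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif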
4884
4885 static int
4886 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4887 struct regcache *regcache)
4888 {
4889 struct regset_info *regset;
4890 int saw_general_regs = 0;
4891 int pid;
4892 struct iovec iov;
4893
4894 pid = lwpid_of (current_thread);
4895 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4896 {
4897 void *buf, *data;
4898 int nt_type, res;
4899
4900 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4901 || regset->fill_function == NULL)
4902 continue;
4903
4904 buf = xmalloc (regset->size);
4905
4906 /* First fill the buffer with the current register set contents,
4907 in case there are any items in the kernel's regset that are
4908 not in gdbserver's regcache. */
4909
4910 nt_type = regset->nt_type;
4911 if (nt_type)
4912 {
4913 iov.iov_base = buf;
4914 iov.iov_len = regset->size;
4915 data = (void *) &iov;
4916 }
4917 else
4918 data = buf;
4919
4920 #ifndef __sparc__
4921 res = ptrace (regset->get_request, pid,
4922 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4923 #else
4924 res = ptrace (regset->get_request, pid, data, nt_type);
4925 #endif
4926
4927 if (res == 0)
4928 {
4929 /* Then overlay our cached registers on that. */
4930 regset->fill_function (regcache, buf);
4931
4932 /* Only now do we write the register set. */
4933 #ifndef __sparc__
4934 res = ptrace (regset->set_request, pid,
4935 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4936 #else
4937 res = ptrace (regset->set_request, pid, data, nt_type);
4938 #endif
4939 }
4940
4941 if (res < 0)
4942 {
4943 if (errno == EIO)
4944 {
4945 /* If we get EIO on a regset, do not try it again for
4946 this process mode. */
4947 disable_regset (regsets_info, regset);
4948 }
4949 else if (errno == ESRCH)
4950 {
4951 /* At this point, ESRCH should mean the process is
4952 already gone, in which case we simply ignore attempts
4953 to change its registers. See also the related
4954 comment in linux_resume_one_lwp. */
4955 free (buf);
4956 return 0;
4957 }
4958 else
4959 {
4960 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4961 }
4962 }
4963 else if (regset->type == GENERAL_REGS)
4964 saw_general_regs = 1;
4965 free (buf);
4966 }
4967 if (saw_general_regs)
4968 return 0;
4969 else
4970 return 1;
4971 }
4972
4973 #else /* !HAVE_LINUX_REGSETS */
4974
4975 #define use_linux_regsets 0
4976 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4977 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4978
4979 #endif
4980
4981 /* Return 1 if register REGNO is supported by one of the regset ptrace
4982 calls or 0 if it has to be transferred individually. */
4983
4984 static int
4985 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4986 {
4987 unsigned char mask = 1 << (regno % 8);
4988 size_t index = regno / 8;
4989
4990 return (use_linux_regsets
4991 && (regs_info->regset_bitmap == NULL
4992 || (regs_info->regset_bitmap[index] & mask) != 0));
4993 }
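
/* Illustrative sketch (example only): the bitmap convention tested
   above, in isolation.  Bit REGNO % 8 of byte REGNO / 8 says whether
   register REGNO is covered by some regset. */
#if 0
static int
bitmap_bit_set (const unsigned char *bitmap, int regno)
{
  return (bitmap[regno / 8] >> (regno % 8)) & 1;
}
#endif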
4994
4995 #ifdef HAVE_LINUX_USRREGS
4996
4997 int
4998 register_addr (const struct usrregs_info *usrregs, int regnum)
4999 {
5000 int addr;
5001
5002 if (regnum < 0 || regnum >= usrregs->num_regs)
5003 error ("Invalid register number %d.", regnum);
5004
5005 addr = usrregs->regmap[regnum];
5006
5007 return addr;
5008 }
5009
5010 /* Fetch one register. */
5011 static void
5012 fetch_register (const struct usrregs_info *usrregs,
5013 struct regcache *regcache, int regno)
5014 {
5015 CORE_ADDR regaddr;
5016 int i, size;
5017 char *buf;
5018 int pid;
5019
5020 if (regno >= usrregs->num_regs)
5021 return;
5022 if ((*the_low_target.cannot_fetch_register) (regno))
5023 return;
5024
5025 regaddr = register_addr (usrregs, regno);
5026 if (regaddr == -1)
5027 return;
5028
5029 size = ((register_size (regcache->tdesc, regno)
5030 + sizeof (PTRACE_XFER_TYPE) - 1)
5031 & -sizeof (PTRACE_XFER_TYPE));
5032 buf = alloca (size);
5033
5034 pid = lwpid_of (current_thread);
5035 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5036 {
5037 errno = 0;
5038 *(PTRACE_XFER_TYPE *) (buf + i) =
5039 ptrace (PTRACE_PEEKUSER, pid,
5040 /* Coerce to a uintptr_t first to avoid potential gcc warning
5041 of coercing an 8 byte integer to a 4 byte pointer. */
5042 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5043 regaddr += sizeof (PTRACE_XFER_TYPE);
5044 if (errno != 0)
5045 error ("reading register %d: %s", regno, strerror (errno));
5046 }
5047
5048 if (the_low_target.supply_ptrace_register)
5049 the_low_target.supply_ptrace_register (regcache, regno, buf);
5050 else
5051 supply_register (regcache, regno, buf);
5052 }
5053
5054 /* Store one register. */
5055 static void
5056 store_register (const struct usrregs_info *usrregs,
5057 struct regcache *regcache, int regno)
5058 {
5059 CORE_ADDR regaddr;
5060 int i, size;
5061 char *buf;
5062 int pid;
5063
5064 if (regno >= usrregs->num_regs)
5065 return;
5066 if ((*the_low_target.cannot_store_register) (regno))
5067 return;
5068
5069 regaddr = register_addr (usrregs, regno);
5070 if (regaddr == -1)
5071 return;
5072
5073 size = ((register_size (regcache->tdesc, regno)
5074 + sizeof (PTRACE_XFER_TYPE) - 1)
5075 & -sizeof (PTRACE_XFER_TYPE));
5076 buf = alloca (size);
5077 memset (buf, 0, size);
5078
5079 if (the_low_target.collect_ptrace_register)
5080 the_low_target.collect_ptrace_register (regcache, regno, buf);
5081 else
5082 collect_register (regcache, regno, buf);
5083
5084 pid = lwpid_of (current_thread);
5085 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5086 {
5087 errno = 0;
5088 ptrace (PTRACE_POKEUSER, pid,
5089 /* Coerce to a uintptr_t first to avoid potential gcc warning
5090 about coercing an 8 byte integer to a 4 byte pointer. */
5091 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5092 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5093 if (errno != 0)
5094 {
5095 /* At this point, ESRCH should mean the process is
5096 already gone, in which case we simply ignore attempts
5097 to change its registers. See also the related
5098 comment in linux_resume_one_lwp. */
5099 if (errno == ESRCH)
5100 return;
5101
5102 if ((*the_low_target.cannot_store_register) (regno) == 0)
5103 error ("writing register %d: %s", regno, strerror (errno));
5104 }
5105 regaddr += sizeof (PTRACE_XFER_TYPE);
5106 }
5107 }
5108
5109 /* Fetch all registers, or just one, from the child process.
5110 If REGNO is -1, do this for all registers, skipping any that are
5111 assumed to have been retrieved by regsets_fetch_inferior_registers,
5112 unless ALL is non-zero.
5113 Otherwise, REGNO specifies which register (so we can save time). */
5114 static void
5115 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5116 struct regcache *regcache, int regno, int all)
5117 {
5118 struct usrregs_info *usr = regs_info->usrregs;
5119
5120 if (regno == -1)
5121 {
5122 for (regno = 0; regno < usr->num_regs; regno++)
5123 if (all || !linux_register_in_regsets (regs_info, regno))
5124 fetch_register (usr, regcache, regno);
5125 }
5126 else
5127 fetch_register (usr, regcache, regno);
5128 }
5129
5130 /* Store our register values back into the inferior.
5131 If REGNO is -1, do this for all registers, skipping any that are
5132 assumed to have been saved by regsets_store_inferior_registers,
5133 unless ALL is non-zero.
5134 Otherwise, REGNO specifies which register (so we can save time). */
5135 static void
5136 usr_store_inferior_registers (const struct regs_info *regs_info,
5137 struct regcache *regcache, int regno, int all)
5138 {
5139 struct usrregs_info *usr = regs_info->usrregs;
5140
5141 if (regno == -1)
5142 {
5143 for (regno = 0; regno < usr->num_regs; regno++)
5144 if (all || !linux_register_in_regsets (regs_info, regno))
5145 store_register (usr, regcache, regno);
5146 }
5147 else
5148 store_register (usr, regcache, regno);
5149 }
5150
5151 #else /* !HAVE_LINUX_USRREGS */
5152
5153 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5154 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5155
5156 #endif
5157
5158
5159 void
5160 linux_fetch_registers (struct regcache *regcache, int regno)
5161 {
5162 int use_regsets;
5163 int all = 0;
5164 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5165
5166 if (regno == -1)
5167 {
5168 if (the_low_target.fetch_register != NULL
5169 && regs_info->usrregs != NULL)
5170 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5171 (*the_low_target.fetch_register) (regcache, regno);
5172
5173 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5174 if (regs_info->usrregs != NULL)
5175 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5176 }
5177 else
5178 {
5179 if (the_low_target.fetch_register != NULL
5180 && (*the_low_target.fetch_register) (regcache, regno))
5181 return;
5182
5183 use_regsets = linux_register_in_regsets (regs_info, regno);
5184 if (use_regsets)
5185 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5186 regcache);
5187 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5188 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5189 }
5190 }
5191
5192 void
5193 linux_store_registers (struct regcache *regcache, int regno)
5194 {
5195 int use_regsets;
5196 int all = 0;
5197 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5198
5199 if (regno == -1)
5200 {
5201 all = regsets_store_inferior_registers (regs_info->regsets_info,
5202 regcache);
5203 if (regs_info->usrregs != NULL)
5204 usr_store_inferior_registers (regs_info, regcache, regno, all);
5205 }
5206 else
5207 {
5208 use_regsets = linux_register_in_regsets (regs_info, regno);
5209 if (use_regsets)
5210 all = regsets_store_inferior_registers (regs_info->regsets_info,
5211 regcache);
5212 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5213 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5214 }
5215 }
5216
5217
5218 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5219 to debugger memory starting at MYADDR. */
5220
5221 static int
5222 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5223 {
5224 int pid = lwpid_of (current_thread);
5225 register PTRACE_XFER_TYPE *buffer;
5226 register CORE_ADDR addr;
5227 register int count;
5228 char filename[64];
5229 register int i;
5230 int ret;
5231 int fd;
5232
5233 /* Try using /proc. Don't bother for one word. */
5234 if (len >= 3 * sizeof (long))
5235 {
5236 int bytes;
5237
5238 /* We could keep this file open and cache it - possibly one per
5239 thread. That requires some juggling, but is even faster. */
5240 sprintf (filename, "/proc/%d/mem", pid);
5241 fd = open (filename, O_RDONLY | O_LARGEFILE);
5242 if (fd == -1)
5243 goto no_proc;
5244
5245 /* If pread64 is available, use it. It's faster if the kernel
5246 supports it (only one syscall), and it's 64-bit safe even on
5247 32-bit platforms (for instance, SPARC debugging a SPARC64
5248 application). */
5249 #ifdef HAVE_PREAD64
5250 bytes = pread64 (fd, myaddr, len, memaddr);
5251 #else
5252 bytes = -1;
5253 if (lseek (fd, memaddr, SEEK_SET) != -1)
5254 bytes = read (fd, myaddr, len);
5255 #endif
5256
5257 close (fd);
5258 if (bytes == len)
5259 return 0;
5260
5261 /* Some data was read; we'll try to get the rest with ptrace. */
5262 if (bytes > 0)
5263 {
5264 memaddr += bytes;
5265 myaddr += bytes;
5266 len -= bytes;
5267 }
5268 }
5269
5270 no_proc:
5271 /* Round starting address down to longword boundary. */
5272 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5273 /* Round ending address up; get number of longwords that makes. */
5274 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5275 / sizeof (PTRACE_XFER_TYPE));
5276 /* Allocate buffer of that many longwords. */
5277 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5278
5279 /* Read all the longwords */
5280 errno = 0;
5281 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5282 {
5283 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5284 about coercing an 8 byte integer to a 4 byte pointer. */
5285 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5286 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5287 (PTRACE_TYPE_ARG4) 0);
5288 if (errno)
5289 break;
5290 }
5291 ret = errno;
5292
5293 /* Copy appropriate bytes out of the buffer. */
5294 if (i > 0)
5295 {
5296 i *= sizeof (PTRACE_XFER_TYPE);
5297 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5298 memcpy (myaddr,
5299 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5300 i < len ? i : len);
5301 }
5302
5303 return ret;
5304 }
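
/* Illustrative sketch (example only): the /proc fast path above as a
   standalone routine.  Reading /proc/PID/mem only works while we are
   ptrace-attached to the process; pread does the seek and read in one
   call and is 64-bit safe on 32-bit hosts when built with large file
   support.  The helper name is hypothetical. */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
read_tracee_mem (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif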
5305
5306 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5307 memory at MEMADDR. On failure (cannot write to the inferior)
5308 returns the value of errno. Always succeeds if LEN is zero. */
5309
5310 static int
5311 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5312 {
5313 register int i;
5314 /* Round starting address down to longword boundary. */
5315 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5316 /* Round ending address up; get number of longwords that makes. */
5317 register int count
5318 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5319 / sizeof (PTRACE_XFER_TYPE);
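  /* Worked example of the rounding, assuming 8-byte transfer words:
     memaddr = 0x1003 and len = 6 end at 0x1009, so addr rounds down to
     0x1000 and count = ((0x1009 - 0x1000) + 7) / 8 = 2; the write
     touches the two words at 0x1000 and 0x1008. */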
5320
5321 /* Allocate buffer of that many longwords. */
5322 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5323
5324 int pid = lwpid_of (current_thread);
5325
5326 if (len == 0)
5327 {
5328 /* Zero length write always succeeds. */
5329 return 0;
5330 }
5331
5332 if (debug_threads)
5333 {
5334 /* Dump up to four bytes. */
5335 char str[4 * 2 + 1];
5336 char *p = str;
5337 int dump = len < 4 ? len : 4;
5338
5339 for (i = 0; i < dump; i++)
5340 {
5341 sprintf (p, "%02x", myaddr[i]);
5342 p += 2;
5343 }
5344 *p = '\0';
5345
5346 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5347 str, (long) memaddr, pid);
5348 }
5349
5350 /* Fill start and end extra bytes of buffer with existing memory data. */
5351
5352 errno = 0;
5353 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5354 about coercing an 8 byte integer to a 4 byte pointer. */
5355 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5356 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5357 (PTRACE_TYPE_ARG4) 0);
5358 if (errno)
5359 return errno;
5360
5361 if (count > 1)
5362 {
5363 errno = 0;
5364 buffer[count - 1]
5365 = ptrace (PTRACE_PEEKTEXT, pid,
5366 /* Coerce to a uintptr_t first to avoid potential gcc warning
5367 about coercing an 8 byte integer to a 4 byte pointer. */
5368 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5369 * sizeof (PTRACE_XFER_TYPE)),
5370 (PTRACE_TYPE_ARG4) 0);
5371 if (errno)
5372 return errno;
5373 }
5374
5375 /* Copy data to be written over corresponding part of buffer. */
5376
5377 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5378 myaddr, len);
5379
5380 /* Write the entire buffer. */
5381
5382 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5383 {
5384 errno = 0;
5385 ptrace (PTRACE_POKETEXT, pid,
5386 /* Coerce to a uintptr_t first to avoid potential gcc warning
5387 about coercing an 8 byte integer to a 4 byte pointer. */
5388 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5389 (PTRACE_TYPE_ARG4) buffer[i]);
5390 if (errno)
5391 return errno;
5392 }
5393
5394 return 0;
5395 }
5396
5397 static void
5398 linux_look_up_symbols (void)
5399 {
5400 #ifdef USE_THREAD_DB
5401 struct process_info *proc = current_process ();
5402
5403 if (proc->priv->thread_db != NULL)
5404 return;
5405
5406 /* If the kernel supports tracing clones, then we don't need to
5407 use the magic thread event breakpoint to learn about
5408 threads. */
5409 thread_db_init (!linux_supports_traceclone ());
5410 #endif
5411 }
5412
5413 static void
5414 linux_request_interrupt (void)
5415 {
5416 extern unsigned long signal_pid;
5417
5418 /* Send a SIGINT to the process group. This acts just as if the user
5419 had typed a ^C on the controlling terminal. */
5420 kill (-signal_pid, SIGINT);
5421 }
5422
5423 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5424 to debugger memory starting at MYADDR. */
5425
5426 static int
5427 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5428 {
5429 char filename[PATH_MAX];
5430 int fd, n;
5431 int pid = lwpid_of (current_thread);
5432
5433 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5434
5435 fd = open (filename, O_RDONLY);
5436 if (fd < 0)
5437 return -1;
5438
5439 if (offset != (CORE_ADDR) 0
5440 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5441 n = -1;
5442 else
5443 n = read (fd, myaddr, len);
5444
5445 close (fd);
5446
5447 return n;
5448 }
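
/* Illustrative sketch (example only): what a consumer does with the
   auxv bytes transferred above: scan the (type, value) pairs for a
   given tag, e.g. AT_ENTRY.  ElfW picks the 32- or 64-bit auxv_t to
   match the host; a cross tracer must instead match the inferior.
   The helper name is hypothetical. */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <link.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long
auxv_lookup (int pid, unsigned long type)
{
  char path[64];
  ElfW(auxv_t) entry;
  unsigned long value = 0;
  int fd;

  snprintf (path, sizeof path, "/proc/%d/auxv", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return 0;
  while (read (fd, &entry, sizeof entry) == sizeof entry
         && entry.a_type != AT_NULL)
    if (entry.a_type == type)
      {
        value = entry.a_un.a_val;
        break;
      }
  close (fd);
  return value;
}
#endif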
5449
5450 /* These breakpoint- and watchpoint-related wrapper functions simply
5451 pass the call on if the target has registered a corresponding
5452 function. */
5453
5454 static int
5455 linux_supports_z_point_type (char z_type)
5456 {
5457 return (the_low_target.supports_z_point_type != NULL
5458 && the_low_target.supports_z_point_type (z_type));
5459 }
5460
5461 static int
5462 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5463 int size, struct raw_breakpoint *bp)
5464 {
5465 if (type == raw_bkpt_type_sw)
5466 return insert_memory_breakpoint (bp);
5467 else if (the_low_target.insert_point != NULL)
5468 return the_low_target.insert_point (type, addr, size, bp);
5469 else
5470 /* Unsupported (see target.h). */
5471 return 1;
5472 }
5473
5474 static int
5475 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5476 int size, struct raw_breakpoint *bp)
5477 {
5478 if (type == raw_bkpt_type_sw)
5479 return remove_memory_breakpoint (bp);
5480 else if (the_low_target.remove_point != NULL)
5481 return the_low_target.remove_point (type, addr, size, bp);
5482 else
5483 /* Unsupported (see target.h). */
5484 return 1;
5485 }
5486
5487 /* Implement the to_stopped_by_sw_breakpoint target_ops
5488 method. */
5489
5490 static int
5491 linux_stopped_by_sw_breakpoint (void)
5492 {
5493 struct lwp_info *lwp = get_thread_lwp (current_thread);
5494
5495 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5496 }
5497
5498 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5499 method. */
5500
5501 static int
5502 linux_supports_stopped_by_sw_breakpoint (void)
5503 {
5504 return USE_SIGTRAP_SIGINFO;
5505 }
5506
5507 /* Implement the to_stopped_by_hw_breakpoint target_ops
5508 method. */
5509
5510 static int
5511 linux_stopped_by_hw_breakpoint (void)
5512 {
5513 struct lwp_info *lwp = get_thread_lwp (current_thread);
5514
5515 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5516 }
5517
5518 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5519 method. */
5520
5521 static int
5522 linux_supports_stopped_by_hw_breakpoint (void)
5523 {
5524 return USE_SIGTRAP_SIGINFO;
5525 }
5526
5527 /* Implement the supports_conditional_breakpoints target_ops
5528 method. */
5529
5530 static int
5531 linux_supports_conditional_breakpoints (void)
5532 {
5533 /* GDBserver needs to step over the breakpoint if the condition is
5534 false. GDBserver's software single-step is too simple, so disable
5535 conditional breakpoints if the target doesn't support hardware
5536 single-step. */
5537 return can_hardware_single_step ();
5538 }
5539
5540 static int
5541 linux_stopped_by_watchpoint (void)
5542 {
5543 struct lwp_info *lwp = get_thread_lwp (current_thread);
5544
5545 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5546 }
5547
5548 static CORE_ADDR
5549 linux_stopped_data_address (void)
5550 {
5551 struct lwp_info *lwp = get_thread_lwp (current_thread);
5552
5553 return lwp->stopped_data_address;
5554 }
5555
5556 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5557 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5558 && defined(PT_TEXT_END_ADDR)
5559
5560 /* This is only used for targets that define PT_TEXT_ADDR,
5561 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5562 target presumably has other ways of acquiring this information, such
5563 as loadmaps. */
5564
5565 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5566 to tell gdb about. */
5567
5568 static int
5569 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5570 {
5571 unsigned long text, text_end, data;
5572 int pid = lwpid_of (current_thread);
5573
5574 errno = 0;
5575
5576 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5577 (PTRACE_TYPE_ARG4) 0);
5578 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5579 (PTRACE_TYPE_ARG4) 0);
5580 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5581 (PTRACE_TYPE_ARG4) 0);
5582
5583 if (errno == 0)
5584 {
5585 /* Both text and data offsets produced at compile-time (and so
5586 used by gdb) are relative to the beginning of the program,
5587 with the data segment immediately following the text segment.
5588 However, the actual runtime layout in memory may put the data
5589 somewhere else, so when we send gdb a data base-address, we
5590 use the real data base address and subtract the compile-time
5591 data base-address from it (which is just the length of the
5592 text segment). BSS immediately follows data in both
5593 cases. */
5594 *text_p = text;
5595 *data_p = data - (text_end - text);
5596
5597 return 1;
5598 }
5599 return 0;
5600 }
5601 #endif
5602
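/* A generic sketch of the PTRACE_PEEKUSER errno idiom used above
   (hypothetical helper, not gdbserver code; PID must already be a
   stopped tracee). PTRACE_PEEKUSER returns the peeked word, so -1 is
   a valid result; errno must be cleared first and checked afterwards
   to distinguish failure:  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
peek_user_word (pid_t pid, long offset, int *errp)
{
  long value;

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, pid, (void *) offset, (void *) 0);
  *errp = errno;
  return value;
}
#endif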
5603 static int
5604 linux_qxfer_osdata (const char *annex,
5605 unsigned char *readbuf, unsigned const char *writebuf,
5606 CORE_ADDR offset, int len)
5607 {
5608 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5609 }
5610
5611 /* Convert a native/host siginfo object into/from the siginfo in the
5612 layout of the inferior's architecture. */
5613
5614 static void
5615 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5616 {
5617 int done = 0;
5618
5619 if (the_low_target.siginfo_fixup != NULL)
5620 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5621
5622 /* If there was no callback, or the callback didn't do anything,
5623 then just do a straight memcpy. */
5624 if (!done)
5625 {
5626 if (direction == 1)
5627 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5628 else
5629 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5630 }
5631 }
5632
5633 static int
5634 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5635 unsigned const char *writebuf, CORE_ADDR offset, int len)
5636 {
5637 int pid;
5638 siginfo_t siginfo;
5639 char inf_siginfo[sizeof (siginfo_t)];
5640
5641 if (current_thread == NULL)
5642 return -1;
5643
5644 pid = lwpid_of (current_thread);
5645
5646 if (debug_threads)
5647 debug_printf ("%s siginfo for lwp %d.\n",
5648 readbuf != NULL ? "Reading" : "Writing",
5649 pid);
5650
5651 if (offset >= sizeof (siginfo))
5652 return -1;
5653
5654 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5655 return -1;
5656
5657 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5658 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5659 inferior with a 64-bit GDBSERVER should look the same as debugging it
5660 with a 32-bit GDBSERVER, we need to convert it. */
5661 siginfo_fixup (&siginfo, inf_siginfo, 0);
5662
5663 if (offset + len > sizeof (siginfo))
5664 len = sizeof (siginfo) - offset;
5665
5666 if (readbuf != NULL)
5667 memcpy (readbuf, inf_siginfo + offset, len);
5668 else
5669 {
5670 memcpy (inf_siginfo + offset, writebuf, len);
5671
5672 /* Convert back to ptrace layout before flushing it out. */
5673 siginfo_fixup (&siginfo, inf_siginfo, 1);
5674
5675 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5676 return -1;
5677 }
5678
5679 return len;
5680 }
5681
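/* A minimal sketch of the PTRACE_GETSIGINFO call made above
   (hypothetical helper, not gdbserver code; PID must be an
   already-traced, stopped LWP). si_signo and si_code identify why
   the tracee stopped:  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
show_stop_siginfo (pid_t pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, &si) != 0)
    return -1;
  printf ("si_signo=%d si_code=%d\n", si.si_signo, si.si_code);
  return 0;
}
#endif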
5682 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5683 it lets us notice when children change state; and it serves as the
5684 handler for the sigsuspend in my_waitpid. */
5685
5686 static void
5687 sigchld_handler (int signo)
5688 {
5689 int old_errno = errno;
5690
5691 if (debug_threads)
5692 {
5693 do
5694 {
5695 /* fprintf is not async-signal-safe, so call write
5696 directly. */
5697 if (write (2, "sigchld_handler\n",
5698 sizeof ("sigchld_handler\n") - 1) < 0)
5699 break; /* just ignore */
5700 } while (0);
5701 }
5702
5703 if (target_is_async_p ())
5704 async_file_mark (); /* trigger a linux_wait */
5705
5706 errno = old_errno;
5707 }
5708
5709 static int
5710 linux_supports_non_stop (void)
5711 {
5712 return 1;
5713 }
5714
5715 static int
5716 linux_async (int enable)
5717 {
5718 int previous = target_is_async_p ();
5719
5720 if (debug_threads)
5721 debug_printf ("linux_async (%d), previous=%d\n",
5722 enable, previous);
5723
5724 if (previous != enable)
5725 {
5726 sigset_t mask;
5727 sigemptyset (&mask);
5728 sigaddset (&mask, SIGCHLD);
5729
5730 sigprocmask (SIG_BLOCK, &mask, NULL);
5731
5732 if (enable)
5733 {
5734 if (pipe (linux_event_pipe) == -1)
5735 {
5736 linux_event_pipe[0] = -1;
5737 linux_event_pipe[1] = -1;
5738 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5739
5740 warning ("creating event pipe failed.");
5741 return previous;
5742 }
5743
5744 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5745 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5746
5747 /* Register the event loop handler. */
5748 add_file_handler (linux_event_pipe[0],
5749 handle_target_event, NULL);
5750
5751 /* Always trigger a linux_wait. */
5752 async_file_mark ();
5753 }
5754 else
5755 {
5756 delete_file_handler (linux_event_pipe[0]);
5757
5758 close (linux_event_pipe[0]);
5759 close (linux_event_pipe[1]);
5760 linux_event_pipe[0] = -1;
5761 linux_event_pipe[1] = -1;
5762 }
5763
5764 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5765 }
5766
5767 return previous;
5768 }
5769
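/* The event pipe above is an instance of the classic self-pipe trick:
   the signal handler performs only an async-signal-safe write, and the
   event loop wakes up because the pipe's read end became readable. A
   minimal self-contained sketch of the pattern (hypothetical demo
   names, not gdbserver code):  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>

static int demo_pipe[2];

/* Only async-signal-safe calls (here, write) belong in the handler;
   the byte left in the pipe wakes up select in the main loop.  */
static void
demo_sigchld (int signo)
{
  int old_errno = errno;
  (void) write (demo_pipe[1], "+", 1);
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  fd_set rfds;
  char buf[16];

  if (pipe (demo_pipe) != 0)
    return 1;
  fcntl (demo_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = demo_sigchld;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  FD_ZERO (&rfds);
  FD_SET (demo_pipe[0], &rfds);
  if (select (demo_pipe[0] + 1, &rfds, NULL, NULL, NULL) > 0)
    while (read (demo_pipe[0], buf, sizeof buf) > 0)
      continue;		/* Drain the pipe, then reap with waitpid.  */
  return 0;
}
#endif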
5770 static int
5771 linux_start_non_stop (int nonstop)
5772 {
5773 /* Register or unregister from event-loop accordingly. */
5774 linux_async (nonstop);
5775
5776 if (target_is_async_p () != (nonstop != 0))
5777 return -1;
5778
5779 return 0;
5780 }
5781
5782 static int
5783 linux_supports_multi_process (void)
5784 {
5785 return 1;
5786 }
5787
5788 /* Check if fork events are supported. */
5789
5790 static int
5791 linux_supports_fork_events (void)
5792 {
5793 return linux_supports_tracefork ();
5794 }
5795
5796 /* Check if vfork events are supported. */
5797
5798 static int
5799 linux_supports_vfork_events (void)
5800 {
5801 return linux_supports_tracefork ();
5802 }
5803
5804 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5805 options for the specified lwp. */
5806
5807 static int
5808 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5809 void *args)
5810 {
5811 struct thread_info *thread = (struct thread_info *) entry;
5812 struct lwp_info *lwp = get_thread_lwp (thread);
5813
5814 if (!lwp->stopped)
5815 {
5816 /* Stop the lwp so we can modify its ptrace options. */
5817 lwp->must_set_ptrace_flags = 1;
5818 linux_stop_lwp (lwp);
5819 }
5820 else
5821 {
5822 /* Already stopped; go ahead and set the ptrace options. */
5823 struct process_info *proc = find_process_pid (pid_of (thread));
5824 int options = linux_low_ptrace_options (proc->attached);
5825
5826 linux_enable_event_reporting (lwpid_of (thread), options);
5827 lwp->must_set_ptrace_flags = 0;
5828 }
5829
5830 return 0;
5831 }
5832
5833 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5834 ptrace flags for all inferiors. This is in case the new GDB connection
5835 doesn't support the same set of events that the previous one did. */
5836
5837 static void
5838 linux_handle_new_gdb_connection (void)
5839 {
5840 pid_t pid;
5841
5842 /* Request that all the lwps reset their ptrace options. */
5843 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5844 }
5845
5846 static int
5847 linux_supports_disable_randomization (void)
5848 {
5849 #ifdef HAVE_PERSONALITY
5850 return 1;
5851 #else
5852 return 0;
5853 #endif
5854 }
5855
5856 static int
5857 linux_supports_agent (void)
5858 {
5859 return 1;
5860 }
5861
5862 static int
5863 linux_supports_range_stepping (void)
5864 {
5865 if (*the_low_target.supports_range_stepping == NULL)
5866 return 0;
5867
5868 return (*the_low_target.supports_range_stepping) ();
5869 }
5870
5871 /* Enumerate spufs IDs for process PID. */
5872 static int
5873 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5874 {
5875 int pos = 0;
5876 int written = 0;
5877 char path[128];
5878 DIR *dir;
5879 struct dirent *entry;
5880
5881 sprintf (path, "/proc/%ld/fd", pid);
5882 dir = opendir (path);
5883 if (!dir)
5884 return -1;
5885
5886 rewinddir (dir);
5887 while ((entry = readdir (dir)) != NULL)
5888 {
5889 struct stat st;
5890 struct statfs stfs;
5891 int fd;
5892
5893 fd = atoi (entry->d_name);
5894 if (!fd)
5895 continue;
5896
5897 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5898 if (stat (path, &st) != 0)
5899 continue;
5900 if (!S_ISDIR (st.st_mode))
5901 continue;
5902
5903 if (statfs (path, &stfs) != 0)
5904 continue;
5905 if (stfs.f_type != SPUFS_MAGIC)
5906 continue;
5907
5908 if (pos >= offset && pos + 4 <= offset + len)
5909 {
5910 *(unsigned int *)(buf + pos - offset) = fd;
5911 written += 4;
5912 }
5913 pos += 4;
5914 }
5915
5916 closedir (dir);
5917 return written;
5918 }
5919
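/* A standalone sketch of the same /proc/PID/fd walk (hypothetical
   helper, not gdbserver code): count the open descriptors of PID whose
   target lives on a filesystem with the given statfs f_type magic,
   e.g. SPUFS_MAGIC, minus the offset/len bookkeeping done above:  */
#if 0
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/vfs.h>

static int
count_fds_on_fs (long pid, long magic)
{
  char path[128];
  DIR *dir;
  struct dirent *entry;
  int count = 0;

  snprintf (path, sizeof path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (dir == NULL)
    return -1;

  while ((entry = readdir (dir)) != NULL)
    {
      struct statfs stfs;
      int fd = atoi (entry->d_name);

      if (fd == 0)
	continue;		/* Skips "." and ".." (and fd 0).  */

      snprintf (path, sizeof path, "/proc/%ld/fd/%d", pid, fd);
      if (statfs (path, &stfs) == 0 && stfs.f_type == magic)
	count++;
    }

  closedir (dir);
  return count;
}
#endif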
5920 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5921 object type, using the /proc file system. */
5922 static int
5923 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5924 unsigned const char *writebuf,
5925 CORE_ADDR offset, int len)
5926 {
5927 long pid = lwpid_of (current_thread);
5928 char buf[128];
5929 int fd = 0;
5930 int ret = 0;
5931
5932 if (!writebuf && !readbuf)
5933 return -1;
5934
5935 if (!*annex)
5936 {
5937 if (!readbuf)
5938 return -1;
5939 else
5940 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5941 }
5942
5943 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5944 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5945 if (fd <= 0)
5946 return -1;
5947
5948 if (offset != 0
5949 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5950 {
5951 close (fd);
5952 return 0;
5953 }
5954
5955 if (writebuf)
5956 ret = write (fd, writebuf, (size_t) len);
5957 else
5958 ret = read (fd, readbuf, (size_t) len);
5959
5960 close (fd);
5961 return ret;
5962 }
5963
5964 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5965 struct target_loadseg
5966 {
5967 /* Core address to which the segment is mapped. */
5968 Elf32_Addr addr;
5969 /* VMA recorded in the program header. */
5970 Elf32_Addr p_vaddr;
5971 /* Size of this segment in memory. */
5972 Elf32_Word p_memsz;
5973 };
5974
5975 # if defined PT_GETDSBT
5976 struct target_loadmap
5977 {
5978 /* Protocol version number, must be zero. */
5979 Elf32_Word version;
5980 /* Pointer to the DSBT table, its size, and the DSBT index. */
5981 unsigned *dsbt_table;
5982 unsigned dsbt_size, dsbt_index;
5983 /* Number of segments in this map. */
5984 Elf32_Word nsegs;
5985 /* The actual memory map. */
5986 struct target_loadseg segs[/*nsegs*/];
5987 };
5988 # define LINUX_LOADMAP PT_GETDSBT
5989 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5990 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5991 # else
5992 struct target_loadmap
5993 {
5994 /* Protocol version number, must be zero. */
5995 Elf32_Half version;
5996 /* Number of segments in this map. */
5997 Elf32_Half nsegs;
5998 /* The actual memory map. */
5999 struct target_loadseg segs[/*nsegs*/];
6000 };
6001 # define LINUX_LOADMAP PTRACE_GETFDPIC
6002 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6003 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6004 # endif
6005
6006 static int
6007 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6008 unsigned char *myaddr, unsigned int len)
6009 {
6010 int pid = lwpid_of (current_thread);
6011 int addr = -1;
6012 struct target_loadmap *data = NULL;
6013 unsigned int actual_length, copy_length;
6014
6015 if (strcmp (annex, "exec") == 0)
6016 addr = (int) LINUX_LOADMAP_EXEC;
6017 else if (strcmp (annex, "interp") == 0)
6018 addr = (int) LINUX_LOADMAP_INTERP;
6019 else
6020 return -1;
6021
6022 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6023 return -1;
6024
6025 if (data == NULL)
6026 return -1;
6027
6028 actual_length = sizeof (struct target_loadmap)
6029 + sizeof (struct target_loadseg) * data->nsegs;
6030
6031 if (offset < 0 || offset > actual_length)
6032 return -1;
6033
6034 copy_length = actual_length - offset < len ? actual_length - offset : len;
6035 memcpy (myaddr, (char *) data + offset, copy_length);
6036 return copy_length;
6037 }
6038 #else
6039 # define linux_read_loadmap NULL
6040 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6041
6042 static void
6043 linux_process_qsupported (const char *query)
6044 {
6045 if (the_low_target.process_qsupported != NULL)
6046 the_low_target.process_qsupported (query);
6047 }
6048
6049 static int
6050 linux_supports_tracepoints (void)
6051 {
6052 if (*the_low_target.supports_tracepoints == NULL)
6053 return 0;
6054
6055 return (*the_low_target.supports_tracepoints) ();
6056 }
6057
6058 static CORE_ADDR
6059 linux_read_pc (struct regcache *regcache)
6060 {
6061 if (the_low_target.get_pc == NULL)
6062 return 0;
6063
6064 return (*the_low_target.get_pc) (regcache);
6065 }
6066
6067 static void
6068 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6069 {
6070 gdb_assert (the_low_target.set_pc != NULL);
6071
6072 (*the_low_target.set_pc) (regcache, pc);
6073 }
6074
6075 static int
6076 linux_thread_stopped (struct thread_info *thread)
6077 {
6078 return get_thread_lwp (thread)->stopped;
6079 }
6080
6081 /* This exposes stop-all-threads functionality to other modules. */
6082
6083 static void
6084 linux_pause_all (int freeze)
6085 {
6086 stop_all_lwps (freeze, NULL);
6087 }
6088
6089 /* This exposes unstop-all-threads functionality to other gdbserver
6090 modules. */
6091
6092 static void
6093 linux_unpause_all (int unfreeze)
6094 {
6095 unstop_all_lwps (unfreeze, NULL);
6096 }
6097
6098 static int
6099 linux_prepare_to_access_memory (void)
6100 {
6101 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6102 running LWP. */
6103 if (non_stop)
6104 linux_pause_all (1);
6105 return 0;
6106 }
6107
6108 static void
6109 linux_done_accessing_memory (void)
6110 {
6111 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6112 running LWP. */
6113 if (non_stop)
6114 linux_unpause_all (1);
6115 }
6116
6117 static int
6118 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6119 CORE_ADDR collector,
6120 CORE_ADDR lockaddr,
6121 ULONGEST orig_size,
6122 CORE_ADDR *jump_entry,
6123 CORE_ADDR *trampoline,
6124 ULONGEST *trampoline_size,
6125 unsigned char *jjump_pad_insn,
6126 ULONGEST *jjump_pad_insn_size,
6127 CORE_ADDR *adjusted_insn_addr,
6128 CORE_ADDR *adjusted_insn_addr_end,
6129 char *err)
6130 {
6131 return (*the_low_target.install_fast_tracepoint_jump_pad)
6132 (tpoint, tpaddr, collector, lockaddr, orig_size,
6133 jump_entry, trampoline, trampoline_size,
6134 jjump_pad_insn, jjump_pad_insn_size,
6135 adjusted_insn_addr, adjusted_insn_addr_end,
6136 err);
6137 }
6138
6139 static struct emit_ops *
6140 linux_emit_ops (void)
6141 {
6142 if (the_low_target.emit_ops != NULL)
6143 return (*the_low_target.emit_ops) ();
6144 else
6145 return NULL;
6146 }
6147
6148 static int
6149 linux_get_min_fast_tracepoint_insn_len (void)
6150 {
6151 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6152 }
6153
6154 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6155
6156 static int
6157 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6158 CORE_ADDR *phdr_memaddr, int *num_phdr)
6159 {
6160 char filename[PATH_MAX];
6161 int fd;
6162 const int auxv_size = is_elf64
6163 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6164 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6165
6166 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6167
6168 fd = open (filename, O_RDONLY);
6169 if (fd < 0)
6170 return 1;
6171
6172 *phdr_memaddr = 0;
6173 *num_phdr = 0;
6174 while (read (fd, buf, auxv_size) == auxv_size
6175 && (*phdr_memaddr == 0 || *num_phdr == 0))
6176 {
6177 if (is_elf64)
6178 {
6179 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6180
6181 switch (aux->a_type)
6182 {
6183 case AT_PHDR:
6184 *phdr_memaddr = aux->a_un.a_val;
6185 break;
6186 case AT_PHNUM:
6187 *num_phdr = aux->a_un.a_val;
6188 break;
6189 }
6190 }
6191 else
6192 {
6193 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6194
6195 switch (aux->a_type)
6196 {
6197 case AT_PHDR:
6198 *phdr_memaddr = aux->a_un.a_val;
6199 break;
6200 case AT_PHNUM:
6201 *num_phdr = aux->a_un.a_val;
6202 break;
6203 }
6204 }
6205 }
6206
6207 close (fd);
6208
6209 if (*phdr_memaddr == 0 || *num_phdr == 0)
6210 {
6211 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6212 "phdr_memaddr = %ld, phdr_num = %d",
6213 (long) *phdr_memaddr, *num_phdr);
6214 return 2;
6215 }
6216
6217 return 0;
6218 }
6219
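/* For the debugger's own process, the same two auxv entries can be
   fetched without parsing the file; a minimal sketch using glibc's
   getauxval (assumes glibc >= 2.16, not gdbserver code):  */
#if 0
#include <stdio.h>
#include <sys/auxv.h>

int
main (void)
{
  /* AT_PHDR is the runtime address of our own program headers;
     AT_PHNUM is how many of them there are.  */
  printf ("phdr = %#lx, phnum = %lu\n",
	  getauxval (AT_PHDR), getauxval (AT_PHNUM));
  return 0;
}
#endif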
6220 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6221
6222 static CORE_ADDR
6223 get_dynamic (const int pid, const int is_elf64)
6224 {
6225 CORE_ADDR phdr_memaddr, relocation;
6226 int num_phdr, i;
6227 unsigned char *phdr_buf;
6228 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6229
6230 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6231 return 0;
6232
6233 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6234 phdr_buf = alloca (num_phdr * phdr_size);
6235
6236 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6237 return 0;
6238
6239 /* Compute relocation: it is expected to be 0 for "regular" executables,
6240 non-zero for PIE ones. */
6241 relocation = -1;
6242 for (i = 0; relocation == -1 && i < num_phdr; i++)
6243 if (is_elf64)
6244 {
6245 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6246
6247 if (p->p_type == PT_PHDR)
6248 relocation = phdr_memaddr - p->p_vaddr;
6249 }
6250 else
6251 {
6252 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6253
6254 if (p->p_type == PT_PHDR)
6255 relocation = phdr_memaddr - p->p_vaddr;
6256 }
6257
6258 if (relocation == -1)
6259 {
6260 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6261 all real-world executables, including PIE executables, always have
6262 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6263 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6264 provides DT_DEBUG anyway (fpc binaries are statically linked).
6265
6266 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6267
6268 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6269
6270 return 0;
6271 }
6272
6273 for (i = 0; i < num_phdr; i++)
6274 {
6275 if (is_elf64)
6276 {
6277 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6278
6279 if (p->p_type == PT_DYNAMIC)
6280 return p->p_vaddr + relocation;
6281 }
6282 else
6283 {
6284 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6285
6286 if (p->p_type == PT_DYNAMIC)
6287 return p->p_vaddr + relocation;
6288 }
6289 }
6290
6291 return 0;
6292 }
6293
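/* The relocation (load bias) computed above can be observed for one's
   own process with dl_iterate_phdr; a minimal glibc-only sketch
   (hypothetical names, not gdbserver code). The first object reported
   is the main executable, and dlpi_addr is exactly
   phdr_memaddr - p_vaddr of PT_PHDR (0 for a non-PIE executable):  */
#if 0
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int
print_bias (struct dl_phdr_info *info, size_t size, void *data)
{
  printf ("load bias = %#lx\n", (unsigned long) info->dlpi_addr);
  return 1;			/* Stop after the main executable.  */
}

int
main (void)
{
  dl_iterate_phdr (print_bias, NULL);
  return 0;
}
#endif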
6294 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6295 can be 0 if the inferior does not yet have the library list initialized.
6296 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6297 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6298
6299 static CORE_ADDR
6300 get_r_debug (const int pid, const int is_elf64)
6301 {
6302 CORE_ADDR dynamic_memaddr;
6303 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6304 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6305 CORE_ADDR map = -1;
6306
6307 dynamic_memaddr = get_dynamic (pid, is_elf64);
6308 if (dynamic_memaddr == 0)
6309 return map;
6310
6311 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6312 {
6313 if (is_elf64)
6314 {
6315 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6316 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6317 union
6318 {
6319 Elf64_Xword map;
6320 unsigned char buf[sizeof (Elf64_Xword)];
6321 }
6322 rld_map;
6323 #endif
6324 #ifdef DT_MIPS_RLD_MAP
6325 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6326 {
6327 if (linux_read_memory (dyn->d_un.d_val,
6328 rld_map.buf, sizeof (rld_map.buf)) == 0)
6329 return rld_map.map;
6330 else
6331 break;
6332 }
6333 #endif /* DT_MIPS_RLD_MAP */
6334 #ifdef DT_MIPS_RLD_MAP_REL
6335 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6336 {
6337 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6338 rld_map.buf, sizeof (rld_map.buf)) == 0)
6339 return rld_map.map;
6340 else
6341 break;
6342 }
6343 #endif /* DT_MIPS_RLD_MAP_REL */
6344
6345 if (dyn->d_tag == DT_DEBUG && map == -1)
6346 map = dyn->d_un.d_val;
6347
6348 if (dyn->d_tag == DT_NULL)
6349 break;
6350 }
6351 else
6352 {
6353 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6354 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6355 union
6356 {
6357 Elf32_Word map;
6358 unsigned char buf[sizeof (Elf32_Word)];
6359 }
6360 rld_map;
6361 #endif
6362 #ifdef DT_MIPS_RLD_MAP
6363 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6364 {
6365 if (linux_read_memory (dyn->d_un.d_val,
6366 rld_map.buf, sizeof (rld_map.buf)) == 0)
6367 return rld_map.map;
6368 else
6369 break;
6370 }
6371 #endif /* DT_MIPS_RLD_MAP */
6372 #ifdef DT_MIPS_RLD_MAP_REL
6373 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6374 {
6375 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6376 rld_map.buf, sizeof (rld_map.buf)) == 0)
6377 return rld_map.map;
6378 else
6379 break;
6380 }
6381 #endif /* DT_MIPS_RLD_MAP_REL */
6382
6383 if (dyn->d_tag == DT_DEBUG && map == -1)
6384 map = dyn->d_un.d_val;
6385
6386 if (dyn->d_tag == DT_NULL)
6387 break;
6388 }
6389
6390 dynamic_memaddr += dyn_size;
6391 }
6392
6393 return map;
6394 }
6395
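/* A sketch of the same dynamic-section walk applied to one's own
   process (assumes a dynamically linked glibc executable; not
   gdbserver code): scan _DYNAMIC until DT_DEBUG, the slot the dynamic
   loader fills in with &_r_debug, or DT_NULL:  */
#if 0
#include <elf.h>
#include <link.h>
#include <stdio.h>

extern ElfW(Dyn) _DYNAMIC[];

int
main (void)
{
  const ElfW(Dyn) *dyn;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      {
	printf ("_r_debug at %#lx\n", (unsigned long) dyn->d_un.d_val);
	break;
      }
  return 0;
}
#endif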
6396 /* Read one pointer from MEMADDR in the inferior. */
6397
6398 static int
6399 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6400 {
6401 int ret;
6402
6403 /* Go through a union so this works on either big or little endian
6404 hosts, when the inferior's pointer size is smaller than the size
6405 of CORE_ADDR. It is assumed the inferior's endianness is the
6406 same as the superior's. */
6407 union
6408 {
6409 CORE_ADDR core_addr;
6410 unsigned int ui;
6411 unsigned char uc;
6412 } addr;
6413
6414 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6415 if (ret == 0)
6416 {
6417 if (ptr_size == sizeof (CORE_ADDR))
6418 *ptr = addr.core_addr;
6419 else if (ptr_size == sizeof (unsigned int))
6420 *ptr = addr.ui;
6421 else
6422 gdb_assert_not_reached ("unhandled pointer size");
6423 }
6424 return ret;
6425 }
6426
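/* A compact illustration of the union trick above (hypothetical
   helper, host-only): the low PTR_SIZE bytes are copied to the start
   of the union and read back through the member of matching size,
   which gives the right value on both big- and little-endian hosts
   provided inferior and host share endianness:  */
#if 0
#include <string.h>

static unsigned long long
widen_ptr (const unsigned char *bytes, int ptr_size)
{
  union
  {
    unsigned long long core_addr;	/* Stands in for CORE_ADDR.  */
    unsigned int ui;
    unsigned char uc;
  } addr;

  memset (&addr, 0, sizeof addr);
  memcpy (&addr.uc, bytes, ptr_size);
  return ptr_size == sizeof (unsigned int) ? addr.ui : addr.core_addr;
}
#endif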
6427 struct link_map_offsets
6428 {
6429 /* Offset and size of r_debug.r_version. */
6430 int r_version_offset;
6431
6432 /* Offset and size of r_debug.r_map. */
6433 int r_map_offset;
6434
6435 /* Offset to l_addr field in struct link_map. */
6436 int l_addr_offset;
6437
6438 /* Offset to l_name field in struct link_map. */
6439 int l_name_offset;
6440
6441 /* Offset to l_ld field in struct link_map. */
6442 int l_ld_offset;
6443
6444 /* Offset to l_next field in struct link_map. */
6445 int l_next_offset;
6446
6447 /* Offset to l_prev field in struct link_map. */
6448 int l_prev_offset;
6449 };
6450
6451 /* Construct qXfer:libraries-svr4:read reply. */
6452
6453 static int
6454 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6455 unsigned const char *writebuf,
6456 CORE_ADDR offset, int len)
6457 {
6458 char *document;
6459 unsigned document_len;
6460 struct process_info_private *const priv = current_process ()->priv;
6461 char filename[PATH_MAX];
6462 int pid, is_elf64;
6463
6464 static const struct link_map_offsets lmo_32bit_offsets =
6465 {
6466 0, /* r_version offset. */
6467 4, /* r_debug.r_map offset. */
6468 0, /* l_addr offset in link_map. */
6469 4, /* l_name offset in link_map. */
6470 8, /* l_ld offset in link_map. */
6471 12, /* l_next offset in link_map. */
6472 16 /* l_prev offset in link_map. */
6473 };
6474
6475 static const struct link_map_offsets lmo_64bit_offsets =
6476 {
6477 0, /* r_version offset. */
6478 8, /* r_debug.r_map offset. */
6479 0, /* l_addr offset in link_map. */
6480 8, /* l_name offset in link_map. */
6481 16, /* l_ld offset in link_map. */
6482 24, /* l_next offset in link_map. */
6483 32 /* l_prev offset in link_map. */
6484 };
6485 const struct link_map_offsets *lmo;
6486 unsigned int machine;
6487 int ptr_size;
6488 CORE_ADDR lm_addr = 0, lm_prev = 0;
6489 int allocated = 1024;
6490 char *p;
6491 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6492 int header_done = 0;
6493
6494 if (writebuf != NULL)
6495 return -2;
6496 if (readbuf == NULL)
6497 return -1;
6498
6499 pid = lwpid_of (current_thread);
6500 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6501 is_elf64 = elf_64_file_p (filename, &machine);
6502 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6503 ptr_size = is_elf64 ? 8 : 4;
6504
6505 while (annex[0] != '\0')
6506 {
6507 const char *sep;
6508 CORE_ADDR *addrp;
6509 int len;
6510
6511 sep = strchr (annex, '=');
6512 if (sep == NULL)
6513 break;
6514
6515 len = sep - annex;
6516 if (len == 5 && startswith (annex, "start"))
6517 addrp = &lm_addr;
6518 else if (len == 4 && startswith (annex, "prev"))
6519 addrp = &lm_prev;
6520 else
6521 {
6522 annex = strchr (sep, ';');
6523 if (annex == NULL)
6524 break;
6525 annex++;
6526 continue;
6527 }
6528
6529 annex = decode_address_to_semicolon (addrp, sep + 1);
6530 }
6531
6532 if (lm_addr == 0)
6533 {
6534 int r_version = 0;
6535
6536 if (priv->r_debug == 0)
6537 priv->r_debug = get_r_debug (pid, is_elf64);
6538
6539 /* We failed to find DT_DEBUG. That situation will not change
6540 for this inferior, so do not retry it. Report it to GDB as
6541 E01; see GDB's solib-svr4.c for the reasons. */
6542 if (priv->r_debug == (CORE_ADDR) -1)
6543 return -1;
6544
6545 if (priv->r_debug != 0)
6546 {
6547 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6548 (unsigned char *) &r_version,
6549 sizeof (r_version)) != 0
6550 || r_version != 1)
6551 {
6552 warning ("unexpected r_debug version %d", r_version);
6553 }
6554 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6555 &lm_addr, ptr_size) != 0)
6556 {
6557 warning ("unable to read r_map from 0x%lx",
6558 (long) priv->r_debug + lmo->r_map_offset);
6559 }
6560 }
6561 }
6562
6563 document = xmalloc (allocated);
6564 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6565 p = document + strlen (document);
6566
6567 while (lm_addr
6568 && read_one_ptr (lm_addr + lmo->l_name_offset,
6569 &l_name, ptr_size) == 0
6570 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6571 &l_addr, ptr_size) == 0
6572 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6573 &l_ld, ptr_size) == 0
6574 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6575 &l_prev, ptr_size) == 0
6576 && read_one_ptr (lm_addr + lmo->l_next_offset,
6577 &l_next, ptr_size) == 0)
6578 {
6579 unsigned char libname[PATH_MAX];
6580
6581 if (lm_prev != l_prev)
6582 {
6583 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6584 (long) lm_prev, (long) l_prev);
6585 break;
6586 }
6587
6588 /* Ignore the first entry even if it has a valid name, as the first
6589 entry corresponds to the main executable. The first entry should not
6590 be skipped if the dynamic loader was loaded late by a static executable
6591 (see the solib-svr4.c parameter ignore_first). But in that case the
6592 main executable does not have PT_DYNAMIC present, and this function
6593 has already exited above due to a failed get_r_debug. */
6594 if (lm_prev == 0)
6595 {
6596 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6597 p = p + strlen (p);
6598 }
6599 else
6600 {
6601 /* Not checking for error because reading may stop before
6602 we've got PATH_MAX worth of characters. */
6603 libname[0] = '\0';
6604 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6605 libname[sizeof (libname) - 1] = '\0';
6606 if (libname[0] != '\0')
6607 {
6608 /* 6x the size for xml_escape_text below. */
6609 size_t len = 6 * strlen ((char *) libname);
6610 char *name;
6611
6612 if (!header_done)
6613 {
6614 /* Terminate `<library-list-svr4'. */
6615 *p++ = '>';
6616 header_done = 1;
6617 }
6618
6619 while (allocated < p - document + len + 200)
6620 {
6621 /* Expand to guarantee sufficient storage. */
6622 uintptr_t document_len = p - document;
6623
6624 document = xrealloc (document, 2 * allocated);
6625 allocated *= 2;
6626 p = document + document_len;
6627 }
6628
6629 name = xml_escape_text ((char *) libname);
6630 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6631 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6632 name, (unsigned long) lm_addr,
6633 (unsigned long) l_addr, (unsigned long) l_ld);
6634 free (name);
6635 }
6636 }
6637
6638 lm_prev = lm_addr;
6639 lm_addr = l_next;
6640 }
6641
6642 if (!header_done)
6643 {
6644 /* Empty list; terminate `<library-list-svr4'. */
6645 strcpy (p, "/>");
6646 }
6647 else
6648 strcpy (p, "</library-list-svr4>");
6649
6650 document_len = strlen (document);
6651 if (offset < document_len)
6652 document_len -= offset;
6653 else
6654 document_len = 0;
6655 if (len > document_len)
6656 len = document_len;
6657
6658 memcpy (readbuf, document + offset, len);
6659 xfree (document);
6660
6661 return len;
6662 }
6663
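/* For reference, a reply document built by the loop above has this
   shape (addresses purely illustrative):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fbd000"
                l_addr="0x7ffff7dc4000" l_ld="0x7ffff7fb8ba0"/>
     </library-list-svr4>

   An empty list is just `<library-list-svr4 version="1.0"/>'.  */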
6664 #ifdef HAVE_LINUX_BTRACE
6665
6666 /* See to_enable_btrace target method. */
6667
6668 static struct btrace_target_info *
6669 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6670 {
6671 struct btrace_target_info *tinfo;
6672
6673 tinfo = linux_enable_btrace (ptid, conf);
6674
6675 if (tinfo != NULL && tinfo->ptr_bits == 0)
6676 {
6677 struct thread_info *thread = find_thread_ptid (ptid);
6678 struct regcache *regcache = get_thread_regcache (thread, 0);
6679
6680 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6681 }
6682
6683 return tinfo;
6684 }
6685
6686 /* See to_disable_btrace target method. */
6687
6688 static int
6689 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6690 {
6691 enum btrace_error err;
6692
6693 err = linux_disable_btrace (tinfo);
6694 return (err == BTRACE_ERR_NONE ? 0 : -1);
6695 }
6696
6697 /* Encode an Intel(R) Processor Trace configuration. */
6698
6699 static void
6700 linux_low_encode_pt_config (struct buffer *buffer,
6701 const struct btrace_data_pt_config *config)
6702 {
6703 buffer_grow_str (buffer, "<pt-config>\n");
6704
6705 switch (config->cpu.vendor)
6706 {
6707 case CV_INTEL:
6708 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6709 "model=\"%u\" stepping=\"%u\"/>\n",
6710 config->cpu.family, config->cpu.model,
6711 config->cpu.stepping);
6712 break;
6713
6714 default:
6715 break;
6716 }
6717
6718 buffer_grow_str (buffer, "</pt-config>\n");
6719 }
6720
6721 /* Encode a raw buffer. */
6722
6723 static void
6724 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6725 unsigned int size)
6726 {
6727 if (size == 0)
6728 return;
6729
6730 /* We use hex encoding - see common/rsp-low.h. */
6731 buffer_grow_str (buffer, "<raw>\n");
6732
6733 while (size-- > 0)
6734 {
6735 char elem[2];
6736
6737 elem[0] = tohex ((*data >> 4) & 0xf);
6738 elem[1] = tohex (*data++ & 0xf);
6739
6740 buffer_grow (buffer, elem, 2);
6741 }
6742
6743 buffer_grow_str (buffer, "</raw>\n");
6744 }
6745
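/* The loop above emits two hex digits per byte. A standalone
   equivalent of the encoding (hypothetical helper, using a lookup
   table in place of rsp-low.h's tohex):  */
#if 0
static void
hex_encode (const unsigned char *data, unsigned int size, char *out)
{
  static const char hex[] = "0123456789abcdef";

  /* "ab" (0x61 0x62) encodes to "6162".  */
  while (size-- > 0)
    {
      *out++ = hex[(*data >> 4) & 0xf];
      *out++ = hex[*data++ & 0xf];
    }
  *out = '\0';
}
#endif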
6746 /* See to_read_btrace target method. */
6747
6748 static int
6749 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6750 int type)
6751 {
6752 struct btrace_data btrace;
6753 struct btrace_block *block;
6754 enum btrace_error err;
6755 int i;
6756
6757 btrace_data_init (&btrace);
6758
6759 err = linux_read_btrace (&btrace, tinfo, type);
6760 if (err != BTRACE_ERR_NONE)
6761 {
6762 if (err == BTRACE_ERR_OVERFLOW)
6763 buffer_grow_str0 (buffer, "E.Overflow.");
6764 else
6765 buffer_grow_str0 (buffer, "E.Generic Error.");
6766
6767 goto err;
6768 }
6769
6770 switch (btrace.format)
6771 {
6772 case BTRACE_FORMAT_NONE:
6773 buffer_grow_str0 (buffer, "E.No Trace.");
6774 goto err;
6775
6776 case BTRACE_FORMAT_BTS:
6777 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6778 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6779
6780 for (i = 0;
6781 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6782 i++)
6783 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6784 paddress (block->begin), paddress (block->end));
6785
6786 buffer_grow_str0 (buffer, "</btrace>\n");
6787 break;
6788
6789 case BTRACE_FORMAT_PT:
6790 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6791 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6792 buffer_grow_str (buffer, "<pt>\n");
6793
6794 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6795
6796 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6797 btrace.variant.pt.size);
6798
6799 buffer_grow_str (buffer, "</pt>\n");
6800 buffer_grow_str0 (buffer, "</btrace>\n");
6801 break;
6802
6803 default:
6804 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6805 goto err;
6806 }
6807
6808 btrace_data_fini (&btrace);
6809 return 0;
6810
6811 err:
6812 btrace_data_fini (&btrace);
6813 return -1;
6814 }
6815
6816 /* See to_btrace_conf target method. */
6817
6818 static int
6819 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6820 struct buffer *buffer)
6821 {
6822 const struct btrace_config *conf;
6823
6824 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6825 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6826
6827 conf = linux_btrace_conf (tinfo);
6828 if (conf != NULL)
6829 {
6830 switch (conf->format)
6831 {
6832 case BTRACE_FORMAT_NONE:
6833 break;
6834
6835 case BTRACE_FORMAT_BTS:
6836 buffer_xml_printf (buffer, "<bts");
6837 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6838 buffer_xml_printf (buffer, " />\n");
6839 break;
6840
6841 case BTRACE_FORMAT_PT:
6842 buffer_xml_printf (buffer, "<pt");
6843 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6844 buffer_xml_printf (buffer, "/>\n");
6845 break;
6846 }
6847 }
6848
6849 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6850 return 0;
6851 }
6852 #endif /* HAVE_LINUX_BTRACE */
6853
6854 /* See nat/linux-nat.h. */
6855
6856 ptid_t
6857 current_lwp_ptid (void)
6858 {
6859 return ptid_of (current_thread);
6860 }
6861
6862 static struct target_ops linux_target_ops = {
6863 linux_create_inferior,
6864 linux_arch_setup,
6865 linux_attach,
6866 linux_kill,
6867 linux_detach,
6868 linux_mourn,
6869 linux_join,
6870 linux_thread_alive,
6871 linux_resume,
6872 linux_wait,
6873 linux_fetch_registers,
6874 linux_store_registers,
6875 linux_prepare_to_access_memory,
6876 linux_done_accessing_memory,
6877 linux_read_memory,
6878 linux_write_memory,
6879 linux_look_up_symbols,
6880 linux_request_interrupt,
6881 linux_read_auxv,
6882 linux_supports_z_point_type,
6883 linux_insert_point,
6884 linux_remove_point,
6885 linux_stopped_by_sw_breakpoint,
6886 linux_supports_stopped_by_sw_breakpoint,
6887 linux_stopped_by_hw_breakpoint,
6888 linux_supports_stopped_by_hw_breakpoint,
6889 linux_supports_conditional_breakpoints,
6890 linux_stopped_by_watchpoint,
6891 linux_stopped_data_address,
6892 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6893 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6894 && defined(PT_TEXT_END_ADDR)
6895 linux_read_offsets,
6896 #else
6897 NULL,
6898 #endif
6899 #ifdef USE_THREAD_DB
6900 thread_db_get_tls_address,
6901 #else
6902 NULL,
6903 #endif
6904 linux_qxfer_spu,
6905 hostio_last_error_from_errno,
6906 linux_qxfer_osdata,
6907 linux_xfer_siginfo,
6908 linux_supports_non_stop,
6909 linux_async,
6910 linux_start_non_stop,
6911 linux_supports_multi_process,
6912 linux_supports_fork_events,
6913 linux_supports_vfork_events,
6914 linux_handle_new_gdb_connection,
6915 #ifdef USE_THREAD_DB
6916 thread_db_handle_monitor_command,
6917 #else
6918 NULL,
6919 #endif
6920 linux_common_core_of_thread,
6921 linux_read_loadmap,
6922 linux_process_qsupported,
6923 linux_supports_tracepoints,
6924 linux_read_pc,
6925 linux_write_pc,
6926 linux_thread_stopped,
6927 NULL,
6928 linux_pause_all,
6929 linux_unpause_all,
6930 linux_stabilize_threads,
6931 linux_install_fast_tracepoint_jump_pad,
6932 linux_emit_ops,
6933 linux_supports_disable_randomization,
6934 linux_get_min_fast_tracepoint_insn_len,
6935 linux_qxfer_libraries_svr4,
6936 linux_supports_agent,
6937 #ifdef HAVE_LINUX_BTRACE
6938 linux_supports_btrace,
6939 linux_low_enable_btrace,
6940 linux_low_disable_btrace,
6941 linux_low_read_btrace,
6942 linux_low_btrace_conf,
6943 #else
6944 NULL,
6945 NULL,
6946 NULL,
6947 NULL,
6948 NULL,
6949 #endif
6950 linux_supports_range_stepping,
6951 linux_proc_pid_to_exec_file,
6952 linux_mntns_open_cloexec,
6953 linux_mntns_unlink,
6954 linux_mntns_readlink,
6955 };
6956
6957 static void
6958 linux_init_signals (void)
6959 {
6960 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6961 to find what the cancel signal actually is. */
6962 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6963 signal (__SIGRTMIN+1, SIG_IGN);
6964 #endif
6965 }
6966
6967 #ifdef HAVE_LINUX_REGSETS
6968 void
6969 initialize_regsets_info (struct regsets_info *info)
6970 {
6971 for (info->num_regsets = 0;
6972 info->regsets[info->num_regsets].size >= 0;
6973 info->num_regsets++)
6974 ;
6975 }
6976 #endif
6977
6978 void
6979 initialize_low (void)
6980 {
6981 struct sigaction sigchld_action;
6982 memset (&sigchld_action, 0, sizeof (sigchld_action));
6983 set_target_ops (&linux_target_ops);
6984 set_breakpoint_data (the_low_target.breakpoint,
6985 the_low_target.breakpoint_len);
6986 linux_init_signals ();
6987 linux_ptrace_init_warnings ();
6988
6989 sigchld_action.sa_handler = sigchld_handler;
6990 sigemptyset (&sigchld_action.sa_mask);
6991 sigchld_action.sa_flags = SA_RESTART;
6992 sigaction (SIGCHLD, &sigchld_action, NULL);
6993
6994 initialize_low_arch ();
6995
6996 linux_check_ptrace_features ();
6997 }