/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
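
/* Illustrative note (added, not original text): W_STOPCODE builds the
   wait status that waitpid reports for a signal-stopped child.  For
   example, on most Linux targets W_STOPCODE (SIGTRAP) yields 0x57f,
   for which WIFSTOPPED () is true and WSTOPSIG () recovers SIGTRAP.
   dequeue_one_deferred_signal below relies on this to synthesize a
   wait status for a deferred signal.  */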

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN has defined these since at least the 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
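
/* Illustrative note (an assumption about usage, not original text):
   these constants index the ptrace USER area, so the text segment
   start can be read with e.g.

     ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 0);

   which is how relocatable text/data offsets are obtained on such
   (typically no-MMU) targets.  */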

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
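
/* Illustrative note (added): the auxiliary vector is an array of such
   (a_type, a_val) pairs terminated by an AT_NULL entry; e.g. the
   entry with a_type == AT_ENTRY carries the program's entry point
   address.  gdbserver reads the vector from /proc/PID/auxv.  */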

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
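
/* Added note: these two functions cooperate across wait contexts.
   linux_low_filter_event stashes a stop for an LWP we do not know yet
   with add_to_pid_list, and handle_extended_wait later claims it with
   pull_pid_from_list once the clone event that explains it arrives.  */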

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
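
/* Added note (a general sketch of the mechanism, not original text):
   in async mode a byte written to linux_event_pipe[1] marks that a
   target event is pending, which wakes the event loop polling the
   read end; the loop then calls back into the wait machinery to
   collect the event, and the pipe is drained once events have been
   consumed.  */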

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
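
/* Illustrative note (added): the magic bytes checked above are
   0x7f 'E' 'L' 'F', so a valid header starts with "\177ELF", and the
   following EI_CLASS byte distinguishes ELFCLASS32 from ELFCLASS64.  */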

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}
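
/* Added note: a clone event arrives as a SIGTRAP stop whose extended
   event bits are set, i.e.
   (wstat >> 8) == (SIGTRAP | (PTRACE_EVENT_CLONE << 8)),
   which is what linux_ptrace_get_extended_event decodes above; the
   new thread's tid is then fetched separately with
   PTRACE_GETEVENTMSG.  */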

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
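
/* Worked example (added for illustration): on i386 the breakpoint
   instruction is the one-byte int3 (0xCC) and decr_pc_after_break is
   1.  With a breakpoint planted at 0x1000, hitting it leaves $eip at
   0x1001, so the code below rewinds the PC to 0x1000 and records that
   as the stop address reported to GDB.  */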

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
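
/* Added note: because the child called PTRACE_TRACEME, its execv
   stops it with a SIGTRAP before the first instruction of the new
   image runs; the caller later collects that initial stop via the
   wait machinery before the inferior is resumed.  */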

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process cannot transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
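
/* Added note: find_inferior returns the first entry for which the
   callback returns nonzero, or NULL if the callback never does.  So
   the call above yields NULL exactly when no second thread of PID
   exists, which is what makes it a "last thread" test.  */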

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait statuses on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
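
/* Example scenario (added for illustration): if the thread last
   stopped for a SIGUSR1 that gdbserver intercepted but has not yet
   passed on, the function above returns SIGUSR1 so PTRACE_DETACH can
   deliver it; if the stop was our own SIGSTOP, or a signal GDB set to
   "nopass", it returns 0 and the signal is dropped on detach.  */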

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported the corresponding
     stop to GDB yet, do report any status pending the LWP may have.
     Once the stop has been reported (last_status is no longer
     TARGET_WAITKIND_IGNORE), the pending status is no longer
     interesting.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}
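
/* Added note: same_lwp falls back to the pid field when the ptid has
   no lwp component, so e.g. find_lwp_pid (pid_to_ptid (1234)) finds
   the LWP whose tid is 1234, i.e. the main thread of that process.  */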

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
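
/* Usage sketch (hypothetical, added for illustration): to count the
   stopped LWPs of process 1234 one could write

     static int
     count_stopped (struct lwp_info *lwp, void *data)
     {
       if (lwp->stopped)
	 (*(int *) data)++;
       return 0;
     }

     int n = 0;
     iterate_over_lwps (pid_to_ptid (1234), count_stopped, &n);

   A zero return from the callback keeps the walk going; the first
   nonzero return stops it and makes iterate_over_lwps return that
   LWP.  */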

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
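
/* Added note: the list is singly linked through `prev' with the newest
   entry at the head, so the walk to the end above dequeues the oldest
   deferred signal first (FIFO order), and W_STOPCODE rebuilds a wait
   status for it as though waitpid had just reported the stop.  */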

/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, the way to know which watched address trapped is to
   read back the debug register that selects which address to watch.
   Problem is, between setting the watchpoint and reading back which
   data address trapped, the user may change the set of watchpoints,
   and, as a consequence, GDB changes the debug registers in the
   inferior.  To avoid reading back a stale stopped-data-address when
   that happens, we cache in LP the fact that a watchpoint trapped,
   and the corresponding data address, as soon as we see CHILD stop
   with a SIGTRAP.  If GDB changes the debug registers meanwhile, we
   have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
1910
1911 /* Do low-level handling of the event, and check if we should go on
1912 and pass it to caller code.  Return the affected LWP if we
1913 should, or NULL otherwise.  */
1914
1915 static struct lwp_info *
1916 linux_low_filter_event (int lwpid, int wstat)
1917 {
1918 struct lwp_info *child;
1919 struct thread_info *thread;
1920 int have_stop_pc = 0;
1921
1922 child = find_lwp_pid (pid_to_ptid (lwpid));
1923
1924 /* If we didn't find a process, one of two things presumably happened:
1925 - A process we started and then detached from has exited. Ignore it.
1926 - A process we are controlling has forked and the new child's stop
1927 was reported to us by the kernel. Save its PID. */
1928 if (child == NULL && WIFSTOPPED (wstat))
1929 {
1930 add_to_pid_list (&stopped_pids, lwpid, wstat);
1931 return NULL;
1932 }
1933 else if (child == NULL)
1934 return NULL;
1935
1936 thread = get_lwp_thread (child);
1937
1938 child->stopped = 1;
1939
1940 child->last_status = wstat;
1941
1942 /* Check if the thread has exited. */
1943 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1944 {
1945 if (debug_threads)
1946 debug_printf ("LLFE: %d exited.\n", lwpid);
1947 if (num_lwps (pid_of (thread)) > 1)
1948 {
1949
1950 /* If there is at least one more LWP, then the exit signal was
1951 not the end of the debugged application and should be
1952 ignored. */
1953 delete_lwp (child);
1954 return NULL;
1955 }
1956 else
1957 {
1958 /* This was the last lwp in the process. Since events are
1959 serialized to GDB core, and we can't report this one
1960 right now, but GDB core and the other target layers will
1961 want to be notified about the exit code/signal, leave the
1962 status pending for the next time we're able to report
1963 it. */
1964 mark_lwp_dead (child, wstat);
1965 return child;
1966 }
1967 }
1968
1969 gdb_assert (WIFSTOPPED (wstat));
1970
1971 if (WIFSTOPPED (wstat))
1972 {
1973 struct process_info *proc;
1974
1975 /* Architecture-specific setup after inferior is running. This
1976 needs to happen after we have attached to the inferior and it
1977 is stopped for the first time, but before we access any
1978 inferior registers. */
1979 proc = find_process_pid (pid_of (thread));
1980 if (proc->priv->new_inferior)
1981 {
1982 struct thread_info *saved_thread;
1983
1984 saved_thread = current_thread;
1985 current_thread = thread;
1986
1987 the_low_target.arch_setup ();
1988
1989 current_thread = saved_thread;
1990
1991 proc->priv->new_inferior = 0;
1992 }
1993 }
1994
1995 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1996 {
1997 struct process_info *proc = find_process_pid (pid_of (thread));
1998
1999 linux_enable_event_reporting (lwpid, proc->attached);
2000 child->must_set_ptrace_flags = 0;
2001 }
2002
2003 /* Be careful not to overwrite stop_pc until
2004 check_stopped_by_breakpoint is called.  */
2005 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2006 && linux_is_extended_waitstatus (wstat))
2007 {
2008 child->stop_pc = get_pc (child);
2009 handle_extended_wait (child, wstat);
2010 return NULL;
2011 }
2012
2013 /* Check first whether this was a SW/HW breakpoint before checking
2014 watchpoints, because at least s390 can't tell the data address of
2015 hardware watchpoint hits, and returns stopped-by-watchpoint as
2016 long as there's a watchpoint set. */
2017 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2018 {
2019 if (check_stopped_by_breakpoint (child))
2020 have_stop_pc = 1;
2021 }
2022
2023 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2024 or hardware watchpoint. Check which is which if we got
2025 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2026 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2027 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2028 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2029 check_stopped_by_watchpoint (child);
2030
2031 if (!have_stop_pc)
2032 child->stop_pc = get_pc (child);
2033
2034 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2035 && child->stop_expected)
2036 {
2037 if (debug_threads)
2038 debug_printf ("Expected stop.\n");
2039 child->stop_expected = 0;
2040
2041 if (thread->last_resume_kind == resume_stop)
2042 {
2043 /* We want to report the stop to the core. Treat the
2044 SIGSTOP as a normal event. */
2045 }
2046 else if (stopping_threads != NOT_STOPPING_THREADS)
2047 {
2048 /* Stopping threads. We don't want this SIGSTOP to end up
2049 pending. */
2050 return NULL;
2051 }
2052 else
2053 {
2054 /* Filter out the event. */
2055 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2056 return NULL;
2057 }
2058 }
2059
2060 child->status_pending_p = 1;
2061 child->status_pending = wstat;
2062 return child;
2063 }
2064
2065 /* Resume LWPs that are currently stopped without any pending status
2066 to report, but that are resumed from the core's perspective.  */
2067
2068 static void
2069 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2070 {
2071 struct thread_info *thread = (struct thread_info *) entry;
2072 struct lwp_info *lp = get_thread_lwp (thread);
2073
2074 if (lp->stopped
2075 && !lp->status_pending_p
2076 && thread->last_resume_kind != resume_stop
2077 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2078 {
2079 int step = thread->last_resume_kind == resume_step;
2080
2081 if (debug_threads)
2082 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2083 target_pid_to_str (ptid_of (thread)),
2084 paddress (lp->stop_pc),
2085 step);
2086
2087 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2088 }
2089 }
2090
2091 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2092 match FILTER_PTID (leaving others pending). The PTIDs can be:
2093 minus_one_ptid, to specify any child; a pid PTID, specifying all
2094 lwps of a thread group; or a PTID representing a single lwp. Store
2095 the stop status through the status pointer WSTAT. OPTIONS is
2096 passed to the waitpid call. Return 0 if no event was found and
2097 OPTIONS contains WNOHANG.  Return -1 if no unwaited-for child
2098 was found.  Return the PID of the stopped child otherwise.  */
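/* For example, wait_for_sigstop (below) calls this with a null
   FILTER_PTID so that every event is pulled out of the kernel but
   left pending on its LWP:

     linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
                                    &wstat, __WALL);  */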
2099
2100 static int
2101 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2102 int *wstatp, int options)
2103 {
2104 struct thread_info *event_thread;
2105 struct lwp_info *event_child, *requested_child;
2106 sigset_t block_mask, prev_mask;
2107
2108 retry:
2109 /* N.B. event_thread points to the thread_info struct that contains
2110 event_child. Keep them in sync. */
2111 event_thread = NULL;
2112 event_child = NULL;
2113 requested_child = NULL;
2114
2115 /* Check for a lwp with a pending status. */
2116
2117 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2118 {
2119 event_thread = (struct thread_info *)
2120 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2121 if (event_thread != NULL)
2122 event_child = get_thread_lwp (event_thread);
2123 if (debug_threads && event_thread)
2124 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2125 }
2126 else if (!ptid_equal (filter_ptid, null_ptid))
2127 {
2128 requested_child = find_lwp_pid (filter_ptid);
2129
2130 if (stopping_threads == NOT_STOPPING_THREADS
2131 && requested_child->status_pending_p
2132 && requested_child->collecting_fast_tracepoint)
2133 {
2134 enqueue_one_deferred_signal (requested_child,
2135 &requested_child->status_pending);
2136 requested_child->status_pending_p = 0;
2137 requested_child->status_pending = 0;
2138 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2139 }
2140
2141 if (requested_child->suspended
2142 && requested_child->status_pending_p)
2143 {
2144 internal_error (__FILE__, __LINE__,
2145 "requesting an event out of a"
2146 " suspended child?");
2147 }
2148
2149 if (requested_child->status_pending_p)
2150 {
2151 event_child = requested_child;
2152 event_thread = get_lwp_thread (event_child);
2153 }
2154 }
2155
2156 if (event_child != NULL)
2157 {
2158 if (debug_threads)
2159 debug_printf ("Got an event from pending child %ld (%04x)\n",
2160 lwpid_of (event_thread), event_child->status_pending);
2161 *wstatp = event_child->status_pending;
2162 event_child->status_pending_p = 0;
2163 event_child->status_pending = 0;
2164 current_thread = event_thread;
2165 return lwpid_of (event_thread);
2166 }
2167
2168 /* But if we don't find a pending event, we'll have to wait.
2169
2170 We only enter this loop if no process has a pending wait status.
2171 Thus any action taken in response to a wait status inside this
2172 loop is responding as soon as we detect the status, not after any
2173 pending events. */
2174
2175 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2176 all signals while here. */
2177 sigfillset (&block_mask);
2178 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2179
2180 /* Always pull all events out of the kernel. We'll randomly select
2181 an event LWP out of all that have events, to prevent
2182 starvation. */
2183 while (event_child == NULL)
2184 {
2185 pid_t ret = 0;
2186
2187 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2188 quirks:
2189
2190 - If the thread group leader exits while other threads in the
2191 thread group still exist, waitpid(TGID, ...) hangs. That
2192 waitpid won't return an exit status until the other threads
2193 in the group are reaped.
2194
2195 - When a non-leader thread execs, that thread just vanishes
2196 without reporting an exit (so we'd hang if we waited for it
2197 explicitly in that case). The exec event is reported to
2198 the TGID pid (although we don't currently enable exec
2199 events). */
2200 errno = 0;
2201 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2202
2203 if (debug_threads)
2204 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2205 ret, errno ? strerror (errno) : "ERRNO-OK");
2206
2207 if (ret > 0)
2208 {
2209 if (debug_threads)
2210 {
2211 debug_printf ("LLW: waitpid %ld received %s\n",
2212 (long) ret, status_to_str (*wstatp));
2213 }
2214
2215 /* Filter all events. IOW, leave all events pending. We'll
2216 randomly select an event LWP out of all that have events
2217 below. */
2218 linux_low_filter_event (ret, *wstatp);
2219 /* Retry until nothing comes out of waitpid. A single
2220 SIGCHLD can indicate more than one child stopped. */
2221 continue;
2222 }
2223
2224 /* Now that we've pulled all events out of the kernel, resume
2225 LWPs that don't have an interesting event to report. */
2226 if (stopping_threads == NOT_STOPPING_THREADS)
2227 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2228
2229 /* ... and find an LWP with a status to report to the core, if
2230 any. */
2231 event_thread = (struct thread_info *)
2232 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2233 if (event_thread != NULL)
2234 {
2235 event_child = get_thread_lwp (event_thread);
2236 *wstatp = event_child->status_pending;
2237 event_child->status_pending_p = 0;
2238 event_child->status_pending = 0;
2239 break;
2240 }
2241
2242 /* Check for zombie thread group leaders. Those can't be reaped
2243 until all other threads in the thread group are. */
2244 check_zombie_leaders ();
2245
2246 /* If there are no resumed children left in the set of LWPs we
2247 want to wait for, bail. We can't just block in
2248 waitpid/sigsuspend, because lwps might have been left stopped
2249 in trace-stop state, and we'd be stuck forever waiting for
2250 their status to change (which would only happen if we resumed
2251 them). Even if WNOHANG is set, this return code is preferred
2252 over 0 (below), as it is more detailed. */
2253 if ((find_inferior (&all_threads,
2254 not_stopped_callback,
2255 &wait_ptid) == NULL))
2256 {
2257 if (debug_threads)
2258 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2259 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2260 return -1;
2261 }
2262
2263 /* No interesting event to report to the caller. */
2264 if ((options & WNOHANG))
2265 {
2266 if (debug_threads)
2267 debug_printf ("WNOHANG set, no event found\n");
2268
2269 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2270 return 0;
2271 }
2272
2273 /* Block until we get an event reported with SIGCHLD. */
2274 if (debug_threads)
2275 debug_printf ("sigsuspend'ing\n");
2276
2277 sigsuspend (&prev_mask);
2278 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2279 goto retry;
2280 }
2281
2282 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2283
2284 current_thread = event_thread;
2285
2286 /* Check for thread exit. */
2287 if (! WIFSTOPPED (*wstatp))
2288 {
2289 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2290
2291 if (debug_threads)
2292 debug_printf ("LWP %ld is the last lwp of process %d. "
2293 "Process exiting.\n",
2294 lwpid_of (event_thread), pid_of (event_thread));
2295 return lwpid_of (event_thread);
2296 }
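/* Both paths return the LWP id; the branch above exists only for
   the sanity-check assertion and its debug message.  */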
2297
2298 return lwpid_of (event_thread);
2299 }
2300
2301 /* Wait for an event from child(ren) PTID. PTIDs can be:
2302 minus_one_ptid, to specify any child; a pid PTID, specifying all
2303 lwps of a thread group; or a PTID representing a single lwp. Store
2304 the stop status through the status pointer WSTAT. OPTIONS is
2305 passed to the waitpid call. Return 0 if no event was found and
2306 OPTIONS contains WNOHANG.  Return -1 if no unwaited-for child
2307 was found.  Return the PID of the stopped child otherwise.  */
2308
2309 static int
2310 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2311 {
2312 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2313 }
2314
2315 /* Count the LWPs that have had events.  */
2316
2317 static int
2318 count_events_callback (struct inferior_list_entry *entry, void *data)
2319 {
2320 struct thread_info *thread = (struct thread_info *) entry;
2321 struct lwp_info *lp = get_thread_lwp (thread);
2322 int *count = data;
2323
2324 gdb_assert (count != NULL);
2325
2326 /* Count only resumed LWPs that have an event pending. */
2327 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2328 && lp->status_pending_p)
2329 (*count)++;
2330
2331 return 0;
2332 }
2333
2334 /* Select the LWP (if any) that is currently being single-stepped. */
2335
2336 static int
2337 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2338 {
2339 struct thread_info *thread = (struct thread_info *) entry;
2340 struct lwp_info *lp = get_thread_lwp (thread);
2341
2342 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2343 && thread->last_resume_kind == resume_step
2344 && lp->status_pending_p)
2345 return 1;
2346 else
2347 return 0;
2348 }
2349
2350 /* Select the Nth LWP that has had an event. */
2351
2352 static int
2353 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2354 {
2355 struct thread_info *thread = (struct thread_info *) entry;
2356 struct lwp_info *lp = get_thread_lwp (thread);
2357 int *selector = data;
2358
2359 gdb_assert (selector != NULL);
2360
2361 /* Select only resumed LWPs that have an event pending. */
2362 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2363 && lp->status_pending_p)
2364 if ((*selector)-- == 0)
2365 return 1;
2366
2367 return 0;
2368 }
2369
2370 /* Select one LWP out of those that have events pending. */
2371
2372 static void
2373 select_event_lwp (struct lwp_info **orig_lp)
2374 {
2375 int num_events = 0;
2376 int random_selector;
2377 struct thread_info *event_thread = NULL;
2378
2379 /* In all-stop, give preference to the LWP that is being
2380 single-stepped. There will be at most one, and it's the LWP that
2381 the core is most interested in. If we didn't do this, then we'd
2382 have to handle pending step SIGTRAPs somehow in case the core
2383 later continues the previously-stepped thread; otherwise we'd
2384 report the pending SIGTRAP, and the core, not having stepped the
2385 thread, wouldn't understand what the trap was for, and would
2386 report it to the user as a spurious signal.  */
2387 if (!non_stop)
2388 {
2389 event_thread
2390 = (struct thread_info *) find_inferior (&all_threads,
2391 select_singlestep_lwp_callback,
2392 NULL);
2393 if (event_thread != NULL)
2394 {
2395 if (debug_threads)
2396 debug_printf ("SEL: Select single-step %s\n",
2397 target_pid_to_str (ptid_of (event_thread)));
2398 }
2399 }
2400 if (event_thread == NULL)
2401 {
2402 /* No single-stepping LWP. Select one at random, out of those
2403 which have had events. */
2404
2405 /* First see how many events we have. */
2406 find_inferior (&all_threads, count_events_callback, &num_events);
2407 gdb_assert (num_events > 0);
2408
2409 /* Now randomly pick a LWP out of those that have had
2410 events. */
2411 random_selector = (int)
2412 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
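/* The scaling above yields an integer uniformly distributed in
   [0, num_events); select_event_lwp_callback then counts it down,
   so each LWP with a pending event is equally likely to be picked.  */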
2413
2414 if (debug_threads && num_events > 1)
2415 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2416 num_events, random_selector);
2417
2418 event_thread
2419 = (struct thread_info *) find_inferior (&all_threads,
2420 select_event_lwp_callback,
2421 &random_selector);
2422 }
2423
2424 if (event_thread != NULL)
2425 {
2426 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2427
2428 /* Switch the event LWP. */
2429 *orig_lp = event_lp;
2430 }
2431 }
2432
2433 /* Decrement the suspend count of an LWP. */
2434
2435 static int
2436 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2437 {
2438 struct thread_info *thread = (struct thread_info *) entry;
2439 struct lwp_info *lwp = get_thread_lwp (thread);
2440
2441 /* Ignore EXCEPT. */
2442 if (lwp == except)
2443 return 0;
2444
2445 lwp->suspended--;
2446
2447 gdb_assert (lwp->suspended >= 0);
2448 return 0;
2449 }
2450
2451 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2452 non-NULL.  */
2453
2454 static void
2455 unsuspend_all_lwps (struct lwp_info *except)
2456 {
2457 find_inferior (&all_threads, unsuspend_one_lwp, except);
2458 }
2459
2460 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2461 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2462 void *data);
2463 static int lwp_running (struct inferior_list_entry *entry, void *data);
2464 static ptid_t linux_wait_1 (ptid_t ptid,
2465 struct target_waitstatus *ourstatus,
2466 int target_options);
2467
2468 /* Stabilize threads (move out of jump pads).
2469
2470 If a thread is midway through collecting a fast tracepoint, we
2471 need to finish the collection and move it out of the jump pad
2472 before reporting the signal.
2473
2474 This avoids recursion while collecting (when a signal arrives
2475 midway, and the signal handler itself collects), which would trash
2476 the trace buffer.  In case the user set a breakpoint in a signal
2477 handler, this avoids the backtrace showing the jump pad, etc.
2478 Most importantly, there are certain things we can't do safely if
2479 threads are stopped in a jump pad (or in their callees).  For
2480 example:
2481
2482 - Starting a new trace run.  A thread still collecting from the
2483 previous run could trash the trace buffer when resumed.  The trace
2484 buffer control structures would have been reset, but the thread
2485 would have no way to tell.  The thread could even be midway
2486 through memcpy'ing into the buffer, which, when resumed, would
2487 clobber the trace buffer that had been set up for the new run.
2488
2489 - We can't safely rewrite or reuse the jump pads for new
2490 tracepoints.  Say you do tstart while a thread is stopped midway
2491 through collecting.  When the thread is later resumed, it finishes
2492 the collection and returns to the jump pad, to execute the
2493 original instruction that was under the tracepoint jump at the
2494 time the older run was started.  If the jump pad had since been
2495 rewritten for something else in the new run, the thread would now
2496 execute wrong or random instructions.  */
2497
2498 static void
2499 linux_stabilize_threads (void)
2500 {
2501 struct thread_info *saved_thread;
2502 struct thread_info *thread_stuck;
2503
2504 thread_stuck
2505 = (struct thread_info *) find_inferior (&all_threads,
2506 stuck_in_jump_pad_callback,
2507 NULL);
2508 if (thread_stuck != NULL)
2509 {
2510 if (debug_threads)
2511 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2512 lwpid_of (thread_stuck));
2513 return;
2514 }
2515
2516 saved_thread = current_thread;
2517
2518 stabilizing_threads = 1;
2519
2520 /* Kick 'em all. */
2521 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2522
2523 /* Loop until all are stopped out of the jump pads. */
2524 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2525 {
2526 struct target_waitstatus ourstatus;
2527 struct lwp_info *lwp;
2528 int wstat;
2529
2530 /* Note that we go through the full wait event loop.  While
2531 moving threads out of jump pad, we need to be able to step
2532 over internal breakpoints and such. */
2533 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2534
2535 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2536 {
2537 lwp = get_thread_lwp (current_thread);
2538
2539 /* Lock it. */
2540 lwp->suspended++;
2541
2542 if (ourstatus.value.sig != GDB_SIGNAL_0
2543 || current_thread->last_resume_kind == resume_stop)
2544 {
2545 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2546 enqueue_one_deferred_signal (lwp, &wstat);
2547 }
2548 }
2549 }
2550
2551 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2552
2553 stabilizing_threads = 0;
2554
2555 current_thread = saved_thread;
2556
2557 if (debug_threads)
2558 {
2559 thread_stuck
2560 = (struct thread_info *) find_inferior (&all_threads,
2561 stuck_in_jump_pad_callback,
2562 NULL);
2563 if (thread_stuck != NULL)
2564 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2565 lwpid_of (thread_stuck));
2566 }
2567 }
2568
2569 static void async_file_mark (void);
2570
2571 /* Convenience function that is called when the kernel reports an
2572 event that is not passed out to GDB. */
2573
2574 static ptid_t
2575 ignore_event (struct target_waitstatus *ourstatus)
2576 {
2577 /* If we got an event, there may still be others, as a single
2578 SIGCHLD can indicate more than one child stopped. This forces
2579 another target_wait call. */
2580 async_file_mark ();
2581
2582 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2583 return null_ptid;
2584 }
2585
2586 /* Wait for process, returns status. */
2587
2588 static ptid_t
2589 linux_wait_1 (ptid_t ptid,
2590 struct target_waitstatus *ourstatus, int target_options)
2591 {
2592 int w;
2593 struct lwp_info *event_child;
2594 int options;
2595 int pid;
2596 int step_over_finished;
2597 int bp_explains_trap;
2598 int maybe_internal_trap;
2599 int report_to_gdb;
2600 int trace_event;
2601 int in_step_range;
2602
2603 if (debug_threads)
2604 {
2605 debug_enter ();
2606 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2607 }
2608
2609 /* Translate generic target options into linux options. */
2610 options = __WALL;
2611 if (target_options & TARGET_WNOHANG)
2612 options |= WNOHANG;
2613
2614 bp_explains_trap = 0;
2615 trace_event = 0;
2616 in_step_range = 0;
2617 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2618
2619 if (ptid_equal (step_over_bkpt, null_ptid))
2620 pid = linux_wait_for_event (ptid, &w, options);
2621 else
2622 {
2623 if (debug_threads)
2624 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2625 target_pid_to_str (step_over_bkpt));
2626 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2627 }
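/* WNOHANG is masked off above because, while a step-over is in
   flight, nothing useful can be reported until the stepping LWP
   checks back in, so we must block waiting for it.  */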
2628
2629 if (pid == 0)
2630 {
2631 gdb_assert (target_options & TARGET_WNOHANG);
2632
2633 if (debug_threads)
2634 {
2635 debug_printf ("linux_wait_1 ret = null_ptid, "
2636 "TARGET_WAITKIND_IGNORE\n");
2637 debug_exit ();
2638 }
2639
2640 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2641 return null_ptid;
2642 }
2643 else if (pid == -1)
2644 {
2645 if (debug_threads)
2646 {
2647 debug_printf ("linux_wait_1 ret = null_ptid, "
2648 "TARGET_WAITKIND_NO_RESUMED\n");
2649 debug_exit ();
2650 }
2651
2652 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2653 return null_ptid;
2654 }
2655
2656 event_child = get_thread_lwp (current_thread);
2657
2658 /* linux_wait_for_event only returns an exit status for the last
2659 child of a process. Report it. */
2660 if (WIFEXITED (w) || WIFSIGNALED (w))
2661 {
2662 if (WIFEXITED (w))
2663 {
2664 ourstatus->kind = TARGET_WAITKIND_EXITED;
2665 ourstatus->value.integer = WEXITSTATUS (w);
2666
2667 if (debug_threads)
2668 {
2669 debug_printf ("linux_wait_1 ret = %s, exited with "
2670 "retcode %d\n",
2671 target_pid_to_str (ptid_of (current_thread)),
2672 WEXITSTATUS (w));
2673 debug_exit ();
2674 }
2675 }
2676 else
2677 {
2678 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2679 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2680
2681 if (debug_threads)
2682 {
2683 debug_printf ("linux_wait_1 ret = %s, terminated with "
2684 "signal %d\n",
2685 target_pid_to_str (ptid_of (current_thread)),
2686 WTERMSIG (w));
2687 debug_exit ();
2688 }
2689 }
2690
2691 return ptid_of (current_thread);
2692 }
2693
2694 /* If step-over executes a breakpoint instruction, it means a
2695 gdb/gdbserver breakpoint had been planted on top of a permanent
2696 breakpoint. The PC has been adjusted by
2697 check_stopped_by_breakpoint to point at the breakpoint address.
2698 Advance the PC manually past the breakpoint, otherwise the
2699 program would keep trapping the permanent breakpoint forever. */
2700 if (!ptid_equal (step_over_bkpt, null_ptid)
2701 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2702 {
2703 unsigned int increment_pc = the_low_target.breakpoint_len;
2704
2705 if (debug_threads)
2706 {
2707 debug_printf ("step-over for %s executed software breakpoint\n",
2708 target_pid_to_str (ptid_of (current_thread)));
2709 }
2710
2711 if (increment_pc != 0)
2712 {
2713 struct regcache *regcache
2714 = get_thread_regcache (current_thread, 1);
2715
2716 event_child->stop_pc += increment_pc;
2717 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2718
2719 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2720 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2721 }
2722 }
2723
2724 /* If this event was not handled before, and is not a SIGTRAP, we
2725 report it. SIGILL and SIGSEGV are also treated as traps in case
2726 a breakpoint is inserted at the current PC. If this target does
2727 not support internal breakpoints at all, we also report the
2728 SIGTRAP without further processing; it's of no concern to us. */
2729 maybe_internal_trap
2730 = (supports_breakpoints ()
2731 && (WSTOPSIG (w) == SIGTRAP
2732 || ((WSTOPSIG (w) == SIGILL
2733 || WSTOPSIG (w) == SIGSEGV)
2734 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2735
2736 if (maybe_internal_trap)
2737 {
2738 /* Handle anything that requires bookkeeping before deciding to
2739 report the event or continue waiting. */
2740
2741 /* First check if we can explain the SIGTRAP with an internal
2742 breakpoint, or if we should possibly report the event to GDB.
2743 Do this before anything that may remove or insert a
2744 breakpoint. */
2745 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2746
2747 /* We have a SIGTRAP, possibly a step-over dance has just
2748 finished. If so, tweak the state machine accordingly,
2749 reinsert breakpoints and delete any reinsert (software
2750 single-step) breakpoints. */
2751 step_over_finished = finish_step_over (event_child);
2752
2753 /* Now invoke the callbacks of any internal breakpoints there. */
2754 check_breakpoints (event_child->stop_pc);
2755
2756 /* Handle tracepoint data collecting. This may overflow the
2757 trace buffer, and cause a tracing stop, removing
2758 breakpoints. */
2759 trace_event = handle_tracepoints (event_child);
2760
2761 if (bp_explains_trap)
2762 {
2763 /* If we stepped or ran into an internal breakpoint, we've
2764 already handled it. So next time we resume (from this
2765 PC), we should step over it. */
2766 if (debug_threads)
2767 debug_printf ("Hit a gdbserver breakpoint.\n");
2768
2769 if (breakpoint_here (event_child->stop_pc))
2770 event_child->need_step_over = 1;
2771 }
2772 }
2773 else
2774 {
2775 /* We have some other signal, possibly a step-over dance was in
2776 progress, and it should be cancelled too. */
2777 step_over_finished = finish_step_over (event_child);
2778 }
2779
2780 /* We have all the data we need. Either report the event to GDB, or
2781 resume threads and keep waiting for more. */
2782
2783 /* If we're collecting a fast tracepoint, finish the collection and
2784 move out of the jump pad before delivering a signal. See
2785 linux_stabilize_threads. */
2786
2787 if (WIFSTOPPED (w)
2788 && WSTOPSIG (w) != SIGTRAP
2789 && supports_fast_tracepoints ()
2790 && agent_loaded_p ())
2791 {
2792 if (debug_threads)
2793 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2794 "to defer or adjust it.\n",
2795 WSTOPSIG (w), lwpid_of (current_thread));
2796
2797 /* Allow debugging the jump pad itself. */
2798 if (current_thread->last_resume_kind != resume_step
2799 && maybe_move_out_of_jump_pad (event_child, &w))
2800 {
2801 enqueue_one_deferred_signal (event_child, &w);
2802
2803 if (debug_threads)
2804 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2805 WSTOPSIG (w), lwpid_of (current_thread));
2806
2807 linux_resume_one_lwp (event_child, 0, 0, NULL);
2808
2809 return ignore_event (ourstatus);
2810 }
2811 }
2812
2813 if (event_child->collecting_fast_tracepoint)
2814 {
2815 if (debug_threads)
2816 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2817 "Check if we're already there.\n",
2818 lwpid_of (current_thread),
2819 event_child->collecting_fast_tracepoint);
2820
2821 trace_event = 1;
2822
2823 event_child->collecting_fast_tracepoint
2824 = linux_fast_tracepoint_collecting (event_child, NULL);
2825
2826 if (event_child->collecting_fast_tracepoint != 1)
2827 {
2828 /* No longer need this breakpoint. */
2829 if (event_child->exit_jump_pad_bkpt != NULL)
2830 {
2831 if (debug_threads)
2832 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2833 "stopping all threads momentarily.\n");
2834
2835 /* Other running threads could hit this breakpoint.
2836 We don't handle moribund locations like GDB does,
2837 instead we always pause all threads when removing
2838 breakpoints, so that any step-over or
2839 decr_pc_after_break adjustment is always taken
2840 care of while the breakpoint is still
2841 inserted. */
2842 stop_all_lwps (1, event_child);
2843
2844 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2845 event_child->exit_jump_pad_bkpt = NULL;
2846
2847 unstop_all_lwps (1, event_child);
2848
2849 gdb_assert (event_child->suspended >= 0);
2850 }
2851 }
2852
2853 if (event_child->collecting_fast_tracepoint == 0)
2854 {
2855 if (debug_threads)
2856 debug_printf ("fast tracepoint finished "
2857 "collecting successfully.\n");
2858
2859 /* We may have a deferred signal to report. */
2860 if (dequeue_one_deferred_signal (event_child, &w))
2861 {
2862 if (debug_threads)
2863 debug_printf ("dequeued one signal.\n");
2864 }
2865 else
2866 {
2867 if (debug_threads)
2868 debug_printf ("no deferred signals.\n");
2869
2870 if (stabilizing_threads)
2871 {
2872 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2873 ourstatus->value.sig = GDB_SIGNAL_0;
2874
2875 if (debug_threads)
2876 {
2877 debug_printf ("linux_wait_1 ret = %s, stopped "
2878 "while stabilizing threads\n",
2879 target_pid_to_str (ptid_of (current_thread)));
2880 debug_exit ();
2881 }
2882
2883 return ptid_of (current_thread);
2884 }
2885 }
2886 }
2887 }
2888
2889 /* Check whether GDB would be interested in this event. */
2890
2891 /* If GDB is not interested in this signal, don't stop other
2892 threads, and don't report it to GDB. Just resume the inferior
2893 right away. We do this for threading-related signals as well as
2894 any that GDB specifically requested we ignore. But never ignore
2895 SIGSTOP if we sent it ourselves, and do not ignore signals when
2896 stepping - they may require special handling to skip the signal
2897 handler. Also never ignore signals that could be caused by a
2898 breakpoint. */
2899 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2900 thread library? */
2901 if (WIFSTOPPED (w)
2902 && current_thread->last_resume_kind != resume_step
2903 && (
2904 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2905 (current_process ()->priv->thread_db != NULL
2906 && (WSTOPSIG (w) == __SIGRTMIN
2907 || WSTOPSIG (w) == __SIGRTMIN + 1))
2908 ||
2909 #endif
2910 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2911 && !(WSTOPSIG (w) == SIGSTOP
2912 && current_thread->last_resume_kind == resume_stop)
2913 && !linux_wstatus_maybe_breakpoint (w))))
2914 {
2915 siginfo_t info, *info_p;
2916
2917 if (debug_threads)
2918 debug_printf ("Ignored signal %d for LWP %ld.\n",
2919 WSTOPSIG (w), lwpid_of (current_thread));
2920
2921 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2922 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2923 info_p = &info;
2924 else
2925 info_p = NULL;
2926 linux_resume_one_lwp (event_child, event_child->stepping,
2927 WSTOPSIG (w), info_p);
2928 return ignore_event (ourstatus);
2929 }
2930
2931 /* Note that all addresses are always "out of the step range" when
2932 there's no range to begin with. */
2933 in_step_range = lwp_in_step_range (event_child);
2934
2935 /* If GDB wanted this thread to single step, and the thread is out
2936 of the step range, we always want to report the SIGTRAP, and let
2937 GDB handle it. Watchpoints should always be reported. So should
2938 signals we can't explain. A SIGTRAP we can't explain could be a
2939 GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2940 we do, we'll be able to handle GDB breakpoints on top of internal
2941 breakpoints, by handling the internal breakpoint and still
2942 reporting the event to GDB.  If we don't, we're out of luck; GDB
2943 won't see the breakpoint hit. */
2944 report_to_gdb = (!maybe_internal_trap
2945 || (current_thread->last_resume_kind == resume_step
2946 && !in_step_range)
2947 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
2948 || (!step_over_finished && !in_step_range
2949 && !bp_explains_trap && !trace_event)
2950 || (gdb_breakpoint_here (event_child->stop_pc)
2951 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2952 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2953
2954 run_breakpoint_commands (event_child->stop_pc);
2955
2956 /* We found no reason GDB would want us to stop. We either hit one
2957 of our own breakpoints, or finished an internal step GDB
2958 shouldn't know about. */
2959 if (!report_to_gdb)
2960 {
2961 if (debug_threads)
2962 {
2963 if (bp_explains_trap)
2964 debug_printf ("Hit a gdbserver breakpoint.\n");
2965 if (step_over_finished)
2966 debug_printf ("Step-over finished.\n");
2967 if (trace_event)
2968 debug_printf ("Tracepoint event.\n");
2969 if (lwp_in_step_range (event_child))
2970 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2971 paddress (event_child->stop_pc),
2972 paddress (event_child->step_range_start),
2973 paddress (event_child->step_range_end));
2974 }
2975
2976 /* We're not reporting this breakpoint to GDB, so apply the
2977 decr_pc_after_break adjustment to the inferior's regcache
2978 ourselves. */
2979
2980 if (the_low_target.set_pc != NULL)
2981 {
2982 struct regcache *regcache
2983 = get_thread_regcache (current_thread, 1);
2984 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2985 }
2986
2987 /* We may have finished stepping over a breakpoint. If so,
2988 we've stopped and suspended all LWPs momentarily except the
2989 stepping one. This is where we resume them all again. We're
2990 going to keep waiting, so use proceed, which handles stepping
2991 over the next breakpoint. */
2992 if (debug_threads)
2993 debug_printf ("proceeding all threads.\n");
2994
2995 if (step_over_finished)
2996 unsuspend_all_lwps (event_child);
2997
2998 proceed_all_lwps ();
2999 return ignore_event (ourstatus);
3000 }
3001
3002 if (debug_threads)
3003 {
3004 if (current_thread->last_resume_kind == resume_step)
3005 {
3006 if (event_child->step_range_start == event_child->step_range_end)
3007 debug_printf ("GDB wanted to single-step, reporting event.\n");
3008 else if (!lwp_in_step_range (event_child))
3009 debug_printf ("Out of step range, reporting event.\n");
3010 }
3011 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3012 debug_printf ("Stopped by watchpoint.\n");
3013 else if (gdb_breakpoint_here (event_child->stop_pc))
3014 debug_printf ("Stopped by GDB breakpoint.\n");
3015
3016 debug_printf ("Hit a non-gdbserver trap event.\n");
3017 }
3018
3019 /* Alright, we're going to report a stop. */
3020
3021 if (!stabilizing_threads)
3022 {
3023 /* In all-stop, stop all threads. */
3024 if (!non_stop)
3025 stop_all_lwps (0, NULL);
3026
3027 /* If we're not waiting for a specific LWP, choose an event LWP
3028 from among those that have had events. Giving equal priority
3029 to all LWPs that have had events helps prevent
3030 starvation. */
3031 if (ptid_equal (ptid, minus_one_ptid))
3032 {
3033 event_child->status_pending_p = 1;
3034 event_child->status_pending = w;
3035
3036 select_event_lwp (&event_child);
3037
3038 /* current_thread and event_child must stay in sync. */
3039 current_thread = get_lwp_thread (event_child);
3040
3041 event_child->status_pending_p = 0;
3042 w = event_child->status_pending;
3043 }
3044
3045 if (step_over_finished)
3046 {
3047 if (!non_stop)
3048 {
3049 /* If we were doing a step-over, all other threads but
3050 the stepping one had been paused in start_step_over,
3051 with their suspend counts incremented. We don't want
3052 to do a full unstop/unpause, because we're in
3053 all-stop mode (so we want threads stopped), but we
3054 still need to unsuspend the other threads, to
3055 decrement their `suspended' count back. */
3056 unsuspend_all_lwps (event_child);
3057 }
3058 else
3059 {
3060 /* If we just finished a step-over, then all threads had
3061 been momentarily paused. In all-stop, that's fine,
3062 we want threads stopped by now anyway. In non-stop,
3063 we need to re-resume threads that GDB wanted to be
3064 running. */
3065 unstop_all_lwps (1, event_child);
3066 }
3067 }
3068
3069 /* Stabilize threads (move out of jump pads). */
3070 if (!non_stop)
3071 stabilize_threads ();
3072 }
3073 else
3074 {
3075 /* If we just finished a step-over, then all threads had been
3076 momentarily paused. In all-stop, that's fine, we want
3077 threads stopped by now anyway. In non-stop, we need to
3078 re-resume threads that GDB wanted to be running. */
3079 if (step_over_finished)
3080 unstop_all_lwps (1, event_child);
3081 }
3082
3083 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3084
3085 /* Now that we've selected our final event LWP, un-adjust its PC if
3086 it was a software breakpoint, and the client doesn't know we can
3087 adjust the breakpoint ourselves. */
3088 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3089 && !swbreak_feature)
3090 {
3091 int decr_pc = the_low_target.decr_pc_after_break;
3092
3093 if (decr_pc != 0)
3094 {
3095 struct regcache *regcache
3096 = get_thread_regcache (current_thread, 1);
3097 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3098 }
3099 }
3100
3101 if (current_thread->last_resume_kind == resume_stop
3102 && WSTOPSIG (w) == SIGSTOP)
3103 {
3104 /* A thread that has been requested to stop by GDB with vCont;t
3105 stopped cleanly, so report it as GDB_SIGNAL_0.  The use of
3106 SIGSTOP is an implementation detail.  */
3107 ourstatus->value.sig = GDB_SIGNAL_0;
3108 }
3109 else if (current_thread->last_resume_kind == resume_stop
3110 && WSTOPSIG (w) != SIGSTOP)
3111 {
3112 /* A thread that has been requested to stop by GDB with vCont;t,
3113 but it stopped for some other reason.  */
3114 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3115 }
3116 else
3117 {
3118 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3119 }
3120
3121 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3122
3123 if (debug_threads)
3124 {
3125 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3126 target_pid_to_str (ptid_of (current_thread)),
3127 ourstatus->kind, ourstatus->value.sig);
3128 debug_exit ();
3129 }
3130
3131 return ptid_of (current_thread);
3132 }
3133
3134 /* Get rid of any pending event in the pipe. */
3135 static void
3136 async_file_flush (void)
3137 {
3138 int ret;
3139 char buf;
3140
3141 do
3142 ret = read (linux_event_pipe[0], &buf, 1);
3143 while (ret >= 0 || (ret == -1 && errno == EINTR));
3144 }
3145
3146 /* Put something in the pipe, so the event loop wakes up. */
3147 static void
3148 async_file_mark (void)
3149 {
3150 int ret;
3151
3152 async_file_flush ();
3153
3154 do
3155 ret = write (linux_event_pipe[1], "+", 1);
3156 while (ret == 0 || (ret == -1 && errno == EINTR));
3157
3158 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3159 be awakened anyway. */
3160 }
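/* Together, async_file_flush and async_file_mark implement the usual
   self-pipe pattern: the pipe ends are non-blocking (hence the EAGAIN
   note above), at most a byte or so ever sits in the pipe, and the
   event loop wakes whenever the read end becomes readable.  */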
3161
3162 static ptid_t
3163 linux_wait (ptid_t ptid,
3164 struct target_waitstatus *ourstatus, int target_options)
3165 {
3166 ptid_t event_ptid;
3167
3168 /* Flush the async file first. */
3169 if (target_is_async_p ())
3170 async_file_flush ();
3171
3172 do
3173 {
3174 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3175 }
3176 while ((target_options & TARGET_WNOHANG) == 0
3177 && ptid_equal (event_ptid, null_ptid)
3178 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3179
3180 /* If at least one stop was reported, there may be more. A single
3181 SIGCHLD can signal more than one child stop. */
3182 if (target_is_async_p ()
3183 && (target_options & TARGET_WNOHANG) != 0
3184 && !ptid_equal (event_ptid, null_ptid))
3185 async_file_mark ();
3186
3187 return event_ptid;
3188 }
3189
3190 /* Send a signal to an LWP. */
3191
3192 static int
3193 kill_lwp (unsigned long lwpid, int signo)
3194 {
3195 /* Use tkill, if possible, in case we are using nptl threads.  If tkill
3196 fails with ENOSYS, the kernel predates it, and we fall back to kill.  */
3197
3198 #ifdef __NR_tkill
3199 {
3200 static int tkill_failed;
3201
3202 if (!tkill_failed)
3203 {
3204 int ret;
3205
3206 errno = 0;
3207 ret = syscall (__NR_tkill, lwpid, signo);
3208 if (errno != ENOSYS)
3209 return ret;
3210 tkill_failed = 1;
3211 }
3212 }
3213 #endif
3214
3215 return kill (lwpid, signo);
3216 }
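/* With NPTL, every thread in a process shares one PID, so a plain
   kill () could be delivered to any thread in the group; tkill
   targets the specific LWP.  Under the older LinuxThreads model each
   thread had its own PID, so the kill () fallback above still
   reaches the intended task.  */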
3217
3218 void
3219 linux_stop_lwp (struct lwp_info *lwp)
3220 {
3221 send_sigstop (lwp);
3222 }
3223
3224 static void
3225 send_sigstop (struct lwp_info *lwp)
3226 {
3227 int pid;
3228
3229 pid = lwpid_of (get_lwp_thread (lwp));
3230
3231 /* If we already have a pending stop signal for this LWP, don't
3232 send another.  */
3233 if (lwp->stop_expected)
3234 {
3235 if (debug_threads)
3236 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3237
3238 return;
3239 }
3240
3241 if (debug_threads)
3242 debug_printf ("Sending sigstop to lwp %d\n", pid);
3243
3244 lwp->stop_expected = 1;
3245 kill_lwp (pid, SIGSTOP);
3246 }
3247
3248 static int
3249 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3250 {
3251 struct thread_info *thread = (struct thread_info *) entry;
3252 struct lwp_info *lwp = get_thread_lwp (thread);
3253
3254 /* Ignore EXCEPT. */
3255 if (lwp == except)
3256 return 0;
3257
3258 if (lwp->stopped)
3259 return 0;
3260
3261 send_sigstop (lwp);
3262 return 0;
3263 }
3264
3265 /* Increment the suspend count of an LWP, and stop it, if not stopped
3266 yet. */
3267 static int
3268 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3269 void *except)
3270 {
3271 struct thread_info *thread = (struct thread_info *) entry;
3272 struct lwp_info *lwp = get_thread_lwp (thread);
3273
3274 /* Ignore EXCEPT. */
3275 if (lwp == except)
3276 return 0;
3277
3278 lwp->suspended++;
3279
3280 return send_sigstop_callback (entry, except);
3281 }
3282
3283 static void
3284 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3285 {
3286 /* It's dead, really. */
3287 lwp->dead = 1;
3288
3289 /* Store the exit status for later. */
3290 lwp->status_pending_p = 1;
3291 lwp->status_pending = wstat;
3292
3293 /* Prevent trying to stop it. */
3294 lwp->stopped = 1;
3295
3296 /* No further stops are expected from a dead lwp. */
3297 lwp->stop_expected = 0;
3298 }
3299
3300 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3301
3302 static void
3303 wait_for_sigstop (void)
3304 {
3305 struct thread_info *saved_thread;
3306 ptid_t saved_tid;
3307 int wstat;
3308 int ret;
3309
3310 saved_thread = current_thread;
3311 if (saved_thread != NULL)
3312 saved_tid = saved_thread->entry.id;
3313 else
3314 saved_tid = null_ptid; /* avoid bogus unused warning */
3315
3316 if (debug_threads)
3317 debug_printf ("wait_for_sigstop: pulling events\n");
3318
3319 /* Passing NULL_PTID as filter indicates we want all events to be
3320 left pending. Eventually this returns when there are no
3321 unwaited-for children left. */
3322 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3323 &wstat, __WALL);
3324 gdb_assert (ret == -1);
3325
3326 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3327 current_thread = saved_thread;
3328 else
3329 {
3330 if (debug_threads)
3331 debug_printf ("Previously current thread died.\n");
3332
3333 if (non_stop)
3334 {
3335 /* We can't change the current inferior behind GDB's back,
3336 otherwise a subsequent command may apply to the wrong
3337 process.  */
3338 current_thread = NULL;
3339 }
3340 else
3341 {
3342 /* Set a valid thread as current. */
3343 set_desired_thread (0);
3344 }
3345 }
3346 }
3347
3348 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3349 move it out, because we need to report the stop event to GDB. For
3350 example, if the user puts a breakpoint in the jump pad, it's
3351 because she wants to debug it. */
3352
3353 static int
3354 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3355 {
3356 struct thread_info *thread = (struct thread_info *) entry;
3357 struct lwp_info *lwp = get_thread_lwp (thread);
3358
3359 gdb_assert (lwp->suspended == 0);
3360 gdb_assert (lwp->stopped);
3361
3362 /* Allow debugging the jump pad, gdb_collect, etc.. */
3363 return (supports_fast_tracepoints ()
3364 && agent_loaded_p ()
3365 && (gdb_breakpoint_here (lwp->stop_pc)
3366 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3367 || thread->last_resume_kind == resume_step)
3368 && linux_fast_tracepoint_collecting (lwp, NULL));
3369 }
3370
3371 static void
3372 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3373 {
3374 struct thread_info *thread = (struct thread_info *) entry;
3375 struct lwp_info *lwp = get_thread_lwp (thread);
3376 int *wstat;
3377
3378 gdb_assert (lwp->suspended == 0);
3379 gdb_assert (lwp->stopped);
3380
3381 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3382
3383 /* Allow debugging the jump pad, gdb_collect, etc. */
3384 if (!gdb_breakpoint_here (lwp->stop_pc)
3385 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3386 && thread->last_resume_kind != resume_step
3387 && maybe_move_out_of_jump_pad (lwp, wstat))
3388 {
3389 if (debug_threads)
3390 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3391 lwpid_of (thread));
3392
3393 if (wstat)
3394 {
3395 lwp->status_pending_p = 0;
3396 enqueue_one_deferred_signal (lwp, wstat);
3397
3398 if (debug_threads)
3399 debug_printf ("Signal %d for LWP %ld deferred "
3400 "(in jump pad)\n",
3401 WSTOPSIG (*wstat), lwpid_of (thread));
3402 }
3403
3404 linux_resume_one_lwp (lwp, 0, 0, NULL);
3405 }
3406 else
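/* This LWP must stay where it is (breakpoint in the pad, watchpoint
   stop, or single-stepping); suspend it so the stabilization loop
   will not resume it.  */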
3407 lwp->suspended++;
3408 }
3409
3410 static int
3411 lwp_running (struct inferior_list_entry *entry, void *data)
3412 {
3413 struct thread_info *thread = (struct thread_info *) entry;
3414 struct lwp_info *lwp = get_thread_lwp (thread);
3415
3416 if (lwp->dead)
3417 return 0;
3418 if (lwp->stopped)
3419 return 0;
3420 return 1;
3421 }
3422
3423 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3424 If SUSPEND, then also increase the suspend count of every LWP,
3425 except EXCEPT. */
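/* In outline the stop protocol is: send SIGSTOP to every LWP not
   already stopped (optionally bumping suspend counts), then drain
   events with wait_for_sigstop until no unwaited-for child remains,
   leaving every status pending on its LWP.  */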
3426
3427 static void
3428 stop_all_lwps (int suspend, struct lwp_info *except)
3429 {
3430 /* Should not be called recursively. */
3431 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3432
3433 if (debug_threads)
3434 {
3435 debug_enter ();
3436 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3437 suspend ? "stop-and-suspend" : "stop",
3438 except != NULL
3439 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3440 : "none");
3441 }
3442
3443 stopping_threads = (suspend
3444 ? STOPPING_AND_SUSPENDING_THREADS
3445 : STOPPING_THREADS);
3446
3447 if (suspend)
3448 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3449 else
3450 find_inferior (&all_threads, send_sigstop_callback, except);
3451 wait_for_sigstop ();
3452 stopping_threads = NOT_STOPPING_THREADS;
3453
3454 if (debug_threads)
3455 {
3456 debug_printf ("stop_all_lwps done, setting stopping_threads "
3457 "back to !stopping\n");
3458 debug_exit ();
3459 }
3460 }
3461
3462 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3463 SIGNAL is nonzero, give it that signal. */
3464
3465 static void
3466 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3467 int step, int signal, siginfo_t *info)
3468 {
3469 struct thread_info *thread = get_lwp_thread (lwp);
3470 struct thread_info *saved_thread;
3471 int fast_tp_collecting;
3472
3473 if (lwp->stopped == 0)
3474 return;
3475
3476 fast_tp_collecting = lwp->collecting_fast_tracepoint;
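/* FAST_TP_COLLECTING is three-valued here: 0 means not collecting;
   1 means the LWP is expected to reach the exit-jump-pad breakpoint
   on its own; 2 means it has to be single-stepped out of the jump
   pad (see the branches further below).  */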
3477
3478 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3479
3480 /* Cancel actions that rely on the PC staying put, in case GDB changed
3481 it behind our back (e.g., the "jump" command, or "set $pc = foo").  */
3482 if (lwp->stop_pc != get_pc (lwp))
3483 {
3484 /* Collecting 'while-stepping' actions doesn't make sense
3485 anymore. */
3486 release_while_stepping_state_list (thread);
3487 }
3488
3489 /* If we have pending signals or status, and a new signal, enqueue the
3490 signal. Also enqueue the signal if we are waiting to reinsert a
3491 breakpoint; it will be picked up again below. */
3492 if (signal != 0
3493 && (lwp->status_pending_p
3494 || lwp->pending_signals != NULL
3495 || lwp->bp_reinsert != 0
3496 || fast_tp_collecting))
3497 {
3498 struct pending_signals *p_sig;
3499 p_sig = xmalloc (sizeof (*p_sig));
3500 p_sig->prev = lwp->pending_signals;
3501 p_sig->signal = signal;
3502 if (info == NULL)
3503 memset (&p_sig->info, 0, sizeof (siginfo_t));
3504 else
3505 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3506 lwp->pending_signals = p_sig;
3507 }
3508
3509 if (lwp->status_pending_p)
3510 {
3511 if (debug_threads)
3512 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3513 " has pending status\n",
3514 lwpid_of (thread), step ? "step" : "continue", signal,
3515 lwp->stop_expected ? "expected" : "not expected");
3516 return;
3517 }
3518
3519 saved_thread = current_thread;
3520 current_thread = thread;
3521
3522 if (debug_threads)
3523 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3524 lwpid_of (thread), step ? "step" : "continue", signal,
3525 lwp->stop_expected ? "expected" : "not expected");
3526
3527 /* This bit needs some thinking about. If we get a signal that
3528 we must report while a single-step reinsert is still pending,
3529 we often end up resuming the thread. It might be better to
3530 (ew) allow a stack of pending events; then we could be sure that
3531 the reinsert happened right away and not lose any signals.
3532
3533 Making this stack would also shrink the window in which breakpoints are
3534 uninserted (see comment in linux_wait_for_lwp) but not enough for
3535 complete correctness, so it won't solve that problem. It may be
3536 worthwhile just to solve this one, however. */
3537 if (lwp->bp_reinsert != 0)
3538 {
3539 if (debug_threads)
3540 debug_printf (" pending reinsert at 0x%s\n",
3541 paddress (lwp->bp_reinsert));
3542
3543 if (can_hardware_single_step ())
3544 {
3545 if (fast_tp_collecting == 0)
3546 {
3547 if (step == 0)
3548 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3549 if (lwp->suspended)
3550 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3551 lwp->suspended);
3552 }
3553
3554 step = 1;
3555 }
3556
3557 /* Postpone any pending signal. It was enqueued above. */
3558 signal = 0;
3559 }
3560
3561 if (fast_tp_collecting == 1)
3562 {
3563 if (debug_threads)
3564 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3565 " (exit-jump-pad-bkpt)\n",
3566 lwpid_of (thread));
3567
3568 /* Postpone any pending signal. It was enqueued above. */
3569 signal = 0;
3570 }
3571 else if (fast_tp_collecting == 2)
3572 {
3573 if (debug_threads)
3574 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3575 " single-stepping\n",
3576 lwpid_of (thread));
3577
3578 if (can_hardware_single_step ())
3579 step = 1;
3580 else
3581 {
3582 internal_error (__FILE__, __LINE__,
3583 "moving out of jump pad single-stepping"
3584 " not implemented on this target");
3585 }
3586
3587 /* Postpone any pending signal. It was enqueued above. */
3588 signal = 0;
3589 }
3590
3591 /* If we have while-stepping actions in this thread, set it stepping.
3592 If we have a signal to deliver, it may or may not be set to
3593 SIG_IGN, we don't know. Assume so, and allow collecting
3594 while-stepping into a signal handler. A possible smart thing to
3595 do would be to set an internal breakpoint at the signal return
3596 address, continue, and carry on catching this while-stepping
3597 action only when that breakpoint is hit. A future
3598 enhancement. */
3599 if (thread->while_stepping != NULL
3600 && can_hardware_single_step ())
3601 {
3602 if (debug_threads)
3603 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3604 lwpid_of (thread));
3605 step = 1;
3606 }
3607
3608 if (the_low_target.get_pc != NULL)
3609 {
3610 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3611
3612 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3613
3614 if (debug_threads)
3615 {
3616 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3617 (long) lwp->stop_pc);
3618 }
3619 }
3620
3621 /* If we have pending signals, consume one unless we are trying to
3622 reinsert a breakpoint or we're trying to finish a fast tracepoint
3623 collect. */
3624 if (lwp->pending_signals != NULL
3625 && lwp->bp_reinsert == 0
3626 && fast_tp_collecting == 0)
3627 {
3628 struct pending_signals **p_sig;
3629
3630 p_sig = &lwp->pending_signals;
3631 while ((*p_sig)->prev != NULL)
3632 p_sig = &(*p_sig)->prev;
3633
3634 signal = (*p_sig)->signal;
3635 if ((*p_sig)->info.si_signo != 0)
3636 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3637 &(*p_sig)->info);
3638
3639 free (*p_sig);
3640 *p_sig = NULL;
3641 }
3642
3643 if (the_low_target.prepare_to_resume != NULL)
3644 the_low_target.prepare_to_resume (lwp);
3645
3646 regcache_invalidate_thread (thread);
3647 errno = 0;
3648 lwp->stepping = step;
3649 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3650 (PTRACE_TYPE_ARG3) 0,
3651 /* Coerce to a uintptr_t first to avoid potential gcc warning
3652 of coercing an 8 byte integer to a 4 byte pointer. */
3653 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3654
3655 current_thread = saved_thread;
3656 if (errno)
3657 perror_with_name ("resuming thread");
3658
3659 /* Successfully resumed. Clear state that no longer makes sense,
3660 and mark the LWP as running. Must not do this before resuming
3661 otherwise if that fails other code will be confused. E.g., we'd
3662 later try to stop the LWP and hang forever waiting for a stop
3663 status. Note that we must not throw after this is cleared,
3664 otherwise handle_zombie_lwp_error would get confused. */
3665 lwp->stopped = 0;
3666 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3667 }
3668
3669 /* Called when we try to resume a stopped LWP and that errors out. If
3670 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3671 or about to become one), discard the error, clear any pending status
3672 the LWP may have, and return true (we'll collect the exit status
3673 soon enough). Otherwise, return false. */
3674
3675 static int
3676 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3677 {
3678 struct thread_info *thread = get_lwp_thread (lp);
3679
3680 /* If we get an error after resuming the LWP successfully, we'd
3681 confuse !T state for the LWP being gone. */
3682 gdb_assert (lp->stopped);
3683
3684 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3685 because even if ptrace failed with ESRCH, the tracee may be "not
3686 yet fully dead", but already refusing ptrace requests. In that
3687 case the tracee has 'R (Running)' state for a little bit
3688 (observed in Linux 3.18). See also the note on ESRCH in the
3689 ptrace(2) man page. Instead, check whether the LWP has any state
3690 other than ptrace-stopped. */
3691
3692 /* Don't assume anything if /proc/PID/status can't be read. */
3693 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3694 {
3695 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3696 lp->status_pending_p = 0;
3697 return 1;
3698 }
3699 return 0;
3700 }
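
/* Illustrative sketch (added for exposition; not part of gdbserver):
   the trace-stop test above ultimately boils down to reading the
   "State:" line of /proc/PID/status.  The real implementation lives
   in nat/linux-procfs; the hypothetical helper below only shows the
   idea.  A ptrace-stopped tracee reports "t (tracing stop)" on
   recent kernels, "T (tracing stop)" on older ones.  */

static int
example_pid_is_trace_stopped (int pid)
{
  char filename[64];
  char line[256];
  FILE *f;
  int stopped = 0;

  sprintf (filename, "/proc/%d/status", pid);
  f = fopen (filename, "r");
  if (f == NULL)
    return -1;			/* Can't tell; don't assume anything.  */

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	/* Skip the whitespace after the "State:" tag; the state
	   string follows.  */
	while (*p == ' ' || *p == '\t')
	  p++;
	stopped = (strncmp (p, "t (tracing stop)", 16) == 0
		   || strncmp (p, "T (tracing stop)", 16) == 0);
	break;
      }

  fclose (f);
  return stopped;
}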
3701
3702 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3703 disappears while we try to resume it. */
3704
3705 static void
3706 linux_resume_one_lwp (struct lwp_info *lwp,
3707 int step, int signal, siginfo_t *info)
3708 {
3709 TRY
3710 {
3711 linux_resume_one_lwp_throw (lwp, step, signal, info);
3712 }
3713 CATCH (ex, RETURN_MASK_ERROR)
3714 {
3715 if (!check_ptrace_stopped_lwp_gone (lwp))
3716 throw_exception (ex);
3717 }
3718 END_CATCH
3719 }
3720
3721 struct thread_resume_array
3722 {
3723 struct thread_resume *resume;
3724 size_t n;
3725 };
3726
3727 /* This function is called once per thread via find_inferior.
3728 ARG is a pointer to a thread_resume_array struct.
3729 We look up the thread specified by ENTRY in ARG, and mark the thread
3730 with a pointer to the appropriate resume request.
3731
3732 This algorithm is O(threads * resume elements), but the number of
3733 resume elements is small (and will remain small at least until GDB
3734 supports thread suspension). */
3735
3736 static int
3737 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3738 {
3739 struct thread_info *thread = (struct thread_info *) entry;
3740 struct lwp_info *lwp = get_thread_lwp (thread);
3741 int ndx;
3742 struct thread_resume_array *r;
3743
3744 r = arg;
3745
3746 for (ndx = 0; ndx < r->n; ndx++)
3747 {
3748 ptid_t ptid = r->resume[ndx].thread;
3749 if (ptid_equal (ptid, minus_one_ptid)
3750 || ptid_equal (ptid, entry->id)
3751 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3752 of PID'. */
3753 || (ptid_get_pid (ptid) == pid_of (thread)
3754 && (ptid_is_pid (ptid)
3755 || ptid_get_lwp (ptid) == -1)))
3756 {
3757 if (r->resume[ndx].kind == resume_stop
3758 && thread->last_resume_kind == resume_stop)
3759 {
3760 if (debug_threads)
3761 debug_printf ("already %s LWP %ld at GDB's request\n",
3762 (thread->last_status.kind
3763 == TARGET_WAITKIND_STOPPED)
3764 ? "stopped"
3765 : "stopping",
3766 lwpid_of (thread));
3767
3768 continue;
3769 }
3770
3771 lwp->resume = &r->resume[ndx];
3772 thread->last_resume_kind = lwp->resume->kind;
3773
3774 lwp->step_range_start = lwp->resume->step_range_start;
3775 lwp->step_range_end = lwp->resume->step_range_end;
3776
3777 /* If we had a deferred signal to report, dequeue one now.
3778 This can happen if LWP gets more than one signal while
3779 trying to get out of a jump pad. */
3780 if (lwp->stopped
3781 && !lwp->status_pending_p
3782 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3783 {
3784 lwp->status_pending_p = 1;
3785
3786 if (debug_threads)
3787 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3788 "leaving status pending.\n",
3789 WSTOPSIG (lwp->status_pending),
3790 lwpid_of (thread));
3791 }
3792
3793 return 0;
3794 }
3795 }
3796
3797 /* No resume action for this thread. */
3798 lwp->resume = NULL;
3799
3800 return 0;
3801 }
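
/* Worked example (added for exposition): a GDB "vCont;s:p5.5;c"
   packet reaches this function as a two-element resume array along
   the lines of

     resume[0].thread = ptid for (pid 5, lwp 5);  kind = resume_step;
     resume[1].thread = minus_one_ptid;           kind = resume_continue;

   The loop above picks the first matching entry for each thread, so
   LWP 5.5 gets a single-step request while the wildcard entry makes
   every other thread continue.  Field names follow struct
   thread_resume as used in this file.  */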
3802
3803 /* find_inferior callback for linux_resume.
3804 Set *FLAG_P if this lwp has an interesting status pending. */
3805
3806 static int
3807 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3808 {
3809 struct thread_info *thread = (struct thread_info *) entry;
3810 struct lwp_info *lwp = get_thread_lwp (thread);
3811
3812 /* LWPs which will not be resumed are not interesting, because
3813 we might not wait for them next time through linux_wait. */
3814 if (lwp->resume == NULL)
3815 return 0;
3816
3817 if (thread_still_has_status_pending_p (thread))
3818 * (int *) flag_p = 1;
3819
3820 return 0;
3821 }
3822
3823 /* Return 1 if this lwp that GDB wants running is stopped at an
3824 internal breakpoint that we need to step over. It assumes that any
3825 required STOP_PC adjustment has already been propagated to the
3826 inferior's regcache. */
3827
3828 static int
3829 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3830 {
3831 struct thread_info *thread = (struct thread_info *) entry;
3832 struct lwp_info *lwp = get_thread_lwp (thread);
3833 struct thread_info *saved_thread;
3834 CORE_ADDR pc;
3835
3836 /* LWPs which will not be resumed are not interesting, because we
3837 might not wait for them next time through linux_wait. */
3838
3839 if (!lwp->stopped)
3840 {
3841 if (debug_threads)
3842 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3843 lwpid_of (thread));
3844 return 0;
3845 }
3846
3847 if (thread->last_resume_kind == resume_stop)
3848 {
3849 if (debug_threads)
3850 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3851 " stopped\n",
3852 lwpid_of (thread));
3853 return 0;
3854 }
3855
3856 gdb_assert (lwp->suspended >= 0);
3857
3858 if (lwp->suspended)
3859 {
3860 if (debug_threads)
3861 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3862 lwpid_of (thread));
3863 return 0;
3864 }
3865
3866 if (!lwp->need_step_over)
3867 {
3868 if (debug_threads)
3869 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3870 }
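/* Note (descriptive comment added for clarity): the block just above
   does not return early.  Even when the NEED_STEP_OVER flag is
   clear, the checks below may still decide a step-over is needed if
   an internal breakpoint sits at the unchanged PC.  */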
3871
3872 if (lwp->status_pending_p)
3873 {
3874 if (debug_threads)
3875 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3876 " status.\n",
3877 lwpid_of (thread));
3878 return 0;
3879 }
3880
3881 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3882 or we have. */
3883 pc = get_pc (lwp);
3884
3885 /* If the PC has changed since we stopped, then don't do anything,
3886 and let the breakpoint/tracepoint be hit. This happens if, for
3887 instance, GDB handled the decr_pc_after_break subtraction itself,
3888 GDB is OOL stepping this thread, the user has issued a "jump"
3889 command, or has poked the thread's registers herself. */
3890 if (pc != lwp->stop_pc)
3891 {
3892 if (debug_threads)
3893 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3894 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3895 lwpid_of (thread),
3896 paddress (lwp->stop_pc), paddress (pc));
3897
3898 lwp->need_step_over = 0;
3899 return 0;
3900 }
3901
3902 saved_thread = current_thread;
3903 current_thread = thread;
3904
3905 /* We can only step over breakpoints we know about. */
3906 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3907 {
3908 /* Don't step over a breakpoint that GDB expects to hit
3909 though. If the condition is being evaluated on the target's side
3910 and it evaluates to false, step over this breakpoint as well. */
3911 if (gdb_breakpoint_here (pc)
3912 && gdb_condition_true_at_breakpoint (pc)
3913 && gdb_no_commands_at_breakpoint (pc))
3914 {
3915 if (debug_threads)
3916 debug_printf ("Need step over [LWP %ld]? yes, but found"
3917 " GDB breakpoint at 0x%s; skipping step over\n",
3918 lwpid_of (thread), paddress (pc));
3919
3920 current_thread = saved_thread;
3921 return 0;
3922 }
3923 else
3924 {
3925 if (debug_threads)
3926 debug_printf ("Need step over [LWP %ld]? yes, "
3927 "found breakpoint at 0x%s\n",
3928 lwpid_of (thread), paddress (pc));
3929
3930 /* We've found an lwp that needs stepping over --- return 1 so
3931 that find_inferior stops looking. */
3932 current_thread = saved_thread;
3933
3934 /* If the step over is cancelled, this is set again. */
3935 lwp->need_step_over = 0;
3936 return 1;
3937 }
3938 }
3939
3940 current_thread = saved_thread;
3941
3942 if (debug_threads)
3943 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3944 " at 0x%s\n",
3945 lwpid_of (thread), paddress (pc));
3946
3947 return 0;
3948 }
3949
3950 /* Start a step-over operation on LWP. When LWP is stopped at a
3951 breakpoint, to make progress, we need to move the breakpoint out
3952 of the way. If we let other threads run while we do that, they may
3953 pass by the breakpoint location and miss hitting it. To avoid
3954 that, a step-over momentarily stops all threads while LWP is
3955 single-stepped with the breakpoint temporarily uninserted from
3956 the inferior. When the single-step finishes, we reinsert the
3957 breakpoint, and let all threads that are supposed to be running,
3958 run again.
3959
3960 On targets that don't support hardware single-step, we don't
3961 currently support full software single-stepping. Instead, we only
3962 support stepping over the thread event breakpoint, by asking the
3963 low target where to place a reinsert breakpoint. Since this
3964 routine assumes the breakpoint being stepped over is a thread event
3965 breakpoint, it usually assumes the return address of the current
3966 function is a good enough place to set the reinsert breakpoint. */
3967
3968 static int
3969 start_step_over (struct lwp_info *lwp)
3970 {
3971 struct thread_info *thread = get_lwp_thread (lwp);
3972 struct thread_info *saved_thread;
3973 CORE_ADDR pc;
3974 int step;
3975
3976 if (debug_threads)
3977 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3978 lwpid_of (thread));
3979
3980 stop_all_lwps (1, lwp);
3981 gdb_assert (lwp->suspended == 0);
3982
3983 if (debug_threads)
3984 debug_printf ("Done stopping all threads for step-over.\n");
3985
3986 /* Note, we should always reach here with an already adjusted PC,
3987 either by GDB (if we're resuming due to GDB's request), or by our
3988 caller, if we just finished handling an internal breakpoint GDB
3989 shouldn't care about. */
3990 pc = get_pc (lwp);
3991
3992 saved_thread = current_thread;
3993 current_thread = thread;
3994
3995 lwp->bp_reinsert = pc;
3996 uninsert_breakpoints_at (pc);
3997 uninsert_fast_tracepoint_jumps_at (pc);
3998
3999 if (can_hardware_single_step ())
4000 {
4001 step = 1;
4002 }
4003 else
4004 {
4005 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4006 set_reinsert_breakpoint (raddr);
4007 step = 0;
4008 }
4009
4010 current_thread = saved_thread;
4011
4012 linux_resume_one_lwp (lwp, step, 0, NULL);
4013
4014 /* Require next event from this LWP. */
4015 step_over_bkpt = thread->entry.id;
4016 return 1;
4017 }
4018
4019 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4020 start_step_over, if still there, and delete any reinsert
4021 breakpoints we've set, on targets without hardware single-step. */
4022
4023 static int
4024 finish_step_over (struct lwp_info *lwp)
4025 {
4026 if (lwp->bp_reinsert != 0)
4027 {
4028 if (debug_threads)
4029 debug_printf ("Finished step over.\n");
4030
4031 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4032 may be no breakpoint to reinsert there by now. */
4033 reinsert_breakpoints_at (lwp->bp_reinsert);
4034 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4035
4036 lwp->bp_reinsert = 0;
4037
4038 /* Delete any software-single-step reinsert breakpoints. No
4039 longer needed. We don't have to worry about other threads
4040 hitting this trap, and later not being able to explain it,
4041 because we were stepping over a breakpoint, and we hold all
4042 threads but LWP stopped while doing that. */
4043 if (!can_hardware_single_step ())
4044 delete_reinsert_breakpoints ();
4045
4046 step_over_bkpt = null_ptid;
4047 return 1;
4048 }
4049 else
4050 return 0;
4051 }
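
/* For orientation, the whole step-over dance implemented by
   start_step_over and finish_step_over amounts to the following
   sequence (a simplified sketch, not an actual code path in this
   file):

     1. stop_all_lwps (1, lwp);                  freeze all other threads
     2. uninsert_breakpoints_at (pc);            lift the breakpoint
     3. linux_resume_one_lwp (lwp, 1, 0, NULL);  hardware single-step
     4. (wait for the step to report back via linux_wait)
     5. reinsert_breakpoints_at (pc);            put the breakpoint back
     6. unstop_all_lwps (1, lwp);                let the others run again

   On targets without hardware single-step, step 3 instead plants a
   reinsert breakpoint at the address returned by
   the_low_target.breakpoint_reinsert_addr, as seen above.  */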
4052
4053 /* This function is called once per thread. We check the thread's resume
4054 request, which will tell us whether to resume, step, or leave the thread
4055 stopped; and what signal, if any, it should be sent.
4056
4057 For threads which we aren't explicitly told otherwise, we preserve
4058 the stepping flag; this is used for stepping over gdbserver-placed
4059 breakpoints.
4060
4061 If a pending status was set on any thread, we queue any needed
4062 signals, since we won't actually resume. We already have a pending
4063 event to report, so we don't need to preserve any step requests;
4064 they should be re-issued if necessary. */
4065
4066 static int
4067 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4068 {
4069 struct thread_info *thread = (struct thread_info *) entry;
4070 struct lwp_info *lwp = get_thread_lwp (thread);
4071 int step;
4072 int leave_all_stopped = * (int *) arg;
4073 int leave_pending;
4074
4075 if (lwp->resume == NULL)
4076 return 0;
4077
4078 if (lwp->resume->kind == resume_stop)
4079 {
4080 if (debug_threads)
4081 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4082
4083 if (!lwp->stopped)
4084 {
4085 if (debug_threads)
4086 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4087
4088 /* Stop the thread, and wait for the event asynchronously,
4089 through the event loop. */
4090 send_sigstop (lwp);
4091 }
4092 else
4093 {
4094 if (debug_threads)
4095 debug_printf ("already stopped LWP %ld\n",
4096 lwpid_of (thread));
4097
4098 /* The LWP may have been stopped in an internal event that
4099 was not meant to be notified back to GDB (e.g., gdbserver
4100 breakpoint), so we should be reporting a stop event in
4101 this case too. */
4102
4103 /* If the thread already has a pending SIGSTOP, this is a
4104 no-op. Otherwise, something later will presumably resume
4105 the thread and this will cause it to cancel any pending
4106 operation, due to last_resume_kind == resume_stop. If
4107 the thread already has a pending status to report, we
4108 will still report it the next time we wait - see
4109 status_pending_p_callback. */
4110
4111 /* If we already have a pending signal to report, then
4112 there's no need to queue a SIGSTOP, as this means we're
4113 midway through moving the LWP out of the jumppad, and we
4114 will report the pending signal as soon as that is
4115 finished. */
4116 if (lwp->pending_signals_to_report == NULL)
4117 send_sigstop (lwp);
4118 }
4119
4120 /* For stop requests, we're done. */
4121 lwp->resume = NULL;
4122 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4123 return 0;
4124 }
4125
4126 /* If this thread, which is about to be resumed, has a pending status,
4127 then don't resume any threads - we can just report the pending
4128 status. Make sure to queue any signals that would otherwise be
4129 sent. In all-stop mode, we base this decision on whether *any*
4130 thread has a pending status. If there's a thread that needs the
4131 step-over-breakpoint dance, then don't resume any other thread
4132 but that particular one. */
4133 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4134
4135 if (!leave_pending)
4136 {
4137 if (debug_threads)
4138 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4139
4140 step = (lwp->resume->kind == resume_step);
4141 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4142 }
4143 else
4144 {
4145 if (debug_threads)
4146 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4147
4148 /* If we have a new signal, enqueue the signal. */
4149 if (lwp->resume->sig != 0)
4150 {
4151 struct pending_signals *p_sig;
4152 p_sig = xmalloc (sizeof (*p_sig));
4153 p_sig->prev = lwp->pending_signals;
4154 p_sig->signal = lwp->resume->sig;
4155 memset (&p_sig->info, 0, sizeof (siginfo_t));
4156
4157 /* If this is the same signal we were previously stopped by,
4158 make sure to queue its siginfo. We can ignore the return
4159 value of ptrace; if it fails, we'll skip
4160 PTRACE_SETSIGINFO. */
4161 if (WIFSTOPPED (lwp->last_status)
4162 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4163 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4164 &p_sig->info);
4165
4166 lwp->pending_signals = p_sig;
4167 }
4168 }
4169
4170 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4171 lwp->resume = NULL;
4172 return 0;
4173 }
4174
4175 static void
4176 linux_resume (struct thread_resume *resume_info, size_t n)
4177 {
4178 struct thread_resume_array array = { resume_info, n };
4179 struct thread_info *need_step_over = NULL;
4180 int any_pending;
4181 int leave_all_stopped;
4182
4183 if (debug_threads)
4184 {
4185 debug_enter ();
4186 debug_printf ("linux_resume:\n");
4187 }
4188
4189 find_inferior (&all_threads, linux_set_resume_request, &array);
4190
4191 /* If there is a thread which would otherwise be resumed, which has
4192 a pending status, then don't resume any threads - we can just
4193 report the pending status. Make sure to queue any signals that
4194 would otherwise be sent. In non-stop mode, we'll apply this
4195 logic to each thread individually. We consume all pending events
4196 before considering starting a step-over (in all-stop). */
4197 any_pending = 0;
4198 if (!non_stop)
4199 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4200
4201 /* If there is a thread which would otherwise be resumed, which is
4202 stopped at a breakpoint that needs stepping over, then don't
4203 resume any threads - have it step over the breakpoint with all
4204 other threads stopped, then resume all threads again. Make sure
4205 to queue any signals that would otherwise be delivered or
4206 queued. */
4207 if (!any_pending && supports_breakpoints ())
4208 need_step_over
4209 = (struct thread_info *) find_inferior (&all_threads,
4210 need_step_over_p, NULL);
4211
4212 leave_all_stopped = (need_step_over != NULL || any_pending);
4213
4214 if (debug_threads)
4215 {
4216 if (need_step_over != NULL)
4217 debug_printf ("Not resuming all, need step over\n");
4218 else if (any_pending)
4219 debug_printf ("Not resuming, all-stop and found "
4220 "an LWP with pending status\n");
4221 else
4222 debug_printf ("Resuming, no pending status or step over needed\n");
4223 }
4224
4225 /* Even if we're leaving threads stopped, queue all signals we'd
4226 otherwise deliver. */
4227 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4228
4229 if (need_step_over)
4230 start_step_over (get_thread_lwp (need_step_over));
4231
4232 if (debug_threads)
4233 {
4234 debug_printf ("linux_resume done\n");
4235 debug_exit ();
4236 }
4237 }
4238
4239 /* This function is called once per thread. We check the thread's
4240 last resume request, which will tell us whether to resume, step, or
4241 leave the thread stopped. Any signal the client requested to be
4242 delivered has already been enqueued at this point.
4243
4244 If any thread that GDB wants running is stopped at an internal
4245 breakpoint that needs stepping over, we start a step-over operation
4246 on that particular thread, and leave all others stopped. */
4247
4248 static int
4249 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4250 {
4251 struct thread_info *thread = (struct thread_info *) entry;
4252 struct lwp_info *lwp = get_thread_lwp (thread);
4253 int step;
4254
4255 if (lwp == except)
4256 return 0;
4257
4258 if (debug_threads)
4259 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4260
4261 if (!lwp->stopped)
4262 {
4263 if (debug_threads)
4264 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4265 return 0;
4266 }
4267
4268 if (thread->last_resume_kind == resume_stop
4269 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4270 {
4271 if (debug_threads)
4272 debug_printf (" client wants LWP %ld to remain stopped\n",
4273 lwpid_of (thread));
4274 return 0;
4275 }
4276
4277 if (lwp->status_pending_p)
4278 {
4279 if (debug_threads)
4280 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4281 lwpid_of (thread));
4282 return 0;
4283 }
4284
4285 gdb_assert (lwp->suspended >= 0);
4286
4287 if (lwp->suspended)
4288 {
4289 if (debug_threads)
4290 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4291 return 0;
4292 }
4293
4294 if (thread->last_resume_kind == resume_stop
4295 && lwp->pending_signals_to_report == NULL
4296 && lwp->collecting_fast_tracepoint == 0)
4297 {
4298 /* We haven't reported this LWP as stopped yet (otherwise, the
4299 last_status.kind check above would catch it, and we wouldn't
4300 reach here). This LWP may have been momentarily paused by a
4301 stop_all_lwps call while handling, for example, another LWP's
4302 step-over. In that case, the pending expected SIGSTOP signal
4303 that was queued at vCont;t handling time will have already
4304 been consumed by wait_for_sigstop, and so we need to requeue
4305 another one here. Note that if the LWP already has a SIGSTOP
4306 pending, this is a no-op. */
4307
4308 if (debug_threads)
4309 debug_printf ("Client wants LWP %ld to stop. "
4310 "Making sure it has a SIGSTOP pending\n",
4311 lwpid_of (thread));
4312
4313 send_sigstop (lwp);
4314 }
4315
4316 step = thread->last_resume_kind == resume_step;
4317 linux_resume_one_lwp (lwp, step, 0, NULL);
4318 return 0;
4319 }
4320
4321 static int
4322 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4323 {
4324 struct thread_info *thread = (struct thread_info *) entry;
4325 struct lwp_info *lwp = get_thread_lwp (thread);
4326
4327 if (lwp == except)
4328 return 0;
4329
4330 lwp->suspended--;
4331 gdb_assert (lwp->suspended >= 0);
4332
4333 return proceed_one_lwp (entry, except);
4334 }
4335
4336 /* When we finish a step-over, set threads running again. If there's
4337 another thread that may need a step-over, now's the time to start
4338 it. Eventually, we'll move all threads past their breakpoints. */
4339
4340 static void
4341 proceed_all_lwps (void)
4342 {
4343 struct thread_info *need_step_over;
4344
4345 /* If there is a thread which would otherwise be resumed, which is
4346 stopped at a breakpoint that needs stepping over, then don't
4347 resume any threads - have it step over the breakpoint with all
4348 other threads stopped, then resume all threads again. */
4349
4350 if (supports_breakpoints ())
4351 {
4352 need_step_over
4353 = (struct thread_info *) find_inferior (&all_threads,
4354 need_step_over_p, NULL);
4355
4356 if (need_step_over != NULL)
4357 {
4358 if (debug_threads)
4359 debug_printf ("proceed_all_lwps: found "
4360 "thread %ld needing a step-over\n",
4361 lwpid_of (need_step_over));
4362
4363 start_step_over (get_thread_lwp (need_step_over));
4364 return;
4365 }
4366 }
4367
4368 if (debug_threads)
4369 debug_printf ("Proceeding, no step-over needed\n");
4370
4371 find_inferior (&all_threads, proceed_one_lwp, NULL);
4372 }
4373
4374 /* Stopped LWPs that the client wanted to be running, that don't have
4375 pending statuses, are set to run again, except for EXCEPT, if not
4376 NULL. This undoes a stop_all_lwps call. */
4377
4378 static void
4379 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4380 {
4381 if (debug_threads)
4382 {
4383 debug_enter ();
4384 if (except)
4385 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4386 lwpid_of (get_lwp_thread (except)));
4387 else
4388 debug_printf ("unstopping all lwps\n");
4389 }
4390
4391 if (unsuspend)
4392 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4393 else
4394 find_inferior (&all_threads, proceed_one_lwp, except);
4395
4396 if (debug_threads)
4397 {
4398 debug_printf ("unstop_all_lwps done\n");
4399 debug_exit ();
4400 }
4401 }
4402
4403
4404 #ifdef HAVE_LINUX_REGSETS
4405
4406 #define use_linux_regsets 1
4407
4408 /* Returns true if REGSET has been disabled. */
4409
4410 static int
4411 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4412 {
4413 return (info->disabled_regsets != NULL
4414 && info->disabled_regsets[regset - info->regsets]);
4415 }
4416
4417 /* Disable REGSET. */
4418
4419 static void
4420 disable_regset (struct regsets_info *info, struct regset_info *regset)
4421 {
4422 int dr_offset;
4423
4424 dr_offset = regset - info->regsets;
4425 if (info->disabled_regsets == NULL)
4426 info->disabled_regsets = xcalloc (1, info->num_regsets);
4427 info->disabled_regsets[dr_offset] = 1;
4428 }
4429
4430 static int
4431 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4432 struct regcache *regcache)
4433 {
4434 struct regset_info *regset;
4435 int saw_general_regs = 0;
4436 int pid;
4437 struct iovec iov;
4438
4439 pid = lwpid_of (current_thread);
4440 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4441 {
4442 void *buf, *data;
4443 int nt_type, res;
4444
4445 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4446 continue;
4447
4448 buf = xmalloc (regset->size);
4449
4450 nt_type = regset->nt_type;
4451 if (nt_type)
4452 {
4453 iov.iov_base = buf;
4454 iov.iov_len = regset->size;
4455 data = (void *) &iov;
4456 }
4457 else
4458 data = buf;
4459
4460 #ifndef __sparc__
4461 res = ptrace (regset->get_request, pid,
4462 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4463 #else
4464 res = ptrace (regset->get_request, pid, data, nt_type);
4465 #endif
4466 if (res < 0)
4467 {
4468 if (errno == EIO)
4469 {
4470 /* If we get EIO on a regset, do not try it again for
4471 this process mode. */
4472 disable_regset (regsets_info, regset);
4473 }
4474 else if (errno == ENODATA)
4475 {
4476 /* ENODATA may be returned if the regset is currently
4477 not "active". This can happen in normal operation,
4478 so suppress the warning in this case. */
4479 }
4480 else
4481 {
4482 char s[256];
4483 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4484 pid);
4485 perror (s);
4486 }
4487 }
4488 else
4489 {
4490 if (regset->type == GENERAL_REGS)
4491 saw_general_regs = 1;
4492 regset->store_function (regcache, buf);
4493 }
4494 free (buf);
4495 }
4496 if (saw_general_regs)
4497 return 0;
4498 else
4499 return 1;
4500 }
4501
4502 static int
4503 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4504 struct regcache *regcache)
4505 {
4506 struct regset_info *regset;
4507 int saw_general_regs = 0;
4508 int pid;
4509 struct iovec iov;
4510
4511 pid = lwpid_of (current_thread);
4512 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4513 {
4514 void *buf, *data;
4515 int nt_type, res;
4516
4517 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4518 || regset->fill_function == NULL)
4519 continue;
4520
4521 buf = xmalloc (regset->size);
4522
4523 /* First fill the buffer with the current register set contents,
4524 in case there are any items in the kernel's regset that are
4525 not in gdbserver's regcache. */
4526
4527 nt_type = regset->nt_type;
4528 if (nt_type)
4529 {
4530 iov.iov_base = buf;
4531 iov.iov_len = regset->size;
4532 data = (void *) &iov;
4533 }
4534 else
4535 data = buf;
4536
4537 #ifndef __sparc__
4538 res = ptrace (regset->get_request, pid,
4539 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4540 #else
4541 res = ptrace (regset->get_request, pid, data, nt_type);
4542 #endif
4543
4544 if (res == 0)
4545 {
4546 /* Then overlay our cached registers on that. */
4547 regset->fill_function (regcache, buf);
4548
4549 /* Only now do we write the register set. */
4550 #ifndef __sparc__
4551 res = ptrace (regset->set_request, pid,
4552 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4553 #else
4554 res = ptrace (regset->set_request, pid, data, nt_type);
4555 #endif
4556 }
4557
4558 if (res < 0)
4559 {
4560 if (errno == EIO)
4561 {
4562 /* If we get EIO on a regset, do not try it again for
4563 this process mode. */
4564 disable_regset (regsets_info, regset);
4565 }
4566 else if (errno == ESRCH)
4567 {
4568 /* At this point, ESRCH should mean the process is
4569 already gone, in which case we simply ignore attempts
4570 to change its registers. See also the related
4571 comment in linux_resume_one_lwp. */
4572 free (buf);
4573 return 0;
4574 }
4575 else
4576 {
4577 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4578 }
4579 }
4580 else if (regset->type == GENERAL_REGS)
4581 saw_general_regs = 1;
4582 free (buf);
4583 }
4584 if (saw_general_regs)
4585 return 0;
4586 else
4587 return 1;
4588 }
4589
4590 #else /* !HAVE_LINUX_REGSETS */
4591
4592 #define use_linux_regsets 0
4593 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4594 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4595
4596 #endif
4597
4598 /* Return 1 if register REGNO is supported by one of the regset ptrace
4599 calls or 0 if it has to be transferred individually. */
4600
4601 static int
4602 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4603 {
4604 unsigned char mask = 1 << (regno % 8);
4605 size_t index = regno / 8;
4606
4607 return (use_linux_regsets
4608 && (regs_info->regset_bitmap == NULL
4609 || (regs_info->regset_bitmap[index] & mask) != 0));
4610 }
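
/* Worked example (added for exposition): for REGNO == 10 the test
   above computes index = 10 / 8 = 1 and mask = 1 << (10 % 8) = 0x04,
   so register 10 is handled by the regsets iff bit 2 of
   regset_bitmap[1] is set (or the bitmap is NULL, which means "all
   registers").  */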
4611
4612 #ifdef HAVE_LINUX_USRREGS
4613
4614 int
4615 register_addr (const struct usrregs_info *usrregs, int regnum)
4616 {
4617 int addr;
4618
4619 if (regnum < 0 || regnum >= usrregs->num_regs)
4620 error ("Invalid register number %d.", regnum);
4621
4622 addr = usrregs->regmap[regnum];
4623
4624 return addr;
4625 }
4626
4627 /* Fetch one register. */
4628 static void
4629 fetch_register (const struct usrregs_info *usrregs,
4630 struct regcache *regcache, int regno)
4631 {
4632 CORE_ADDR regaddr;
4633 int i, size;
4634 char *buf;
4635 int pid;
4636
4637 if (regno >= usrregs->num_regs)
4638 return;
4639 if ((*the_low_target.cannot_fetch_register) (regno))
4640 return;
4641
4642 regaddr = register_addr (usrregs, regno);
4643 if (regaddr == -1)
4644 return;
4645
4646 size = ((register_size (regcache->tdesc, regno)
4647 + sizeof (PTRACE_XFER_TYPE) - 1)
4648 & -sizeof (PTRACE_XFER_TYPE));
4649 buf = alloca (size);
4650
4651 pid = lwpid_of (current_thread);
4652 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4653 {
4654 errno = 0;
4655 *(PTRACE_XFER_TYPE *) (buf + i) =
4656 ptrace (PTRACE_PEEKUSER, pid,
4657 /* Coerce to a uintptr_t first to avoid potential gcc warning
4658 of coercing an 8 byte integer to a 4 byte pointer. */
4659 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4660 regaddr += sizeof (PTRACE_XFER_TYPE);
4661 if (errno != 0)
4662 error ("reading register %d: %s", regno, strerror (errno));
4663 }
4664
4665 if (the_low_target.supply_ptrace_register)
4666 the_low_target.supply_ptrace_register (regcache, regno, buf);
4667 else
4668 supply_register (regcache, regno, buf);
4669 }
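
/* The size computation above rounds the register size up to a whole
   number of ptrace transfer words.  E.g., with an 8-byte
   PTRACE_XFER_TYPE and a 10-byte register (illustrative numbers):

     size = (10 + 8 - 1) & -8 = 17 & ~7 = 16

   so the PTRACE_PEEKUSER loop performs two 8-byte transfers.  */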
4670
4671 /* Store one register. */
4672 static void
4673 store_register (const struct usrregs_info *usrregs,
4674 struct regcache *regcache, int regno)
4675 {
4676 CORE_ADDR regaddr;
4677 int i, size;
4678 char *buf;
4679 int pid;
4680
4681 if (regno >= usrregs->num_regs)
4682 return;
4683 if ((*the_low_target.cannot_store_register) (regno))
4684 return;
4685
4686 regaddr = register_addr (usrregs, regno);
4687 if (regaddr == -1)
4688 return;
4689
4690 size = ((register_size (regcache->tdesc, regno)
4691 + sizeof (PTRACE_XFER_TYPE) - 1)
4692 & -sizeof (PTRACE_XFER_TYPE));
4693 buf = alloca (size);
4694 memset (buf, 0, size);
4695
4696 if (the_low_target.collect_ptrace_register)
4697 the_low_target.collect_ptrace_register (regcache, regno, buf);
4698 else
4699 collect_register (regcache, regno, buf);
4700
4701 pid = lwpid_of (current_thread);
4702 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4703 {
4704 errno = 0;
4705 ptrace (PTRACE_POKEUSER, pid,
4706 /* Coerce to a uintptr_t first to avoid potential gcc warning
4707 about coercing an 8 byte integer to a 4 byte pointer. */
4708 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4709 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4710 if (errno != 0)
4711 {
4712 /* At this point, ESRCH should mean the process is
4713 already gone, in which case we simply ignore attempts
4714 to change its registers. See also the related
4715 comment in linux_resume_one_lwp. */
4716 if (errno == ESRCH)
4717 return;
4718
4719 if ((*the_low_target.cannot_store_register) (regno) == 0)
4720 error ("writing register %d: %s", regno, strerror (errno));
4721 }
4722 regaddr += sizeof (PTRACE_XFER_TYPE);
4723 }
4724 }
4725
4726 /* Fetch all registers, or just one, from the child process.
4727 If REGNO is -1, do this for all registers, skipping any that are
4728 assumed to have been retrieved by regsets_fetch_inferior_registers,
4729 unless ALL is non-zero.
4730 Otherwise, REGNO specifies which register (so we can save time). */
4731 static void
4732 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4733 struct regcache *regcache, int regno, int all)
4734 {
4735 struct usrregs_info *usr = regs_info->usrregs;
4736
4737 if (regno == -1)
4738 {
4739 for (regno = 0; regno < usr->num_regs; regno++)
4740 if (all || !linux_register_in_regsets (regs_info, regno))
4741 fetch_register (usr, regcache, regno);
4742 }
4743 else
4744 fetch_register (usr, regcache, regno);
4745 }
4746
4747 /* Store our register values back into the inferior.
4748 If REGNO is -1, do this for all registers, skipping any that are
4749 assumed to have been saved by regsets_store_inferior_registers,
4750 unless ALL is non-zero.
4751 Otherwise, REGNO specifies which register (so we can save time). */
4752 static void
4753 usr_store_inferior_registers (const struct regs_info *regs_info,
4754 struct regcache *regcache, int regno, int all)
4755 {
4756 struct usrregs_info *usr = regs_info->usrregs;
4757
4758 if (regno == -1)
4759 {
4760 for (regno = 0; regno < usr->num_regs; regno++)
4761 if (all || !linux_register_in_regsets (regs_info, regno))
4762 store_register (usr, regcache, regno);
4763 }
4764 else
4765 store_register (usr, regcache, regno);
4766 }
4767
4768 #else /* !HAVE_LINUX_USRREGS */
4769
4770 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4771 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4772
4773 #endif
4774
4775
4776 void
4777 linux_fetch_registers (struct regcache *regcache, int regno)
4778 {
4779 int use_regsets;
4780 int all = 0;
4781 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4782
4783 if (regno == -1)
4784 {
4785 if (the_low_target.fetch_register != NULL
4786 && regs_info->usrregs != NULL)
4787 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4788 (*the_low_target.fetch_register) (regcache, regno);
4789
4790 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4791 if (regs_info->usrregs != NULL)
4792 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4793 }
4794 else
4795 {
4796 if (the_low_target.fetch_register != NULL
4797 && (*the_low_target.fetch_register) (regcache, regno))
4798 return;
4799
4800 use_regsets = linux_register_in_regsets (regs_info, regno);
4801 if (use_regsets)
4802 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4803 regcache);
4804 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4805 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4806 }
4807 }
4808
4809 void
4810 linux_store_registers (struct regcache *regcache, int regno)
4811 {
4812 int use_regsets;
4813 int all = 0;
4814 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4815
4816 if (regno == -1)
4817 {
4818 all = regsets_store_inferior_registers (regs_info->regsets_info,
4819 regcache);
4820 if (regs_info->usrregs != NULL)
4821 usr_store_inferior_registers (regs_info, regcache, regno, all);
4822 }
4823 else
4824 {
4825 use_regsets = linux_register_in_regsets (regs_info, regno);
4826 if (use_regsets)
4827 all = regsets_store_inferior_registers (regs_info->regsets_info,
4828 regcache);
4829 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4830 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4831 }
4832 }
4833
4834
4835 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4836 to debugger memory starting at MYADDR. */
4837
4838 static int
4839 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4840 {
4841 int pid = lwpid_of (current_thread);
4842 register PTRACE_XFER_TYPE *buffer;
4843 register CORE_ADDR addr;
4844 register int count;
4845 char filename[64];
4846 register int i;
4847 int ret;
4848 int fd;
4849
4850 /* Try using /proc. Don't bother for one word. */
4851 if (len >= 3 * sizeof (long))
4852 {
4853 int bytes;
4854
4855 /* We could keep this file open and cache it - possibly one per
4856 thread. That requires some juggling, but is even faster. */
4857 sprintf (filename, "/proc/%d/mem", pid);
4858 fd = open (filename, O_RDONLY | O_LARGEFILE);
4859 if (fd == -1)
4860 goto no_proc;
4861
4862 /* If pread64 is available, use it. It's faster if the kernel
4863 supports it (only one syscall), and it's 64-bit safe even on
4864 32-bit platforms (for instance, SPARC debugging a SPARC64
4865 application). */
4866 #ifdef HAVE_PREAD64
4867 bytes = pread64 (fd, myaddr, len, memaddr);
4868 #else
4869 bytes = -1;
4870 if (lseek (fd, memaddr, SEEK_SET) != -1)
4871 bytes = read (fd, myaddr, len);
4872 #endif
4873
4874 close (fd);
4875 if (bytes == len)
4876 return 0;
4877
4878 /* Some data was read, we'll try to get the rest with ptrace. */
4879 if (bytes > 0)
4880 {
4881 memaddr += bytes;
4882 myaddr += bytes;
4883 len -= bytes;
4884 }
4885 }
4886
4887 no_proc:
4888 /* Round starting address down to longword boundary. */
4889 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4890 /* Round ending address up; get number of longwords that makes. */
4891 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4892 / sizeof (PTRACE_XFER_TYPE));
4893 /* Allocate buffer of that many longwords. */
4894 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4895
4896 /* Read all the longwords */
4897 errno = 0;
4898 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4899 {
4900 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4901 about coercing an 8 byte integer to a 4 byte pointer. */
4902 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4903 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4904 (PTRACE_TYPE_ARG4) 0);
4905 if (errno)
4906 break;
4907 }
4908 ret = errno;
4909
4910 /* Copy appropriate bytes out of the buffer. */
4911 if (i > 0)
4912 {
4913 i *= sizeof (PTRACE_XFER_TYPE);
4914 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4915 memcpy (myaddr,
4916 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4917 i < len ? i : len);
4918 }
4919
4920 return ret;
4921 }
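
/* Minimal standalone sketch (added for exposition, with hypothetical
   names) of the /proc/PID/mem fast path used above: a single pread64
   replaces a whole loop of PTRACE_PEEKTEXT word transfers.  Returns
   the number of bytes read, or -1 on error.  The tracee must already
   be ptrace-stopped for the kernel to allow the access.  */

static ssize_t
example_proc_mem_read (int pid, unsigned long memaddr,
		       unsigned char *myaddr, size_t len)
{
  char filename[64];
  ssize_t bytes;
  int fd;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  bytes = pread64 (fd, myaddr, len, memaddr);
#else
  bytes = -1;
  if (lseek (fd, memaddr, SEEK_SET) != -1)
    bytes = read (fd, myaddr, len);
#endif

  close (fd);
  return bytes;
}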
4922
4923 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4924 memory at MEMADDR. On failure (cannot write to the inferior)
4925 returns the value of errno. Always succeeds if LEN is zero. */
4926
4927 static int
4928 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4929 {
4930 register int i;
4931 /* Round starting address down to longword boundary. */
4932 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4933 /* Round ending address up; get number of longwords that makes. */
4934 register int count
4935 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4936 / sizeof (PTRACE_XFER_TYPE);
4937
4938 /* Allocate buffer of that many longwords. */
4939 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4940 alloca (count * sizeof (PTRACE_XFER_TYPE));
4941
4942 int pid = lwpid_of (current_thread);
4943
4944 if (len == 0)
4945 {
4946 /* Zero length write always succeeds. */
4947 return 0;
4948 }
4949
4950 if (debug_threads)
4951 {
4952 /* Dump up to four bytes. */
4953 unsigned int val = * (unsigned int *) myaddr;
4954 if (len == 1)
4955 val = val & 0xff;
4956 else if (len == 2)
4957 val = val & 0xffff;
4958 else if (len == 3)
4959 val = val & 0xffffff;
4960 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4961 val, (long)memaddr);
4962 }
4963
4964 /* Fill start and end extra bytes of buffer with existing memory data. */
4965
4966 errno = 0;
4967 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4968 about coercing an 8 byte integer to a 4 byte pointer. */
4969 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4970 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4971 (PTRACE_TYPE_ARG4) 0);
4972 if (errno)
4973 return errno;
4974
4975 if (count > 1)
4976 {
4977 errno = 0;
4978 buffer[count - 1]
4979 = ptrace (PTRACE_PEEKTEXT, pid,
4980 /* Coerce to a uintptr_t first to avoid potential gcc warning
4981 about coercing an 8 byte integer to a 4 byte pointer. */
4982 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4983 * sizeof (PTRACE_XFER_TYPE)),
4984 (PTRACE_TYPE_ARG4) 0);
4985 if (errno)
4986 return errno;
4987 }
4988
4989 /* Copy data to be written over corresponding part of buffer. */
4990
4991 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4992 myaddr, len);
4993
4994 /* Write the entire buffer. */
4995
4996 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4997 {
4998 errno = 0;
4999 ptrace (PTRACE_POKETEXT, pid,
5000 /* Coerce to a uintptr_t first to avoid potential gcc warning
5001 about coercing an 8 byte integer to a 4 byte pointer. */
5002 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5003 (PTRACE_TYPE_ARG4) buffer[i]);
5004 if (errno)
5005 return errno;
5006 }
5007
5008 return 0;
5009 }
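
/* Worked example (illustrative numbers) for the alignment logic
   above, assuming an 8-byte PTRACE_XFER_TYPE, MEMADDR == 0x1003 and
   LEN == 6:

     addr  = 0x1003 & ~7                   = 0x1000
     count = ((0x1009 - 0x1000) + 7) / 8   = 2

   The two words cover 0x1000..0x100f; the first and last words are
   pre-read with PTRACE_PEEKTEXT so that the ten bytes outside
   [0x1003, 0x1009) survive the POKETEXT writes unchanged.  */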
5010
5011 static void
5012 linux_look_up_symbols (void)
5013 {
5014 #ifdef USE_THREAD_DB
5015 struct process_info *proc = current_process ();
5016
5017 if (proc->priv->thread_db != NULL)
5018 return;
5019
5020 /* If the kernel supports tracing clones, then we don't need to
5021 use the magic thread event breakpoint to learn about
5022 threads. */
5023 thread_db_init (!linux_supports_traceclone ());
5024 #endif
5025 }
5026
5027 static void
5028 linux_request_interrupt (void)
5029 {
5030 extern unsigned long signal_pid;
5031
5032 /* Send a SIGINT to the process group. This acts just as if the
5033 user had typed a ^C on the controlling terminal. */
5034 kill (-signal_pid, SIGINT);
5035 }
5036
5037 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5038 to debugger memory starting at MYADDR. */
5039
5040 static int
5041 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5042 {
5043 char filename[PATH_MAX];
5044 int fd, n;
5045 int pid = lwpid_of (current_thread);
5046
5047 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5048
5049 fd = open (filename, O_RDONLY);
5050 if (fd < 0)
5051 return -1;
5052
5053 if (offset != (CORE_ADDR) 0
5054 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5055 n = -1;
5056 else
5057 n = read (fd, myaddr, len);
5058
5059 close (fd);
5060
5061 return n;
5062 }
5063
5064 /* These breakpoint- and watchpoint-related wrapper functions simply
5065 pass on the function call if the target has registered a
5066 corresponding function. */
5067
5068 static int
5069 linux_supports_z_point_type (char z_type)
5070 {
5071 return (the_low_target.supports_z_point_type != NULL
5072 && the_low_target.supports_z_point_type (z_type));
5073 }
5074
5075 static int
5076 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5077 int size, struct raw_breakpoint *bp)
5078 {
5079 if (the_low_target.insert_point != NULL)
5080 return the_low_target.insert_point (type, addr, size, bp);
5081 else
5082 /* Unsupported (see target.h). */
5083 return 1;
5084 }
5085
5086 static int
5087 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5088 int size, struct raw_breakpoint *bp)
5089 {
5090 if (the_low_target.remove_point != NULL)
5091 return the_low_target.remove_point (type, addr, size, bp);
5092 else
5093 /* Unsupported (see target.h). */
5094 return 1;
5095 }
5096
5097 /* Implement the to_stopped_by_sw_breakpoint target_ops
5098 method. */
5099
5100 static int
5101 linux_stopped_by_sw_breakpoint (void)
5102 {
5103 struct lwp_info *lwp = get_thread_lwp (current_thread);
5104
5105 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5106 }
5107
5108 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5109 method. */
5110
5111 static int
5112 linux_supports_stopped_by_sw_breakpoint (void)
5113 {
5114 return USE_SIGTRAP_SIGINFO;
5115 }
5116
5117 /* Implement the to_stopped_by_hw_breakpoint target_ops
5118 method. */
5119
5120 static int
5121 linux_stopped_by_hw_breakpoint (void)
5122 {
5123 struct lwp_info *lwp = get_thread_lwp (current_thread);
5124
5125 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5126 }
5127
5128 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5129 method. */
5130
5131 static int
5132 linux_supports_stopped_by_hw_breakpoint (void)
5133 {
5134 return USE_SIGTRAP_SIGINFO;
5135 }
5136
5137 static int
5138 linux_stopped_by_watchpoint (void)
5139 {
5140 struct lwp_info *lwp = get_thread_lwp (current_thread);
5141
5142 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5143 }
5144
5145 static CORE_ADDR
5146 linux_stopped_data_address (void)
5147 {
5148 struct lwp_info *lwp = get_thread_lwp (current_thread);
5149
5150 return lwp->stopped_data_address;
5151 }
5152
5153 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5154 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5155 && defined(PT_TEXT_END_ADDR)
5156
5157 /* This is only used for targets that define PT_TEXT_ADDR,
5158 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5159 target presumably has different ways of acquiring this information,
5160 such as loadmaps. */
5161
5162 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5163 to tell gdb about. */
5164
5165 static int
5166 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5167 {
5168 unsigned long text, text_end, data;
5169 int pid = lwpid_of (get_thread_lwp (current_thread));
5170
5171 errno = 0;
5172
5173 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5174 (PTRACE_TYPE_ARG4) 0);
5175 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5176 (PTRACE_TYPE_ARG4) 0);
5177 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5178 (PTRACE_TYPE_ARG4) 0);
5179
5180 if (errno == 0)
5181 {
5182 /* Both text and data offsets produced at compile-time (and so
5183 used by gdb) are relative to the beginning of the program,
5184 with the data segment immediately following the text segment.
5185 However, the actual runtime layout in memory may put the data
5186 somewhere else, so when we send gdb a data base-address, we
5187 use the real data base address and subtract the compile-time
5188 data base-address from it (which is just the length of the
5189 text segment). BSS immediately follows data in both
5190 cases. */
5191 *text_p = text;
5192 *data_p = data - (text_end - text);
5193
5194 return 1;
5195 }
5196 return 0;
5197 }
5198 #endif
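
/* Worked example (illustrative addresses) for the offset math above:
   if the kernel reports text == 0x10000000, text_end == 0x10008000
   and data == 0x20000000, the text segment is 0x8000 bytes long and

     *text_p = 0x10000000
     *data_p = 0x20000000 - 0x8000 = 0x1fff8000

   so compile-time data offsets (which assume data immediately
   follows text) relocate to the correct runtime addresses.  */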
5199
5200 static int
5201 linux_qxfer_osdata (const char *annex,
5202 unsigned char *readbuf, unsigned const char *writebuf,
5203 CORE_ADDR offset, int len)
5204 {
5205 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5206 }
5207
5208 /* Convert a native/host siginfo object into/from the siginfo in the
5209 layout of the inferior's architecture. DIRECTION is 0 to convert
from the native layout to the inferior's, and 1 for the reverse. */
5210
5211 static void
5212 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5213 {
5214 int done = 0;
5215
5216 if (the_low_target.siginfo_fixup != NULL)
5217 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5218
5219 /* If there was no callback, or the callback didn't do anything,
5220 then just do a straight memcpy. */
5221 if (!done)
5222 {
5223 if (direction == 1)
5224 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5225 else
5226 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5227 }
5228 }
5229
5230 static int
5231 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5232 unsigned const char *writebuf, CORE_ADDR offset, int len)
5233 {
5234 int pid;
5235 siginfo_t siginfo;
5236 char inf_siginfo[sizeof (siginfo_t)];
5237
5238 if (current_thread == NULL)
5239 return -1;
5240
5241 pid = lwpid_of (current_thread);
5242
5243 if (debug_threads)
5244 debug_printf ("%s siginfo for lwp %d.\n",
5245 readbuf != NULL ? "Reading" : "Writing",
5246 pid);
5247
5248 if (offset >= sizeof (siginfo))
5249 return -1;
5250
5251 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5252 return -1;
5253
5254 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5255 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5256 inferior with a 64-bit GDBSERVER should look the same as debugging it
5257 with a 32-bit GDBSERVER, we need to convert it. */
5258 siginfo_fixup (&siginfo, inf_siginfo, 0);
5259
5260 if (offset + len > sizeof (siginfo))
5261 len = sizeof (siginfo) - offset;
5262
5263 if (readbuf != NULL)
5264 memcpy (readbuf, inf_siginfo + offset, len);
5265 else
5266 {
5267 memcpy (inf_siginfo + offset, writebuf, len);
5268
5269 /* Convert back to ptrace layout before flushing it out. */
5270 siginfo_fixup (&siginfo, inf_siginfo, 1);
5271
5272 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5273 return -1;
5274 }
5275
5276 return len;
5277 }
5278
5279 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
5280 it lets us notice when children change state, and it acts as the
5281 handler for the sigsuspend in my_waitpid. */
5282
5283 static void
5284 sigchld_handler (int signo)
5285 {
5286 int old_errno = errno;
5287
5288 if (debug_threads)
5289 {
5290 do
5291 {
5292 /* fprintf is not async-signal-safe, so call write
5293 directly. */
5294 if (write (2, "sigchld_handler\n",
5295 sizeof ("sigchld_handler\n") - 1) < 0)
5296 break; /* just ignore */
5297 } while (0);
5298 }
5299
5300 if (target_is_async_p ())
5301 async_file_mark (); /* trigger a linux_wait */
5302
5303 errno = old_errno;
5304 }
5305
5306 static int
5307 linux_supports_non_stop (void)
5308 {
5309 return 1;
5310 }
5311
5312 static int
5313 linux_async (int enable)
5314 {
5315 int previous = target_is_async_p ();
5316
5317 if (debug_threads)
5318 debug_printf ("linux_async (%d), previous=%d\n",
5319 enable, previous);
5320
5321 if (previous != enable)
5322 {
5323 sigset_t mask;
5324 sigemptyset (&mask);
5325 sigaddset (&mask, SIGCHLD);
5326
5327 sigprocmask (SIG_BLOCK, &mask, NULL);
5328
5329 if (enable)
5330 {
5331 if (pipe (linux_event_pipe) == -1)
5332 {
5333 linux_event_pipe[0] = -1;
5334 linux_event_pipe[1] = -1;
5335 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5336
5337 warning ("creating event pipe failed.");
5338 return previous;
5339 }
5340
5341 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5342 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5343
5344 /* Register the event loop handler. */
5345 add_file_handler (linux_event_pipe[0],
5346 handle_target_event, NULL);
5347
5348 /* Always trigger a linux_wait. */
5349 async_file_mark ();
5350 }
5351 else
5352 {
5353 delete_file_handler (linux_event_pipe[0]);
5354
5355 close (linux_event_pipe[0]);
5356 close (linux_event_pipe[1]);
5357 linux_event_pipe[0] = -1;
5358 linux_event_pipe[1] = -1;
5359 }
5360
5361 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5362 }
5363
5364 return previous;
5365 }
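
/* The event pipe above is the classic self-pipe pattern: the SIGCHLD
   handler marks the pipe from signal context (async_file_mark,
   defined elsewhere in this file, writes to its write end), the
   event loop wakes up on the pipe's read end and calls
   handle_target_event, which then does the actual waitpid work from
   normal context.  O_NONBLOCK on both ends keeps a burst of SIGCHLDs
   from ever blocking inside the signal handler.  */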
5366
5367 static int
5368 linux_start_non_stop (int nonstop)
5369 {
5370 /* Register or unregister from event-loop accordingly. */
5371 linux_async (nonstop);
5372
5373 if (target_is_async_p () != (nonstop != 0))
5374 return -1;
5375
5376 return 0;
5377 }
5378
5379 static int
5380 linux_supports_multi_process (void)
5381 {
5382 return 1;
5383 }
5384
5385 static int
5386 linux_supports_disable_randomization (void)
5387 {
5388 #ifdef HAVE_PERSONALITY
5389 return 1;
5390 #else
5391 return 0;
5392 #endif
5393 }
5394
5395 static int
5396 linux_supports_agent (void)
5397 {
5398 return 1;
5399 }
5400
5401 static int
5402 linux_supports_range_stepping (void)
5403 {
5404 if (the_low_target.supports_range_stepping == NULL)
5405 return 0;
5406
5407 return (*the_low_target.supports_range_stepping) ();
5408 }
5409
5410 /* Enumerate spufs IDs for process PID. */
5411 static int
5412 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5413 {
5414 int pos = 0;
5415 int written = 0;
5416 char path[128];
5417 DIR *dir;
5418 struct dirent *entry;
5419
5420 sprintf (path, "/proc/%ld/fd", pid);
5421 dir = opendir (path);
5422 if (!dir)
5423 return -1;
5424
5425 rewinddir (dir);
5426 while ((entry = readdir (dir)) != NULL)
5427 {
5428 struct stat st;
5429 struct statfs stfs;
5430 int fd;
5431
5432 fd = atoi (entry->d_name);
5433 if (!fd)
5434 continue;
5435
5436 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5437 if (stat (path, &st) != 0)
5438 continue;
5439 if (!S_ISDIR (st.st_mode))
5440 continue;
5441
5442 if (statfs (path, &stfs) != 0)
5443 continue;
5444 if (stfs.f_type != SPUFS_MAGIC)
5445 continue;
5446
5447 if (pos >= offset && pos + 4 <= offset + len)
5448 {
5449 *(unsigned int *)(buf + pos - offset) = fd;
5450 written += 4;
5451 }
5452 pos += 4;
5453 }
5454
5455 closedir (dir);
5456 return written;
5457 }
5458
5459 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5460 object type, using the /proc file system. */
5461 static int
5462 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5463 unsigned const char *writebuf,
5464 CORE_ADDR offset, int len)
5465 {
5466 long pid = lwpid_of (current_thread);
5467 char buf[128];
5468 int fd = 0;
5469 int ret = 0;
5470
5471 if (!writebuf && !readbuf)
5472 return -1;
5473
5474 if (!*annex)
5475 {
5476 if (!readbuf)
5477 return -1;
5478 else
5479 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5480 }
5481
5482 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5483 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5484 if (fd <= 0)
5485 return -1;
5486
5487 if (offset != 0
5488 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5489 {
5490 close (fd);
5491 return 0;
5492 }
5493
5494 if (writebuf)
5495 ret = write (fd, writebuf, (size_t) len);
5496 else
5497 ret = read (fd, readbuf, (size_t) len);
5498
5499 close (fd);
5500 return ret;
5501 }
5502
5503 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5504 struct target_loadseg
5505 {
5506 /* Core address to which the segment is mapped. */
5507 Elf32_Addr addr;
5508 /* VMA recorded in the program header. */
5509 Elf32_Addr p_vaddr;
5510 /* Size of this segment in memory. */
5511 Elf32_Word p_memsz;
5512 };
5513
5514 # if defined PT_GETDSBT
5515 struct target_loadmap
5516 {
5517 /* Protocol version number, must be zero. */
5518 Elf32_Word version;
5519 /* Pointer to the DSBT table, its size, and the DSBT index. */
5520 unsigned *dsbt_table;
5521 unsigned dsbt_size, dsbt_index;
5522 /* Number of segments in this map. */
5523 Elf32_Word nsegs;
5524 /* The actual memory map. */
5525 struct target_loadseg segs[/*nsegs*/];
5526 };
5527 # define LINUX_LOADMAP PT_GETDSBT
5528 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5529 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5530 # else
5531 struct target_loadmap
5532 {
5533 /* Protocol version number, must be zero. */
5534 Elf32_Half version;
5535 /* Number of segments in this map. */
5536 Elf32_Half nsegs;
5537 /* The actual memory map. */
5538 struct target_loadseg segs[/*nsegs*/];
5539 };
5540 # define LINUX_LOADMAP PTRACE_GETFDPIC
5541 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5542 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5543 # endif
5544
5545 static int
5546 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5547 unsigned char *myaddr, unsigned int len)
5548 {
5549 int pid = lwpid_of (current_thread);
5550 int addr = -1;
5551 struct target_loadmap *data = NULL;
5552 unsigned int actual_length, copy_length;
5553
5554 if (strcmp (annex, "exec") == 0)
5555 addr = (int) LINUX_LOADMAP_EXEC;
5556 else if (strcmp (annex, "interp") == 0)
5557 addr = (int) LINUX_LOADMAP_INTERP;
5558 else
5559 return -1;
5560
5561 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5562 return -1;
5563
5564 if (data == NULL)
5565 return -1;
5566
5567 actual_length = sizeof (struct target_loadmap)
5568 + sizeof (struct target_loadseg) * data->nsegs;
5569
5570 if (offset < 0 || offset > actual_length)
5571 return -1;
5572
5573 copy_length = actual_length - offset < len ? actual_length - offset : len;
5574 memcpy (myaddr, (char *) data + offset, copy_length);
5575 return copy_length;
5576 }
5577 #else
5578 # define linux_read_loadmap NULL
5579 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5580
5581 static void
5582 linux_process_qsupported (const char *query)
5583 {
5584 if (the_low_target.process_qsupported != NULL)
5585 the_low_target.process_qsupported (query);
5586 }
5587
5588 static int
5589 linux_supports_tracepoints (void)
5590 {
5591 if (the_low_target.supports_tracepoints == NULL)
5592 return 0;
5593
5594 return (*the_low_target.supports_tracepoints) ();
5595 }
5596
5597 static CORE_ADDR
5598 linux_read_pc (struct regcache *regcache)
5599 {
5600 if (the_low_target.get_pc == NULL)
5601 return 0;
5602
5603 return (*the_low_target.get_pc) (regcache);
5604 }
5605
5606 static void
5607 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5608 {
5609 gdb_assert (the_low_target.set_pc != NULL);
5610
5611 (*the_low_target.set_pc) (regcache, pc);
5612 }
5613
5614 static int
5615 linux_thread_stopped (struct thread_info *thread)
5616 {
5617 return get_thread_lwp (thread)->stopped;
5618 }
5619
/* This exposes stop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}

static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}

/* Extract the AT_PHDR and AT_PHNUM values (program header table
   address and count) from the inferior's auxv.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
                               CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
         && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
        {
          Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

          switch (aux->a_type)
            {
            case AT_PHDR:
              *phdr_memaddr = aux->a_un.a_val;
              break;
            case AT_PHNUM:
              *num_phdr = aux->a_un.a_val;
              break;
            }
        }
      else
        {
          Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

          switch (aux->a_type)
            {
            case AT_PHDR:
              *phdr_memaddr = aux->a_un.a_val;
              break;
            case AT_PHNUM:
              *num_phdr = aux->a_un.a_val;
              break;
            }
        }
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
               "phdr_memaddr = %ld, phdr_num = %d",
               (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
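
/* Added illustration (values hypothetical): /proc/PID/auxv is a flat
   array of auxv entries, each one tag plus one value, for example for
   a 64-bit inferior:

     { AT_PHDR,  0x400040 }    address of the program header table
     { AT_PHNUM, 9 }           number of program headers
     { AT_NULL,  0 }           terminator

   The loop above scans this array for the first AT_PHDR and AT_PHNUM
   entries and stops as soon as both have been seen.  */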

/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
        Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
        Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but PIE executables need it in general.
         Fortunately, all real-world executables, including PIE ones,
         always have PT_PHDR present.  PT_PHDR is missing from some
         shared libraries and from fpc (Free Pascal 2.4) binaries, but
         neither of those needs or provides DT_DEBUG anyway (fpc
         binaries are statically linked).

         Therefore, if DT_DEBUG exists, PT_PHDR exists as well.

         GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
        {
          Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
      else
        {
          Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
    }

  return 0;
}
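
/* Worked example with hypothetical numbers: for a PIE executable whose
   PT_PHDR records p_vaddr 0x40 while auxv reports AT_PHDR as
   0x555555554040, get_dynamic computes

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   and returns PT_DYNAMIC's p_vaddr plus that relocation.  For a
   non-PIE executable the two addresses coincide, so relocation is 0.  */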

/* Return &_r_debug in the inferior, or -1 if not present.  The return
   value can be 0 if the inferior has not yet initialized the library
   list.  We look for DT_MIPS_RLD_MAP first: MIPS executables use it
   instead of DT_DEBUG, although they sometimes contain an unused
   DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
        {
          Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf64_Xword map;
              unsigned char buf[sizeof (Elf64_Xword)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }
      else
        {
          Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf32_Word map;
              unsigned char buf[sizeof (Elf32_Word)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }

      dynamic_memaddr += dyn_size;
    }

  return map;
}
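
/* Added commentary: DT_DEBUG's value is filled in at startup by the
   dynamic linker with the address of its `struct r_debug', whose r_map
   member heads the chain of `struct link_map' entries that
   linux_qxfer_libraries_svr4 below walks.  */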

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
    {
      CORE_ADDR core_addr;
      unsigned int ui;
      unsigned char uc;
    } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
        *ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
        *ptr = addr.ui;
      else
        gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
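
/* Added commentary on why the union matters: when reading a 4-byte
   pointer from a 32-bit inferior on a 64-bit big-endian host, the four
   bytes land at the start of the union.  Reading them back through
   ADDR.UI yields the correct value, whereas ADDR.CORE_ADDR would treat
   them as the high-order half of an 8-byte integer.  */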

struct link_map_offsets
  {
    /* Offset of r_debug.r_version.  */
    int r_version_offset;

    /* Offset of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
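
/* For orientation (added commentary): these offsets describe the SVR4
   `struct r_debug' and `struct link_map' layouts.  For an LP64
   inferior the link_map prefix is

     ElfW(Addr) l_addr;          offset  0
     char *l_name;               offset  8
     ElfW(Dyn) *l_ld;            offset 16
     struct link_map *l_next;    offset 24
     struct link_map *l_prev;    offset 32

   matching lmo_64bit_offsets below; the ILP32 variant uses 4-byte
   steps instead.  */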

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

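  /* The annex is a semicolon-separated list of "name=hexvalue" pairs,
     e.g. "start=fff0;prev=0;" (addresses hypothetical): START seeds
     the link map walk below, PREV is the entry expected to precede
     it.  */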
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
        break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
        addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
        addrp = &lm_prev;
      else
        {
          annex = strchr (sep, ';');
          if (annex == NULL)
            break;
          annex++;
          continue;
        }

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
        priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  That will not change for this
         inferior, so do not retry.  Report it to GDB as E01; see the
         GDB side in solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
        return -1;

      if (priv->r_debug != 0)
        {
          if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                                 (unsigned char *) &r_version,
                                 sizeof (r_version)) != 0
              || r_version != 1)
            {
              warning ("unexpected r_debug version %d", r_version);
            }
          else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                                 &lm_addr, ptr_size) != 0)
            {
              warning ("unable to read r_map from 0x%lx",
                       (long) priv->r_debug + lmo->r_map_offset);
            }
        }
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
         && read_one_ptr (lm_addr + lmo->l_name_offset,
                          &l_name, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_addr_offset,
                          &l_addr, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_ld_offset,
                          &l_ld, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_prev_offset,
                          &l_prev, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_next_offset,
                          &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
        {
          warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                   (long) lm_prev, (long) l_prev);
          break;
        }

      /* Ignore the first entry even if it has a valid name, since it
         corresponds to the main executable.  The first entry should not
         be skipped if the dynamic loader was loaded late by a static
         executable (see the ignore_first parameter in solib-svr4.c),
         but in that case the main executable has no PT_DYNAMIC and this
         function has already returned above because get_r_debug failed.  */
      if (lm_prev == 0)
        {
          sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
          p = p + strlen (p);
        }
      else
        {
          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
        }

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
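
/* A reply built by the function above looks roughly like this, with
   hypothetical addresses and without the line breaks:

     <library-list-svr4 version="1.0" main-lm="0x555555554160">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fc1000"
                l_addr="0x7ffff7dd3000" l_ld="0x7ffff7fbcde0"/>
     </library-list-svr4>

   or just <library-list-svr4 version="1.0"/> for an empty list.  */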

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid, conf);

  if (tinfo != NULL && tinfo->ptr_bits == 0)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
        buffer_grow_str0 (buffer, "E.Overflow.");
      else
        buffer_grow_str0 (buffer, "E.Generic Error.");

      btrace_data_fini (&btrace);
      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      break;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
           VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
           i++)
        buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                           paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unknown Trace Format.");

      btrace_data_fini (&btrace);
      return -1;
    }

  btrace_data_fini (&btrace);
  return 0;
}
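
/* A successful BTS reply produced above has this shape (addresses
   hypothetical):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x40052a"/>
     </btrace>  */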

/* See to_btrace_conf target method.  */

static int
linux_low_btrace_conf (const struct btrace_target_info *tinfo,
                       struct buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
        {
        case BTRACE_FORMAT_NONE:
          break;

        case BTRACE_FORMAT_BTS:
          buffer_xml_printf (buffer, "<bts");
          buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
          buffer_xml_printf (buffer, " />\n");
          break;
        }
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
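
/* The configuration document built above has the form (size
   hypothetical):

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
     <bts size="0x10000" />
     </btrace-conf>  */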
#endif /* HAVE_LINUX_BTRACE */

/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}

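/* The Linux implementation of the target operations vtable.  Slots set
   to NULL are operations this build does not provide; callers in the
   generic gdbserver code check for NULL before using an entry.  */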
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
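/* Count the regsets in INFO's regsets array, which is terminated by an
   entry whose size is negative, and store the count in
   INFO->num_regsets.  */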
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}