/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

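/* Superblock magic of the spufs filesystem (Cell/B.E. SPU contexts);
   defined here because not all system headers provide it.  */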
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

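/* Fallback for hosts whose <sys/wait.h> lacks W_STOPCODE: build a wait
   status that reports a stop by signal SIG.  E.g., with SIGSTOP == 19,
   W_STOPCODE (SIGSTOP) is 0x137f, which WIFSTOPPED and WSTOPSIG then
   decode back.  */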
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

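/* A signal due to be delivered to a thread later, together with its
   siginfo; kept on a singly-linked list chained through PREV.  */
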
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as a waitable file in
   the event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

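/* Remove LWP's thread from the thread list and free the LWP and its
   architecture-specific data.  */
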
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

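/* Allocate a new, zero-initialized LWP for PTID, including any
   architecture-specific data, and register it on the thread list.  */
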
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

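/* Helper state for last_thread_of_process_p: the pid to match and the
   number of matching threads seen so far in the walk.  */
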
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

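/* Return non-zero if there is at most one thread with process id PID
   left on the thread list.  */
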
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

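/* Kill process PID and all of its LWPs, and mourn it.  Returns -1 if
   PID is not a known process, 0 otherwise.  */
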
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

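/* Callback for `find_inferior'.  Detach from one LWP of the process
   whose pid ARGS points to: clear any pending SIGSTOP, flush its
   registers, and pass on the signal it last stopped for via
   PTRACE_DETACH.  */
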
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

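/* Detach from process PID and all of its LWPs.  Returns -1 if PID is
   not a known process, 0 otherwise.  */
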
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

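/* Clean up after PROCESS exits or is killed: delete all of its LWPs,
   free its private data, and remove it from the process list.  */
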
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

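/* Wait for process PID to be reaped: loop until waitpid reports an
   exit or termination by signal, or the process is already gone.  */
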
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

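/* Callback for `find_inferior'.  Return true if ENTRY's LWP id
   matches the LWP id (or, failing that, the overall process id) of
   the ptid pointed to by DATA.  */
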
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

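/* Find the LWP matching PTID on the thread list, or return NULL if
   there is none.  */
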
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice.)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->priv->new_inferior)
	{
	  struct thread_info *saved_thread;

	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->priv->new_inferior = 0;
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));

      linux_enable_event_reporting (lwpid, proc->attached);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      handle_extended_wait (child, wstat);
      return NULL;
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	  || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    check_stopped_by_watchpoint (child);

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}

/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  */

static void
resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->status_pending_p
      && thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    {
      int step = thread->last_resume_kind == resume_step;

      if (debug_threads)
	debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
		      target_pid_to_str (ptid_of (thread)),
		      paddress (lp->stop_pc),
		      step);

      linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}

/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
2015 the stop status through the status pointer WSTATP. OPTIONS is
2016 passed to the waitpid call. Return 0 if no event was found and
2017 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2018 were found. Return the PID of the stopped child otherwise. */
2019
2020 static int
2021 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2022 int *wstatp, int options)
2023 {
2024 struct thread_info *event_thread;
2025 struct lwp_info *event_child, *requested_child;
2026 sigset_t block_mask, prev_mask;
2027
2028 retry:
2029 /* N.B. event_thread points to the thread_info struct that contains
2030 event_child. Keep them in sync. */
2031 event_thread = NULL;
2032 event_child = NULL;
2033 requested_child = NULL;
2034
2035 /* Check for a lwp with a pending status. */
2036
2037 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2038 {
2039 event_thread = (struct thread_info *)
2040 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2041 if (event_thread != NULL)
2042 event_child = get_thread_lwp (event_thread);
2043 if (debug_threads && event_thread)
2044 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2045 }
2046 else if (!ptid_equal (filter_ptid, null_ptid))
2047 {
2048 requested_child = find_lwp_pid (filter_ptid);
2049
2050 if (stopping_threads == NOT_STOPPING_THREADS
2051 && requested_child->status_pending_p
2052 && requested_child->collecting_fast_tracepoint)
2053 {
2054 enqueue_one_deferred_signal (requested_child,
2055 &requested_child->status_pending);
2056 requested_child->status_pending_p = 0;
2057 requested_child->status_pending = 0;
2058 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2059 }
2060
2061 if (requested_child->suspended
2062 && requested_child->status_pending_p)
2063 {
2064 internal_error (__FILE__, __LINE__,
2065 "requesting an event out of a"
2066 " suspended child?");
2067 }
2068
2069 if (requested_child->status_pending_p)
2070 {
2071 event_child = requested_child;
2072 event_thread = get_lwp_thread (event_child);
2073 }
2074 }
2075
2076 if (event_child != NULL)
2077 {
2078 if (debug_threads)
2079 debug_printf ("Got an event from pending child %ld (%04x)\n",
2080 lwpid_of (event_thread), event_child->status_pending);
2081 *wstatp = event_child->status_pending;
2082 event_child->status_pending_p = 0;
2083 event_child->status_pending = 0;
2084 current_thread = event_thread;
2085 return lwpid_of (event_thread);
2086 }
2087
2088 /* But if we don't find a pending event, we'll have to wait.
2089
2090 We only enter this loop if no process has a pending wait status.
2091 Thus any action taken in response to a wait status inside this
2092 loop is responding as soon as we detect the status, not after any
2093 pending events. */
2094
2095 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2096 all signals while here. */
2097 sigfillset (&block_mask);
2098 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2099
2100 /* Always pull all events out of the kernel. We'll randomly select
2101 an event LWP out of all that have events, to prevent
2102 starvation. */
2103 while (event_child == NULL)
2104 {
2105 pid_t ret = 0;
2106
2107 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2108 quirks:
2109
2110 - If the thread group leader exits while other threads in the
2111 thread group still exist, waitpid(TGID, ...) hangs. That
2112 waitpid won't return an exit status until the other threads
2113 in the group are reaped.
2114
2115 - When a non-leader thread execs, that thread just vanishes
2116 without reporting an exit (so we'd hang if we waited for it
2117 explicitly in that case). The exec event is reported to
2118 the TGID pid (although we don't currently enable exec
2119 events). */
2120 errno = 0;
2121 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2122
2123 if (debug_threads)
2124 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2125 ret, errno ? strerror (errno) : "ERRNO-OK");
2126
2127 if (ret > 0)
2128 {
2129 if (debug_threads)
2130 {
2131 debug_printf ("LLW: waitpid %ld received %s\n",
2132 (long) ret, status_to_str (*wstatp));
2133 }
2134
2135 /* Filter all events. IOW, leave all events pending. We'll
2136 randomly select an event LWP out of all that have events
2137 below. */
2138 linux_low_filter_event (ret, *wstatp);
2139 /* Retry until nothing comes out of waitpid. A single
2140 SIGCHLD can indicate more than one child stopped. */
2141 continue;
2142 }
2143
2144 /* Now that we've pulled all events out of the kernel, resume
2145 LWPs that don't have an interesting event to report. */
2146 if (stopping_threads == NOT_STOPPING_THREADS)
2147 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2148
2149 /* ... and find an LWP with a status to report to the core, if
2150 any. */
2151 event_thread = (struct thread_info *)
2152 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2153 if (event_thread != NULL)
2154 {
2155 event_child = get_thread_lwp (event_thread);
2156 *wstatp = event_child->status_pending;
2157 event_child->status_pending_p = 0;
2158 event_child->status_pending = 0;
2159 break;
2160 }
2161
2162 /* Check for zombie thread group leaders. Those can't be reaped
2163 until all other threads in the thread group are. */
2164 check_zombie_leaders ();
2165
2166 /* If there are no resumed children left in the set of LWPs we
2167 want to wait for, bail. We can't just block in
2168 waitpid/sigsuspend, because lwps might have been left stopped
2169 in trace-stop state, and we'd be stuck forever waiting for
2170 their status to change (which would only happen if we resumed
2171 them). Even if WNOHANG is set, this return code is preferred
2172 over 0 (below), as it is more detailed. */
2173 if ((find_inferior (&all_threads,
2174 not_stopped_callback,
2175 &wait_ptid) == NULL))
2176 {
2177 if (debug_threads)
2178 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2179 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2180 return -1;
2181 }
2182
2183 /* No interesting event to report to the caller. */
2184 if ((options & WNOHANG))
2185 {
2186 if (debug_threads)
2187 debug_printf ("WNOHANG set, no event found\n");
2188
2189 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2190 return 0;
2191 }
2192
2193 /* Block until we get an event reported with SIGCHLD. */
2194 if (debug_threads)
2195 debug_printf ("sigsuspend'ing\n");
2196
2197 sigsuspend (&prev_mask);
2198 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2199 goto retry;
2200 }
2201
2202 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2203
2204 current_thread = event_thread;
2205
2206 /* Check for thread exit. */
2207 if (! WIFSTOPPED (*wstatp))
2208 {
2209 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2210
2211 if (debug_threads)
2212 debug_printf ("LWP %ld is the last lwp of process %d. "
2213 "Process exiting.\n",
2214 lwpid_of (event_thread), pid_of (event_thread));
2215 return lwpid_of (event_thread);
2216 }
2217
2218 return lwpid_of (event_thread);
2219 }
2220
2221 /* Wait for an event from child(ren) PTID. PTIDs can be:
2222 minus_one_ptid, to specify any child; a pid PTID, specifying all
2223 lwps of a thread group; or a PTID representing a single lwp. Store
2224 the stop status through the status pointer WSTATP. OPTIONS is
2225 passed to the waitpid call. Return 0 if no event was found and
2226 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2227 were found. Return the PID of the stopped child otherwise. */
2228
2229 static int
2230 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2231 {
2232 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2233 }
2234
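/* For illustration, a minimal sketch of the two call shapes this
   filtered wait supports (the variable names here are hypothetical):

     int wstat, pid;

     pid = linux_wait_for_event_filtered (minus_one_ptid, minus_one_ptid,
                                          &wstat, __WALL);

   waits for, and consumes, an event from any resumed child, while

     pid = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
                                          &wstat, __WALL);

   pulls every event out of the kernel but leaves each one pending,
   since the null filter matches no LWP; it returns -1 once there are
   no unwaited-for children left.  This is exactly how
   wait_for_sigstop below quiesces all LWPs.  */
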
2235 /* Count the LWPs that have had events. */
2236
2237 static int
2238 count_events_callback (struct inferior_list_entry *entry, void *data)
2239 {
2240 struct thread_info *thread = (struct thread_info *) entry;
2241 struct lwp_info *lp = get_thread_lwp (thread);
2242 int *count = data;
2243
2244 gdb_assert (count != NULL);
2245
2246 /* Count only resumed LWPs that have an event pending. */
2247 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2248 && lp->status_pending_p)
2249 (*count)++;
2250
2251 return 0;
2252 }
2253
2254 /* Select the LWP (if any) that is currently being single-stepped. */
2255
2256 static int
2257 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2258 {
2259 struct thread_info *thread = (struct thread_info *) entry;
2260 struct lwp_info *lp = get_thread_lwp (thread);
2261
2262 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2263 && thread->last_resume_kind == resume_step
2264 && lp->status_pending_p)
2265 return 1;
2266 else
2267 return 0;
2268 }
2269
2270 /* Select the Nth LWP that has had an event. */
2271
2272 static int
2273 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2274 {
2275 struct thread_info *thread = (struct thread_info *) entry;
2276 struct lwp_info *lp = get_thread_lwp (thread);
2277 int *selector = data;
2278
2279 gdb_assert (selector != NULL);
2280
2281 /* Select only resumed LWPs that have an event pending. */
2282 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2283 && lp->status_pending_p)
2284 if ((*selector)-- == 0)
2285 return 1;
2286
2287 return 0;
2288 }
2289
2290 /* Select one LWP out of those that have events pending. */
2291
2292 static void
2293 select_event_lwp (struct lwp_info **orig_lp)
2294 {
2295 int num_events = 0;
2296 int random_selector;
2297 struct thread_info *event_thread = NULL;
2298
2299 /* In all-stop, give preference to the LWP that is being
2300 single-stepped. There will be at most one, and it's the LWP that
2301 the core is most interested in. If we didn't do this, then we'd
2302 have to handle pending step SIGTRAPs somehow in case the core
2303 later continues the previously-stepped thread, otherwise we'd
2304 report the pending SIGTRAP, and the core, not having stepped the
2305 thread, wouldn't understand what the trap was for, and therefore
2306 would report it to the user as a random signal. */
2307 if (!non_stop)
2308 {
2309 event_thread
2310 = (struct thread_info *) find_inferior (&all_threads,
2311 select_singlestep_lwp_callback,
2312 NULL);
2313 if (event_thread != NULL)
2314 {
2315 if (debug_threads)
2316 debug_printf ("SEL: Select single-step %s\n",
2317 target_pid_to_str (ptid_of (event_thread)));
2318 }
2319 }
2320 if (event_thread == NULL)
2321 {
2322 /* No single-stepping LWP. Select one at random, out of those
2323 which have had events. */
2324
2325 /* First see how many events we have. */
2326 find_inferior (&all_threads, count_events_callback, &num_events);
2327 gdb_assert (num_events > 0);
2328
2329 /* Now randomly pick a LWP out of those that have had
2330 events. */
2331 random_selector = (int)
2332 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2333
2334 if (debug_threads && num_events > 1)
2335 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2336 num_events, random_selector);
2337
2338 event_thread
2339 = (struct thread_info *) find_inferior (&all_threads,
2340 select_event_lwp_callback,
2341 &random_selector);
2342 }
2343
2344 if (event_thread != NULL)
2345 {
2346 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2347
2348 /* Switch the event LWP. */
2349 *orig_lp = event_lp;
2350 }
2351 }
2352
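/* The random selector in select_event_lwp maps rand () uniformly
   onto [0, NUM_EVENTS): rand () / (RAND_MAX + 1.0) lies in [0, 1),
   so scaling by NUM_EVENTS and truncating to int picks each index
   with (nearly) equal probability, avoiding the modulo bias that
   rand () % num_events would introduce.  E.g., with NUM_EVENTS == 3,
   the three thirds of [0, RAND_MAX] map to selectors 0, 1 and 2
   respectively.  */
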
2353 /* Decrement the suspend count of an LWP. */
2354
2355 static int
2356 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2357 {
2358 struct thread_info *thread = (struct thread_info *) entry;
2359 struct lwp_info *lwp = get_thread_lwp (thread);
2360
2361 /* Ignore EXCEPT. */
2362 if (lwp == except)
2363 return 0;
2364
2365 lwp->suspended--;
2366
2367 gdb_assert (lwp->suspended >= 0);
2368 return 0;
2369 }
2370
2371 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2372 non-NULL. */
2373
2374 static void
2375 unsuspend_all_lwps (struct lwp_info *except)
2376 {
2377 find_inferior (&all_threads, unsuspend_one_lwp, except);
2378 }
2379
2380 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2381 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2382 void *data);
2383 static int lwp_running (struct inferior_list_entry *entry, void *data);
2384 static ptid_t linux_wait_1 (ptid_t ptid,
2385 struct target_waitstatus *ourstatus,
2386 int target_options);
2387
2388 /* Stabilize threads (move out of jump pads).
2389
2390 If a thread is midway through collecting a fast tracepoint, we
2391 need to finish the collection and move it out of the jump pad
2392 before reporting the signal.
2393
2394 This avoids recursion while collecting (when a signal arrives
2395 midway, and the signal handler itself collects), which would trash
2396 the trace buffer. In case the user set a breakpoint in a signal
2397 handler, this avoids the backtrace showing the jump pad, etc.
2398 Most importantly, there are certain things we can't do safely if
2399 threads are stopped in a jump pad (or in its callees). For
2400 example:
2401
2402 - starting a new trace run. A thread still collecting the
2403 previous run could trash the trace buffer when resumed. The trace
2404 buffer control structures would have been reset but the thread had
2405 no way to tell. The thread could even be midway through memcpy'ing
2406 to the buffer, which would mean that when resumed, it would clobber
2407 the trace buffer that had been set up for a new run.
2408
2409 - we can't rewrite/reuse the jump pads for new tracepoints
2410 safely. Say you do tstart while a thread is stopped midway through
2411 a collect. When the thread is later resumed, it finishes the
2412 collection, and returns to the jump pad, to execute the original
2413 instruction that was under the tracepoint jump at the time the
2414 older run had been started. If the jump pad has been rewritten
2415 in the meantime for something else in the new run, the thread would
2416 now execute wrong, effectively random instructions. */
2417
2418 static void
2419 linux_stabilize_threads (void)
2420 {
2421 struct thread_info *saved_thread;
2422 struct thread_info *thread_stuck;
2423
2424 thread_stuck
2425 = (struct thread_info *) find_inferior (&all_threads,
2426 stuck_in_jump_pad_callback,
2427 NULL);
2428 if (thread_stuck != NULL)
2429 {
2430 if (debug_threads)
2431 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2432 lwpid_of (thread_stuck));
2433 return;
2434 }
2435
2436 saved_thread = current_thread;
2437
2438 stabilizing_threads = 1;
2439
2440 /* Kick 'em all. */
2441 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2442
2443 /* Loop until all are stopped out of the jump pads. */
2444 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2445 {
2446 struct target_waitstatus ourstatus;
2447 struct lwp_info *lwp;
2448 int wstat;
2449
2450 /* Note that we go through the full wait event loop. While
2451 moving threads out of the jump pad, we need to be able to step
2452 over internal breakpoints and such. */
2453 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2454
2455 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2456 {
2457 lwp = get_thread_lwp (current_thread);
2458
2459 /* Lock it. */
2460 lwp->suspended++;
2461
2462 if (ourstatus.value.sig != GDB_SIGNAL_0
2463 || current_thread->last_resume_kind == resume_stop)
2464 {
2465 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2466 enqueue_one_deferred_signal (lwp, &wstat);
2467 }
2468 }
2469 }
2470
2471 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2472
2473 stabilizing_threads = 0;
2474
2475 current_thread = saved_thread;
2476
2477 if (debug_threads)
2478 {
2479 thread_stuck
2480 = (struct thread_info *) find_inferior (&all_threads,
2481 stuck_in_jump_pad_callback,
2482 NULL);
2483 if (thread_stuck != NULL)
2484 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2485 lwpid_of (thread_stuck));
2486 }
2487 }
2488
2489 static void async_file_mark (void);
2490
2491 /* Convenience function that is called when the kernel reports an
2492 event that is not passed out to GDB. */
2493
2494 static ptid_t
2495 ignore_event (struct target_waitstatus *ourstatus)
2496 {
2497 /* If we got an event, there may still be others, as a single
2498 SIGCHLD can indicate more than one child stopped. This forces
2499 another target_wait call. */
2500 async_file_mark ();
2501
2502 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2503 return null_ptid;
2504 }
2505
2506 /* Wait for a process event, and return its status. */
2507
2508 static ptid_t
2509 linux_wait_1 (ptid_t ptid,
2510 struct target_waitstatus *ourstatus, int target_options)
2511 {
2512 int w;
2513 struct lwp_info *event_child;
2514 int options;
2515 int pid;
2516 int step_over_finished;
2517 int bp_explains_trap;
2518 int maybe_internal_trap;
2519 int report_to_gdb;
2520 int trace_event;
2521 int in_step_range;
2522
2523 if (debug_threads)
2524 {
2525 debug_enter ();
2526 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2527 }
2528
2529 /* Translate generic target options into linux options. */
2530 options = __WALL;
2531 if (target_options & TARGET_WNOHANG)
2532 options |= WNOHANG;
2533
2534 bp_explains_trap = 0;
2535 trace_event = 0;
2536 in_step_range = 0;
2537 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2538
2539 if (ptid_equal (step_over_bkpt, null_ptid))
2540 pid = linux_wait_for_event (ptid, &w, options);
2541 else
2542 {
2543 if (debug_threads)
2544 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2545 target_pid_to_str (step_over_bkpt));
2546 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2547 }
2548
2549 if (pid == 0)
2550 {
2551 gdb_assert (target_options & TARGET_WNOHANG);
2552
2553 if (debug_threads)
2554 {
2555 debug_printf ("linux_wait_1 ret = null_ptid, "
2556 "TARGET_WAITKIND_IGNORE\n");
2557 debug_exit ();
2558 }
2559
2560 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2561 return null_ptid;
2562 }
2563 else if (pid == -1)
2564 {
2565 if (debug_threads)
2566 {
2567 debug_printf ("linux_wait_1 ret = null_ptid, "
2568 "TARGET_WAITKIND_NO_RESUMED\n");
2569 debug_exit ();
2570 }
2571
2572 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2573 return null_ptid;
2574 }
2575
2576 event_child = get_thread_lwp (current_thread);
2577
2578 /* linux_wait_for_event only returns an exit status for the last
2579 child of a process. Report it. */
2580 if (WIFEXITED (w) || WIFSIGNALED (w))
2581 {
2582 if (WIFEXITED (w))
2583 {
2584 ourstatus->kind = TARGET_WAITKIND_EXITED;
2585 ourstatus->value.integer = WEXITSTATUS (w);
2586
2587 if (debug_threads)
2588 {
2589 debug_printf ("linux_wait_1 ret = %s, exited with "
2590 "retcode %d\n",
2591 target_pid_to_str (ptid_of (current_thread)),
2592 WEXITSTATUS (w));
2593 debug_exit ();
2594 }
2595 }
2596 else
2597 {
2598 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2599 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2600
2601 if (debug_threads)
2602 {
2603 debug_printf ("linux_wait_1 ret = %s, terminated with "
2604 "signal %d\n",
2605 target_pid_to_str (ptid_of (current_thread)),
2606 WTERMSIG (w));
2607 debug_exit ();
2608 }
2609 }
2610
2611 return ptid_of (current_thread);
2612 }
2613
2614 /* If step-over executes a breakpoint instruction, it means a
2615 gdb/gdbserver breakpoint had been planted on top of a permanent
2616 breakpoint. The PC has been adjusted by
2617 check_stopped_by_breakpoint to point at the breakpoint address.
2618 Advance the PC manually past the breakpoint, otherwise the
2619 program would keep trapping the permanent breakpoint forever. */
2620 if (!ptid_equal (step_over_bkpt, null_ptid)
2621 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2622 {
2623 unsigned int increment_pc = the_low_target.breakpoint_len;
2624
2625 if (debug_threads)
2626 {
2627 debug_printf ("step-over for %s executed software breakpoint\n",
2628 target_pid_to_str (ptid_of (current_thread)));
2629 }
2630
2631 if (increment_pc != 0)
2632 {
2633 struct regcache *regcache
2634 = get_thread_regcache (current_thread, 1);
2635
2636 event_child->stop_pc += increment_pc;
2637 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2638
2639 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2640 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2641 }
2642 }
2643
2644 /* If this event was not handled before, and is not a SIGTRAP, we
2645 report it. SIGILL and SIGSEGV are also treated as traps in case
2646 a breakpoint is inserted at the current PC. If this target does
2647 not support internal breakpoints at all, we also report the
2648 SIGTRAP without further processing; it's of no concern to us. */
2649 maybe_internal_trap
2650 = (supports_breakpoints ()
2651 && (WSTOPSIG (w) == SIGTRAP
2652 || ((WSTOPSIG (w) == SIGILL
2653 || WSTOPSIG (w) == SIGSEGV)
2654 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2655
2656 if (maybe_internal_trap)
2657 {
2658 /* Handle anything that requires bookkeeping before deciding to
2659 report the event or continue waiting. */
2660
2661 /* First check if we can explain the SIGTRAP with an internal
2662 breakpoint, or if we should possibly report the event to GDB.
2663 Do this before anything that may remove or insert a
2664 breakpoint. */
2665 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2666
2667 /* We have a SIGTRAP, possibly a step-over dance has just
2668 finished. If so, tweak the state machine accordingly,
2669 reinsert breakpoints and delete any reinsert (software
2670 single-step) breakpoints. */
2671 step_over_finished = finish_step_over (event_child);
2672
2673 /* Now invoke the callbacks of any internal breakpoints there. */
2674 check_breakpoints (event_child->stop_pc);
2675
2676 /* Handle tracepoint data collecting. This may overflow the
2677 trace buffer, and cause a tracing stop, removing
2678 breakpoints. */
2679 trace_event = handle_tracepoints (event_child);
2680
2681 if (bp_explains_trap)
2682 {
2683 /* If we stepped or ran into an internal breakpoint, we've
2684 already handled it. So next time we resume (from this
2685 PC), we should step over it. */
2686 if (debug_threads)
2687 debug_printf ("Hit a gdbserver breakpoint.\n");
2688
2689 if (breakpoint_here (event_child->stop_pc))
2690 event_child->need_step_over = 1;
2691 }
2692 }
2693 else
2694 {
2695 /* We have some other signal, possibly a step-over dance was in
2696 progress, and it should be cancelled too. */
2697 step_over_finished = finish_step_over (event_child);
2698 }
2699
2700 /* We have all the data we need. Either report the event to GDB, or
2701 resume threads and keep waiting for more. */
2702
2703 /* If we're collecting a fast tracepoint, finish the collection and
2704 move out of the jump pad before delivering a signal. See
2705 linux_stabilize_threads. */
2706
2707 if (WIFSTOPPED (w)
2708 && WSTOPSIG (w) != SIGTRAP
2709 && supports_fast_tracepoints ()
2710 && agent_loaded_p ())
2711 {
2712 if (debug_threads)
2713 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2714 "to defer or adjust it.\n",
2715 WSTOPSIG (w), lwpid_of (current_thread));
2716
2717 /* Allow debugging the jump pad itself. */
2718 if (current_thread->last_resume_kind != resume_step
2719 && maybe_move_out_of_jump_pad (event_child, &w))
2720 {
2721 enqueue_one_deferred_signal (event_child, &w);
2722
2723 if (debug_threads)
2724 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2725 WSTOPSIG (w), lwpid_of (current_thread));
2726
2727 linux_resume_one_lwp (event_child, 0, 0, NULL);
2728
2729 return ignore_event (ourstatus);
2730 }
2731 }
2732
2733 if (event_child->collecting_fast_tracepoint)
2734 {
2735 if (debug_threads)
2736 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2737 "Check if we're already there.\n",
2738 lwpid_of (current_thread),
2739 event_child->collecting_fast_tracepoint);
2740
2741 trace_event = 1;
2742
2743 event_child->collecting_fast_tracepoint
2744 = linux_fast_tracepoint_collecting (event_child, NULL);
2745
2746 if (event_child->collecting_fast_tracepoint != 1)
2747 {
2748 /* No longer need this breakpoint. */
2749 if (event_child->exit_jump_pad_bkpt != NULL)
2750 {
2751 if (debug_threads)
2752 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2753 "Stopping all threads momentarily.\n");
2754
2755 /* Other running threads could hit this breakpoint.
2756 We don't handle moribund locations like GDB does,
2757 instead we always pause all threads when removing
2758 breakpoints, so that any step-over or
2759 decr_pc_after_break adjustment is always taken
2760 care of while the breakpoint is still
2761 inserted. */
2762 stop_all_lwps (1, event_child);
2763
2764 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2765 event_child->exit_jump_pad_bkpt = NULL;
2766
2767 unstop_all_lwps (1, event_child);
2768
2769 gdb_assert (event_child->suspended >= 0);
2770 }
2771 }
2772
2773 if (event_child->collecting_fast_tracepoint == 0)
2774 {
2775 if (debug_threads)
2776 debug_printf ("fast tracepoint finished "
2777 "collecting successfully.\n");
2778
2779 /* We may have a deferred signal to report. */
2780 if (dequeue_one_deferred_signal (event_child, &w))
2781 {
2782 if (debug_threads)
2783 debug_printf ("dequeued one signal.\n");
2784 }
2785 else
2786 {
2787 if (debug_threads)
2788 debug_printf ("no deferred signals.\n");
2789
2790 if (stabilizing_threads)
2791 {
2792 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2793 ourstatus->value.sig = GDB_SIGNAL_0;
2794
2795 if (debug_threads)
2796 {
2797 debug_printf ("linux_wait_1 ret = %s, stopped "
2798 "while stabilizing threads\n",
2799 target_pid_to_str (ptid_of (current_thread)));
2800 debug_exit ();
2801 }
2802
2803 return ptid_of (current_thread);
2804 }
2805 }
2806 }
2807 }
2808
2809 /* Check whether GDB would be interested in this event. */
2810
2811 /* If GDB is not interested in this signal, don't stop other
2812 threads, and don't report it to GDB. Just resume the inferior
2813 right away. We do this for threading-related signals as well as
2814 any that GDB specifically requested we ignore. But never ignore
2815 SIGSTOP if we sent it ourselves, and do not ignore signals when
2816 stepping - they may require special handling to skip the signal
2817 handler. Also never ignore signals that could be caused by a
2818 breakpoint. */
2819 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2820 thread library? */
2821 if (WIFSTOPPED (w)
2822 && current_thread->last_resume_kind != resume_step
2823 && (
2824 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2825 (current_process ()->priv->thread_db != NULL
2826 && (WSTOPSIG (w) == __SIGRTMIN
2827 || WSTOPSIG (w) == __SIGRTMIN + 1))
2828 ||
2829 #endif
2830 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2831 && !(WSTOPSIG (w) == SIGSTOP
2832 && current_thread->last_resume_kind == resume_stop)
2833 && !linux_wstatus_maybe_breakpoint (w))))
2834 {
2835 siginfo_t info, *info_p;
2836
2837 if (debug_threads)
2838 debug_printf ("Ignored signal %d for LWP %ld.\n",
2839 WSTOPSIG (w), lwpid_of (current_thread));
2840
2841 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2842 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2843 info_p = &info;
2844 else
2845 info_p = NULL;
2846 linux_resume_one_lwp (event_child, event_child->stepping,
2847 WSTOPSIG (w), info_p);
2848 return ignore_event (ourstatus);
2849 }
2850
2851 /* Note that all addresses are always "out of the step range" when
2852 there's no range to begin with. */
2853 in_step_range = lwp_in_step_range (event_child);
2854
2855 /* If GDB wanted this thread to single step, and the thread is out
2856 of the step range, we always want to report the SIGTRAP, and let
2857 GDB handle it. Watchpoints should always be reported. So should
2858 signals we can't explain. A SIGTRAP we can't explain could be a
2859 GDB breakpoint --- we may or may not support Z0 breakpoints. If
2860 we do, we'll be able to handle GDB breakpoints on top of internal
2861 breakpoints, by handling the internal breakpoint and still
2862 reporting the event to GDB. If we don't, we're out of luck; GDB
2863 won't see the breakpoint hit. */
2864 report_to_gdb = (!maybe_internal_trap
2865 || (current_thread->last_resume_kind == resume_step
2866 && !in_step_range)
2867 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
2868 || (!step_over_finished && !in_step_range
2869 && !bp_explains_trap && !trace_event)
2870 || (gdb_breakpoint_here (event_child->stop_pc)
2871 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2872 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2873
2874 run_breakpoint_commands (event_child->stop_pc);
2875
2876 /* We found no reason GDB would want us to stop. We either hit one
2877 of our own breakpoints, or finished an internal step GDB
2878 shouldn't know about. */
2879 if (!report_to_gdb)
2880 {
2881 if (debug_threads)
2882 {
2883 if (bp_explains_trap)
2884 debug_printf ("Hit a gdbserver breakpoint.\n");
2885 if (step_over_finished)
2886 debug_printf ("Step-over finished.\n");
2887 if (trace_event)
2888 debug_printf ("Tracepoint event.\n");
2889 if (lwp_in_step_range (event_child))
2890 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2891 paddress (event_child->stop_pc),
2892 paddress (event_child->step_range_start),
2893 paddress (event_child->step_range_end));
2894 }
2895
2896 /* We're not reporting this breakpoint to GDB, so apply the
2897 decr_pc_after_break adjustment to the inferior's regcache
2898 ourselves. */
2899
2900 if (the_low_target.set_pc != NULL)
2901 {
2902 struct regcache *regcache
2903 = get_thread_regcache (current_thread, 1);
2904 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2905 }
2906
2907 /* We may have finished stepping over a breakpoint. If so,
2908 we've stopped and suspended all LWPs momentarily except the
2909 stepping one. This is where we resume them all again. We're
2910 going to keep waiting, so use proceed, which handles stepping
2911 over the next breakpoint. */
2912 if (debug_threads)
2913 debug_printf ("proceeding all threads.\n");
2914
2915 if (step_over_finished)
2916 unsuspend_all_lwps (event_child);
2917
2918 proceed_all_lwps ();
2919 return ignore_event (ourstatus);
2920 }
2921
2922 if (debug_threads)
2923 {
2924 if (current_thread->last_resume_kind == resume_step)
2925 {
2926 if (event_child->step_range_start == event_child->step_range_end)
2927 debug_printf ("GDB wanted to single-step, reporting event.\n");
2928 else if (!lwp_in_step_range (event_child))
2929 debug_printf ("Out of step range, reporting event.\n");
2930 }
2931 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2932 debug_printf ("Stopped by watchpoint.\n");
2933 else if (gdb_breakpoint_here (event_child->stop_pc))
2934 debug_printf ("Stopped by GDB breakpoint.\n");
2935 /* Whatever the precise stop reason, note the trap event. */
2936 debug_printf ("Hit a non-gdbserver trap event.\n");
2937 }
2938
2939 /* Alright, we're going to report a stop. */
2940
2941 if (!stabilizing_threads)
2942 {
2943 /* In all-stop, stop all threads. */
2944 if (!non_stop)
2945 stop_all_lwps (0, NULL);
2946
2947 /* If we're not waiting for a specific LWP, choose an event LWP
2948 from among those that have had events. Giving equal priority
2949 to all LWPs that have had events helps prevent
2950 starvation. */
2951 if (ptid_equal (ptid, minus_one_ptid))
2952 {
2953 event_child->status_pending_p = 1;
2954 event_child->status_pending = w;
2955
2956 select_event_lwp (&event_child);
2957
2958 /* current_thread and event_child must stay in sync. */
2959 current_thread = get_lwp_thread (event_child);
2960
2961 event_child->status_pending_p = 0;
2962 w = event_child->status_pending;
2963 }
2964
2965 if (step_over_finished)
2966 {
2967 if (!non_stop)
2968 {
2969 /* If we were doing a step-over, all other threads but
2970 the stepping one had been paused in start_step_over,
2971 with their suspend counts incremented. We don't want
2972 to do a full unstop/unpause, because we're in
2973 all-stop mode (so we want threads stopped), but we
2974 still need to unsuspend the other threads, to
2975 decrement their `suspended' count back. */
2976 unsuspend_all_lwps (event_child);
2977 }
2978 else
2979 {
2980 /* If we just finished a step-over, then all threads had
2981 been momentarily paused. In all-stop, that's fine,
2982 we want threads stopped by now anyway. In non-stop,
2983 we need to re-resume threads that GDB wanted to be
2984 running. */
2985 unstop_all_lwps (1, event_child);
2986 }
2987 }
2988
2989 /* Stabilize threads (move out of jump pads). */
2990 if (!non_stop)
2991 stabilize_threads ();
2992 }
2993 else
2994 {
2995 /* If we just finished a step-over, then all threads had been
2996 momentarily paused. In all-stop, that's fine, we want
2997 threads stopped by now anyway. In non-stop, we need to
2998 re-resume threads that GDB wanted to be running. */
2999 if (step_over_finished)
3000 unstop_all_lwps (1, event_child);
3001 }
3002
3003 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3004
3005 /* Now that we've selected our final event LWP, un-adjust its PC if
3006 it was a software breakpoint, and the client doesn't know we can
3007 adjust the breakpoint ourselves. */
3008 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3009 && !swbreak_feature)
3010 {
3011 int decr_pc = the_low_target.decr_pc_after_break;
3012
3013 if (decr_pc != 0)
3014 {
3015 struct regcache *regcache
3016 = get_thread_regcache (current_thread, 1);
3017 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3018 }
3019 }
3020
3021 if (current_thread->last_resume_kind == resume_stop
3022 && WSTOPSIG (w) == SIGSTOP)
3023 {
3024 /* The thread was requested to stop by GDB with vCont;t, and it
3025 stopped cleanly, so report it as stopped with SIG0. The use of
3026 SIGSTOP is an implementation detail. */
3027 ourstatus->value.sig = GDB_SIGNAL_0;
3028 }
3029 else if (current_thread->last_resume_kind == resume_stop
3030 && WSTOPSIG (w) != SIGSTOP)
3031 {
3032 /* The thread was requested to stop by GDB with vCont;t, but it
3033 stopped for some other reason; report that signal. */
3034 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3035 }
3036 else
3037 {
3038 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3039 }
3040
3041 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3042
3043 if (debug_threads)
3044 {
3045 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3046 target_pid_to_str (ptid_of (current_thread)),
3047 ourstatus->kind, ourstatus->value.sig);
3048 debug_exit ();
3049 }
3050
3051 return ptid_of (current_thread);
3052 }
3053
3054 /* Get rid of any pending event in the pipe. */
3055 static void
3056 async_file_flush (void)
3057 {
3058 int ret;
3059 char buf;
3060
3061 do
3062 ret = read (linux_event_pipe[0], &buf, 1);
3063 while (ret >= 0 || (ret == -1 && errno == EINTR));
3064 }
3065
3066 /* Put something in the pipe, so the event loop wakes up. */
3067 static void
3068 async_file_mark (void)
3069 {
3070 int ret;
3071
3072 async_file_flush ();
3073
3074 do
3075 ret = write (linux_event_pipe[1], "+", 1);
3076 while (ret == 0 || (ret == -1 && errno == EINTR));
3077
3078 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3079 be awakened anyway. */
3080 }
3081
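/* Both helpers above assume linux_event_pipe is a non-blocking
   self-pipe: a write wakes up the event loop's select/poll, and
   reads drain it without blocking.  A minimal sketch of the setup
   this relies on (presumably performed where async I/O is enabled;
   error handling omitted):

     if (pipe (linux_event_pipe) == 0)
       {
         fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
         fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
       }

   With O_NONBLOCK set, the read loop in async_file_flush stops with
   -1/EAGAIN once the pipe is empty, and a write into a full pipe
   fails with EAGAIN, which async_file_mark deliberately ignores.  */
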
3082 static ptid_t
3083 linux_wait (ptid_t ptid,
3084 struct target_waitstatus *ourstatus, int target_options)
3085 {
3086 ptid_t event_ptid;
3087
3088 /* Flush the async file first. */
3089 if (target_is_async_p ())
3090 async_file_flush ();
3091
3092 do
3093 {
3094 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3095 }
3096 while ((target_options & TARGET_WNOHANG) == 0
3097 && ptid_equal (event_ptid, null_ptid)
3098 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3099
3100 /* If at least one stop was reported, there may be more. A single
3101 SIGCHLD can signal more than one child stop. */
3102 if (target_is_async_p ()
3103 && (target_options & TARGET_WNOHANG) != 0
3104 && !ptid_equal (event_ptid, null_ptid))
3105 async_file_mark ();
3106
3107 return event_ptid;
3108 }
3109
3110 /* Send a signal to an LWP. */
3111
3112 static int
3113 kill_lwp (unsigned long lwpid, int signo)
3114 {
3115 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3116 fails with ENOSYS, we are not using NPTL threads; fall back to kill. */
3117
3118 #ifdef __NR_tkill
3119 {
3120 static int tkill_failed;
3121
3122 if (!tkill_failed)
3123 {
3124 int ret;
3125
3126 errno = 0;
3127 ret = syscall (__NR_tkill, lwpid, signo);
3128 if (errno != ENOSYS)
3129 return ret;
3130 tkill_failed = 1;
3131 }
3132 }
3133 #endif
3134
3135 return kill (lwpid, signo);
3136 }
3137
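/* The distinction above matters because kill (PID, SIG) addresses a
   whole thread group: the kernel may deliver the signal to any
   thread in it that doesn't block SIG.  tkill targets one specific
   thread, which is what stopping an individual LWP requires.  For
   contrast, an illustrative sketch of the tgkill variant, which
   additionally guards against LWP-id reuse across processes (not
   what this file uses):

     static int
     tgkill_lwp (pid_t tgid, pid_t lwpid, int signo)
     {
       return syscall (SYS_tgkill, tgid, lwpid, signo);
     }
*/
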
3138 void
3139 linux_stop_lwp (struct lwp_info *lwp)
3140 {
3141 send_sigstop (lwp);
3142 }
3143
3144 static void
3145 send_sigstop (struct lwp_info *lwp)
3146 {
3147 int pid;
3148
3149 pid = lwpid_of (get_lwp_thread (lwp));
3150
3151 /* If we already have a pending stop signal for this process, don't
3152 send another. */
3153 if (lwp->stop_expected)
3154 {
3155 if (debug_threads)
3156 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3157
3158 return;
3159 }
3160
3161 if (debug_threads)
3162 debug_printf ("Sending sigstop to lwp %d\n", pid);
3163
3164 lwp->stop_expected = 1;
3165 kill_lwp (pid, SIGSTOP);
3166 }
3167
3168 static int
3169 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3170 {
3171 struct thread_info *thread = (struct thread_info *) entry;
3172 struct lwp_info *lwp = get_thread_lwp (thread);
3173
3174 /* Ignore EXCEPT. */
3175 if (lwp == except)
3176 return 0;
3177
3178 if (lwp->stopped)
3179 return 0;
3180
3181 send_sigstop (lwp);
3182 return 0;
3183 }
3184
3185 /* Increment the suspend count of an LWP, and stop it, if not stopped
3186 yet. */
3187 static int
3188 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3189 void *except)
3190 {
3191 struct thread_info *thread = (struct thread_info *) entry;
3192 struct lwp_info *lwp = get_thread_lwp (thread);
3193
3194 /* Ignore EXCEPT. */
3195 if (lwp == except)
3196 return 0;
3197
3198 lwp->suspended++;
3199
3200 return send_sigstop_callback (entry, except);
3201 }
3202
3203 static void
3204 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3205 {
3206 /* It's dead, really. */
3207 lwp->dead = 1;
3208
3209 /* Store the exit status for later. */
3210 lwp->status_pending_p = 1;
3211 lwp->status_pending = wstat;
3212
3213 /* Prevent trying to stop it. */
3214 lwp->stopped = 1;
3215
3216 /* No further stops are expected from a dead lwp. */
3217 lwp->stop_expected = 0;
3218 }
3219
3220 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3221
3222 static void
3223 wait_for_sigstop (void)
3224 {
3225 struct thread_info *saved_thread;
3226 ptid_t saved_tid;
3227 int wstat;
3228 int ret;
3229
3230 saved_thread = current_thread;
3231 if (saved_thread != NULL)
3232 saved_tid = saved_thread->entry.id;
3233 else
3234 saved_tid = null_ptid; /* avoid bogus unused warning */
3235
3236 if (debug_threads)
3237 debug_printf ("wait_for_sigstop: pulling events\n");
3238
3239 /* Passing NULL_PTID as filter indicates we want all events to be
3240 left pending. Eventually this returns when there are no
3241 unwaited-for children left. */
3242 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3243 &wstat, __WALL);
3244 gdb_assert (ret == -1);
3245
3246 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3247 current_thread = saved_thread;
3248 else
3249 {
3250 if (debug_threads)
3251 debug_printf ("Previously current thread died.\n");
3252
3253 if (non_stop)
3254 {
3255 /* We can't change the current inferior behind GDB's back,
3256 otherwise, a subsequent command may apply to the wrong
3257 process. */
3258 current_thread = NULL;
3259 }
3260 else
3261 {
3262 /* Set a valid thread as current. */
3263 set_desired_thread (0);
3264 }
3265 }
3266 }
3267
3268 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3269 move it out, because we need to report the stop event to GDB. For
3270 example, if the user puts a breakpoint in the jump pad, it's
3271 because she wants to debug it. */
3272
3273 static int
3274 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3275 {
3276 struct thread_info *thread = (struct thread_info *) entry;
3277 struct lwp_info *lwp = get_thread_lwp (thread);
3278
3279 gdb_assert (lwp->suspended == 0);
3280 gdb_assert (lwp->stopped);
3281
3282 /* Allow debugging the jump pad, gdb_collect, etc. */
3283 return (supports_fast_tracepoints ()
3284 && agent_loaded_p ()
3285 && (gdb_breakpoint_here (lwp->stop_pc)
3286 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3287 || thread->last_resume_kind == resume_step)
3288 && linux_fast_tracepoint_collecting (lwp, NULL));
3289 }
3290
3291 static void
3292 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3293 {
3294 struct thread_info *thread = (struct thread_info *) entry;
3295 struct lwp_info *lwp = get_thread_lwp (thread);
3296 int *wstat;
3297
3298 gdb_assert (lwp->suspended == 0);
3299 gdb_assert (lwp->stopped);
3300
3301 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3302
3303 /* Allow debugging the jump pad, gdb_collect, etc. */
3304 if (!gdb_breakpoint_here (lwp->stop_pc)
3305 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3306 && thread->last_resume_kind != resume_step
3307 && maybe_move_out_of_jump_pad (lwp, wstat))
3308 {
3309 if (debug_threads)
3310 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3311 lwpid_of (thread));
3312
3313 if (wstat)
3314 {
3315 lwp->status_pending_p = 0;
3316 enqueue_one_deferred_signal (lwp, wstat);
3317
3318 if (debug_threads)
3319 debug_printf ("Signal %d for LWP %ld deferred "
3320 "(in jump pad)\n",
3321 WSTOPSIG (*wstat), lwpid_of (thread));
3322 }
3323
3324 linux_resume_one_lwp (lwp, 0, 0, NULL);
3325 }
3326 else
3327 lwp->suspended++;
3328 }
3329
3330 static int
3331 lwp_running (struct inferior_list_entry *entry, void *data)
3332 {
3333 struct thread_info *thread = (struct thread_info *) entry;
3334 struct lwp_info *lwp = get_thread_lwp (thread);
3335
3336 if (lwp->dead)
3337 return 0;
3338 if (lwp->stopped)
3339 return 0;
3340 return 1;
3341 }
3342
3343 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3344 If SUSPEND, then also increase the suspend count of every LWP,
3345 except EXCEPT. */
3346
3347 static void
3348 stop_all_lwps (int suspend, struct lwp_info *except)
3349 {
3350 /* Should not be called recursively. */
3351 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3352
3353 if (debug_threads)
3354 {
3355 debug_enter ();
3356 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3357 suspend ? "stop-and-suspend" : "stop",
3358 except != NULL
3359 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3360 : "none");
3361 }
3362
3363 stopping_threads = (suspend
3364 ? STOPPING_AND_SUSPENDING_THREADS
3365 : STOPPING_THREADS);
3366
3367 if (suspend)
3368 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3369 else
3370 find_inferior (&all_threads, send_sigstop_callback, except);
3371 wait_for_sigstop ();
3372 stopping_threads = NOT_STOPPING_THREADS;
3373
3374 if (debug_threads)
3375 {
3376 debug_printf ("stop_all_lwps done, setting stopping_threads "
3377 "back to !stopping\n");
3378 debug_exit ();
3379 }
3380 }
3381
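/* Illustrative call shapes, as seen earlier in this file:

     stop_all_lwps (0, NULL);         stop everything (all-stop
                                      reporting in linux_wait_1)

     stop_all_lwps (1, event_child);  stop and suspend everything
                                      except EVENT_CHILD, e.g. while
                                      deleting its exit-jump-pad
                                      breakpoint

   Every stop-and-suspend must later be balanced by an unsuspend
   (unstop_all_lwps (1, ...) or unsuspend_all_lwps), which is what
   the gdb_assert (lwp->suspended >= 0) in unsuspend_one_lwp above
   enforces.  */
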
3382 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3383 SIGNAL is nonzero, give it that signal. */
3384
3385 static void
3386 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3387 int step, int signal, siginfo_t *info)
3388 {
3389 struct thread_info *thread = get_lwp_thread (lwp);
3390 struct thread_info *saved_thread;
3391 int fast_tp_collecting;
3392
3393 if (lwp->stopped == 0)
3394 return;
3395
3396 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3397
3398 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3399
3400 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3401 user used the "jump" command, or "set $pc = foo"). */
3402 if (lwp->stop_pc != get_pc (lwp))
3403 {
3404 /* Collecting 'while-stepping' actions doesn't make sense
3405 anymore. */
3406 release_while_stepping_state_list (thread);
3407 }
3408
3409 /* If we have pending signals or status, and a new signal, enqueue the
3410 signal. Also enqueue the signal if we are waiting to reinsert a
3411 breakpoint; it will be picked up again below. */
3412 if (signal != 0
3413 && (lwp->status_pending_p
3414 || lwp->pending_signals != NULL
3415 || lwp->bp_reinsert != 0
3416 || fast_tp_collecting))
3417 {
3418 struct pending_signals *p_sig;
3419 p_sig = xmalloc (sizeof (*p_sig));
3420 p_sig->prev = lwp->pending_signals;
3421 p_sig->signal = signal;
3422 if (info == NULL)
3423 memset (&p_sig->info, 0, sizeof (siginfo_t));
3424 else
3425 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3426 lwp->pending_signals = p_sig;
3427 }
3428
3429 if (lwp->status_pending_p)
3430 {
3431 if (debug_threads)
3432 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3433 " has pending status\n",
3434 lwpid_of (thread), step ? "step" : "continue", signal,
3435 lwp->stop_expected ? "expected" : "not expected");
3436 return;
3437 }
3438
3439 saved_thread = current_thread;
3440 current_thread = thread;
3441
3442 if (debug_threads)
3443 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3444 lwpid_of (thread), step ? "step" : "continue", signal,
3445 lwp->stop_expected ? "expected" : "not expected");
3446
3447 /* This bit needs some thinking about. If we get a signal that
3448 we must report while a single-step reinsert is still pending,
3449 we often end up resuming the thread. It might be better to
3450 (ew) allow a stack of pending events; then we could be sure that
3451 the reinsert happened right away and not lose any signals.
3452
3453 Making this stack would also shrink the window in which breakpoints are
3454 uninserted (see comment in linux_wait_for_lwp) but not enough for
3455 complete correctness, so it won't solve that problem. It may be
3456 worthwhile just to solve this one, however. */
3457 if (lwp->bp_reinsert != 0)
3458 {
3459 if (debug_threads)
3460 debug_printf (" pending reinsert at 0x%s\n",
3461 paddress (lwp->bp_reinsert));
3462
3463 if (can_hardware_single_step ())
3464 {
3465 if (fast_tp_collecting == 0)
3466 {
3467 if (step == 0)
3468 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3469 if (lwp->suspended)
3470 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3471 lwp->suspended);
3472 }
3473
3474 step = 1;
3475 }
3476
3477 /* Postpone any pending signal. It was enqueued above. */
3478 signal = 0;
3479 }
3480
3481 if (fast_tp_collecting == 1)
3482 {
3483 if (debug_threads)
3484 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3485 " (exit-jump-pad-bkpt)\n",
3486 lwpid_of (thread));
3487
3488 /* Postpone any pending signal. It was enqueued above. */
3489 signal = 0;
3490 }
3491 else if (fast_tp_collecting == 2)
3492 {
3493 if (debug_threads)
3494 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3495 " single-stepping\n",
3496 lwpid_of (thread));
3497
3498 if (can_hardware_single_step ())
3499 step = 1;
3500 else
3501 {
3502 internal_error (__FILE__, __LINE__,
3503 "moving out of jump pad single-stepping"
3504 " not implemented on this target");
3505 }
3506
3507 /* Postpone any pending signal. It was enqueued above. */
3508 signal = 0;
3509 }
3510
3511 /* If we have while-stepping actions in this thread, set it stepping.
3512 If we have a signal to deliver, it may or may not be set to
3513 SIG_IGN; we don't know. Assume it is, and allow collecting
3514 while-stepping into a signal handler. A possible smart thing to
3515 do would be to set an internal breakpoint at the signal return
3516 address, continue, and carry on catching this while-stepping
3517 action only when that breakpoint is hit. A future
3518 enhancement. */
3519 if (thread->while_stepping != NULL
3520 && can_hardware_single_step ())
3521 {
3522 if (debug_threads)
3523 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3524 lwpid_of (thread));
3525 step = 1;
3526 }
3527
3528 if (the_low_target.get_pc != NULL)
3529 {
3530 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3531
3532 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3533
3534 if (debug_threads)
3535 {
3536 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3537 (long) lwp->stop_pc);
3538 }
3539 }
3540
3541 /* If we have pending signals, consume one unless we are trying to
3542 reinsert a breakpoint or we're trying to finish a fast tracepoint
3543 collect. */
3544 if (lwp->pending_signals != NULL
3545 && lwp->bp_reinsert == 0
3546 && fast_tp_collecting == 0)
3547 {
3548 struct pending_signals **p_sig;
3549
3550 p_sig = &lwp->pending_signals;
3551 while ((*p_sig)->prev != NULL)
3552 p_sig = &(*p_sig)->prev;
3553
3554 signal = (*p_sig)->signal;
3555 if ((*p_sig)->info.si_signo != 0)
3556 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3557 &(*p_sig)->info);
3558
3559 free (*p_sig);
3560 *p_sig = NULL;
3561 }
3562
3563 if (the_low_target.prepare_to_resume != NULL)
3564 the_low_target.prepare_to_resume (lwp);
3565
3566 regcache_invalidate_thread (thread);
3567 errno = 0;
3568 lwp->stepping = step;
3569 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3570 (PTRACE_TYPE_ARG3) 0,
3571 /* Coerce to a uintptr_t first to avoid potential gcc warning
3572 of coercing an 8 byte integer to a 4 byte pointer. */
3573 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3574
3575 current_thread = saved_thread;
3576 if (errno)
3577 perror_with_name ("resuming thread");
3578
3579 /* Successfully resumed. Clear state that no longer makes sense,
3580 and mark the LWP as running. Must not do this before resuming
3581 otherwise if that fails other code will be confused. E.g., we'd
3582 later try to stop the LWP and hang forever waiting for a stop
3583 status. Note that we must not throw after this is cleared,
3584 otherwise handle_zombie_lwp_error would get confused. */
3585 lwp->stopped = 0;
3586 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3587 }
3588
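/* Note the pending_signals list above is pushed at the head (the
   enqueue sets p_sig->prev to the old head) but consumed from the
   tail, so signals are delivered in FIFO order.  For example, after
   enqueuing SIGUSR1 and then SIGUSR2:

     lwp->pending_signals -> { SIGUSR2 } -> { SIGUSR1 } -> NULL

   and the dequeue loop walks ->prev to the last node, SIGUSR1, the
   oldest pending signal.  */
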
3589 /* Called when we try to resume a stopped LWP and that errors out. If
3590 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3591 or about to become one), discard the error, clear any pending
3592 status the LWP may have, and return true (we'll collect the exit
3593 status soon enough). Otherwise, return false. */
3594
3595 static int
3596 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3597 {
3598 struct thread_info *thread = get_lwp_thread (lp);
3599
3600 /* If we get an error after resuming the LWP successfully, we'd
3601 mistake a non-stopped (!T) state for the LWP being gone. */
3602 gdb_assert (lp->stopped);
3603
3604 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3605 because even if ptrace failed with ESRCH, the tracee may be "not
3606 yet fully dead", but already refusing ptrace requests. In that
3607 case the tracee has 'R (Running)' state for a little bit
3608 (observed in Linux 3.18). See also the note on ESRCH in the
3609 ptrace(2) man page. Instead, check whether the LWP has any state
3610 other than ptrace-stopped. */
3611
3612 /* Don't assume anything if /proc/PID/status can't be read. */
3613 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3614 {
3615 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3616 lp->status_pending_p = 0;
3617 return 1;
3618 }
3619 return 0;
3620 }
3621
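/* For illustration, a minimal sketch of the /proc probe this relies
   on; the real check is linux_proc_pid_is_trace_stopped_nowarn,
   provided by nat/linux-procfs.  The hypothetical helper below
   reports whether the "State:" line of /proc/PID/status shows
   "t (tracing stop)" ("T (tracing stop)" on some older kernels),
   and -1 if the file can't be read:

     static int
     lwp_is_trace_stopped (pid_t pid)
     {
       char path[64], line[128];
       FILE *f;
       int stopped = 0;

       snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
         return -1;
       while (fgets (line, sizeof (line), f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             stopped = strstr (line, "(tracing stop)") != NULL;
             break;
           }
       fclose (f);
       return stopped;
     }
*/
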
3622 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3623 disappears while we try to resume it. */
3624
3625 static void
3626 linux_resume_one_lwp (struct lwp_info *lwp,
3627 int step, int signal, siginfo_t *info)
3628 {
3629 TRY
3630 {
3631 linux_resume_one_lwp_throw (lwp, step, signal, info);
3632 }
3633 CATCH (ex, RETURN_MASK_ERROR)
3634 {
3635 if (!check_ptrace_stopped_lwp_gone (lwp))
3636 throw_exception (ex);
3637 }
3638 END_CATCH
3639 }
3640
3641 struct thread_resume_array
3642 {
3643 struct thread_resume *resume;
3644 size_t n;
3645 };
3646
3647 /* This function is called once per thread via find_inferior.
3648 ARG is a pointer to a thread_resume_array struct.
3649 We look up the resume request in ARG that applies to the thread
3650 ENTRY, and mark the thread with a pointer to it.
3651
3652 This algorithm is O(threads * resume elements), but the number of
3653 resume elements is small (and will remain small at least until GDB
3654 supports thread suspension). */
3655
3656 static int
3657 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3658 {
3659 struct thread_info *thread = (struct thread_info *) entry;
3660 struct lwp_info *lwp = get_thread_lwp (thread);
3661 int ndx;
3662 struct thread_resume_array *r;
3663
3664 r = arg;
3665
3666 for (ndx = 0; ndx < r->n; ndx++)
3667 {
3668 ptid_t ptid = r->resume[ndx].thread;
3669 if (ptid_equal (ptid, minus_one_ptid)
3670 || ptid_equal (ptid, entry->id)
3671 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3672 of PID'. */
3673 || (ptid_get_pid (ptid) == pid_of (thread)
3674 && (ptid_is_pid (ptid)
3675 || ptid_get_lwp (ptid) == -1)))
3676 {
3677 if (r->resume[ndx].kind == resume_stop
3678 && thread->last_resume_kind == resume_stop)
3679 {
3680 if (debug_threads)
3681 debug_printf ("already %s LWP %ld at GDB's request\n",
3682 (thread->last_status.kind
3683 == TARGET_WAITKIND_STOPPED)
3684 ? "stopped"
3685 : "stopping",
3686 lwpid_of (thread));
3687
3688 continue;
3689 }
3690
3691 lwp->resume = &r->resume[ndx];
3692 thread->last_resume_kind = lwp->resume->kind;
3693
3694 lwp->step_range_start = lwp->resume->step_range_start;
3695 lwp->step_range_end = lwp->resume->step_range_end;
3696
3697 /* If we had a deferred signal to report, dequeue one now.
3698 This can happen if LWP gets more than one signal while
3699 trying to get out of a jump pad. */
3700 if (lwp->stopped
3701 && !lwp->status_pending_p
3702 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3703 {
3704 lwp->status_pending_p = 1;
3705
3706 if (debug_threads)
3707 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3708 "leaving status pending.\n",
3709 WSTOPSIG (lwp->status_pending),
3710 lwpid_of (thread));
3711 }
3712
3713 return 0;
3714 }
3715 }
3716
3717 /* No resume action for this thread. */
3718 lwp->resume = NULL;
3719
3720 return 0;
3721 }
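
/* For illustration: the PTID matching rule from the loop above, pulled
   out as a stand-alone predicate.  A resume request PTID covers thread
   TID if it is the -1 wildcard, an exact match, or a process-wide
   request ('pPID' or 'pPID.-1').  Hypothetical helper; the function
   above open-codes this.  */
#if 0
static int
example_resume_request_matches (ptid_t ptid, ptid_t tid)
{
  return (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, tid)
	  || (ptid_get_pid (ptid) == ptid_get_pid (tid)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)));
}
#endif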
3722
3723 /* find_inferior callback for linux_resume.
3724 Set *FLAG_P if this lwp has an interesting status pending. */
3725
3726 static int
3727 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3728 {
3729 struct thread_info *thread = (struct thread_info *) entry;
3730 struct lwp_info *lwp = get_thread_lwp (thread);
3731
3732 /* LWPs which will not be resumed are not interesting, because
3733 we might not wait for them next time through linux_wait. */
3734 if (lwp->resume == NULL)
3735 return 0;
3736
3737 if (thread_still_has_status_pending_p (thread))
3738 * (int *) flag_p = 1;
3739
3740 return 0;
3741 }
3742
3743 /* Return 1 if this lwp that GDB wants running is stopped at an
3744 internal breakpoint that we need to step over. It assumes that any
3745 required STOP_PC adjustment has already been propagated to the
3746 inferior's regcache. */
3747
3748 static int
3749 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3750 {
3751 struct thread_info *thread = (struct thread_info *) entry;
3752 struct lwp_info *lwp = get_thread_lwp (thread);
3753 struct thread_info *saved_thread;
3754 CORE_ADDR pc;
3755
3756 /* LWPs which will not be resumed are not interesting, because we
3757 might not wait for them next time through linux_wait. */
3758
3759 if (!lwp->stopped)
3760 {
3761 if (debug_threads)
3762 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3763 lwpid_of (thread));
3764 return 0;
3765 }
3766
3767 if (thread->last_resume_kind == resume_stop)
3768 {
3769 if (debug_threads)
3770 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3771 " stopped\n",
3772 lwpid_of (thread));
3773 return 0;
3774 }
3775
3776 gdb_assert (lwp->suspended >= 0);
3777
3778 if (lwp->suspended)
3779 {
3780 if (debug_threads)
3781 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3782 lwpid_of (thread));
3783 return 0;
3784 }
3785
 	  /* NEED_STEP_OVER is only a hint; even when it is clear, we fall
 	     through and re-check the PC against the breakpoint tables
 	     below.  */
3786 	  if (!lwp->need_step_over)
3787 	    {
3788 	      if (debug_threads)
3789 		debug_printf ("Need step over [LWP %ld]? Not flagged\n",
 			      lwpid_of (thread));
3790 	    }
3791
3792 if (lwp->status_pending_p)
3793 {
3794 if (debug_threads)
3795 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3796 " status.\n",
3797 lwpid_of (thread));
3798 return 0;
3799 }
3800
3801 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3802 or we have. */
3803 pc = get_pc (lwp);
3804
3805 /* If the PC has changed since we stopped, then don't do anything,
3806 and let the breakpoint/tracepoint be hit. This happens if, for
3807 instance, GDB handled the decr_pc_after_break subtraction itself,
3808 GDB is OOL stepping this thread, or the user has issued a "jump"
3809 	 command, or poked the thread's registers herself.  */
3810 if (pc != lwp->stop_pc)
3811 {
3812 if (debug_threads)
3813 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3814 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3815 lwpid_of (thread),
3816 paddress (lwp->stop_pc), paddress (pc));
3817
3818 lwp->need_step_over = 0;
3819 return 0;
3820 }
3821
3822 saved_thread = current_thread;
3823 current_thread = thread;
3824
3825 /* We can only step over breakpoints we know about. */
3826 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3827 {
3828       /* Don't step over a breakpoint that GDB expects to hit, though.
3829 	 If the condition is being evaluated on the target's side and it
3830 	 evaluates to false, step over this breakpoint as well.  */
3831 if (gdb_breakpoint_here (pc)
3832 && gdb_condition_true_at_breakpoint (pc)
3833 && gdb_no_commands_at_breakpoint (pc))
3834 {
3835 if (debug_threads)
3836 debug_printf ("Need step over [LWP %ld]? yes, but found"
3837 " GDB breakpoint at 0x%s; skipping step over\n",
3838 lwpid_of (thread), paddress (pc));
3839
3840 current_thread = saved_thread;
3841 return 0;
3842 }
3843 else
3844 {
3845 if (debug_threads)
3846 debug_printf ("Need step over [LWP %ld]? yes, "
3847 "found breakpoint at 0x%s\n",
3848 lwpid_of (thread), paddress (pc));
3849
3850 /* We've found an lwp that needs stepping over --- return 1 so
3851 that find_inferior stops looking. */
3852 current_thread = saved_thread;
3853
3854 /* If the step over is cancelled, this is set again. */
3855 lwp->need_step_over = 0;
3856 return 1;
3857 }
3858 }
3859
3860 current_thread = saved_thread;
3861
3862 if (debug_threads)
3863 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3864 " at 0x%s\n",
3865 lwpid_of (thread), paddress (pc));
3866
3867 return 0;
3868 }
3869
3870 /* Start a step-over operation on LWP.  When LWP is stopped at a
3871    breakpoint, we need to move the breakpoint out of the way to make
3872    progress.  If we let other threads run while we do that, they may
3873    pass by the breakpoint location and miss hitting it.  To avoid
3874    that, a step-over momentarily stops all threads while LWP is
3875    single-stepped with the breakpoint temporarily uninserted from
3876    the inferior.  When the single-step finishes, we reinsert the
3877    breakpoint, and let all threads that are supposed to be running
3878    run again.
3879
3880 On targets that don't support hardware single-step, we don't
3881 currently support full software single-stepping. Instead, we only
3882 support stepping over the thread event breakpoint, by asking the
3883 low target where to place a reinsert breakpoint. Since this
3884 routine assumes the breakpoint being stepped over is a thread event
3885 breakpoint, it usually assumes the return address of the current
3886 function is a good enough place to set the reinsert breakpoint. */
3887
3888 static int
3889 start_step_over (struct lwp_info *lwp)
3890 {
3891 struct thread_info *thread = get_lwp_thread (lwp);
3892 struct thread_info *saved_thread;
3893 CORE_ADDR pc;
3894 int step;
3895
3896 if (debug_threads)
3897 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3898 lwpid_of (thread));
3899
3900 stop_all_lwps (1, lwp);
3901 gdb_assert (lwp->suspended == 0);
3902
3903 if (debug_threads)
3904 debug_printf ("Done stopping all threads for step-over.\n");
3905
3906 /* Note, we should always reach here with an already adjusted PC,
3907 either by GDB (if we're resuming due to GDB's request), or by our
3908 caller, if we just finished handling an internal breakpoint GDB
3909 shouldn't care about. */
3910 pc = get_pc (lwp);
3911
3912 saved_thread = current_thread;
3913 current_thread = thread;
3914
3915 lwp->bp_reinsert = pc;
3916 uninsert_breakpoints_at (pc);
3917 uninsert_fast_tracepoint_jumps_at (pc);
3918
3919 if (can_hardware_single_step ())
3920 {
3921 step = 1;
3922 }
3923 else
3924 {
3925 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3926 set_reinsert_breakpoint (raddr);
3927 step = 0;
3928 }
3929
3930 current_thread = saved_thread;
3931
3932 linux_resume_one_lwp (lwp, step, 0, NULL);
3933
3934 /* Require next event from this LWP. */
3935 step_over_bkpt = thread->entry.id;
3936 return 1;
3937 }
3938
3939 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3940 start_step_over, if still there, and delete any reinsert
3941    breakpoints we've set, on non-hardware-single-step targets.  */
3942
3943 static int
3944 finish_step_over (struct lwp_info *lwp)
3945 {
3946 if (lwp->bp_reinsert != 0)
3947 {
3948 if (debug_threads)
3949 debug_printf ("Finished step over.\n");
3950
3951 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3952 may be no breakpoint to reinsert there by now. */
3953 reinsert_breakpoints_at (lwp->bp_reinsert);
3954 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3955
3956 lwp->bp_reinsert = 0;
3957
3958 /* Delete any software-single-step reinsert breakpoints. No
3959 longer needed. We don't have to worry about other threads
3960 hitting this trap, and later not being able to explain it,
3961 because we were stepping over a breakpoint, and we hold all
3962 threads but LWP stopped while doing that. */
3963 if (!can_hardware_single_step ())
3964 delete_reinsert_breakpoints ();
3965
3966 step_over_bkpt = null_ptid;
3967 return 1;
3968 }
3969 else
3970 return 0;
3971 }
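
/* For illustration: the full step-over protocol implemented by
   start_step_over and finish_step_over, written as straight-line
   pseudo-code.  In reality the wait in the middle happens
   asynchronously through linux_wait_1; this sketch only shows the
   order of operations on a hardware single-step target.  */
#if 0
static void
example_step_over_protocol (struct lwp_info *lwp)
{
  stop_all_lwps (1, lwp);		  /* Freeze everyone else.  */
  uninsert_breakpoints_at (get_pc (lwp)); /* Lift the breakpoint.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL); /* Hardware single-step.  */
  /* ... wait until the step finishes (done in linux_wait_1) ...  */
  finish_step_over (lwp);		  /* Reinsert the breakpoint.  */
  unstop_all_lwps (0, lwp);		  /* Let the others run again.  */
}
#endif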
3972
3973 /* This function is called once per thread. We check the thread's resume
3974 request, which will tell us whether to resume, step, or leave the thread
3975 stopped; and what signal, if any, it should be sent.
3976
3977 For threads which we aren't explicitly told otherwise, we preserve
3978 the stepping flag; this is used for stepping over gdbserver-placed
3979 breakpoints.
3980
3981 If pending_flags was set in any thread, we queue any needed
3982 signals, since we won't actually resume. We already have a pending
3983 event to report, so we don't need to preserve any step requests;
3984 they should be re-issued if necessary. */
3985
3986 static int
3987 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3988 {
3989 struct thread_info *thread = (struct thread_info *) entry;
3990 struct lwp_info *lwp = get_thread_lwp (thread);
3991 int step;
3992 int leave_all_stopped = * (int *) arg;
3993 int leave_pending;
3994
3995 if (lwp->resume == NULL)
3996 return 0;
3997
3998 if (lwp->resume->kind == resume_stop)
3999 {
4000 if (debug_threads)
4001 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4002
4003 if (!lwp->stopped)
4004 {
4005 if (debug_threads)
4006 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4007
4008 /* Stop the thread, and wait for the event asynchronously,
4009 through the event loop. */
4010 send_sigstop (lwp);
4011 }
4012 else
4013 {
4014 if (debug_threads)
4015 debug_printf ("already stopped LWP %ld\n",
4016 lwpid_of (thread));
4017
4018 /* The LWP may have been stopped in an internal event that
4019 was not meant to be notified back to GDB (e.g., gdbserver
4020 breakpoint), so we should be reporting a stop event in
4021 this case too. */
4022
4023 /* If the thread already has a pending SIGSTOP, this is a
4024 no-op. Otherwise, something later will presumably resume
4025 the thread and this will cause it to cancel any pending
4026 operation, due to last_resume_kind == resume_stop. If
4027 the thread already has a pending status to report, we
4028 will still report it the next time we wait - see
4029 status_pending_p_callback. */
4030
4031 /* If we already have a pending signal to report, then
4032 there's no need to queue a SIGSTOP, as this means we're
4033 midway through moving the LWP out of the jumppad, and we
4034 will report the pending signal as soon as that is
4035 finished. */
4036 if (lwp->pending_signals_to_report == NULL)
4037 send_sigstop (lwp);
4038 }
4039
4040 /* For stop requests, we're done. */
4041 lwp->resume = NULL;
4042 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4043 return 0;
4044 }
4045
4046   /* If this thread, which is about to be resumed, has a pending status,
4047      then don't resume any threads - we can just report the pending
4048      status.  Make sure to queue any signals that would otherwise be
4049      sent.  In all-stop mode, we base this decision on whether *any*
4050 thread has a pending status. If there's a thread that needs the
4051 step-over-breakpoint dance, then don't resume any other thread
4052 but that particular one. */
4053 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4054
4055 if (!leave_pending)
4056 {
4057 if (debug_threads)
4058 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4059
4060 step = (lwp->resume->kind == resume_step);
4061 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4062 }
4063 else
4064 {
4065 if (debug_threads)
4066 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4067
4068 /* If we have a new signal, enqueue the signal. */
4069 if (lwp->resume->sig != 0)
4070 {
4071 struct pending_signals *p_sig;
4072 p_sig = xmalloc (sizeof (*p_sig));
4073 p_sig->prev = lwp->pending_signals;
4074 p_sig->signal = lwp->resume->sig;
4075 memset (&p_sig->info, 0, sizeof (siginfo_t));
4076
4077 /* If this is the same signal we were previously stopped by,
4078 make sure to queue its siginfo. We can ignore the return
4079 value of ptrace; if it fails, we'll skip
4080 PTRACE_SETSIGINFO. */
4081 if (WIFSTOPPED (lwp->last_status)
4082 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4083 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4084 &p_sig->info);
4085
4086 lwp->pending_signals = p_sig;
4087 }
4088 }
4089
4090 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4091 lwp->resume = NULL;
4092 return 0;
4093 }
4094
4095 static void
4096 linux_resume (struct thread_resume *resume_info, size_t n)
4097 {
4098 struct thread_resume_array array = { resume_info, n };
4099 struct thread_info *need_step_over = NULL;
4100 int any_pending;
4101 int leave_all_stopped;
4102
4103 if (debug_threads)
4104 {
4105 debug_enter ();
4106 debug_printf ("linux_resume:\n");
4107 }
4108
4109 find_inferior (&all_threads, linux_set_resume_request, &array);
4110
4111 /* If there is a thread which would otherwise be resumed, which has
4112 a pending status, then don't resume any threads - we can just
4113 report the pending status. Make sure to queue any signals that
4114 would otherwise be sent. In non-stop mode, we'll apply this
4115 logic to each thread individually. We consume all pending events
4116      before considering starting a step-over (in all-stop).  */
4117 any_pending = 0;
4118 if (!non_stop)
4119 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4120
4121 /* If there is a thread which would otherwise be resumed, which is
4122 stopped at a breakpoint that needs stepping over, then don't
4123 resume any threads - have it step over the breakpoint with all
4124 other threads stopped, then resume all threads again. Make sure
4125 to queue any signals that would otherwise be delivered or
4126 queued. */
4127 if (!any_pending && supports_breakpoints ())
4128 need_step_over
4129 = (struct thread_info *) find_inferior (&all_threads,
4130 need_step_over_p, NULL);
4131
4132 leave_all_stopped = (need_step_over != NULL || any_pending);
4133
4134 if (debug_threads)
4135 {
4136 if (need_step_over != NULL)
4137 debug_printf ("Not resuming all, need step over\n");
4138 else if (any_pending)
4139 debug_printf ("Not resuming, all-stop and found "
4140 "an LWP with pending status\n");
4141 else
4142 debug_printf ("Resuming, no pending status or step over needed\n");
4143 }
4144
4145 /* Even if we're leaving threads stopped, queue all signals we'd
4146 otherwise deliver. */
4147 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4148
4149 if (need_step_over)
4150 start_step_over (get_thread_lwp (need_step_over));
4151
4152 if (debug_threads)
4153 {
4154 debug_printf ("linux_resume done\n");
4155 debug_exit ();
4156 }
4157 }
4158
4159 /* This function is called once per thread. We check the thread's
4160 last resume request, which will tell us whether to resume, step, or
4161 leave the thread stopped. Any signal the client requested to be
4162 delivered has already been enqueued at this point.
4163
4164 If any thread that GDB wants running is stopped at an internal
4165 breakpoint that needs stepping over, we start a step-over operation
4166 on that particular thread, and leave all others stopped. */
4167
4168 static int
4169 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4170 {
4171 struct thread_info *thread = (struct thread_info *) entry;
4172 struct lwp_info *lwp = get_thread_lwp (thread);
4173 int step;
4174
4175 if (lwp == except)
4176 return 0;
4177
4178 if (debug_threads)
4179 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4180
4181 if (!lwp->stopped)
4182 {
4183 if (debug_threads)
4184 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4185 return 0;
4186 }
4187
4188 if (thread->last_resume_kind == resume_stop
4189 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4190 {
4191 if (debug_threads)
4192 	  debug_printf ("   client wants LWP %ld to remain stopped\n",
4193 lwpid_of (thread));
4194 return 0;
4195 }
4196
4197 if (lwp->status_pending_p)
4198 {
4199 if (debug_threads)
4200 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4201 lwpid_of (thread));
4202 return 0;
4203 }
4204
4205 gdb_assert (lwp->suspended >= 0);
4206
4207 if (lwp->suspended)
4208 {
4209 if (debug_threads)
4210 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4211 return 0;
4212 }
4213
4214 if (thread->last_resume_kind == resume_stop
4215 && lwp->pending_signals_to_report == NULL
4216 && lwp->collecting_fast_tracepoint == 0)
4217 {
4218       /* We haven't reported this LWP as stopped yet (otherwise, the
4219 	 last_status.kind check above would catch it, and we wouldn't
4220 	 reach here).  This LWP may have been momentarily paused by a
4221 	 stop_all_lwps call while handling, for example, another LWP's
4222 	 step-over.  In that case, the pending expected SIGSTOP signal
4223 that was queued at vCont;t handling time will have already
4224 been consumed by wait_for_sigstop, and so we need to requeue
4225 another one here. Note that if the LWP already has a SIGSTOP
4226 pending, this is a no-op. */
4227
4228 if (debug_threads)
4229 debug_printf ("Client wants LWP %ld to stop. "
4230 "Making sure it has a SIGSTOP pending\n",
4231 lwpid_of (thread));
4232
4233 send_sigstop (lwp);
4234 }
4235
4236 step = thread->last_resume_kind == resume_step;
4237 linux_resume_one_lwp (lwp, step, 0, NULL);
4238 return 0;
4239 }
4240
4241 static int
4242 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4243 {
4244 struct thread_info *thread = (struct thread_info *) entry;
4245 struct lwp_info *lwp = get_thread_lwp (thread);
4246
4247 if (lwp == except)
4248 return 0;
4249
4250 lwp->suspended--;
4251 gdb_assert (lwp->suspended >= 0);
4252
4253 return proceed_one_lwp (entry, except);
4254 }
4255
4256 /* When we finish a step-over, set threads running again. If there's
4257 another thread that may need a step-over, now's the time to start
4258 it. Eventually, we'll move all threads past their breakpoints. */
4259
4260 static void
4261 proceed_all_lwps (void)
4262 {
4263 struct thread_info *need_step_over;
4264
4265 /* If there is a thread which would otherwise be resumed, which is
4266 stopped at a breakpoint that needs stepping over, then don't
4267 resume any threads - have it step over the breakpoint with all
4268 other threads stopped, then resume all threads again. */
4269
4270 if (supports_breakpoints ())
4271 {
4272 need_step_over
4273 = (struct thread_info *) find_inferior (&all_threads,
4274 need_step_over_p, NULL);
4275
4276 if (need_step_over != NULL)
4277 {
4278 if (debug_threads)
4279 debug_printf ("proceed_all_lwps: found "
4280 "thread %ld needing a step-over\n",
4281 lwpid_of (need_step_over));
4282
4283 start_step_over (get_thread_lwp (need_step_over));
4284 return;
4285 }
4286 }
4287
4288 if (debug_threads)
4289 debug_printf ("Proceeding, no step-over needed\n");
4290
4291 find_inferior (&all_threads, proceed_one_lwp, NULL);
4292 }
4293
4294 /* Stopped LWPs that the client wanted to be running, that don't have
4295 pending statuses, are set to run again, except for EXCEPT, if not
4296 NULL. This undoes a stop_all_lwps call. */
4297
4298 static void
4299 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4300 {
4301 if (debug_threads)
4302 {
4303 debug_enter ();
4304 if (except)
4305 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4306 lwpid_of (get_lwp_thread (except)));
4307 else
4308 debug_printf ("unstopping all lwps\n");
4309 }
4310
4311 if (unsuspend)
4312 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4313 else
4314 find_inferior (&all_threads, proceed_one_lwp, except);
4315
4316 if (debug_threads)
4317 {
4318 debug_printf ("unstop_all_lwps done\n");
4319 debug_exit ();
4320 }
4321 }
4322
4323
4324 #ifdef HAVE_LINUX_REGSETS
4325
4326 #define use_linux_regsets 1
4327
4328 /* Returns true if REGSET has been disabled. */
4329
4330 static int
4331 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4332 {
4333 return (info->disabled_regsets != NULL
4334 && info->disabled_regsets[regset - info->regsets]);
4335 }
4336
4337 /* Disable REGSET. */
4338
4339 static void
4340 disable_regset (struct regsets_info *info, struct regset_info *regset)
4341 {
4342 int dr_offset;
4343
4344 dr_offset = regset - info->regsets;
4345 if (info->disabled_regsets == NULL)
4346 info->disabled_regsets = xcalloc (1, info->num_regsets);
4347 info->disabled_regsets[dr_offset] = 1;
4348 }
4349
4350 static int
4351 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4352 struct regcache *regcache)
4353 {
4354 struct regset_info *regset;
4355 int saw_general_regs = 0;
4356 int pid;
4357 struct iovec iov;
4358
4359 pid = lwpid_of (current_thread);
4360 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4361 {
4362 void *buf, *data;
4363 int nt_type, res;
4364
4365 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4366 continue;
4367
4368 buf = xmalloc (regset->size);
4369
4370 nt_type = regset->nt_type;
4371 if (nt_type)
4372 {
4373 iov.iov_base = buf;
4374 iov.iov_len = regset->size;
4375 data = (void *) &iov;
4376 }
4377 else
4378 data = buf;
4379
4380 #ifndef __sparc__
4381 res = ptrace (regset->get_request, pid,
4382 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4383 #else
4384 res = ptrace (regset->get_request, pid, data, nt_type);
4385 #endif
4386 if (res < 0)
4387 {
4388 if (errno == EIO)
4389 {
4390 /* If we get EIO on a regset, do not try it again for
4391 this process mode. */
4392 disable_regset (regsets_info, regset);
4393 }
4394 else if (errno == ENODATA)
4395 {
4396 /* ENODATA may be returned if the regset is currently
4397 not "active". This can happen in normal operation,
4398 so suppress the warning in this case. */
4399 }
4400 else
4401 {
4402 char s[256];
4403 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4404 pid);
4405 perror (s);
4406 }
4407 }
4408 else
4409 {
4410 if (regset->type == GENERAL_REGS)
4411 saw_general_regs = 1;
4412 regset->store_function (regcache, buf);
4413 }
4414 free (buf);
4415 }
4416 if (saw_general_regs)
4417 return 0;
4418 else
4419 return 1;
4420 }
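
/* For illustration: a minimal stand-alone use of the PTRACE_GETREGSET
   + struct iovec pattern from the loop above, fetching the
   general-purpose regset (NT_PRSTATUS) of a stopped tracee.  Assumes
   a Linux 2.6.34+ kernel and an already ptrace-stopped PID; error
   handling is trimmed.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
example_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel trims iov.iov_len to the number of bytes
     actually written.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) (long) NT_PRSTATUS, &iov);
}
#endif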
4421
4422 static int
4423 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4424 struct regcache *regcache)
4425 {
4426 struct regset_info *regset;
4427 int saw_general_regs = 0;
4428 int pid;
4429 struct iovec iov;
4430
4431 pid = lwpid_of (current_thread);
4432 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4433 {
4434 void *buf, *data;
4435 int nt_type, res;
4436
4437 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4438 || regset->fill_function == NULL)
4439 continue;
4440
4441 buf = xmalloc (regset->size);
4442
4443 /* First fill the buffer with the current register set contents,
4444 in case there are any items in the kernel's regset that are
4445 not in gdbserver's regcache. */
4446
4447 nt_type = regset->nt_type;
4448 if (nt_type)
4449 {
4450 iov.iov_base = buf;
4451 iov.iov_len = regset->size;
4452 data = (void *) &iov;
4453 }
4454 else
4455 data = buf;
4456
4457 #ifndef __sparc__
4458 res = ptrace (regset->get_request, pid,
4459 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4460 #else
4461 res = ptrace (regset->get_request, pid, data, nt_type);
4462 #endif
4463
4464 if (res == 0)
4465 {
4466 /* Then overlay our cached registers on that. */
4467 regset->fill_function (regcache, buf);
4468
4469 /* Only now do we write the register set. */
4470 #ifndef __sparc__
4471 res = ptrace (regset->set_request, pid,
4472 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4473 #else
4474 res = ptrace (regset->set_request, pid, data, nt_type);
4475 #endif
4476 }
4477
4478 if (res < 0)
4479 {
4480 if (errno == EIO)
4481 {
4482 /* If we get EIO on a regset, do not try it again for
4483 this process mode. */
4484 disable_regset (regsets_info, regset);
4485 }
4486 else if (errno == ESRCH)
4487 {
4488 /* At this point, ESRCH should mean the process is
4489 already gone, in which case we simply ignore attempts
4490 to change its registers. See also the related
4491 comment in linux_resume_one_lwp. */
4492 free (buf);
4493 return 0;
4494 }
4495 else
4496 {
4497 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4498 }
4499 }
4500 else if (regset->type == GENERAL_REGS)
4501 saw_general_regs = 1;
4502 free (buf);
4503 }
4504 if (saw_general_regs)
4505 return 0;
4506 else
4507 return 1;
4508 }
4509
4510 #else /* !HAVE_LINUX_REGSETS */
4511
4512 #define use_linux_regsets 0
4513 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4514 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4515
4516 #endif
4517
4518 /* Return 1 if register REGNO is supported by one of the regset ptrace
4519 calls or 0 if it has to be transferred individually. */
4520
4521 static int
4522 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4523 {
4524 unsigned char mask = 1 << (regno % 8);
4525 size_t index = regno / 8;
4526
4527 return (use_linux_regsets
4528 && (regs_info->regset_bitmap == NULL
4529 || (regs_info->regset_bitmap[index] & mask) != 0));
4530 }
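
/* Worked example of the bitmap lookup above: for REGNO = 13 we get
   index = 13 / 8 = 1 and mask = 1 << (13 % 8) = 1 << 5, so bit 5 of
   the second bitmap byte decides whether register 13 travels via a
   regset or must be transferred individually.  */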
4531
4532 #ifdef HAVE_LINUX_USRREGS
4533
4534 int
4535 register_addr (const struct usrregs_info *usrregs, int regnum)
4536 {
4537 int addr;
4538
4539 if (regnum < 0 || regnum >= usrregs->num_regs)
4540 error ("Invalid register number %d.", regnum);
4541
4542 addr = usrregs->regmap[regnum];
4543
4544 return addr;
4545 }
4546
4547 /* Fetch one register. */
4548 static void
4549 fetch_register (const struct usrregs_info *usrregs,
4550 struct regcache *regcache, int regno)
4551 {
4552 CORE_ADDR regaddr;
4553 int i, size;
4554 char *buf;
4555 int pid;
4556
4557 if (regno >= usrregs->num_regs)
4558 return;
4559 if ((*the_low_target.cannot_fetch_register) (regno))
4560 return;
4561
4562 regaddr = register_addr (usrregs, regno);
4563 if (regaddr == -1)
4564 return;
4565
4566 size = ((register_size (regcache->tdesc, regno)
4567 + sizeof (PTRACE_XFER_TYPE) - 1)
4568 & -sizeof (PTRACE_XFER_TYPE));
4569 buf = alloca (size);
4570
4571 pid = lwpid_of (current_thread);
4572 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4573 {
4574 errno = 0;
4575 *(PTRACE_XFER_TYPE *) (buf + i) =
4576 ptrace (PTRACE_PEEKUSER, pid,
4577 /* Coerce to a uintptr_t first to avoid potential gcc warning
4578 of coercing an 8 byte integer to a 4 byte pointer. */
4579 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4580 regaddr += sizeof (PTRACE_XFER_TYPE);
4581 if (errno != 0)
4582 error ("reading register %d: %s", regno, strerror (errno));
4583 }
4584
4585 if (the_low_target.supply_ptrace_register)
4586 the_low_target.supply_ptrace_register (regcache, regno, buf);
4587 else
4588 supply_register (regcache, regno, buf);
4589 }
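
/* Worked example of the size rounding above, with an 8-byte
   PTRACE_XFER_TYPE: a 10-byte register (e.g. an x87 floating-point
   register) yields size = (10 + 7) & -8 = 16, i.e. two
   PTRACE_PEEKUSER transfers.  */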
4590
4591 /* Store one register. */
4592 static void
4593 store_register (const struct usrregs_info *usrregs,
4594 struct regcache *regcache, int regno)
4595 {
4596 CORE_ADDR regaddr;
4597 int i, size;
4598 char *buf;
4599 int pid;
4600
4601 if (regno >= usrregs->num_regs)
4602 return;
4603 if ((*the_low_target.cannot_store_register) (regno))
4604 return;
4605
4606 regaddr = register_addr (usrregs, regno);
4607 if (regaddr == -1)
4608 return;
4609
4610 size = ((register_size (regcache->tdesc, regno)
4611 + sizeof (PTRACE_XFER_TYPE) - 1)
4612 & -sizeof (PTRACE_XFER_TYPE));
4613 buf = alloca (size);
4614 memset (buf, 0, size);
4615
4616 if (the_low_target.collect_ptrace_register)
4617 the_low_target.collect_ptrace_register (regcache, regno, buf);
4618 else
4619 collect_register (regcache, regno, buf);
4620
4621 pid = lwpid_of (current_thread);
4622 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4623 {
4624 errno = 0;
4625 ptrace (PTRACE_POKEUSER, pid,
4626 /* Coerce to a uintptr_t first to avoid potential gcc warning
4627 about coercing an 8 byte integer to a 4 byte pointer. */
4628 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4629 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4630 if (errno != 0)
4631 {
4632 /* At this point, ESRCH should mean the process is
4633 already gone, in which case we simply ignore attempts
4634 to change its registers. See also the related
4635 comment in linux_resume_one_lwp. */
4636 if (errno == ESRCH)
4637 return;
4638
4639 if ((*the_low_target.cannot_store_register) (regno) == 0)
4640 error ("writing register %d: %s", regno, strerror (errno));
4641 }
4642 regaddr += sizeof (PTRACE_XFER_TYPE);
4643 }
4644 }
4645
4646 /* Fetch all registers, or just one, from the child process.
4647 If REGNO is -1, do this for all registers, skipping any that are
4648 assumed to have been retrieved by regsets_fetch_inferior_registers,
4649 unless ALL is non-zero.
4650 Otherwise, REGNO specifies which register (so we can save time). */
4651 static void
4652 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4653 struct regcache *regcache, int regno, int all)
4654 {
4655 struct usrregs_info *usr = regs_info->usrregs;
4656
4657 if (regno == -1)
4658 {
4659 for (regno = 0; regno < usr->num_regs; regno++)
4660 if (all || !linux_register_in_regsets (regs_info, regno))
4661 fetch_register (usr, regcache, regno);
4662 }
4663 else
4664 fetch_register (usr, regcache, regno);
4665 }
4666
4667 /* Store our register values back into the inferior.
4668 If REGNO is -1, do this for all registers, skipping any that are
4669 assumed to have been saved by regsets_store_inferior_registers,
4670 unless ALL is non-zero.
4671 Otherwise, REGNO specifies which register (so we can save time). */
4672 static void
4673 usr_store_inferior_registers (const struct regs_info *regs_info,
4674 struct regcache *regcache, int regno, int all)
4675 {
4676 struct usrregs_info *usr = regs_info->usrregs;
4677
4678 if (regno == -1)
4679 {
4680 for (regno = 0; regno < usr->num_regs; regno++)
4681 if (all || !linux_register_in_regsets (regs_info, regno))
4682 store_register (usr, regcache, regno);
4683 }
4684 else
4685 store_register (usr, regcache, regno);
4686 }
4687
4688 #else /* !HAVE_LINUX_USRREGS */
4689
4690 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4691 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4692
4693 #endif
4694
4695
4696 void
4697 linux_fetch_registers (struct regcache *regcache, int regno)
4698 {
4699 int use_regsets;
4700 int all = 0;
4701 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4702
4703 if (regno == -1)
4704 {
4705 if (the_low_target.fetch_register != NULL
4706 && regs_info->usrregs != NULL)
4707 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4708 (*the_low_target.fetch_register) (regcache, regno);
4709
4710 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4711 if (regs_info->usrregs != NULL)
4712 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4713 }
4714 else
4715 {
4716 if (the_low_target.fetch_register != NULL
4717 && (*the_low_target.fetch_register) (regcache, regno))
4718 return;
4719
4720 use_regsets = linux_register_in_regsets (regs_info, regno);
4721 if (use_regsets)
4722 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4723 regcache);
4724 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4725 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4726 }
4727 }
4728
4729 void
4730 linux_store_registers (struct regcache *regcache, int regno)
4731 {
4732 int use_regsets;
4733 int all = 0;
4734 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4735
4736 if (regno == -1)
4737 {
4738 all = regsets_store_inferior_registers (regs_info->regsets_info,
4739 regcache);
4740 if (regs_info->usrregs != NULL)
4741 usr_store_inferior_registers (regs_info, regcache, regno, all);
4742 }
4743 else
4744 {
4745 use_regsets = linux_register_in_regsets (regs_info, regno);
4746 if (use_regsets)
4747 all = regsets_store_inferior_registers (regs_info->regsets_info,
4748 regcache);
4749 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4750 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4751 }
4752 }
4753
4754
4755 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4756 to debugger memory starting at MYADDR. */
4757
4758 static int
4759 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4760 {
4761 int pid = lwpid_of (current_thread);
4762 register PTRACE_XFER_TYPE *buffer;
4763 register CORE_ADDR addr;
4764 register int count;
4765 char filename[64];
4766 register int i;
4767 int ret;
4768 int fd;
4769
4770 /* Try using /proc. Don't bother for one word. */
4771 if (len >= 3 * sizeof (long))
4772 {
4773 int bytes;
4774
4775 /* We could keep this file open and cache it - possibly one per
4776 thread. That requires some juggling, but is even faster. */
4777 sprintf (filename, "/proc/%d/mem", pid);
4778 fd = open (filename, O_RDONLY | O_LARGEFILE);
4779 if (fd == -1)
4780 goto no_proc;
4781
4782 /* If pread64 is available, use it. It's faster if the kernel
4783 supports it (only one syscall), and it's 64-bit safe even on
4784 32-bit platforms (for instance, SPARC debugging a SPARC64
4785 application). */
4786 #ifdef HAVE_PREAD64
4787 bytes = pread64 (fd, myaddr, len, memaddr);
4788 #else
4789 bytes = -1;
4790 if (lseek (fd, memaddr, SEEK_SET) != -1)
4791 bytes = read (fd, myaddr, len);
4792 #endif
4793
4794 close (fd);
4795 if (bytes == len)
4796 return 0;
4797
4798 /* Some data was read, we'll try to get the rest with ptrace. */
4799 if (bytes > 0)
4800 {
4801 memaddr += bytes;
4802 myaddr += bytes;
4803 len -= bytes;
4804 }
4805 }
4806
4807 no_proc:
4808 /* Round starting address down to longword boundary. */
4809 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4810 /* Round ending address up; get number of longwords that makes. */
4811 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4812 / sizeof (PTRACE_XFER_TYPE));
4813 /* Allocate buffer of that many longwords. */
4814 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4815
4816   /* Read all the longwords.  */
4817 errno = 0;
4818 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4819 {
4820 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4821 about coercing an 8 byte integer to a 4 byte pointer. */
4822 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4823 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4824 (PTRACE_TYPE_ARG4) 0);
4825 if (errno)
4826 break;
4827 }
4828 ret = errno;
4829
4830 /* Copy appropriate bytes out of the buffer. */
4831 if (i > 0)
4832 {
4833 i *= sizeof (PTRACE_XFER_TYPE);
4834 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4835 memcpy (myaddr,
4836 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4837 i < len ? i : len);
4838 }
4839
4840 return ret;
4841 }
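
/* For illustration: the /proc/PID/mem fast path above, reduced to a
   stand-alone helper.  Plain pread(2) is used here; that is what the
   HAVE_PREAD64 / lseek+read selection above boils down to on modern
   systems.  Sketch only - it assumes the tracee is already
   ptrace-stopped, as the kernel otherwise refuses the access.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
example_read_tracee_mem (pid_t pid, unsigned long addr,
			 void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif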
4842
4843 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4844 memory at MEMADDR. On failure (cannot write to the inferior)
4845 returns the value of errno. Always succeeds if LEN is zero. */
4846
4847 static int
4848 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4849 {
4850 register int i;
4851 /* Round starting address down to longword boundary. */
4852 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4853 /* Round ending address up; get number of longwords that makes. */
4854 register int count
4855 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4856 / sizeof (PTRACE_XFER_TYPE);
4857
4858 /* Allocate buffer of that many longwords. */
4859 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4860 alloca (count * sizeof (PTRACE_XFER_TYPE));
4861
4862 int pid = lwpid_of (current_thread);
4863
4864 if (len == 0)
4865 {
4866 /* Zero length write always succeeds. */
4867 return 0;
4868 }
4869
4870 if (debug_threads)
4871 {
4872       /* Dump up to four bytes.  Copy at most LEN bytes into VAL, so
 	 we never read past the end of MYADDR.  */
4873       unsigned int val = 0;
4874 
4875       memcpy (&val, myaddr, len < 4 ? len : 4);
4880 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4881 val, (long)memaddr);
4882 }
4883
4884 /* Fill start and end extra bytes of buffer with existing memory data. */
4885
4886 errno = 0;
4887 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4888 about coercing an 8 byte integer to a 4 byte pointer. */
4889 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4890 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4891 (PTRACE_TYPE_ARG4) 0);
4892 if (errno)
4893 return errno;
4894
4895 if (count > 1)
4896 {
4897 errno = 0;
4898 buffer[count - 1]
4899 = ptrace (PTRACE_PEEKTEXT, pid,
4900 /* Coerce to a uintptr_t first to avoid potential gcc warning
4901 about coercing an 8 byte integer to a 4 byte pointer. */
4902 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4903 * sizeof (PTRACE_XFER_TYPE)),
4904 (PTRACE_TYPE_ARG4) 0);
4905 if (errno)
4906 return errno;
4907 }
4908
4909 /* Copy data to be written over corresponding part of buffer. */
4910
4911 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4912 myaddr, len);
4913
4914 /* Write the entire buffer. */
4915
4916 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4917 {
4918 errno = 0;
4919 ptrace (PTRACE_POKETEXT, pid,
4920 /* Coerce to a uintptr_t first to avoid potential gcc warning
4921 about coercing an 8 byte integer to a 4 byte pointer. */
4922 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4923 (PTRACE_TYPE_ARG4) buffer[i]);
4924 if (errno)
4925 return errno;
4926 }
4927
4928 return 0;
4929 }
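
/* Worked example of the rounding above, assuming an 8-byte
   PTRACE_XFER_TYPE: for memaddr = 0x1003 and len = 6, ADDR rounds
   down to 0x1000 and count = ((0x1009 - 0x1000) + 7) / 8 = 2, so two
   words are peeked, patched in the middle, and poked back.  */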
4930
4931 static void
4932 linux_look_up_symbols (void)
4933 {
4934 #ifdef USE_THREAD_DB
4935 struct process_info *proc = current_process ();
4936
4937 if (proc->priv->thread_db != NULL)
4938 return;
4939
4940 /* If the kernel supports tracing clones, then we don't need to
4941 use the magic thread event breakpoint to learn about
4942 threads. */
4943 thread_db_init (!linux_supports_traceclone ());
4944 #endif
4945 }
4946
4947 static void
4948 linux_request_interrupt (void)
4949 {
4950 extern unsigned long signal_pid;
4951
4952 /* Send a SIGINT to the process group. This acts just like the user
4953 typed a ^C on the controlling terminal. */
4954 kill (-signal_pid, SIGINT);
4955 }
4956
4957 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4958 to debugger memory starting at MYADDR. */
4959
4960 static int
4961 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4962 {
4963 char filename[PATH_MAX];
4964 int fd, n;
4965 int pid = lwpid_of (current_thread);
4966
4967 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4968
4969 fd = open (filename, O_RDONLY);
4970 if (fd < 0)
4971 return -1;
4972
4973 if (offset != (CORE_ADDR) 0
4974 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4975 n = -1;
4976 else
4977 n = read (fd, myaddr, len);
4978
4979 close (fd);
4980
4981 return n;
4982 }
4983
4984 /* These breakpoint and watchpoint related wrapper functions simply
4985 pass on the function call if the target has registered a
4986 corresponding function. */
4987
4988 static int
4989 linux_supports_z_point_type (char z_type)
4990 {
4991 return (the_low_target.supports_z_point_type != NULL
4992 && the_low_target.supports_z_point_type (z_type));
4993 }
4994
4995 static int
4996 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4997 int size, struct raw_breakpoint *bp)
4998 {
4999 if (the_low_target.insert_point != NULL)
5000 return the_low_target.insert_point (type, addr, size, bp);
5001 else
5002 /* Unsupported (see target.h). */
5003 return 1;
5004 }
5005
5006 static int
5007 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5008 int size, struct raw_breakpoint *bp)
5009 {
5010 if (the_low_target.remove_point != NULL)
5011 return the_low_target.remove_point (type, addr, size, bp);
5012 else
5013 /* Unsupported (see target.h). */
5014 return 1;
5015 }
5016
5017 /* Implement the to_stopped_by_sw_breakpoint target_ops
5018 method. */
5019
5020 static int
5021 linux_stopped_by_sw_breakpoint (void)
5022 {
5023 struct lwp_info *lwp = get_thread_lwp (current_thread);
5024
5025 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5026 }
5027
5028 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5029 method. */
5030
5031 static int
5032 linux_supports_stopped_by_sw_breakpoint (void)
5033 {
5034 return USE_SIGTRAP_SIGINFO;
5035 }
5036
5037 /* Implement the to_stopped_by_hw_breakpoint target_ops
5038 method. */
5039
5040 static int
5041 linux_stopped_by_hw_breakpoint (void)
5042 {
5043 struct lwp_info *lwp = get_thread_lwp (current_thread);
5044
5045 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5046 }
5047
5048 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5049 method. */
5050
5051 static int
5052 linux_supports_stopped_by_hw_breakpoint (void)
5053 {
5054 return USE_SIGTRAP_SIGINFO;
5055 }
5056
5057 static int
5058 linux_stopped_by_watchpoint (void)
5059 {
5060 struct lwp_info *lwp = get_thread_lwp (current_thread);
5061
5062 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5063 }
5064
5065 static CORE_ADDR
5066 linux_stopped_data_address (void)
5067 {
5068 struct lwp_info *lwp = get_thread_lwp (current_thread);
5069
5070 return lwp->stopped_data_address;
5071 }
5072
5073 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5074 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5075 && defined(PT_TEXT_END_ADDR)
5076
5077 /* This is only used for targets that define PT_TEXT_ADDR,
5078 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5079 the target has different ways of acquiring this information, like
5080 loadmaps. */
5081
5082 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5083 to tell gdb about. */
5084
5085 static int
5086 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5087 {
5088 unsigned long text, text_end, data;
5089 int pid = lwpid_of (get_thread_lwp (current_thread));
5090
5091 errno = 0;
5092
5093 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5094 (PTRACE_TYPE_ARG4) 0);
5095 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5096 (PTRACE_TYPE_ARG4) 0);
5097 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5098 (PTRACE_TYPE_ARG4) 0);
5099
5100 if (errno == 0)
5101 {
5102 /* Both text and data offsets produced at compile-time (and so
5103 used by gdb) are relative to the beginning of the program,
5104 with the data segment immediately following the text segment.
5105 However, the actual runtime layout in memory may put the data
5106 somewhere else, so when we send gdb a data base-address, we
5107 use the real data base address and subtract the compile-time
5108 data base-address from it (which is just the length of the
5109 text segment). BSS immediately follows data in both
5110 cases. */
5111 *text_p = text;
5112 *data_p = data - (text_end - text);
5113
5114 return 1;
5115 }
5116 return 0;
5117 }
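
/* Worked example of the adjustment above: with text = 0x80000,
   text_end = 0x90000 and data = 0xa0000, we report *data_p =
   0xa0000 - 0x10000 = 0x90000.  Adding gdb's compile-time data
   offset (which already includes the 0x10000 of text) to that base
   then lands on the correct runtime address.  */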
5118 #endif
5119
5120 static int
5121 linux_qxfer_osdata (const char *annex,
5122 unsigned char *readbuf, unsigned const char *writebuf,
5123 CORE_ADDR offset, int len)
5124 {
5125 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5126 }
5127
5128 /* Convert a native/host siginfo object into/from the siginfo in the
5129    layout of the inferior's architecture.  */
5130
5131 static void
5132 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5133 {
5134 int done = 0;
5135
5136 if (the_low_target.siginfo_fixup != NULL)
5137 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5138
5139 /* If there was no callback, or the callback didn't do anything,
5140 then just do a straight memcpy. */
5141 if (!done)
5142 {
5143 if (direction == 1)
5144 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5145 else
5146 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5147 }
5148 }
5149
5150 static int
5151 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5152 unsigned const char *writebuf, CORE_ADDR offset, int len)
5153 {
5154 int pid;
5155 siginfo_t siginfo;
5156 char inf_siginfo[sizeof (siginfo_t)];
5157
5158 if (current_thread == NULL)
5159 return -1;
5160
5161 pid = lwpid_of (current_thread);
5162
5163 if (debug_threads)
5164 debug_printf ("%s siginfo for lwp %d.\n",
5165 readbuf != NULL ? "Reading" : "Writing",
5166 pid);
5167
5168 if (offset >= sizeof (siginfo))
5169 return -1;
5170
5171 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5172 return -1;
5173
5174 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5175 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5176 inferior with a 64-bit GDBSERVER should look the same as debugging it
5177 with a 32-bit GDBSERVER, we need to convert it. */
5178 siginfo_fixup (&siginfo, inf_siginfo, 0);
5179
5180 if (offset + len > sizeof (siginfo))
5181 len = sizeof (siginfo) - offset;
5182
5183 if (readbuf != NULL)
5184 memcpy (readbuf, inf_siginfo + offset, len);
5185 else
5186 {
5187 memcpy (inf_siginfo + offset, writebuf, len);
5188
5189 /* Convert back to ptrace layout before flushing it out. */
5190 siginfo_fixup (&siginfo, inf_siginfo, 1);
5191
5192 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5193 return -1;
5194 }
5195
5196 return len;
5197 }
5198
5199 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5200    it lets us notice when children change state; and it acts as the
5201    handler for the sigsuspend in my_waitpid.  */
5202
5203 static void
5204 sigchld_handler (int signo)
5205 {
5206 int old_errno = errno;
5207
5208 if (debug_threads)
5209 {
5210 do
5211 {
5212 /* fprintf is not async-signal-safe, so call write
5213 directly. */
5214 if (write (2, "sigchld_handler\n",
5215 sizeof ("sigchld_handler\n") - 1) < 0)
5216 break; /* just ignore */
5217 } while (0);
5218 }
5219
5220 if (target_is_async_p ())
5221 async_file_mark (); /* trigger a linux_wait */
5222
5223 errno = old_errno;
5224 }
5225
5226 static int
5227 linux_supports_non_stop (void)
5228 {
5229 return 1;
5230 }
5231
5232 static int
5233 linux_async (int enable)
5234 {
5235 int previous = target_is_async_p ();
5236
5237 if (debug_threads)
5238 debug_printf ("linux_async (%d), previous=%d\n",
5239 enable, previous);
5240
5241 if (previous != enable)
5242 {
5243 sigset_t mask;
5244 sigemptyset (&mask);
5245 sigaddset (&mask, SIGCHLD);
5246
5247 sigprocmask (SIG_BLOCK, &mask, NULL);
5248
5249 if (enable)
5250 {
5251 if (pipe (linux_event_pipe) == -1)
5252 {
5253 linux_event_pipe[0] = -1;
5254 linux_event_pipe[1] = -1;
5255 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5256
5257 warning ("creating event pipe failed.");
5258 return previous;
5259 }
5260
5261 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5262 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5263
5264 /* Register the event loop handler. */
5265 add_file_handler (linux_event_pipe[0],
5266 handle_target_event, NULL);
5267
5268 /* Always trigger a linux_wait. */
5269 async_file_mark ();
5270 }
5271 else
5272 {
5273 delete_file_handler (linux_event_pipe[0]);
5274
5275 close (linux_event_pipe[0]);
5276 close (linux_event_pipe[1]);
5277 linux_event_pipe[0] = -1;
5278 linux_event_pipe[1] = -1;
5279 }
5280
5281 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5282 }
5283
5284 return previous;
5285 }
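
/* For illustration: the self-pipe pattern that linux_async wires up,
   in isolation.  A signal handler may call very little of libc, so it
   just writes one byte to a non-blocking pipe; the event loop watches
   the read end and wakes up.  Hypothetical stand-alone names,
   mirroring async_file_mark and the linux_event_pipe setup above.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_pipe[2];

/* Called from the SIGCHLD handler; write(2) is async-signal-safe.
   A full pipe just means a wake-up is already queued.  */
static void
example_mark (void)
{
  char c = '+';

  (void) write (example_pipe[1], &c, 1);
}

static int
example_init (void)
{
  if (pipe (example_pipe) != 0)
    return -1;
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif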
5286
5287 static int
5288 linux_start_non_stop (int nonstop)
5289 {
5290 /* Register or unregister from event-loop accordingly. */
5291 linux_async (nonstop);
5292
5293 if (target_is_async_p () != (nonstop != 0))
5294 return -1;
5295
5296 return 0;
5297 }
5298
5299 static int
5300 linux_supports_multi_process (void)
5301 {
5302 return 1;
5303 }
5304
5305 static int
5306 linux_supports_disable_randomization (void)
5307 {
5308 #ifdef HAVE_PERSONALITY
5309 return 1;
5310 #else
5311 return 0;
5312 #endif
5313 }
5314
5315 static int
5316 linux_supports_agent (void)
5317 {
5318 return 1;
5319 }
5320
5321 static int
5322 linux_supports_range_stepping (void)
5323 {
5324 if (*the_low_target.supports_range_stepping == NULL)
5325 return 0;
5326
5327 return (*the_low_target.supports_range_stepping) ();
5328 }
5329
5330 /* Enumerate spufs IDs for process PID. */
5331 static int
5332 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5333 {
5334 int pos = 0;
5335 int written = 0;
5336 char path[128];
5337 DIR *dir;
5338 struct dirent *entry;
5339
5340 sprintf (path, "/proc/%ld/fd", pid);
5341 dir = opendir (path);
5342 if (!dir)
5343 return -1;
5344
5345 rewinddir (dir);
5346 while ((entry = readdir (dir)) != NULL)
5347 {
5348 struct stat st;
5349 struct statfs stfs;
5350 int fd;
5351
5352 fd = atoi (entry->d_name);
5353 if (!fd)
5354 continue;
5355
5356 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5357 if (stat (path, &st) != 0)
5358 continue;
5359 if (!S_ISDIR (st.st_mode))
5360 continue;
5361
5362 if (statfs (path, &stfs) != 0)
5363 continue;
5364 if (stfs.f_type != SPUFS_MAGIC)
5365 continue;
5366
5367 if (pos >= offset && pos + 4 <= offset + len)
5368 {
5369 *(unsigned int *)(buf + pos - offset) = fd;
5370 written += 4;
5371 }
5372 pos += 4;
5373 }
5374
5375 closedir (dir);
5376 return written;
5377 }
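
/* For illustration: the filesystem-type probe used above, reduced to
   a predicate.  statfs(2) fills f_type with a per-filesystem magic
   number; spufs contexts appear under /proc/PID/fd as directories
   whose backing filesystem reports SPUFS_MAGIC.  */
#if 0
#include <sys/vfs.h>

static int
example_is_spufs_dir (const char *path)
{
  struct statfs stfs;

  return statfs (path, &stfs) == 0 && stfs.f_type == SPUFS_MAGIC;
}
#endif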
5378
5379 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5380 object type, using the /proc file system. */
5381 static int
5382 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5383 unsigned const char *writebuf,
5384 CORE_ADDR offset, int len)
5385 {
5386 long pid = lwpid_of (current_thread);
5387 char buf[128];
5388 int fd = 0;
5389 int ret = 0;
5390
5391 if (!writebuf && !readbuf)
5392 return -1;
5393
5394 if (!*annex)
5395 {
5396 if (!readbuf)
5397 return -1;
5398 else
5399 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5400 }
5401
5402 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5403   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5404 if (fd <= 0)
5405 return -1;
5406
5407 if (offset != 0
5408 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5409 {
5410 close (fd);
5411 return 0;
5412 }
5413
5414 if (writebuf)
5415 ret = write (fd, writebuf, (size_t) len);
5416 else
5417 ret = read (fd, readbuf, (size_t) len);
5418
5419 close (fd);
5420 return ret;
5421 }
5422
5423 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5424 struct target_loadseg
5425 {
5426 /* Core address to which the segment is mapped. */
5427 Elf32_Addr addr;
5428 /* VMA recorded in the program header. */
5429 Elf32_Addr p_vaddr;
5430 /* Size of this segment in memory. */
5431 Elf32_Word p_memsz;
5432 };
5433
5434 # if defined PT_GETDSBT
5435 struct target_loadmap
5436 {
5437 /* Protocol version number, must be zero. */
5438 Elf32_Word version;
5439 /* Pointer to the DSBT table, its size, and the DSBT index. */
5440 unsigned *dsbt_table;
5441 unsigned dsbt_size, dsbt_index;
5442 /* Number of segments in this map. */
5443 Elf32_Word nsegs;
5444 /* The actual memory map. */
5445 struct target_loadseg segs[/*nsegs*/];
5446 };
5447 # define LINUX_LOADMAP PT_GETDSBT
5448 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5449 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5450 # else
5451 struct target_loadmap
5452 {
5453 /* Protocol version number, must be zero. */
5454 Elf32_Half version;
5455 /* Number of segments in this map. */
5456 Elf32_Half nsegs;
5457 /* The actual memory map. */
5458 struct target_loadseg segs[/*nsegs*/];
5459 };
5460 # define LINUX_LOADMAP PTRACE_GETFDPIC
5461 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5462 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5463 # endif
5464
5465 static int
5466 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5467 unsigned char *myaddr, unsigned int len)
5468 {
5469 int pid = lwpid_of (current_thread);
5470 int addr = -1;
5471 struct target_loadmap *data = NULL;
5472 unsigned int actual_length, copy_length;
5473
5474 if (strcmp (annex, "exec") == 0)
5475 addr = (int) LINUX_LOADMAP_EXEC;
5476 else if (strcmp (annex, "interp") == 0)
5477 addr = (int) LINUX_LOADMAP_INTERP;
5478 else
5479 return -1;
5480
5481 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5482 return -1;
5483
5484 if (data == NULL)
5485 return -1;
5486
5487 actual_length = sizeof (struct target_loadmap)
5488 + sizeof (struct target_loadseg) * data->nsegs;
5489
5490 if (offset < 0 || offset > actual_length)
5491 return -1;
5492
5493 copy_length = actual_length - offset < len ? actual_length - offset : len;
5494 memcpy (myaddr, (char *) data + offset, copy_length);
5495 return copy_length;
5496 }
5497 #else
5498 # define linux_read_loadmap NULL
5499 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5500
5501 static void
5502 linux_process_qsupported (const char *query)
5503 {
5504 if (the_low_target.process_qsupported != NULL)
5505 the_low_target.process_qsupported (query);
5506 }
5507
5508 static int
5509 linux_supports_tracepoints (void)
5510 {
5511 if (*the_low_target.supports_tracepoints == NULL)
5512 return 0;
5513
5514 return (*the_low_target.supports_tracepoints) ();
5515 }
5516
5517 static CORE_ADDR
5518 linux_read_pc (struct regcache *regcache)
5519 {
5520 if (the_low_target.get_pc == NULL)
5521 return 0;
5522
5523 return (*the_low_target.get_pc) (regcache);
5524 }
5525
5526 static void
5527 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5528 {
5529 gdb_assert (the_low_target.set_pc != NULL);
5530
5531 (*the_low_target.set_pc) (regcache, pc);
5532 }
5533
5534 static int
5535 linux_thread_stopped (struct thread_info *thread)
5536 {
5537 return get_thread_lwp (thread)->stopped;
5538 }
5539
5540 /* This exposes stop-all-threads functionality to other modules. */
5541
5542 static void
5543 linux_pause_all (int freeze)
5544 {
5545 stop_all_lwps (freeze, NULL);
5546 }
5547
5548 /* This exposes unstop-all-threads functionality to other gdbserver
5549 modules. */
5550
5551 static void
5552 linux_unpause_all (int unfreeze)
5553 {
5554 unstop_all_lwps (unfreeze, NULL);
5555 }
5556
5557 static int
5558 linux_prepare_to_access_memory (void)
5559 {
5560 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5561 running LWP. */
5562 if (non_stop)
5563 linux_pause_all (1);
5564 return 0;
5565 }
5566
5567 static void
5568 linux_done_accessing_memory (void)
5569 {
5570 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5571 running LWP. */
5572 if (non_stop)
5573 linux_unpause_all (1);
5574 }
5575
5576 static int
5577 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5578 CORE_ADDR collector,
5579 CORE_ADDR lockaddr,
5580 ULONGEST orig_size,
5581 CORE_ADDR *jump_entry,
5582 CORE_ADDR *trampoline,
5583 ULONGEST *trampoline_size,
5584 unsigned char *jjump_pad_insn,
5585 ULONGEST *jjump_pad_insn_size,
5586 CORE_ADDR *adjusted_insn_addr,
5587 CORE_ADDR *adjusted_insn_addr_end,
5588 char *err)
5589 {
5590 return (*the_low_target.install_fast_tracepoint_jump_pad)
5591 (tpoint, tpaddr, collector, lockaddr, orig_size,
5592 jump_entry, trampoline, trampoline_size,
5593 jjump_pad_insn, jjump_pad_insn_size,
5594 adjusted_insn_addr, adjusted_insn_addr_end,
5595 err);
5596 }
5597
5598 static struct emit_ops *
5599 linux_emit_ops (void)
5600 {
5601 if (the_low_target.emit_ops != NULL)
5602 return (*the_low_target.emit_ops) ();
5603 else
5604 return NULL;
5605 }
5606
5607 static int
5608 linux_get_min_fast_tracepoint_insn_len (void)
5609 {
5610 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5611 }
5612
5613 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5614
5615 static int
5616 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5617 CORE_ADDR *phdr_memaddr, int *num_phdr)
5618 {
5619 char filename[PATH_MAX];
5620 int fd;
5621 const int auxv_size = is_elf64
5622 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5623 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5624
5625 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5626
5627 fd = open (filename, O_RDONLY);
5628 if (fd < 0)
5629 return 1;
5630
5631 *phdr_memaddr = 0;
5632 *num_phdr = 0;
5633 while (read (fd, buf, auxv_size) == auxv_size
5634 && (*phdr_memaddr == 0 || *num_phdr == 0))
5635 {
5636 if (is_elf64)
5637 {
5638 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5639
5640 switch (aux->a_type)
5641 {
5642 case AT_PHDR:
5643 *phdr_memaddr = aux->a_un.a_val;
5644 break;
5645 case AT_PHNUM:
5646 *num_phdr = aux->a_un.a_val;
5647 break;
5648 }
5649 }
5650 else
5651 {
5652 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5653
5654 switch (aux->a_type)
5655 {
5656 case AT_PHDR:
5657 *phdr_memaddr = aux->a_un.a_val;
5658 break;
5659 case AT_PHNUM:
5660 *num_phdr = aux->a_un.a_val;
5661 break;
5662 }
5663 }
5664 }
5665
5666 close (fd);
5667
5668 if (*phdr_memaddr == 0 || *num_phdr == 0)
5669 {
5670 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5671 "phdr_memaddr = %ld, phdr_num = %d",
5672 (long) *phdr_memaddr, *num_phdr);
5673 return 2;
5674 }
5675
5676 return 0;
5677 }
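
/* Illustrative sketch (an assumption, not part of the original
   source): the same fixed-size-record walk over /proc/PID/auxv can
   fetch any other tag.  This hypothetical helper reads AT_ENTRY for a
   64-bit inferior; error handling is pared down, and the code is kept
   out of the build.  */
#if 0
static CORE_ADDR
example_read_at_entry_64 (const int pid)
{
  char filename[PATH_MAX];
  Elf64_auxv_t aux;
  int fd;

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  /* auxv is a flat array of (a_type, a_un.a_val) records terminated
     by an AT_NULL entry.  */
  while (read (fd, &aux, sizeof aux) == sizeof aux
	 && aux.a_type != AT_NULL)
    if (aux.a_type == AT_ENTRY)
      {
	close (fd);
	return aux.a_un.a_val;
      }

  close (fd);
  return 0;
}
#endif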
5678
5679 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5680
5681 static CORE_ADDR
5682 get_dynamic (const int pid, const int is_elf64)
5683 {
5684 CORE_ADDR phdr_memaddr, relocation;
5685 int num_phdr, i;
5686 unsigned char *phdr_buf;
5687 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5688
5689 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5690 return 0;
5691
5692 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5693 phdr_buf = alloca (num_phdr * phdr_size);
5694
5695 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5696 return 0;
5697
5698 /* Compute relocation: it is expected to be 0 for "regular" executables,
5699 non-zero for PIE ones. */
5700 relocation = -1;
5701 for (i = 0; relocation == -1 && i < num_phdr; i++)
5702 if (is_elf64)
5703 {
5704 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5705
5706 if (p->p_type == PT_PHDR)
5707 relocation = phdr_memaddr - p->p_vaddr;
5708 }
5709 else
5710 {
5711 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5712
5713 if (p->p_type == PT_PHDR)
5714 relocation = phdr_memaddr - p->p_vaddr;
5715 }
5716
5717 if (relocation == -1)
5718 {
5719       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
5720 	 all real-world executables, including PIE executables, always have
5721 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
5722 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5723 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
5724 
5725 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5726 
5727 	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
5728
5729 return 0;
5730 }
5731
5732 for (i = 0; i < num_phdr; i++)
5733 {
5734 if (is_elf64)
5735 {
5736 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5737
5738 if (p->p_type == PT_DYNAMIC)
5739 return p->p_vaddr + relocation;
5740 }
5741 else
5742 {
5743 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5744
5745 if (p->p_type == PT_DYNAMIC)
5746 return p->p_vaddr + relocation;
5747 }
5748 }
5749
5750 return 0;
5751 }
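
/* Worked example (made-up numbers, for illustration only): for a PIE
   inferior whose auxv reports AT_PHDR == 0x555555554040 and whose
   PT_PHDR segment records p_vaddr == 0x40, the computation above gives

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   so a PT_DYNAMIC segment with p_vaddr == 0x2d90 yields
   &_DYNAMIC == 0x555555556d90.  For a non-PIE executable the two views
   of the program headers coincide and RELOCATION is simply 0.  */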
5752
5753 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5754 can be 0 if the inferior does not yet have the library list initialized.
5755 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5756 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5757
5758 static CORE_ADDR
5759 get_r_debug (const int pid, const int is_elf64)
5760 {
5761 CORE_ADDR dynamic_memaddr;
5762 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5763 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5764 CORE_ADDR map = -1;
5765
5766 dynamic_memaddr = get_dynamic (pid, is_elf64);
5767 if (dynamic_memaddr == 0)
5768 return map;
5769
5770 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5771 {
5772 if (is_elf64)
5773 {
5774 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5775 #ifdef DT_MIPS_RLD_MAP
5776 union
5777 {
5778 Elf64_Xword map;
5779 unsigned char buf[sizeof (Elf64_Xword)];
5780 }
5781 rld_map;
5782
5783 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5784 {
5785 if (linux_read_memory (dyn->d_un.d_val,
5786 rld_map.buf, sizeof (rld_map.buf)) == 0)
5787 return rld_map.map;
5788 else
5789 break;
5790 }
5791 #endif /* DT_MIPS_RLD_MAP */
5792
5793 if (dyn->d_tag == DT_DEBUG && map == -1)
5794 map = dyn->d_un.d_val;
5795
5796 if (dyn->d_tag == DT_NULL)
5797 break;
5798 }
5799 else
5800 {
5801 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5802 #ifdef DT_MIPS_RLD_MAP
5803 union
5804 {
5805 Elf32_Word map;
5806 unsigned char buf[sizeof (Elf32_Word)];
5807 }
5808 rld_map;
5809
5810 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5811 {
5812 if (linux_read_memory (dyn->d_un.d_val,
5813 rld_map.buf, sizeof (rld_map.buf)) == 0)
5814 return rld_map.map;
5815 else
5816 break;
5817 }
5818 #endif /* DT_MIPS_RLD_MAP */
5819
5820 if (dyn->d_tag == DT_DEBUG && map == -1)
5821 map = dyn->d_un.d_val;
5822
5823 if (dyn->d_tag == DT_NULL)
5824 break;
5825 }
5826
5827 dynamic_memaddr += dyn_size;
5828 }
5829
5830 return map;
5831 }
5832
5833 /* Read one pointer from MEMADDR in the inferior. */
5834
5835 static int
5836 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5837 {
5838 int ret;
5839
5840   /* Go through a union so this works on either big- or little-endian
5841      hosts, when the inferior's pointer size is smaller than the size
5842      of CORE_ADDR.  It is assumed that the inferior's endianness is the
5843      same as the superior's.  */
5844 union
5845 {
5846 CORE_ADDR core_addr;
5847 unsigned int ui;
5848 unsigned char uc;
5849 } addr;
5850
5851 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5852 if (ret == 0)
5853 {
5854 if (ptr_size == sizeof (CORE_ADDR))
5855 *ptr = addr.core_addr;
5856 else if (ptr_size == sizeof (unsigned int))
5857 *ptr = addr.ui;
5858 else
5859 gdb_assert_not_reached ("unhandled pointer size");
5860 }
5861 return ret;
5862 }
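
/* Usage sketch (an assumption, mirroring the link-map walk in
   linux_qxfer_libraries_svr4 below): follow one l_next link of a
   32-bit inferior from a 64-bit gdbserver.  LM_ADDR and LMO stand for
   the same locals used there; the snippet is kept out of the build.  */
#if 0
  CORE_ADDR next;

  if (read_one_ptr (lm_addr + lmo->l_next_offset, &next, 4) == 0)
    lm_addr = next;	/* The 4 inferior bytes land in addr.ui and are
			   zero-extended into the 64-bit CORE_ADDR.  */
#endif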
5863
5864 struct link_map_offsets
5865 {
5866 /* Offset and size of r_debug.r_version. */
5867 int r_version_offset;
5868
5869 /* Offset and size of r_debug.r_map. */
5870 int r_map_offset;
5871
5872 /* Offset to l_addr field in struct link_map. */
5873 int l_addr_offset;
5874
5875 /* Offset to l_name field in struct link_map. */
5876 int l_name_offset;
5877
5878 /* Offset to l_ld field in struct link_map. */
5879 int l_ld_offset;
5880
5881 /* Offset to l_next field in struct link_map. */
5882 int l_next_offset;
5883
5884 /* Offset to l_prev field in struct link_map. */
5885 int l_prev_offset;
5886 };
5887
5888 /* Construct qXfer:libraries-svr4:read reply. */
5889
5890 static int
5891 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5892 unsigned const char *writebuf,
5893 CORE_ADDR offset, int len)
5894 {
5895 char *document;
5896 unsigned document_len;
5897 struct process_info_private *const priv = current_process ()->priv;
5898 char filename[PATH_MAX];
5899 int pid, is_elf64;
5900
5901 static const struct link_map_offsets lmo_32bit_offsets =
5902 {
5903 0, /* r_version offset. */
5904 4, /* r_debug.r_map offset. */
5905 0, /* l_addr offset in link_map. */
5906 4, /* l_name offset in link_map. */
5907 8, /* l_ld offset in link_map. */
5908 12, /* l_next offset in link_map. */
5909 16 /* l_prev offset in link_map. */
5910 };
5911
5912 static const struct link_map_offsets lmo_64bit_offsets =
5913 {
5914 0, /* r_version offset. */
5915 8, /* r_debug.r_map offset. */
5916 0, /* l_addr offset in link_map. */
5917 8, /* l_name offset in link_map. */
5918 16, /* l_ld offset in link_map. */
5919 24, /* l_next offset in link_map. */
5920 32 /* l_prev offset in link_map. */
5921 };
5922 const struct link_map_offsets *lmo;
5923 unsigned int machine;
5924 int ptr_size;
5925 CORE_ADDR lm_addr = 0, lm_prev = 0;
5926 int allocated = 1024;
5927 char *p;
5928 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5929 int header_done = 0;
5930
5931 if (writebuf != NULL)
5932 return -2;
5933 if (readbuf == NULL)
5934 return -1;
5935
5936 pid = lwpid_of (current_thread);
5937 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5938 is_elf64 = elf_64_file_p (filename, &machine);
5939 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5940 ptr_size = is_elf64 ? 8 : 4;
5941
5942 while (annex[0] != '\0')
5943 {
5944 const char *sep;
5945 CORE_ADDR *addrp;
5946 int len;
5947
5948 sep = strchr (annex, '=');
5949 if (sep == NULL)
5950 break;
5951
5952 len = sep - annex;
5953 if (len == 5 && startswith (annex, "start"))
5954 addrp = &lm_addr;
5955 else if (len == 4 && startswith (annex, "prev"))
5956 addrp = &lm_prev;
5957 else
5958 {
5959 annex = strchr (sep, ';');
5960 if (annex == NULL)
5961 break;
5962 annex++;
5963 continue;
5964 }
5965
5966 annex = decode_address_to_semicolon (addrp, sep + 1);
5967 }
5968
5969 if (lm_addr == 0)
5970 {
5971 int r_version = 0;
5972
5973 if (priv->r_debug == 0)
5974 priv->r_debug = get_r_debug (pid, is_elf64);
5975
5976       /* We failed to find DT_DEBUG.  This situation will not change
5977 	 for this inferior, so do not retry it.  Report it to GDB as
5978 	 E01; see GDB's solib-svr4.c for the reasons.  */
5979 if (priv->r_debug == (CORE_ADDR) -1)
5980 return -1;
5981
5982 if (priv->r_debug != 0)
5983 {
5984 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5985 (unsigned char *) &r_version,
5986 sizeof (r_version)) != 0
5987 || r_version != 1)
5988 {
5989 warning ("unexpected r_debug version %d", r_version);
5990 }
5991 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5992 &lm_addr, ptr_size) != 0)
5993 {
5994 warning ("unable to read r_map from 0x%lx",
5995 (long) priv->r_debug + lmo->r_map_offset);
5996 }
5997 }
5998 }
5999
6000 document = xmalloc (allocated);
6001 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6002 p = document + strlen (document);
6003
6004 while (lm_addr
6005 && read_one_ptr (lm_addr + lmo->l_name_offset,
6006 &l_name, ptr_size) == 0
6007 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6008 &l_addr, ptr_size) == 0
6009 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6010 &l_ld, ptr_size) == 0
6011 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6012 &l_prev, ptr_size) == 0
6013 && read_one_ptr (lm_addr + lmo->l_next_offset,
6014 &l_next, ptr_size) == 0)
6015 {
6016 unsigned char libname[PATH_MAX];
6017
6018 if (lm_prev != l_prev)
6019 {
6020 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6021 (long) lm_prev, (long) l_prev);
6022 break;
6023 }
6024
6025       /* Ignore the first entry even if it has a valid name, as it
6026 	 corresponds to the main executable.  The first entry should not be
6027 	 skipped if the dynamic loader was loaded late by a static executable
6028 	 (see the solib-svr4.c parameter ignore_first).  But in that case the
6029 	 main executable does not have PT_DYNAMIC present, and this function
6030 	 has already returned above due to a failed get_r_debug.  */
6031 if (lm_prev == 0)
6032 {
6033 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6034 p = p + strlen (p);
6035 }
6036 else
6037 {
6038 /* Not checking for error because reading may stop before
6039 we've got PATH_MAX worth of characters. */
6040 libname[0] = '\0';
6041 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6042 libname[sizeof (libname) - 1] = '\0';
6043 if (libname[0] != '\0')
6044 {
6045 /* 6x the size for xml_escape_text below. */
6046 size_t len = 6 * strlen ((char *) libname);
6047 char *name;
6048
6049 if (!header_done)
6050 {
6051 /* Terminate `<library-list-svr4'. */
6052 *p++ = '>';
6053 header_done = 1;
6054 }
6055
6056 while (allocated < p - document + len + 200)
6057 {
6058 /* Expand to guarantee sufficient storage. */
6059 uintptr_t document_len = p - document;
6060
6061 document = xrealloc (document, 2 * allocated);
6062 allocated *= 2;
6063 p = document + document_len;
6064 }
6065
6066 name = xml_escape_text ((char *) libname);
6067 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6068 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6069 name, (unsigned long) lm_addr,
6070 (unsigned long) l_addr, (unsigned long) l_ld);
6071 free (name);
6072 }
6073 }
6074
6075 lm_prev = lm_addr;
6076 lm_addr = l_next;
6077 }
6078
6079 if (!header_done)
6080 {
6081 /* Empty list; terminate `<library-list-svr4'. */
6082 strcpy (p, "/>");
6083 }
6084 else
6085 strcpy (p, "</library-list-svr4>");
6086
6087 document_len = strlen (document);
6088 if (offset < document_len)
6089 document_len -= offset;
6090 else
6091 document_len = 0;
6092 if (len > document_len)
6093 len = document_len;
6094
6095 memcpy (readbuf, document + offset, len);
6096 xfree (document);
6097
6098 return len;
6099 }
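
/* For reference, a reply assembled by the function above has this
   shape (all addresses are made-up examples):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700" l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dcdba0"/>
   </library-list-svr4>

   The main executable itself is never listed as a <library> element,
   and an inferior with an empty list is reported as
   <library-list-svr4 version="1.0"/>.  */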
6100
6101 #ifdef HAVE_LINUX_BTRACE
6102
6103 /* See to_enable_btrace target method. */
6104
6105 static struct btrace_target_info *
6106 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6107 {
6108 struct btrace_target_info *tinfo;
6109
6110 tinfo = linux_enable_btrace (ptid, conf);
6111
6112 if (tinfo != NULL && tinfo->ptr_bits == 0)
6113 {
6114 struct thread_info *thread = find_thread_ptid (ptid);
6115 struct regcache *regcache = get_thread_regcache (thread, 0);
6116
6117 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6118 }
6119
6120 return tinfo;
6121 }
6122
6123 /* See to_disable_btrace target method. */
6124
6125 static int
6126 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6127 {
6128 enum btrace_error err;
6129
6130 err = linux_disable_btrace (tinfo);
6131 return (err == BTRACE_ERR_NONE ? 0 : -1);
6132 }
6133
6134 /* See to_read_btrace target method. */
6135
6136 static int
6137 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6138 int type)
6139 {
6140 struct btrace_data btrace;
6141 struct btrace_block *block;
6142 enum btrace_error err;
6143 int i;
6144
6145 btrace_data_init (&btrace);
6146
6147 err = linux_read_btrace (&btrace, tinfo, type);
6148 if (err != BTRACE_ERR_NONE)
6149 {
6150 if (err == BTRACE_ERR_OVERFLOW)
6151 buffer_grow_str0 (buffer, "E.Overflow.");
6152 else
6153 buffer_grow_str0 (buffer, "E.Generic Error.");
6154
6155 btrace_data_fini (&btrace);
6156 return -1;
6157 }
6158
6159 switch (btrace.format)
6160 {
6161 case BTRACE_FORMAT_NONE:
6162 buffer_grow_str0 (buffer, "E.No Trace.");
6163 break;
6164
6165 case BTRACE_FORMAT_BTS:
6166 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6167 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6168
6169 for (i = 0;
6170 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6171 i++)
6172 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6173 paddress (block->begin), paddress (block->end));
6174
6175 buffer_grow_str0 (buffer, "</btrace>\n");
6176 break;
6177
6178 default:
6179 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6180
6181 btrace_data_fini (&btrace);
6182 return -1;
6183 }
6184
6185 btrace_data_fini (&btrace);
6186 return 0;
6187 }
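
/* For reference, a successful BTS reply assembled above looks like
   this (the block addresses are made-up examples):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400500" end="0x400524"/>
   <block begin="0x4003f0" end="0x400410"/>
   </btrace>  */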
6188
6189 /* See to_btrace_conf target method. */
6190
6191 static int
6192 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6193 struct buffer *buffer)
6194 {
6195 const struct btrace_config *conf;
6196
6197 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6198 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6199
6200 conf = linux_btrace_conf (tinfo);
6201 if (conf != NULL)
6202 {
6203 switch (conf->format)
6204 {
6205 case BTRACE_FORMAT_NONE:
6206 break;
6207
6208 case BTRACE_FORMAT_BTS:
6209 buffer_xml_printf (buffer, "<bts");
6210 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6211 buffer_xml_printf (buffer, " />\n");
6212 break;
6213 }
6214 }
6215
6216 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6217 return 0;
6218 }
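
/* For reference, a configuration reply for the BTS format assembled
   above looks like this (the size is a made-up example):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */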
6219 #endif /* HAVE_LINUX_BTRACE */
6220
6221 /* See nat/linux-nat.h. */
6222
6223 ptid_t
6224 current_lwp_ptid (void)
6225 {
6226 return ptid_of (current_thread);
6227 }
6228
6229 static struct target_ops linux_target_ops = {
6230 linux_create_inferior,
6231 linux_attach,
6232 linux_kill,
6233 linux_detach,
6234 linux_mourn,
6235 linux_join,
6236 linux_thread_alive,
6237 linux_resume,
6238 linux_wait,
6239 linux_fetch_registers,
6240 linux_store_registers,
6241 linux_prepare_to_access_memory,
6242 linux_done_accessing_memory,
6243 linux_read_memory,
6244 linux_write_memory,
6245 linux_look_up_symbols,
6246 linux_request_interrupt,
6247 linux_read_auxv,
6248 linux_supports_z_point_type,
6249 linux_insert_point,
6250 linux_remove_point,
6251 linux_stopped_by_sw_breakpoint,
6252 linux_supports_stopped_by_sw_breakpoint,
6253 linux_stopped_by_hw_breakpoint,
6254 linux_supports_stopped_by_hw_breakpoint,
6255 linux_stopped_by_watchpoint,
6256 linux_stopped_data_address,
6257 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6258 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6259 && defined(PT_TEXT_END_ADDR)
6260 linux_read_offsets,
6261 #else
6262 NULL,
6263 #endif
6264 #ifdef USE_THREAD_DB
6265 thread_db_get_tls_address,
6266 #else
6267 NULL,
6268 #endif
6269 linux_qxfer_spu,
6270 hostio_last_error_from_errno,
6271 linux_qxfer_osdata,
6272 linux_xfer_siginfo,
6273 linux_supports_non_stop,
6274 linux_async,
6275 linux_start_non_stop,
6276 linux_supports_multi_process,
6277 #ifdef USE_THREAD_DB
6278 thread_db_handle_monitor_command,
6279 #else
6280 NULL,
6281 #endif
6282 linux_common_core_of_thread,
6283 linux_read_loadmap,
6284 linux_process_qsupported,
6285 linux_supports_tracepoints,
6286 linux_read_pc,
6287 linux_write_pc,
6288 linux_thread_stopped,
6289 NULL,
6290 linux_pause_all,
6291 linux_unpause_all,
6292 linux_stabilize_threads,
6293 linux_install_fast_tracepoint_jump_pad,
6294 linux_emit_ops,
6295 linux_supports_disable_randomization,
6296 linux_get_min_fast_tracepoint_insn_len,
6297 linux_qxfer_libraries_svr4,
6298 linux_supports_agent,
6299 #ifdef HAVE_LINUX_BTRACE
6300 linux_supports_btrace,
6301 linux_low_enable_btrace,
6302 linux_low_disable_btrace,
6303 linux_low_read_btrace,
6304 linux_low_btrace_conf,
6305 #else
6306 NULL,
6307 NULL,
6308 NULL,
6309 NULL,
6310 NULL,
6311 #endif
6312 linux_supports_range_stepping,
6313 };
6314
6315 static void
6316 linux_init_signals (void)
6317 {
6318 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6319 to find what the cancel signal actually is. */
6320 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6321 signal (__SIGRTMIN+1, SIG_IGN);
6322 #endif
6323 }
6324
6325 #ifdef HAVE_LINUX_REGSETS
6326 void
6327 initialize_regsets_info (struct regsets_info *info)
6328 {
6329 for (info->num_regsets = 0;
6330 info->regsets[info->num_regsets].size >= 0;
6331 info->num_regsets++)
6332 ;
6333 }
6334 #endif
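
/* Illustrative sketch (an assumption about a typical arch file, not
   part of the original source): the counting loop above relies on each
   architecture's regsets array ending with a sentinel entry whose size
   is negative, along the lines of:

   static struct regset_info my_regsets[] = {
     { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
       GENERAL_REGS, my_fill_gregset, my_store_gregset },
     { 0, 0, 0, -1, -1, NULL, NULL }
   };

   where my_fill_gregset and my_store_gregset are hypothetical
   callbacks.  */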
6335
6336 void
6337 initialize_low (void)
6338 {
6339 struct sigaction sigchld_action;
6340 memset (&sigchld_action, 0, sizeof (sigchld_action));
6341 set_target_ops (&linux_target_ops);
6342 set_breakpoint_data (the_low_target.breakpoint,
6343 the_low_target.breakpoint_len);
6344 linux_init_signals ();
6345 linux_ptrace_init_warnings ();
6346
6347 sigchld_action.sa_handler = sigchld_handler;
6348 sigemptyset (&sigchld_action.sa_mask);
6349 sigchld_action.sa_flags = SA_RESTART;
6350 sigaction (SIGCHLD, &sigchld_action, NULL);
6351
6352 initialize_low_arch ();
6353 }