/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
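
/* As an illustrative sketch (not used below): W_STOPCODE (SIGTRAP)
   yields the status 0x057f, for which WIFSTOPPED is true and
   WSTOPSIG recovers SIGTRAP.  */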

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
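
/* These are used as a pair: linux_wait_for_lwp below stashes the
   wait status of an LWP it does not know about yet with
   add_to_pid_list, and handle_extended_wait claims it with
   pull_pid_from_list once the clone event that explains it
   arrives.  */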

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

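/* Types of the data and address arguments of ptrace calls, and of
   PTRACE_PEEKTEXT-style memory transfers.  */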
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

/* Accepts an integer PID; returns true if the executable that PID
   is running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

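  /* /proc/PID/exe is a symlink to the process's executable; the open
     in elf_64_file_p follows it.  */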
  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

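          /* Alternate between the two wait flavors: plain children on
             one pass, __WCLONE children on the next.  */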
          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
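  /* For a ptrace event stop the kernel encodes the PTRACE_EVENT_*
     code in the high bits of the wait status: WSTOPSIG is SIGTRAP,
     and the event code sits at bits 16 and up.  */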
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

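      /* For PTRACE_EVENT_CLONE, PTRACE_GETEVENTMSG retrieves the LWP
         id of the new thread.  */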
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

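  /* Only adjust for a software breakpoint trap: a single-step, a
     watchpoint hit, or an extended (PTRACE_EVENT_*) stop also
     arrives as SIGTRAP, but none of those leave the PC past a
     breakpoint instruction.  */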
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
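      /* personality (0xffffffff) is the conventional query form: it
         returns the current persona without changing it.  */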
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      struct buffer buffer;

      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
             lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years we used PTRACE_KILL only, so we remain a bit paranoid about
     old kernels where PTRACE_KILL might work better (it is dubious
     that any exist, but that's what paranoia is for).  We therefore
     try SIGKILL first and PTRACE_KILL second, which covers us
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
                 pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise it would be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum target_signal signo = TARGET_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == TARGET_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s hasn't stopped: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had stopped with extended "
                 "status: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = target_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, but it is in nopass state\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == TARGET_SIGNAL_TRAP || signo == TARGET_SIGNAL_INT))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, "
                 "but we don't know if we should pass it.  Default to not.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s has pending signal %s: delivering it.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Sending SIGCONT to %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* STATUS is only written to on success; don't inspect it after a
       failed wait.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

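  /* Signals 32 and 33 were historically used internally by
     LinuxThreads; skip them in the debug output to cut noise.  */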
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller because we want to be able to pass
   lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

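      /* Signals are pushed at the head of the PREV-linked list, so
	 walk to the tail to dequeue the oldest signal first.  */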
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

1757 /* Wait for an event from child PID. If PID is -1, wait for any
1758 child. Store the stop status through the status pointer WSTAT.
1759 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1760 event was found and OPTIONS contains WNOHANG. Return the PID of
1761 the stopped child otherwise. */
1762
1763 static int
1764 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1765 {
1766 struct lwp_info *event_child, *requested_child;
1767 ptid_t wait_ptid;
1768
1769 event_child = NULL;
1770 requested_child = NULL;
1771
1772 /* Check for a lwp with a pending status. */
1773
1774 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1775 {
1776 event_child = (struct lwp_info *)
1777 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1778 if (debug_threads && event_child)
1779 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1780 }
1781 else
1782 {
1783 requested_child = find_lwp_pid (ptid);
1784
1785 if (!stopping_threads
1786 && requested_child->status_pending_p
1787 && requested_child->collecting_fast_tracepoint)
1788 {
1789 enqueue_one_deferred_signal (requested_child,
1790 &requested_child->status_pending);
1791 requested_child->status_pending_p = 0;
1792 requested_child->status_pending = 0;
1793 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1794 }
1795
1796 if (requested_child->suspended
1797 && requested_child->status_pending_p)
1798 fatal ("requesting an event out of a suspended child?");
1799
1800 if (requested_child->status_pending_p)
1801 event_child = requested_child;
1802 }
1803
1804 if (event_child != NULL)
1805 {
1806 if (debug_threads)
1807 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1808 lwpid_of (event_child), event_child->status_pending);
1809 *wstat = event_child->status_pending;
1810 event_child->status_pending_p = 0;
1811 event_child->status_pending = 0;
1812 current_inferior = get_lwp_thread (event_child);
1813 return lwpid_of (event_child);
1814 }
1815
1816 if (ptid_is_pid (ptid))
1817 {
1818 /* A request to wait for a specific tgid. This is not possible
1819 with waitpid, so instead, we wait for any child, and leave
1820 children we're not interested in right now with a pending
1821 status to report later. */
1822 wait_ptid = minus_one_ptid;
1823 }
1824 else
1825 wait_ptid = ptid;
1826
1827 /* We only enter this loop if no process has a pending wait status. Thus
1828 any action taken in response to a wait status inside this loop is
1829 responding as soon as we detect the status, not after any pending
1830 events. */
1831 while (1)
1832 {
1833 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1834
1835 if ((options & WNOHANG) && event_child == NULL)
1836 {
1837 if (debug_threads)
1838 fprintf (stderr, "WNOHANG set, no event found\n");
1839 return 0;
1840 }
1841
1842 if (event_child == NULL)
1843 error ("event from unknown child");
1844
1845 if (ptid_is_pid (ptid)
1846 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1847 {
1848 if (! WIFSTOPPED (*wstat))
1849 mark_lwp_dead (event_child, *wstat);
1850 else
1851 {
1852 event_child->status_pending_p = 1;
1853 event_child->status_pending = *wstat;
1854 }
1855 continue;
1856 }
1857
1858 current_inferior = get_lwp_thread (event_child);
1859
1860 /* Check for thread exit. */
1861 if (! WIFSTOPPED (*wstat))
1862 {
1863 if (debug_threads)
1864 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1865
1866 /* If the last thread is exiting, just return. */
1867 if (last_thread_of_process_p (current_inferior))
1868 {
1869 if (debug_threads)
1870 fprintf (stderr, "LWP %ld is last lwp of process\n",
1871 lwpid_of (event_child));
1872 return lwpid_of (event_child);
1873 }
1874
1875 if (!non_stop)
1876 {
1877 current_inferior = (struct thread_info *) all_threads.head;
1878 if (debug_threads)
1879 fprintf (stderr, "Current inferior is now %ld\n",
1880 lwpid_of (get_thread_lwp (current_inferior)));
1881 }
1882 else
1883 {
1884 current_inferior = NULL;
1885 if (debug_threads)
1886 fprintf (stderr, "Current inferior is now <NULL>\n");
1887 }
1888
1889 /* If we were waiting for this particular child to do something...
1890 well, it did something. */
1891 if (requested_child != NULL)
1892 {
1893 int lwpid = lwpid_of (event_child);
1894
1895 /* Cancel the step-over operation --- the thread that
1896 started it is gone. */
1897 if (finish_step_over (event_child))
1898 unstop_all_lwps (1, event_child);
1899 delete_lwp (event_child);
1900 return lwpid;
1901 }
1902
1903 delete_lwp (event_child);
1904
1905 /* Wait for a more interesting event. */
1906 continue;
1907 }
1908
1909 if (event_child->must_set_ptrace_flags)
1910 {
1911 linux_enable_event_reporting (lwpid_of (event_child));
1912 event_child->must_set_ptrace_flags = 0;
1913 }
1914
1915 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1916 && *wstat >> 16 != 0)
1917 {
1918 handle_extended_wait (event_child, *wstat);
1919 continue;
1920 }
1921
1922 if (WIFSTOPPED (*wstat)
1923 && WSTOPSIG (*wstat) == SIGSTOP
1924 && event_child->stop_expected)
1925 {
1926 int should_stop;
1927
1928 if (debug_threads)
1929 fprintf (stderr, "Expected stop.\n");
1930 event_child->stop_expected = 0;
1931
1932 should_stop = (current_inferior->last_resume_kind == resume_stop
1933 || stopping_threads);
1934
1935 if (!should_stop)
1936 {
1937 linux_resume_one_lwp (event_child,
1938 event_child->stepping, 0, NULL);
1939 continue;
1940 }
1941 }
1942
1943 return lwpid_of (event_child);
1944 }
1945
1946 /* NOTREACHED */
1947 return 0;
1948 }
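/* Illustrative sketch (not part of the original file): how a caller
   might decode the raw wait status that linux_wait_for_event stores
   in *WSTAT, using the standard <sys/wait.h> macros.  The handle_*
   functions are hypothetical placeholders.  */
#if 0
static void
decode_wstat_example (int wstat)
{
  if (WIFSTOPPED (wstat))
    handle_stop_example (WSTOPSIG (wstat));	/* E.g. SIGTRAP, SIGSTOP.  */
  else if (WIFEXITED (wstat))
    handle_exit_example (WEXITSTATUS (wstat));	/* Normal exit code.  */
  else if (WIFSIGNALED (wstat))
    handle_signal_example (WTERMSIG (wstat));	/* Fatal signal.  */
}
#endif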
1949
1950 /* Count the LWPs that have had events. */
1951
1952 static int
1953 count_events_callback (struct inferior_list_entry *entry, void *data)
1954 {
1955 struct lwp_info *lp = (struct lwp_info *) entry;
1956 struct thread_info *thread = get_lwp_thread (lp);
1957 int *count = data;
1958
1959 gdb_assert (count != NULL);
1960
1961 /* Count only resumed LWPs that have a SIGTRAP event pending that
1962 should be reported to GDB. */
1963 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1964 && thread->last_resume_kind != resume_stop
1965 && lp->status_pending_p
1966 && WIFSTOPPED (lp->status_pending)
1967 && WSTOPSIG (lp->status_pending) == SIGTRAP
1968 && !breakpoint_inserted_here (lp->stop_pc))
1969 (*count)++;
1970
1971 return 0;
1972 }
1973
1974 /* Select the LWP (if any) that is currently being single-stepped. */
1975
1976 static int
1977 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1978 {
1979 struct lwp_info *lp = (struct lwp_info *) entry;
1980 struct thread_info *thread = get_lwp_thread (lp);
1981
1982 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1983 && thread->last_resume_kind == resume_step
1984 && lp->status_pending_p)
1985 return 1;
1986 else
1987 return 0;
1988 }
1989
1990 /* Select the Nth LWP that has had a SIGTRAP event that should be
1991 reported to GDB. */
1992
1993 static int
1994 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1995 {
1996 struct lwp_info *lp = (struct lwp_info *) entry;
1997 struct thread_info *thread = get_lwp_thread (lp);
1998 int *selector = data;
1999
2000 gdb_assert (selector != NULL);
2001
2002 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2003 if (thread->last_resume_kind != resume_stop
2004 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2005 && lp->status_pending_p
2006 && WIFSTOPPED (lp->status_pending)
2007 && WSTOPSIG (lp->status_pending) == SIGTRAP
2008 && !breakpoint_inserted_here (lp->stop_pc))
2009 if ((*selector)-- == 0)
2010 return 1;
2011
2012 return 0;
2013 }
2014
2015 static int
2016 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2017 {
2018 struct lwp_info *lp = (struct lwp_info *) entry;
2019 struct thread_info *thread = get_lwp_thread (lp);
2020 struct lwp_info *event_lp = data;
2021
2022 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2023 if (lp == event_lp)
2024 return 0;
2025
2026 /* If an LWP other than the LWP that we're reporting an event for has
2027 hit a GDB breakpoint (as opposed to some random trap signal),
2028 then just arrange for it to hit it again later. We don't keep
2029 the SIGTRAP status and don't forward the SIGTRAP signal to the
2030 LWP. We will handle the current event, eventually we will resume
2031 all LWPs, and this one will get its breakpoint trap again.
2032
2033 If we do not do this, then we run the risk that the user will
2034 delete or disable the breakpoint, but the LWP will have already
2035 tripped on it. */
2036
2037 if (thread->last_resume_kind != resume_stop
2038 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2039 && lp->status_pending_p
2040 && WIFSTOPPED (lp->status_pending)
2041 && WSTOPSIG (lp->status_pending) == SIGTRAP
2042 && !lp->stepping
2043 && !lp->stopped_by_watchpoint
2044 && cancel_breakpoint (lp))
2045 /* Throw away the SIGTRAP. */
2046 lp->status_pending_p = 0;
2047
2048 return 0;
2049 }
2050
2051 static void
2052 linux_cancel_breakpoints (void)
2053 {
2054 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2055 }
2056
2057 /* Select one LWP out of those that have events pending. */
2058
2059 static void
2060 select_event_lwp (struct lwp_info **orig_lp)
2061 {
2062 int num_events = 0;
2063 int random_selector;
2064 struct lwp_info *event_lp;
2065
2066 /* Give preference to any LWP that is being single-stepped. */
2067 event_lp
2068 = (struct lwp_info *) find_inferior (&all_lwps,
2069 select_singlestep_lwp_callback, NULL);
2070 if (event_lp != NULL)
2071 {
2072 if (debug_threads)
2073 fprintf (stderr,
2074 "SEL: Select single-step %s\n",
2075 target_pid_to_str (ptid_of (event_lp)));
2076 }
2077 else
2078 {
2079 /* No single-stepping LWP. Select one at random, out of those
2080 which have had SIGTRAP events. */
2081
2082 /* First see how many SIGTRAP events we have. */
2083 find_inferior (&all_lwps, count_events_callback, &num_events);
2084
2085 /* Now randomly pick an LWP out of those that have had a SIGTRAP. */
2086 random_selector = (int)
2087 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2088
2089 if (debug_threads && num_events > 1)
2090 fprintf (stderr,
2091 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2092 num_events, random_selector);
2093
2094 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2095 select_event_lwp_callback,
2096 &random_selector);
2097 }
2098
2099 if (event_lp != NULL)
2100 {
2101 /* Switch the event LWP. */
2102 *orig_lp = event_lp;
2103 }
2104 }
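/* Illustrative sketch (not part of the original file) of the
   selection math used above: scaling rand () by
   NUM_EVENTS / (RAND_MAX + 1.0) yields an index in [0, NUM_EVENTS),
   so each pending event is equally likely and the index can never
   reach NUM_EVENTS itself.  */
#if 0
static int
pick_random_index_example (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif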
2105
2106 /* Decrement the suspend count of an LWP. */
2107
2108 static int
2109 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2110 {
2111 struct lwp_info *lwp = (struct lwp_info *) entry;
2112
2113 /* Ignore EXCEPT. */
2114 if (lwp == except)
2115 return 0;
2116
2117 lwp->suspended--;
2118
2119 gdb_assert (lwp->suspended >= 0);
2120 return 0;
2121 }
2122
2123 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2124 non-NULL. */
2125
2126 static void
2127 unsuspend_all_lwps (struct lwp_info *except)
2128 {
2129 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2130 }
2131
2132 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2133 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2134 void *data);
2135 static int lwp_running (struct inferior_list_entry *entry, void *data);
2136 static ptid_t linux_wait_1 (ptid_t ptid,
2137 struct target_waitstatus *ourstatus,
2138 int target_options);
2139
2140 /* Stabilize threads (move out of jump pads).
2141
2142 If a thread is midway through collecting a fast tracepoint, we
2143 need to finish the collection and move it out of the jump pad
2144 before reporting the signal.
2145
2146 This avoids recursion while collecting (when a signal arrives
2147 midway, and the signal handler itself collects), which would trash
2148 the trace buffer. In case the user set a breakpoint in a signal
2149 handler, this avoids the backtrace showing the jump pad, etc.
2150 Most importantly, there are certain things we can't do safely if
2151 threads are stopped in a jump pad (or in its callees). For
2152 example:
2153
2154 - starting a new trace run. A thread still collecting the
2155 previous run could trash the trace buffer when resumed. The trace
2156 buffer control structures would have been reset but the thread had
2157 no way to tell. The thread could even be midway through memcpy'ing
2158 to the buffer, which would mean that when resumed, it would clobber
2159 the trace buffer that had been set up for the new run.
2160
2161 - we can't rewrite/reuse the jump pads for new tracepoints
2162 safely. Say you do tstart while a thread is stopped midway
2163 through collecting. When the thread is later resumed, it finishes
2164 the collection, and returns to the jump pad, to execute the
2165 original instruction that was under the tracepoint jump at the
2166 time the older run had been started. If the jump pad had since
2167 been rewritten for something else in the new run, the thread
2168 would now execute wrong or random instructions. */
2169
2170 static void
2171 linux_stabilize_threads (void)
2172 {
2173 struct thread_info *save_inferior;
2174 struct lwp_info *lwp_stuck;
2175
2176 lwp_stuck
2177 = (struct lwp_info *) find_inferior (&all_lwps,
2178 stuck_in_jump_pad_callback, NULL);
2179 if (lwp_stuck != NULL)
2180 {
2181 if (debug_threads)
2182 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2183 lwpid_of (lwp_stuck));
2184 return;
2185 }
2186
2187 save_inferior = current_inferior;
2188
2189 stabilizing_threads = 1;
2190
2191 /* Kick 'em all. */
2192 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2193
2194 /* Loop until all are stopped out of the jump pads. */
2195 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2196 {
2197 struct target_waitstatus ourstatus;
2198 struct lwp_info *lwp;
2199 int wstat;
2200
2201 /* Note that we go through the full wait event loop. While
2202 moving threads out of the jump pad, we need to be able to step
2203 over internal breakpoints and such. */
2204 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2205
2206 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2207 {
2208 lwp = get_thread_lwp (current_inferior);
2209
2210 /* Lock it. */
2211 lwp->suspended++;
2212
2213 if (ourstatus.value.sig != TARGET_SIGNAL_0
2214 || current_inferior->last_resume_kind == resume_stop)
2215 {
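/* Note: W_STOPCODE re-encodes the signal as a raw waitpid-style
   stop status, so the deferred signal can later be reported exactly
   as if waitpid had returned it. */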
2216 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2217 enqueue_one_deferred_signal (lwp, &wstat);
2218 }
2219 }
2220 }
2221
2222 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2223
2224 stabilizing_threads = 0;
2225
2226 current_inferior = save_inferior;
2227
2228 if (debug_threads)
2229 {
2230 lwp_stuck
2231 = (struct lwp_info *) find_inferior (&all_lwps,
2232 stuck_in_jump_pad_callback, NULL);
2233 if (lwp_stuck != NULL)
2234 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2235 lwpid_of (lwp_stuck));
2236 }
2237 }
2238
2239 /* Wait for a process, and return its status. */
2240
2241 static ptid_t
2242 linux_wait_1 (ptid_t ptid,
2243 struct target_waitstatus *ourstatus, int target_options)
2244 {
2245 int w;
2246 struct lwp_info *event_child;
2247 int options;
2248 int pid;
2249 int step_over_finished;
2250 int bp_explains_trap;
2251 int maybe_internal_trap;
2252 int report_to_gdb;
2253 int trace_event;
2254
2255 /* Translate generic target options into linux options. */
2256 options = __WALL;
2257 if (target_options & TARGET_WNOHANG)
2258 options |= WNOHANG;
2259
2260 retry:
2261 bp_explains_trap = 0;
2262 trace_event = 0;
2263 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2264
2265 /* If we were only supposed to resume one thread, only wait for
2266 that thread - if it's still alive. If it died, however - which
2267 can happen if we're coming from the thread death case below -
2268 then we need to make sure we restart the other threads. We could
2269 pick a thread at random or restart all; restarting all is less
2270 arbitrary. */
2271 if (!non_stop
2272 && !ptid_equal (cont_thread, null_ptid)
2273 && !ptid_equal (cont_thread, minus_one_ptid))
2274 {
2275 struct thread_info *thread;
2276
2277 thread = (struct thread_info *) find_inferior_id (&all_threads,
2278 cont_thread);
2279
2280 /* No stepping, no signal - unless one is pending already, of course. */
2281 if (thread == NULL)
2282 {
2283 struct thread_resume resume_info;
2284 resume_info.thread = minus_one_ptid;
2285 resume_info.kind = resume_continue;
2286 resume_info.sig = 0;
2287 linux_resume (&resume_info, 1);
2288 }
2289 else
2290 ptid = cont_thread;
2291 }
2292
2293 if (ptid_equal (step_over_bkpt, null_ptid))
2294 pid = linux_wait_for_event (ptid, &w, options);
2295 else
2296 {
2297 if (debug_threads)
2298 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2299 target_pid_to_str (step_over_bkpt));
2300 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2301 }
2302
2303 if (pid == 0) /* only if TARGET_WNOHANG */
2304 return null_ptid;
2305
2306 event_child = get_thread_lwp (current_inferior);
2307
2308 /* If we are waiting for a particular child, and it exited,
2309 linux_wait_for_event will return its exit status. Similarly if
2310 the last child exited. If this is not the last child, however,
2311 do not report it as exited until there is a 'thread exited' response
2312 available in the remote protocol. Instead, just wait for another event.
2313 This should be safe, because if the thread crashed we will already
2314 have reported the termination signal to GDB; that should stop any
2315 in-progress stepping operations, etc.
2316
2317 Report the exit status of the last thread to exit. This matches
2318 LinuxThreads' behavior. */
2319
2320 if (last_thread_of_process_p (current_inferior))
2321 {
2322 if (WIFEXITED (w) || WIFSIGNALED (w))
2323 {
2324 if (WIFEXITED (w))
2325 {
2326 ourstatus->kind = TARGET_WAITKIND_EXITED;
2327 ourstatus->value.integer = WEXITSTATUS (w);
2328
2329 if (debug_threads)
2330 fprintf (stderr,
2331 "\nChild exited with retcode = %x \n",
2332 WEXITSTATUS (w));
2333 }
2334 else
2335 {
2336 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2337 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2338
2339 if (debug_threads)
2340 fprintf (stderr,
2341 "\nChild terminated with signal = %x \n",
2342 WTERMSIG (w));
2343
2344 }
2345
2346 return ptid_of (event_child);
2347 }
2348 }
2349 else
2350 {
2351 if (!WIFSTOPPED (w))
2352 goto retry;
2353 }
2354
2355 /* If this event was not handled before, and is not a SIGTRAP, we
2356 report it. SIGILL and SIGSEGV are also treated as traps in case
2357 a breakpoint is inserted at the current PC. If this target does
2358 not support internal breakpoints at all, we also report the
2359 SIGTRAP without further processing; it's of no concern to us. */
2360 maybe_internal_trap
2361 = (supports_breakpoints ()
2362 && (WSTOPSIG (w) == SIGTRAP
2363 || ((WSTOPSIG (w) == SIGILL
2364 || WSTOPSIG (w) == SIGSEGV)
2365 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2366
2367 if (maybe_internal_trap)
2368 {
2369 /* Handle anything that requires bookkeeping before deciding to
2370 report the event or continue waiting. */
2371
2372 /* First check if we can explain the SIGTRAP with an internal
2373 breakpoint, or if we should possibly report the event to GDB.
2374 Do this before anything that may remove or insert a
2375 breakpoint. */
2376 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2377
2378 /* We have a SIGTRAP, possibly a step-over dance has just
2379 finished. If so, tweak the state machine accordingly,
2380 reinsert breakpoints and delete any reinsert (software
2381 single-step) breakpoints. */
2382 step_over_finished = finish_step_over (event_child);
2383
2384 /* Now invoke the callbacks of any internal breakpoints there. */
2385 check_breakpoints (event_child->stop_pc);
2386
2387 /* Handle tracepoint data collecting. This may overflow the
2388 trace buffer, and cause a tracing stop, removing
2389 breakpoints. */
2390 trace_event = handle_tracepoints (event_child);
2391
2392 if (bp_explains_trap)
2393 {
2394 /* If we stepped or ran into an internal breakpoint, we've
2395 already handled it. So next time we resume (from this
2396 PC), we should step over it. */
2397 if (debug_threads)
2398 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2399
2400 if (breakpoint_here (event_child->stop_pc))
2401 event_child->need_step_over = 1;
2402 }
2403 }
2404 else
2405 {
2406 /* We have some other signal, possibly a step-over dance was in
2407 progress, and it should be cancelled too. */
2408 step_over_finished = finish_step_over (event_child);
2409 }
2410
2411 /* We have all the data we need. Either report the event to GDB, or
2412 resume threads and keep waiting for more. */
2413
2414 /* If we're collecting a fast tracepoint, finish the collection and
2415 move out of the jump pad before delivering a signal. See
2416 linux_stabilize_threads. */
2417
2418 if (WIFSTOPPED (w)
2419 && WSTOPSIG (w) != SIGTRAP
2420 && supports_fast_tracepoints ()
2421 && agent_loaded_p ())
2422 {
2423 if (debug_threads)
2424 fprintf (stderr,
2425 "Got signal %d for LWP %ld. Check if we need "
2426 "to defer or adjust it.\n",
2427 WSTOPSIG (w), lwpid_of (event_child));
2428
2429 /* Allow debugging the jump pad itself. */
2430 if (current_inferior->last_resume_kind != resume_step
2431 && maybe_move_out_of_jump_pad (event_child, &w))
2432 {
2433 enqueue_one_deferred_signal (event_child, &w);
2434
2435 if (debug_threads)
2436 fprintf (stderr,
2437 "Signal %d for LWP %ld deferred (in jump pad)\n",
2438 WSTOPSIG (w), lwpid_of (event_child));
2439
2440 linux_resume_one_lwp (event_child, 0, 0, NULL);
2441 goto retry;
2442 }
2443 }
2444
2445 if (event_child->collecting_fast_tracepoint)
2446 {
2447 if (debug_threads)
2448 fprintf (stderr, "\
2449 LWP %ld was trying to move out of the jump pad (%d). \
2450 Check if we're already there.\n",
2451 lwpid_of (event_child),
2452 event_child->collecting_fast_tracepoint);
2453
2454 trace_event = 1;
2455
2456 event_child->collecting_fast_tracepoint
2457 = linux_fast_tracepoint_collecting (event_child, NULL);
2458
2459 if (event_child->collecting_fast_tracepoint != 1)
2460 {
2461 /* No longer need this breakpoint. */
2462 if (event_child->exit_jump_pad_bkpt != NULL)
2463 {
2464 if (debug_threads)
2465 fprintf (stderr,
2466 "No longer need exit-jump-pad bkpt; removing it."
2467 "stopping all threads momentarily.\n");
2468
2469 /* Other running threads could hit this breakpoint.
2470 We don't handle moribund locations like GDB does,
2471 instead we always pause all threads when removing
2472 breakpoints, so that any step-over or
2473 decr_pc_after_break adjustment is always taken
2474 care of while the breakpoint is still
2475 inserted. */
2476 stop_all_lwps (1, event_child);
2477 cancel_breakpoints ();
2478
2479 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2480 event_child->exit_jump_pad_bkpt = NULL;
2481
2482 unstop_all_lwps (1, event_child);
2483
2484 gdb_assert (event_child->suspended >= 0);
2485 }
2486 }
2487
2488 if (event_child->collecting_fast_tracepoint == 0)
2489 {
2490 if (debug_threads)
2491 fprintf (stderr,
2492 "fast tracepoint finished "
2493 "collecting successfully.\n");
2494
2495 /* We may have a deferred signal to report. */
2496 if (dequeue_one_deferred_signal (event_child, &w))
2497 {
2498 if (debug_threads)
2499 fprintf (stderr, "dequeued one signal.\n");
2500 }
2501 else
2502 {
2503 if (debug_threads)
2504 fprintf (stderr, "no deferred signals.\n");
2505
2506 if (stabilizing_threads)
2507 {
2508 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2509 ourstatus->value.sig = TARGET_SIGNAL_0;
2510 return ptid_of (event_child);
2511 }
2512 }
2513 }
2514 }
2515
2516 /* Check whether GDB would be interested in this event. */
2517
2518 /* If GDB is not interested in this signal, don't stop other
2519 threads, and don't report it to GDB. Just resume the inferior
2520 right away. We do this for threading-related signals as well as
2521 any that GDB specifically requested we ignore. But never ignore
2522 SIGSTOP if we sent it ourselves, and do not ignore signals when
2523 stepping - they may require special handling to skip the signal
2524 handler. */
2525 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2526 thread library? */
2527 if (WIFSTOPPED (w)
2528 && current_inferior->last_resume_kind != resume_step
2529 && (
2530 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2531 (current_process ()->private->thread_db != NULL
2532 && (WSTOPSIG (w) == __SIGRTMIN
2533 || WSTOPSIG (w) == __SIGRTMIN + 1))
2534 ||
2535 #endif
2536 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2537 && !(WSTOPSIG (w) == SIGSTOP
2538 && current_inferior->last_resume_kind == resume_stop))))
2539 {
2540 siginfo_t info, *info_p;
2541
2542 if (debug_threads)
2543 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2544 WSTOPSIG (w), lwpid_of (event_child));
2545
2546 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2547 info_p = &info;
2548 else
2549 info_p = NULL;
2550 linux_resume_one_lwp (event_child, event_child->stepping,
2551 WSTOPSIG (w), info_p);
2552 goto retry;
2553 }
2554
2555 /* If GDB wanted this thread to single step, we always want to
2556 report the SIGTRAP, and let GDB handle it. Watchpoints should
2557 always be reported. So should signals we can't explain. A
2558 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2559 may not support Z0 breakpoints. If we do, we'll be able to handle
2560 GDB breakpoints on top of internal breakpoints, by handling the
2561 internal breakpoint and still reporting the event to GDB. If we
2562 don't, we're out of luck; GDB won't see the breakpoint hit. */
2563 report_to_gdb = (!maybe_internal_trap
2564 || current_inferior->last_resume_kind == resume_step
2565 || event_child->stopped_by_watchpoint
2566 || (!step_over_finished
2567 && !bp_explains_trap && !trace_event)
2568 || (gdb_breakpoint_here (event_child->stop_pc)
2569 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
2570
2571 /* We found no reason GDB would want us to stop. We either hit one
2572 of our own breakpoints, or finished an internal step GDB
2573 shouldn't know about. */
2574 if (!report_to_gdb)
2575 {
2576 if (debug_threads)
2577 {
2578 if (bp_explains_trap)
2579 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2580 if (step_over_finished)
2581 fprintf (stderr, "Step-over finished.\n");
2582 if (trace_event)
2583 fprintf (stderr, "Tracepoint event.\n");
2584 }
2585
2586 /* We're not reporting this breakpoint to GDB, so apply the
2587 decr_pc_after_break adjustment to the inferior's regcache
2588 ourselves. */
2589
2590 if (the_low_target.set_pc != NULL)
2591 {
2592 struct regcache *regcache
2593 = get_thread_regcache (get_lwp_thread (event_child), 1);
2594 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2595 }
2596
2597 /* We may have finished stepping over a breakpoint. If so,
2598 we've stopped and suspended all LWPs momentarily except the
2599 stepping one. This is where we resume them all again. We're
2600 going to keep waiting, so use proceed, which handles stepping
2601 over the next breakpoint. */
2602 if (debug_threads)
2603 fprintf (stderr, "proceeding all threads.\n");
2604
2605 if (step_over_finished)
2606 unsuspend_all_lwps (event_child);
2607
2608 proceed_all_lwps ();
2609 goto retry;
2610 }
2611
2612 if (debug_threads)
2613 {
2614 if (current_inferior->last_resume_kind == resume_step)
2615 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2616 if (event_child->stopped_by_watchpoint)
2617 fprintf (stderr, "Stopped by watchpoint.\n");
2618 if (gdb_breakpoint_here (event_child->stop_pc))
2619 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2621 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2622 }
2623
2624 /* Alright, we're going to report a stop. */
2625
2626 if (!non_stop && !stabilizing_threads)
2627 {
2628 /* In all-stop, stop all threads. */
2629 stop_all_lwps (0, NULL);
2630
2631 /* If we're not waiting for a specific LWP, choose an event LWP
2632 from among those that have had events. Giving equal priority
2633 to all LWPs that have had events helps prevent
2634 starvation. */
2635 if (ptid_equal (ptid, minus_one_ptid))
2636 {
2637 event_child->status_pending_p = 1;
2638 event_child->status_pending = w;
2639
2640 select_event_lwp (&event_child);
2641
2642 event_child->status_pending_p = 0;
2643 w = event_child->status_pending;
2644 }
2645
2646 /* Now that we've selected our final event LWP, cancel any
2647 breakpoints in other LWPs that have hit a GDB breakpoint.
2648 See the comment in cancel_breakpoints_callback to find out
2649 why. */
2650 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2651
2652 /* If we were doing a step-over, all other threads but the stepping one
2653 had been paused in start_step_over, with their suspend counts
2654 incremented. We don't want to do a full unstop/unpause, because we're
2655 in all-stop mode (so we want threads stopped), but we still need to
2656 unsuspend the other threads, to decrement their `suspended' count
2657 back. */
2658 if (step_over_finished)
2659 unsuspend_all_lwps (event_child);
2660
2661 /* Stabilize threads (move out of jump pads). */
2662 stabilize_threads ();
2663 }
2664 else
2665 {
2666 /* If we just finished a step-over, then all threads had been
2667 momentarily paused. In all-stop, that's fine, we want
2668 threads stopped by now anyway. In non-stop, we need to
2669 re-resume threads that GDB wanted to be running. */
2670 if (step_over_finished)
2671 unstop_all_lwps (1, event_child);
2672 }
2673
2674 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2675
2676 if (current_inferior->last_resume_kind == resume_stop
2677 && WSTOPSIG (w) == SIGSTOP)
2678 {
2679 /* A thread that GDB requested to stop with vCont;t stopped
2680 cleanly, so report it as SIG0. The use of SIGSTOP is an
2681 implementation detail. */
2682 ourstatus->value.sig = TARGET_SIGNAL_0;
2683 }
2684 else if (current_inferior->last_resume_kind == resume_stop
2685 && WSTOPSIG (w) != SIGSTOP)
2686 {
2687 /* A thread that GDB requested to stop with vCont;t, but which
2688 stopped for other reasons. */
2689 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2690 }
2691 else
2692 {
2693 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2694 }
2695
2696 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2697
2698 if (debug_threads)
2699 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2700 target_pid_to_str (ptid_of (event_child)),
2701 ourstatus->kind,
2702 ourstatus->value.sig);
2703
2704 return ptid_of (event_child);
2705 }
2706
2707 /* Get rid of any pending event in the pipe. */
2708 static void
2709 async_file_flush (void)
2710 {
2711 int ret;
2712 char buf;
2713
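/* Drain the pipe one byte at a time; the loop exits once the
   non-blocking read reports no more data, retrying on EINTR. */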
2714 do
2715 ret = read (linux_event_pipe[0], &buf, 1);
2716 while (ret >= 0 || (ret == -1 && errno == EINTR));
2717 }
2718
2719 /* Put something in the pipe, so the event loop wakes up. */
2720 static void
2721 async_file_mark (void)
2722 {
2723 int ret;
2724
2725 async_file_flush ();
2726
2727 do
2728 ret = write (linux_event_pipe[1], "+", 1);
2729 while (ret == 0 || (ret == -1 && errno == EINTR));
2730
2731 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2732 be awakened anyway. */
2733 }
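/* Illustrative sketch (not part of the original file) of how a
   wakeup pipe like linux_event_pipe is typically created: both ends
   are made non-blocking, so async_file_flush can drain it without
   hanging and async_file_mark can safely ignore a full pipe.  */
#if 0
static int
create_event_pipe_example (int fds[2])
{
  int i;

  if (pipe (fds) != 0)
    return -1;

  /* Make both ends non-blocking, preserving any existing flags.  */
  for (i = 0; i < 2; i++)
    if (fcntl (fds[i], F_SETFL, fcntl (fds[i], F_GETFL) | O_NONBLOCK) != 0)
      return -1;

  return 0;
}
#endif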
2734
2735 static ptid_t
2736 linux_wait (ptid_t ptid,
2737 struct target_waitstatus *ourstatus, int target_options)
2738 {
2739 ptid_t event_ptid;
2740
2741 if (debug_threads)
2742 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2743
2744 /* Flush the async file first. */
2745 if (target_is_async_p ())
2746 async_file_flush ();
2747
2748 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2749
2750 /* If at least one stop was reported, there may be more. A single
2751 SIGCHLD can signal more than one child stop. */
2752 if (target_is_async_p ()
2753 && (target_options & TARGET_WNOHANG) != 0
2754 && !ptid_equal (event_ptid, null_ptid))
2755 async_file_mark ();
2756
2757 return event_ptid;
2758 }
2759
2760 /* Send a signal to an LWP. */
2761
2762 static int
2763 kill_lwp (unsigned long lwpid, int signo)
2764 {
2765 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2766 fails, then we are not using nptl threads and we should be using kill. */
2767
2768 #ifdef __NR_tkill
2769 {
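/* Latched once the tkill syscall is known to be unavailable
   (ENOSYS), so we don't retry it for every signal we send. */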
2770 static int tkill_failed;
2771
2772 if (!tkill_failed)
2773 {
2774 int ret;
2775
2776 errno = 0;
2777 ret = syscall (__NR_tkill, lwpid, signo);
2778 if (errno != ENOSYS)
2779 return ret;
2780 tkill_failed = 1;
2781 }
2782 }
2783 #endif
2784
2785 return kill (lwpid, signo);
2786 }
2787
2788 void
2789 linux_stop_lwp (struct lwp_info *lwp)
2790 {
2791 send_sigstop (lwp);
2792 }
2793
2794 static void
2795 send_sigstop (struct lwp_info *lwp)
2796 {
2797 int pid;
2798
2799 pid = lwpid_of (lwp);
2800
2801 /* If we already have a pending stop signal for this process, don't
2802 send another. */
2803 if (lwp->stop_expected)
2804 {
2805 if (debug_threads)
2806 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2807
2808 return;
2809 }
2810
2811 if (debug_threads)
2812 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2813
2814 lwp->stop_expected = 1;
2815 kill_lwp (pid, SIGSTOP);
2816 }
2817
2818 static int
2819 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2820 {
2821 struct lwp_info *lwp = (struct lwp_info *) entry;
2822
2823 /* Ignore EXCEPT. */
2824 if (lwp == except)
2825 return 0;
2826
2827 if (lwp->stopped)
2828 return 0;
2829
2830 send_sigstop (lwp);
2831 return 0;
2832 }
2833
2834 /* Increment the suspend count of an LWP, and stop it if it is not
2835 already stopped. */
2836 static int
2837 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2838 void *except)
2839 {
2840 struct lwp_info *lwp = (struct lwp_info *) entry;
2841
2842 /* Ignore EXCEPT. */
2843 if (lwp == except)
2844 return 0;
2845
2846 lwp->suspended++;
2847
2848 return send_sigstop_callback (entry, except);
2849 }
2850
2851 static void
2852 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2853 {
2854 /* It's dead, really. */
2855 lwp->dead = 1;
2856
2857 /* Store the exit status for later. */
2858 lwp->status_pending_p = 1;
2859 lwp->status_pending = wstat;
2860
2861 /* Prevent trying to stop it. */
2862 lwp->stopped = 1;
2863
2864 /* No further stops are expected from a dead lwp. */
2865 lwp->stop_expected = 0;
2866 }
2867
2868 static void
2869 wait_for_sigstop (struct inferior_list_entry *entry)
2870 {
2871 struct lwp_info *lwp = (struct lwp_info *) entry;
2872 struct thread_info *saved_inferior;
2873 int wstat;
2874 ptid_t saved_tid;
2875 ptid_t ptid;
2876 int pid;
2877
2878 if (lwp->stopped)
2879 {
2880 if (debug_threads)
2881 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2882 lwpid_of (lwp));
2883 return;
2884 }
2885
2886 saved_inferior = current_inferior;
2887 if (saved_inferior != NULL)
2888 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2889 else
2890 saved_tid = null_ptid; /* avoid bogus unused warning */
2891
2892 ptid = lwp->head.id;
2893
2894 if (debug_threads)
2895 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2896
2897 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2898
2899 /* If we stopped with a non-SIGSTOP signal, save it for later
2900 and record the pending SIGSTOP. If the process exited, just
2901 return. */
2902 if (WIFSTOPPED (wstat))
2903 {
2904 if (debug_threads)
2905 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2906 lwpid_of (lwp), WSTOPSIG (wstat));
2907
2908 if (WSTOPSIG (wstat) != SIGSTOP)
2909 {
2910 if (debug_threads)
2911 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2912 lwpid_of (lwp), wstat);
2913
2914 lwp->status_pending_p = 1;
2915 lwp->status_pending = wstat;
2916 }
2917 }
2918 else
2919 {
2920 if (debug_threads)
2921 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2922
2923 lwp = find_lwp_pid (pid_to_ptid (pid));
2924 if (lwp)
2925 {
2926 /* Leave this status pending for the next time we're able to
2927 report it. In the meantime, we'll report this lwp as
2928 dead to GDB, so GDB doesn't try to read registers and
2929 memory from it. This can only happen if this was the
2930 last thread of the process; otherwise, PID is removed
2931 from the thread tables before linux_wait_for_event
2932 returns. */
2933 mark_lwp_dead (lwp, wstat);
2934 }
2935 }
2936
2937 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2938 current_inferior = saved_inferior;
2939 else
2940 {
2941 if (debug_threads)
2942 fprintf (stderr, "Previously current thread died.\n");
2943
2944 if (non_stop)
2945 {
2946 /* We can't change the current inferior behind GDB's back,
2947 otherwise, a subsequent command may apply to the wrong
2948 process. */
2949 current_inferior = NULL;
2950 }
2951 else
2952 {
2953 /* Set a valid thread as current. */
2954 set_desired_inferior (0);
2955 }
2956 }
2957 }
2958
2959 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2960 move it out, because we need to report the stop event to GDB. For
2961 example, if the user puts a breakpoint in the jump pad, it's
2962 because she wants to debug it. */
2963
2964 static int
2965 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2966 {
2967 struct lwp_info *lwp = (struct lwp_info *) entry;
2968 struct thread_info *thread = get_lwp_thread (lwp);
2969
2970 gdb_assert (lwp->suspended == 0);
2971 gdb_assert (lwp->stopped);
2972
2973 /* Allow debugging the jump pad, gdb_collect, etc. */
2974 return (supports_fast_tracepoints ()
2975 && agent_loaded_p ()
2976 && (gdb_breakpoint_here (lwp->stop_pc)
2977 || lwp->stopped_by_watchpoint
2978 || thread->last_resume_kind == resume_step)
2979 && linux_fast_tracepoint_collecting (lwp, NULL));
2980 }
2981
2982 static void
2983 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2984 {
2985 struct lwp_info *lwp = (struct lwp_info *) entry;
2986 struct thread_info *thread = get_lwp_thread (lwp);
2987 int *wstat;
2988
2989 gdb_assert (lwp->suspended == 0);
2990 gdb_assert (lwp->stopped);
2991
2992 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2993
2994 /* Allow debugging the jump pad, gdb_collect, etc. */
2995 if (!gdb_breakpoint_here (lwp->stop_pc)
2996 && !lwp->stopped_by_watchpoint
2997 && thread->last_resume_kind != resume_step
2998 && maybe_move_out_of_jump_pad (lwp, wstat))
2999 {
3000 if (debug_threads)
3001 fprintf (stderr,
3002 "LWP %ld needs stabilizing (in jump pad)\n",
3003 lwpid_of (lwp));
3004
3005 if (wstat)
3006 {
3007 lwp->status_pending_p = 0;
3008 enqueue_one_deferred_signal (lwp, wstat);
3009
3010 if (debug_threads)
3011 fprintf (stderr,
3012 "Signal %d for LWP %ld deferred "
3013 "(in jump pad)\n",
3014 WSTOPSIG (*wstat), lwpid_of (lwp));
3015 }
3016
3017 linux_resume_one_lwp (lwp, 0, 0, NULL);
3018 }
3019 else
3020 lwp->suspended++;
3021 }
3022
3023 static int
3024 lwp_running (struct inferior_list_entry *entry, void *data)
3025 {
3026 struct lwp_info *lwp = (struct lwp_info *) entry;
3027
3028 if (lwp->dead)
3029 return 0;
3030 if (lwp->stopped)
3031 return 0;
3032 return 1;
3033 }
3034
3035 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3036 If SUSPEND, then also increase the suspend count of every LWP,
3037 except EXCEPT. */
3038
3039 static void
3040 stop_all_lwps (int suspend, struct lwp_info *except)
3041 {
3042 stopping_threads = 1;
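/* While this global is set, expected SIGSTOPs seen by
   linux_wait_for_event hold their threads stopped instead of being
   resumed (see the should_stop check there). */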
3043
3044 if (suspend)
3045 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3046 else
3047 find_inferior (&all_lwps, send_sigstop_callback, except);
3048 for_each_inferior (&all_lwps, wait_for_sigstop);
3049 stopping_threads = 0;
3050 }
3051
3052 /* Resume execution of the inferior process.
3053 If STEP is nonzero, single-step it.
3054 If SIGNAL is nonzero, give it that signal. */
3055
3056 static void
3057 linux_resume_one_lwp (struct lwp_info *lwp,
3058 int step, int signal, siginfo_t *info)
3059 {
3060 struct thread_info *saved_inferior;
3061 int fast_tp_collecting;
3062
3063 if (lwp->stopped == 0)
3064 return;
3065
3066 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3067
3068 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3069
3070 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3071 user used the "jump" command, or "set $pc = foo"). */
3072 if (lwp->stop_pc != get_pc (lwp))
3073 {
3074 /* Collecting 'while-stepping' actions doesn't make sense
3075 anymore. */
3076 release_while_stepping_state_list (get_lwp_thread (lwp));
3077 }
3078
3079 /* If we have pending signals or status, and a new signal, enqueue the
3080 signal. Also enqueue the signal if we are waiting to reinsert a
3081 breakpoint; it will be picked up again below. */
3082 if (signal != 0
3083 && (lwp->status_pending_p
3084 || lwp->pending_signals != NULL
3085 || lwp->bp_reinsert != 0
3086 || fast_tp_collecting))
3087 {
3088 struct pending_signals *p_sig;
3089 p_sig = xmalloc (sizeof (*p_sig));
3090 p_sig->prev = lwp->pending_signals;
3091 p_sig->signal = signal;
3092 if (info == NULL)
3093 memset (&p_sig->info, 0, sizeof (siginfo_t));
3094 else
3095 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3096 lwp->pending_signals = p_sig;
3097 }
3098
3099 if (lwp->status_pending_p)
3100 {
3101 if (debug_threads)
3102 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3103 " has pending status\n",
3104 lwpid_of (lwp), step ? "step" : "continue", signal,
3105 lwp->stop_expected ? "expected" : "not expected");
3106 return;
3107 }
3108
3109 saved_inferior = current_inferior;
3110 current_inferior = get_lwp_thread (lwp);
3111
3112 if (debug_threads)
3113 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3114 lwpid_of (lwp), step ? "step" : "continue", signal,
3115 lwp->stop_expected ? "expected" : "not expected");
3116
3117 /* This bit needs some thinking about. If we get a signal that
3118 we must report while a single-step reinsert is still pending,
3119 we often end up resuming the thread. It might be better to
3120 (ew) allow a stack of pending events; then we could be sure that
3121 the reinsert happened right away and not lose any signals.
3122
3123 Making this stack would also shrink the window in which breakpoints are
3124 uninserted (see comment in linux_wait_for_lwp) but not enough for
3125 complete correctness, so it won't solve that problem. It may be
3126 worthwhile just to solve this one, however. */
3127 if (lwp->bp_reinsert != 0)
3128 {
3129 if (debug_threads)
3130 fprintf (stderr, " pending reinsert at 0x%s\n",
3131 paddress (lwp->bp_reinsert));
3132
3133 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3134 {
3135 if (fast_tp_collecting == 0)
3136 {
3137 if (step == 0)
3138 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3139 if (lwp->suspended)
3140 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3141 lwp->suspended);
3142 }
3143
3144 step = 1;
3145 }
3146
3147 /* Postpone any pending signal. It was enqueued above. */
3148 signal = 0;
3149 }
3150
3151 if (fast_tp_collecting == 1)
3152 {
3153 if (debug_threads)
3154 fprintf (stderr, "\
3155 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3156 lwpid_of (lwp));
3157
3158 /* Postpone any pending signal. It was enqueued above. */
3159 signal = 0;
3160 }
3161 else if (fast_tp_collecting == 2)
3162 {
3163 if (debug_threads)
3164 fprintf (stderr, "\
3165 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3166 lwpid_of (lwp));
3167
3168 if (can_hardware_single_step ())
3169 step = 1;
3170 else
3171 fatal ("moving out of jump pad single-stepping"
3172 " not implemented on this target");
3173
3174 /* Postpone any pending signal. It was enqueued above. */
3175 signal = 0;
3176 }
3177
3178 /* If we have while-stepping actions in this thread, set it stepping.
3179 If we have a signal to deliver, it may or may not be set to
3180 SIG_IGN; we don't know. Assume it is, and allow collecting
3181 while-stepping into a signal handler. A possible smart thing to
3182 do would be to set an internal breakpoint at the signal return
3183 address, continue, and carry on catching this while-stepping
3184 action only when that breakpoint is hit. A future
3185 enhancement. */
3186 if (get_lwp_thread (lwp)->while_stepping != NULL
3187 && can_hardware_single_step ())
3188 {
3189 if (debug_threads)
3190 fprintf (stderr,
3191 "lwp %ld has a while-stepping action -> forcing step.\n",
3192 lwpid_of (lwp));
3193 step = 1;
3194 }
3195
3196 if (debug_threads && the_low_target.get_pc != NULL)
3197 {
3198 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3199 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3200 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3201 }
3202
3203 /* If we have pending signals, consume one unless we are trying to
3204 reinsert a breakpoint or we're trying to finish a fast tracepoint
3205 collect. */
3206 if (lwp->pending_signals != NULL
3207 && lwp->bp_reinsert == 0
3208 && fast_tp_collecting == 0)
3209 {
3210 struct pending_signals **p_sig;
3211
3212 p_sig = &lwp->pending_signals;
3213 while ((*p_sig)->prev != NULL)
3214 p_sig = &(*p_sig)->prev;
3215
3216 signal = (*p_sig)->signal;
3217 if ((*p_sig)->info.si_signo != 0)
3218 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3219
3220 free (*p_sig);
3221 *p_sig = NULL;
3222 }
3223
3224 if (the_low_target.prepare_to_resume != NULL)
3225 the_low_target.prepare_to_resume (lwp);
3226
3227 regcache_invalidate_one ((struct inferior_list_entry *)
3228 get_lwp_thread (lwp));
3229 errno = 0;
3230 lwp->stopped = 0;
3231 lwp->stopped_by_watchpoint = 0;
3232 lwp->stepping = step;
3233 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3234 /* Coerce to a uintptr_t first to avoid potential gcc warning
3235 of coercing an 8 byte integer to a 4 byte pointer. */
3236 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3237
3238 current_inferior = saved_inferior;
3239 if (errno)
3240 {
3241 /* ESRCH from ptrace either means that the thread was already
3242 running (an error) or that it is gone (a race condition). If
3243 it's gone, we will get a notification the next time we wait,
3244 so we can ignore the error. We could differentiate these
3245 two, but it's tricky without waiting; the thread still exists
3246 as a zombie, so sending it signal 0 would succeed. So just
3247 ignore ESRCH. */
3248 if (errno == ESRCH)
3249 return;
3250
3251 perror_with_name ("ptrace");
3252 }
3253 }
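/* Illustrative sketch (not part of the original file) of the pending
   signal queue manipulated above: entries are pushed at the head and
   linked through PREV, so the oldest signal is found by walking to
   the end of the chain, and dequeuing from there preserves delivery
   order.  The _example types and names are hypothetical; it assumes
   *HEAD is non-NULL, as the real caller checks.  */
#if 0
struct pending_signals_example
{
  int signal;
  struct pending_signals_example *prev;
};

static int
dequeue_oldest_signal_example (struct pending_signals_example **head)
{
  struct pending_signals_example **p = head;
  int sig;

  /* Walk to the oldest entry, i.e. the tail of the PREV chain.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif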
3254
3255 struct thread_resume_array
3256 {
3257 struct thread_resume *resume;
3258 size_t n;
3259 };
3260
3261 /* This function is called once per thread. We look up the thread
3262 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3263 resume request.
3264
3265 This algorithm is O(threads * resume elements), but the number of
3266 resume elements is small (and will remain small at least until
3267 GDB supports thread suspension). */
3268 static int
3269 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3270 {
3271 struct lwp_info *lwp;
3272 struct thread_info *thread;
3273 int ndx;
3274 struct thread_resume_array *r;
3275
3276 thread = (struct thread_info *) entry;
3277 lwp = get_thread_lwp (thread);
3278 r = arg;
3279
3280 for (ndx = 0; ndx < r->n; ndx++)
3281 {
3282 ptid_t ptid = r->resume[ndx].thread;
3283 if (ptid_equal (ptid, minus_one_ptid)
3284 || ptid_equal (ptid, entry->id)
3285 || (ptid_is_pid (ptid)
3286 && (ptid_get_pid (ptid) == pid_of (lwp)))
3287 || (ptid_get_lwp (ptid) == -1
3288 && (ptid_get_pid (ptid) == pid_of (lwp))))
3289 {
3290 if (r->resume[ndx].kind == resume_stop
3291 && thread->last_resume_kind == resume_stop)
3292 {
3293 if (debug_threads)
3294 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3295 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3296 ? "stopped"
3297 : "stopping",
3298 lwpid_of (lwp));
3299
3300 continue;
3301 }
3302
3303 lwp->resume = &r->resume[ndx];
3304 thread->last_resume_kind = lwp->resume->kind;
3305
3306 /* If we had a deferred signal to report, dequeue one now.
3307 This can happen if LWP gets more than one signal while
3308 trying to get out of a jump pad. */
3309 if (lwp->stopped
3310 && !lwp->status_pending_p
3311 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3312 {
3313 lwp->status_pending_p = 1;
3314
3315 if (debug_threads)
3316 fprintf (stderr,
3317 "Dequeueing deferred signal %d for LWP %ld, "
3318 "leaving status pending.\n",
3319 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3320 }
3321
3322 return 0;
3323 }
3324 }
3325
3326 /* No resume action for this thread. */
3327 lwp->resume = NULL;
3328
3329 return 0;
3330 }
3331
3332
3333 /* Set *FLAG_P if this lwp has an interesting status pending. */
3334 static int
3335 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3336 {
3337 struct lwp_info *lwp = (struct lwp_info *) entry;
3338
3339 /* LWPs which will not be resumed are not interesting, because
3340 we might not wait for them next time through linux_wait. */
3341 if (lwp->resume == NULL)
3342 return 0;
3343
3344 if (lwp->status_pending_p)
3345 * (int *) flag_p = 1;
3346
3347 return 0;
3348 }
3349
3350 /* Return 1 if this lwp, which GDB wants running, is stopped at an
3351 internal breakpoint that we need to step over. This assumes that
3352 any required STOP_PC adjustment has already been propagated to
3353 the inferior's regcache. */
3354
3355 static int
3356 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3357 {
3358 struct lwp_info *lwp = (struct lwp_info *) entry;
3359 struct thread_info *thread;
3360 struct thread_info *saved_inferior;
3361 CORE_ADDR pc;
3362
3363 /* LWPs which will not be resumed are not interesting, because we
3364 might not wait for them next time through linux_wait. */
3365
3366 if (!lwp->stopped)
3367 {
3368 if (debug_threads)
3369 fprintf (stderr,
3370 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3371 lwpid_of (lwp));
3372 return 0;
3373 }
3374
3375 thread = get_lwp_thread (lwp);
3376
3377 if (thread->last_resume_kind == resume_stop)
3378 {
3379 if (debug_threads)
3380 fprintf (stderr,
3381 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3382 lwpid_of (lwp));
3383 return 0;
3384 }
3385
3386 gdb_assert (lwp->suspended >= 0);
3387
3388 if (lwp->suspended)
3389 {
3390 if (debug_threads)
3391 fprintf (stderr,
3392 "Need step over [LWP %ld]? Ignoring, suspended\n",
3393 lwpid_of (lwp));
3394 return 0;
3395 }
3396
3397 if (!lwp->need_step_over)
3398 {
3399 if (debug_threads)
3400 fprintf (stderr,
3401 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3402 }
3403
3404 if (lwp->status_pending_p)
3405 {
3406 if (debug_threads)
3407 fprintf (stderr,
3408 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3409 lwpid_of (lwp));
3410 return 0;
3411 }
3412
3413 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3414 or we have. */
3415 pc = get_pc (lwp);
3416
3417 /* If the PC has changed since we stopped, then don't do anything,
3418 and let the breakpoint/tracepoint be hit. This happens if, for
3419 instance, GDB handled the decr_pc_after_break subtraction itself,
3420 GDB is OOL stepping this thread, or the user has issued a "jump"
3421 command, or poked the thread's registers herself. */
3422 if (pc != lwp->stop_pc)
3423 {
3424 if (debug_threads)
3425 fprintf (stderr,
3426 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3427 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3428 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3429
3430 lwp->need_step_over = 0;
3431 return 0;
3432 }
3433
3434 saved_inferior = current_inferior;
3435 current_inferior = thread;
3436
3437 /* We can only step over breakpoints we know about. */
3438 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3439 {
3440 /* Don't step over a breakpoint that GDB expects to hit,
3441 though. If the condition is being evaluated on the target's side
3442 and it evaluates to false, step over this breakpoint as well. */
3443 if (gdb_breakpoint_here (pc)
3444 && gdb_condition_true_at_breakpoint (pc))
3445 {
3446 if (debug_threads)
3447 fprintf (stderr,
3448 "Need step over [LWP %ld]? yes, but found"
3449 " GDB breakpoint at 0x%s; skipping step over\n",
3450 lwpid_of (lwp), paddress (pc));
3451
3452 current_inferior = saved_inferior;
3453 return 0;
3454 }
3455 else
3456 {
3457 if (debug_threads)
3458 fprintf (stderr,
3459 "Need step over [LWP %ld]? yes, "
3460 "found breakpoint at 0x%s\n",
3461 lwpid_of (lwp), paddress (pc));
3462
3463 /* We've found an lwp that needs stepping over --- return 1 so
3464 that find_inferior stops looking. */
3465 current_inferior = saved_inferior;
3466
3467 /* If the step over is cancelled, this is set again. */
3468 lwp->need_step_over = 0;
3469 return 1;
3470 }
3471 }
3472
3473 current_inferior = saved_inferior;
3474
3475 if (debug_threads)
3476 fprintf (stderr,
3477 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3478 lwpid_of (lwp), paddress (pc));
3479
3480 return 0;
3481 }
3482
3483 /* Start a step-over operation on LWP. When LWP is stopped at a
3484 breakpoint, to make progress, we need to move the breakpoint out
3485 of the way. If we let other threads run while we do that, they may
3486 pass by the breakpoint location and miss hitting it. To avoid
3487 that, a step-over momentarily stops all threads while LWP is
3488 single-stepped with the breakpoint temporarily uninserted from
3489 the inferior. When the single-step finishes, we reinsert the
3490 breakpoint, and let all threads that are supposed to be running
3491 run again.
3492
3493 On targets that don't support hardware single-step, we don't
3494 currently support full software single-stepping. Instead, we only
3495 support stepping over the thread event breakpoint, by asking the
3496 low target where to place a reinsert breakpoint. Since this
3497 routine assumes the breakpoint being stepped over is a thread event
3498 breakpoint, it usually assumes the return address of the current
3499 function is a good enough place to set the reinsert breakpoint. */
3500
3501 static int
3502 start_step_over (struct lwp_info *lwp)
3503 {
3504 struct thread_info *saved_inferior;
3505 CORE_ADDR pc;
3506 int step;
3507
3508 if (debug_threads)
3509 fprintf (stderr,
3510 "Starting step-over on LWP %ld. Stopping all threads\n",
3511 lwpid_of (lwp));
3512
3513 stop_all_lwps (1, lwp);
3514 gdb_assert (lwp->suspended == 0);
3515
3516 if (debug_threads)
3517 fprintf (stderr, "Done stopping all threads for step-over.\n");
3518
3519 /* Note, we should always reach here with an already adjusted PC,
3520 either by GDB (if we're resuming due to GDB's request), or by our
3521 caller, if we just finished handling an internal breakpoint GDB
3522 shouldn't care about. */
3523 pc = get_pc (lwp);
3524
3525 saved_inferior = current_inferior;
3526 current_inferior = get_lwp_thread (lwp);
3527
3528 lwp->bp_reinsert = pc;
3529 uninsert_breakpoints_at (pc);
3530 uninsert_fast_tracepoint_jumps_at (pc);
3531
3532 if (can_hardware_single_step ())
3533 {
3534 step = 1;
3535 }
3536 else
3537 {
3538 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3539 set_reinsert_breakpoint (raddr);
3540 step = 0;
3541 }
3542
3543 current_inferior = saved_inferior;
3544
3545 linux_resume_one_lwp (lwp, step, 0, NULL);
3546
3547 /* Require next event from this LWP. */
3548 step_over_bkpt = lwp->head.id;
3549 return 1;
3550 }
3551
3552 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3553 start_step_over, if still there, and delete any reinsert
3554 breakpoints we've set, on non-hardware single-step targets. */
3555
3556 static int
3557 finish_step_over (struct lwp_info *lwp)
3558 {
3559 if (lwp->bp_reinsert != 0)
3560 {
3561 if (debug_threads)
3562 fprintf (stderr, "Finished step over.\n");
3563
3564 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3565 may be no breakpoint to reinsert there by now. */
3566 reinsert_breakpoints_at (lwp->bp_reinsert);
3567 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3568
3569 lwp->bp_reinsert = 0;
3570
3571 /* Delete any software-single-step reinsert breakpoints. No
3572 longer needed. We don't have to worry about other threads
3573 hitting this trap, and later not being able to explain it,
3574 because we were stepping over a breakpoint, and we hold all
3575 threads but LWP stopped while doing that. */
3576 if (!can_hardware_single_step ())
3577 delete_reinsert_breakpoints ();
3578
3579 step_over_bkpt = null_ptid;
3580 return 1;
3581 }
3582 else
3583 return 0;
3584 }
3585
3586 /* This function is called once per thread. We check the thread's resume
3587 request, which will tell us whether to resume, step, or leave the thread
3588 stopped; and what signal, if any, it should be sent.
3589
3590 For threads which we aren't explicitly told otherwise, we preserve
3591 the stepping flag; this is used for stepping over gdbserver-placed
3592 breakpoints.
3593
3594 If pending_flags was set in any thread, we queue any needed
3595 signals, since we won't actually resume. We already have a pending
3596 event to report, so we don't need to preserve any step requests;
3597 they should be re-issued if necessary. */
3598
3599 static int
3600 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3601 {
3602 struct lwp_info *lwp;
3603 struct thread_info *thread;
3604 int step;
3605 int leave_all_stopped = * (int *) arg;
3606 int leave_pending;
3607
3608 thread = (struct thread_info *) entry;
3609 lwp = get_thread_lwp (thread);
3610
3611 if (lwp->resume == NULL)
3612 return 0;
3613
3614 if (lwp->resume->kind == resume_stop)
3615 {
3616 if (debug_threads)
3617 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3618
3619 if (!lwp->stopped)
3620 {
3621 if (debug_threads)
3622 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3623
3624 /* Stop the thread, and wait for the event asynchronously,
3625 through the event loop. */
3626 send_sigstop (lwp);
3627 }
3628 else
3629 {
3630 if (debug_threads)
3631 fprintf (stderr, "already stopped LWP %ld\n",
3632 lwpid_of (lwp));
3633
3634 /* The LWP may have been stopped in an internal event that
3635 was not meant to be notified back to GDB (e.g., gdbserver
3636 breakpoint), so we should be reporting a stop event in
3637 this case too. */
3638
3639 /* If the thread already has a pending SIGSTOP, this is a
3640 no-op. Otherwise, something later will presumably resume
3641 the thread and this will cause it to cancel any pending
3642 operation, due to last_resume_kind == resume_stop. If
3643 the thread already has a pending status to report, we
3644 will still report it the next time we wait - see
3645 status_pending_p_callback. */
3646
3647 /* If we already have a pending signal to report, then
3648 there's no need to queue a SIGSTOP, as this means we're
3649 midway through moving the LWP out of the jumppad, and we
3650 will report the pending signal as soon as that is
3651 finished. */
3652 if (lwp->pending_signals_to_report == NULL)
3653 send_sigstop (lwp);
3654 }
3655
3656 /* For stop requests, we're done. */
3657 lwp->resume = NULL;
3658 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3659 return 0;
3660 }
3661
3662 /* If this thread, which is about to be resumed, has a pending
3663 status, then don't resume any threads - we can just report the
3664 pending status. Make sure to queue any signals that would
3665 otherwise be sent. In all-stop mode, we make this decision based
3666 on whether *any* thread has a pending status. If there's a thread
3667 that needs the step-over-breakpoint dance, then don't resume any
3668 other thread but that particular one. */
3669 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3670
3671 if (!leave_pending)
3672 {
3673 if (debug_threads)
3674 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3675
3676 step = (lwp->resume->kind == resume_step);
3677 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3678 }
3679 else
3680 {
3681 if (debug_threads)
3682 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3683
3684 /* If we have a new signal, enqueue the signal. */
3685 if (lwp->resume->sig != 0)
3686 {
3687 struct pending_signals *p_sig;
3688 p_sig = xmalloc (sizeof (*p_sig));
3689 p_sig->prev = lwp->pending_signals;
3690 p_sig->signal = lwp->resume->sig;
3691 memset (&p_sig->info, 0, sizeof (siginfo_t));
3692
3693 /* If this is the same signal we were previously stopped by,
3694 make sure to queue its siginfo. We can ignore the return
3695 value of ptrace; if it fails, we'll skip
3696 PTRACE_SETSIGINFO. */
3697 if (WIFSTOPPED (lwp->last_status)
3698 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3699 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3700
3701 lwp->pending_signals = p_sig;
3702 }
3703 }
3704
3705 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3706 lwp->resume = NULL;
3707 return 0;
3708 }
3709
3710 static void
3711 linux_resume (struct thread_resume *resume_info, size_t n)
3712 {
3713 struct thread_resume_array array = { resume_info, n };
3714 struct lwp_info *need_step_over = NULL;
3715 int any_pending;
3716 int leave_all_stopped;
3717
3718 find_inferior (&all_threads, linux_set_resume_request, &array);
3719
3720 /* If there is a thread which would otherwise be resumed, which has
3721 a pending status, then don't resume any threads - we can just
3722 report the pending status. Make sure to queue any signals that
3723 would otherwise be sent. In non-stop mode, we'll apply this
3724 logic to each thread individually. We consume all pending events
3725 before considering starting a step-over (in all-stop). */
3726 any_pending = 0;
3727 if (!non_stop)
3728 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3729
3730 /* If there is a thread which would otherwise be resumed, which is
3731 stopped at a breakpoint that needs stepping over, then don't
3732 resume any threads - have it step over the breakpoint with all
3733 other threads stopped, then resume all threads again. Make sure
3734 to queue any signals that would otherwise be delivered or
3735 queued. */
3736 if (!any_pending && supports_breakpoints ())
3737 need_step_over
3738 = (struct lwp_info *) find_inferior (&all_lwps,
3739 need_step_over_p, NULL);
3740
3741 leave_all_stopped = (need_step_over != NULL || any_pending);
3742
3743 if (debug_threads)
3744 {
3745 if (need_step_over != NULL)
3746 fprintf (stderr, "Not resuming all, need step over\n");
3747 else if (any_pending)
3748 fprintf (stderr,
3749 "Not resuming, all-stop and found "
3750 "an LWP with pending status\n");
3751 else
3752 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3753 }
3754
3755 /* Even if we're leaving threads stopped, queue all signals we'd
3756 otherwise deliver. */
3757 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3758
3759 if (need_step_over)
3760 start_step_over (need_step_over);
3761 }
3762
3763 /* This function is called once per thread. We check the thread's
3764 last resume request, which will tell us whether to resume, step, or
3765 leave the thread stopped. Any signal the client requested to be
3766 delivered has already been enqueued at this point.
3767
3768 If any thread that GDB wants running is stopped at an internal
3769 breakpoint that needs stepping over, we start a step-over operation
3770 on that particular thread, and leave all others stopped. */
3771
3772 static int
3773 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3774 {
3775 struct lwp_info *lwp = (struct lwp_info *) entry;
3776 struct thread_info *thread;
3777 int step;
3778
3779 if (lwp == except)
3780 return 0;
3781
3782 if (debug_threads)
3783 fprintf (stderr,
3784 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3785
3786 if (!lwp->stopped)
3787 {
3788 if (debug_threads)
3789 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3790 return 0;
3791 }
3792
3793 thread = get_lwp_thread (lwp);
3794
3795 if (thread->last_resume_kind == resume_stop
3796 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3797 {
3798 if (debug_threads)
3799 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3800 lwpid_of (lwp));
3801 return 0;
3802 }
3803
3804 if (lwp->status_pending_p)
3805 {
3806 if (debug_threads)
3807 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3808 lwpid_of (lwp));
3809 return 0;
3810 }
3811
3812 gdb_assert (lwp->suspended >= 0);
3813
3814 if (lwp->suspended)
3815 {
3816 if (debug_threads)
3817 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3818 return 0;
3819 }
3820
3821 if (thread->last_resume_kind == resume_stop
3822 && lwp->pending_signals_to_report == NULL
3823 && lwp->collecting_fast_tracepoint == 0)
3824 {
3825 /* We haven't reported this LWP as stopped yet (otherwise, the
3826 last_status.kind check above would catch it, and we wouldn't
3827 reach here). This LWP may have been momentarily paused by a
3828 stop_all_lwps call while handling, for example, another LWP's
3829 step-over. In that case, the pending expected SIGSTOP signal
3830 that was queued at vCont;t handling time will have already
3831 been consumed by wait_for_sigstop, and so we need to requeue
3832 another one here. Note that if the LWP already has a SIGSTOP
3833 pending, this is a no-op. */
3834
3835 if (debug_threads)
3836 fprintf (stderr,
3837 "Client wants LWP %ld to stop. "
3838 "Making sure it has a SIGSTOP pending\n",
3839 lwpid_of (lwp));
3840
3841 send_sigstop (lwp);
3842 }
3843
3844 step = thread->last_resume_kind == resume_step;
3845 linux_resume_one_lwp (lwp, step, 0, NULL);
3846 return 0;
3847 }
3848
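/* Decrement the suspend count of an LWP, and then proceed with it.
   This is how unstop_all_lwps undoes a stop-and-suspend in one pass. */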
3849 static int
3850 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3851 {
3852 struct lwp_info *lwp = (struct lwp_info *) entry;
3853
3854 if (lwp == except)
3855 return 0;
3856
3857 lwp->suspended--;
3858 gdb_assert (lwp->suspended >= 0);
3859
3860 return proceed_one_lwp (entry, except);
3861 }
3862
3863 /* When we finish a step-over, set threads running again. If there's
3864 another thread that may need a step-over, now's the time to start
3865 it. Eventually, we'll move all threads past their breakpoints. */
3866
3867 static void
3868 proceed_all_lwps (void)
3869 {
3870 struct lwp_info *need_step_over;
3871
3872 /* If there is a thread which would otherwise be resumed, which is
3873 stopped at a breakpoint that needs stepping over, then don't
3874 resume any threads - have it step over the breakpoint with all
3875 other threads stopped, then resume all threads again. */
3876
3877 if (supports_breakpoints ())
3878 {
3879 need_step_over
3880 = (struct lwp_info *) find_inferior (&all_lwps,
3881 need_step_over_p, NULL);
3882
3883 if (need_step_over != NULL)
3884 {
3885 if (debug_threads)
3886 fprintf (stderr, "proceed_all_lwps: found "
3887 "thread %ld needing a step-over\n",
3888 lwpid_of (need_step_over));
3889
3890 start_step_over (need_step_over);
3891 return;
3892 }
3893 }
3894
3895 if (debug_threads)
3896 fprintf (stderr, "Proceeding, no step-over needed\n");
3897
3898 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3899 }
3900
3901 /* Stopped LWPs that the client wanted to be running, that don't have
3902 pending statuses, are set to run again, except for EXCEPT, if not
3903 NULL. This undoes a stop_all_lwps call. */
3904
3905 static void
3906 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3907 {
3908 if (debug_threads)
3909 {
3910 if (except)
3911 fprintf (stderr,
3912 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3913 else
3914 fprintf (stderr,
3915 "unstopping all lwps\n");
3916 }
3917
3918 if (unsuspend)
3919 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3920 else
3921 find_inferior (&all_lwps, proceed_one_lwp, except);
3922 }
3923
3924
3925 #ifdef HAVE_LINUX_REGSETS
3926
3927 #define use_linux_regsets 1
3928
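/* Fetch registers via the kernel regset interface. Returns 0 if the
   general registers were included in one of the regsets read, 1 if the
   caller still needs to fetch them by other means. */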
3929 static int
3930 regsets_fetch_inferior_registers (struct regcache *regcache)
3931 {
3932 struct regset_info *regset;
3933 int saw_general_regs = 0;
3934 int pid;
3935 struct iovec iov;
3936
3937 regset = target_regsets;
3938
3939 pid = lwpid_of (get_thread_lwp (current_inferior));
3940 while (regset->size >= 0)
3941 {
3942 void *buf, *data;
3943 int nt_type, res;
3944
3945 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3946 {
3947 regset ++;
3948 continue;
3949 }
3950
3951 buf = xmalloc (regset->size);
3952
3953 nt_type = regset->nt_type;
3954 if (nt_type)
3955 {
3956 iov.iov_base = buf;
3957 iov.iov_len = regset->size;
3958 data = (void *) &iov;
3959 }
3960 else
3961 data = buf;
3962
3963 #ifndef __sparc__
3964 res = ptrace (regset->get_request, pid, nt_type, data);
3965 #else
3966 res = ptrace (regset->get_request, pid, data, nt_type);
3967 #endif
3968 if (res < 0)
3969 {
3970 if (errno == EIO)
3971 {
3972 /* If we get EIO on a regset, do not try it again for
3973 this process. */
3974 disabled_regsets[regset - target_regsets] = 1;
3975 free (buf);
3976 continue;
3977 }
3978 else
3979 {
3980 char s[256];
3981 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3982 pid);
3983 perror (s);
3984 }
3985 }
3986 else if (regset->type == GENERAL_REGS)
3987 saw_general_regs = 1;
3988 regset->store_function (regcache, buf);
3989 regset ++;
3990 free (buf);
3991 }
3992 if (saw_general_regs)
3993 return 0;
3994 else
3995 return 1;
3996 }
3997
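/* Write registers via the kernel regset interface. Each regset is
   read back first so that kernel-side fields absent from the regcache
   are preserved, then overlaid with the cached values and written out.
   Returns 0 if the general registers were included, 1 otherwise. */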
3998 static int
3999 regsets_store_inferior_registers (struct regcache *regcache)
4000 {
4001 struct regset_info *regset;
4002 int saw_general_regs = 0;
4003 int pid;
4004 struct iovec iov;
4005
4006 regset = target_regsets;
4007
4008 pid = lwpid_of (get_thread_lwp (current_inferior));
4009 while (regset->size >= 0)
4010 {
4011 void *buf, *data;
4012 int nt_type, res;
4013
4014 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4015 {
4016 regset ++;
4017 continue;
4018 }
4019
4020 buf = xmalloc (regset->size);
4021
4022 /* First fill the buffer with the current register set contents,
4023 in case there are any items in the kernel's regset that are
4024 not in gdbserver's regcache. */
4025
4026 nt_type = regset->nt_type;
4027 if (nt_type)
4028 {
4029 iov.iov_base = buf;
4030 iov.iov_len = regset->size;
4031 data = (void *) &iov;
4032 }
4033 else
4034 data = buf;
4035
4036 #ifndef __sparc__
4037 res = ptrace (regset->get_request, pid, nt_type, data);
4038 #else
4039 res = ptrace (regset->get_request, pid, data, nt_type);
4040 #endif
4041
4042 if (res == 0)
4043 {
4044 /* Then overlay our cached registers on that. */
4045 regset->fill_function (regcache, buf);
4046
4047 /* Only now do we write the register set. */
4048 #ifndef __sparc__
4049 res = ptrace (regset->set_request, pid, nt_type, data);
4050 #else
4051 res = ptrace (regset->set_request, pid, data, nt_type);
4052 #endif
4053 }
4054
4055 if (res < 0)
4056 {
4057 if (errno == EIO)
4058 {
4059 /* If we get EIO on a regset, do not try it again for
4060 this process. */
4061 disabled_regsets[regset - target_regsets] = 1;
4062 free (buf);
4063 continue;
4064 }
4065 else if (errno == ESRCH)
4066 {
4067 /* At this point, ESRCH should mean the process is
4068 already gone, in which case we simply ignore attempts
4069 to change its registers. See also the related
4070 comment in linux_resume_one_lwp. */
4071 free (buf);
4072 return 0;
4073 }
4074 else
4075 {
4076 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4077 }
4078 }
4079 else if (regset->type == GENERAL_REGS)
4080 saw_general_regs = 1;
4081 regset ++;
4082 free (buf);
4083 }
4084 if (saw_general_regs)
4085 return 0;
4086 else
4087 return 1;
4088 }
4089
4090 #else /* !HAVE_LINUX_REGSETS */
4091
4092 #define use_linux_regsets 0
4093 #define regsets_fetch_inferior_registers(regcache) 1
4094 #define regsets_store_inferior_registers(regcache) 1
4095
4096 #endif
4097
4098 /* Return 1 if register REGNO is supported by one of the regset ptrace
4099 calls or 0 if it has to be transferred individually. */
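/* For example, REGNO 10 is tested against byte 10 / 8 == 1 of
   regset_bitmap, under mask 1 << (10 % 8) == 0x04. A NULL bitmap
   means all registers are covered by the regsets. */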
4100
4101 static int
4102 linux_register_in_regsets (int regno)
4103 {
4104 unsigned char mask = 1 << (regno % 8);
4105 size_t index = regno / 8;
4106
4107 return (use_linux_regsets
4108 && (the_low_target.regset_bitmap == NULL
4109 || (the_low_target.regset_bitmap[index] & mask) != 0));
4110 }
4111
4112 #ifdef HAVE_LINUX_USRREGS
4113
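/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM, as
   recorded in the low target's regmap; -1 marks a register that cannot
   be transferred this way. */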
4114 int
4115 register_addr (int regnum)
4116 {
4117 int addr;
4118
4119 if (regnum < 0 || regnum >= the_low_target.num_regs)
4120 error ("Invalid register number %d.", regnum);
4121
4122 addr = the_low_target.regmap[regnum];
4123
4124 return addr;
4125 }
4126
4127 /* Fetch one register. */
4128 static void
4129 fetch_register (struct regcache *regcache, int regno)
4130 {
4131 CORE_ADDR regaddr;
4132 int i, size;
4133 char *buf;
4134 int pid;
4135
4136 if (regno >= the_low_target.num_regs)
4137 return;
4138 if ((*the_low_target.cannot_fetch_register) (regno))
4139 return;
4140
4141 regaddr = register_addr (regno);
4142 if (regaddr == -1)
4143 return;
4144
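/* Round the register size up to a multiple of the ptrace transfer
   unit; e.g. a 6-byte register with a 4-byte PTRACE_XFER_TYPE rounds
   up to (6 + 4 - 1) & -4 == 8 bytes. */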
4145 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4146 & -sizeof (PTRACE_XFER_TYPE));
4147 buf = alloca (size);
4148
4149 pid = lwpid_of (get_thread_lwp (current_inferior));
4150 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4151 {
4152 errno = 0;
4153 *(PTRACE_XFER_TYPE *) (buf + i) =
4154 ptrace (PTRACE_PEEKUSER, pid,
4155 /* Coerce to a uintptr_t first to avoid potential gcc warning
4156 about coercing an 8 byte integer to a 4 byte pointer. */
4157 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4158 regaddr += sizeof (PTRACE_XFER_TYPE);
4159 if (errno != 0)
4160 error ("reading register %d: %s", regno, strerror (errno));
4161 }
4162
4163 if (the_low_target.supply_ptrace_register)
4164 the_low_target.supply_ptrace_register (regcache, regno, buf);
4165 else
4166 supply_register (regcache, regno, buf);
4167 }
4168
4169 /* Store one register. */
4170 static void
4171 store_register (struct regcache *regcache, int regno)
4172 {
4173 CORE_ADDR regaddr;
4174 int i, size;
4175 char *buf;
4176 int pid;
4177
4178 if (regno >= the_low_target.num_regs)
4179 return;
4180 if ((*the_low_target.cannot_store_register) (regno))
4181 return;
4182
4183 regaddr = register_addr (regno);
4184 if (regaddr == -1)
4185 return;
4186
4187 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4188 & -sizeof (PTRACE_XFER_TYPE));
4189 buf = alloca (size);
4190 memset (buf, 0, size);
4191
4192 if (the_low_target.collect_ptrace_register)
4193 the_low_target.collect_ptrace_register (regcache, regno, buf);
4194 else
4195 collect_register (regcache, regno, buf);
4196
4197 pid = lwpid_of (get_thread_lwp (current_inferior));
4198 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4199 {
4200 errno = 0;
4201 ptrace (PTRACE_POKEUSER, pid,
4202 /* Coerce to a uintptr_t first to avoid potential gcc warning
4203 about coercing an 8 byte integer to a 4 byte pointer. */
4204 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4205 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4206 if (errno != 0)
4207 {
4208 /* At this point, ESRCH should mean the process is
4209 already gone, in which case we simply ignore attempts
4210 to change its registers. See also the related
4211 comment in linux_resume_one_lwp. */
4212 if (errno == ESRCH)
4213 return;
4214
4215 if ((*the_low_target.cannot_store_register) (regno) == 0)
4216 error ("writing register %d: %s", regno, strerror (errno));
4217 }
4218 regaddr += sizeof (PTRACE_XFER_TYPE);
4219 }
4220 }
4221
4222 /* Fetch all registers, or just one, from the child process.
4223 If REGNO is -1, do this for all registers, skipping any that are
4224 assumed to have been retrieved by regsets_fetch_inferior_registers,
4225 unless ALL is non-zero.
4226 Otherwise, REGNO specifies which register (so we can save time). */
4227 static void
4228 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4229 {
4230 if (regno == -1)
4231 {
4232 for (regno = 0; regno < the_low_target.num_regs; regno++)
4233 if (all || !linux_register_in_regsets (regno))
4234 fetch_register (regcache, regno);
4235 }
4236 else
4237 fetch_register (regcache, regno);
4238 }
4239
4240 /* Store our register values back into the inferior.
4241 If REGNO is -1, do this for all registers, skipping any that are
4242 assumed to have been saved by regsets_store_inferior_registers,
4243 unless ALL is non-zero.
4244 Otherwise, REGNO specifies which register (so we can save time). */
4245 static void
4246 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4247 {
4248 if (regno == -1)
4249 {
4250 for (regno = 0; regno < the_low_target.num_regs; regno++)
4251 if (all || !linux_register_in_regsets (regno))
4252 store_register (regcache, regno);
4253 }
4254 else
4255 store_register (regcache, regno);
4256 }
4257
4258 #else /* !HAVE_LINUX_USRREGS */
4259
4260 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4261 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4262
4263 #endif
4264
4265
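/* Fetch REGNO (or all registers, if REGNO is -1) into REGCACHE,
   preferring the regset interface and falling back on the one-by-one
   PTRACE_PEEKUSER method for anything the regsets did not provide. */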
4266 void
4267 linux_fetch_registers (struct regcache *regcache, int regno)
4268 {
4269 int use_regsets;
4270 int all = 0;
4271
4272 if (regno == -1)
4273 {
4274 all = regsets_fetch_inferior_registers (regcache);
4275 usr_fetch_inferior_registers (regcache, regno, all);
4276 }
4277 else
4278 {
4279 use_regsets = linux_register_in_regsets (regno);
4280 if (use_regsets)
4281 all = regsets_fetch_inferior_registers (regcache);
4282 if (!use_regsets || all)
4283 usr_fetch_inferior_registers (regcache, regno, 1);
4284 }
4285 }
4286
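/* Store REGNO (or all registers, if REGNO is -1) from REGCACHE back
   into the inferior, using the same regset-first strategy as
   linux_fetch_registers above. */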
4287 void
4288 linux_store_registers (struct regcache *regcache, int regno)
4289 {
4290 int use_regsets;
4291 int all = 0;
4292
4293 if (regno == -1)
4294 {
4295 all = regsets_store_inferior_registers (regcache);
4296 usr_store_inferior_registers (regcache, regno, all);
4297 }
4298 else
4299 {
4300 use_regsets = linux_register_in_regsets (regno);
4301 if (use_regsets)
4302 all = regsets_store_inferior_registers (regcache);
4303 if (!use_regsets || all)
4304 usr_store_inferior_registers (regcache, regno, 1);
4305 }
4306 }
4307
4308
4309 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4310 to debugger memory starting at MYADDR. */
4311
4312 static int
4313 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4314 {
4315 register int i;
4316 /* Round starting address down to longword boundary. */
4317 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4318 /* Round ending address up; get number of longwords that makes. */
4319 register int count
4320 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4321 / sizeof (PTRACE_XFER_TYPE);
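/* E.g., with 4-byte words, reading 5 bytes starting 2 bytes past a
   word boundary needs ((2 + 5) + 3) / 4 == 2 longwords. */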
4322 /* Allocate buffer of that many longwords. */
4323 register PTRACE_XFER_TYPE *buffer
4324 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4325 int fd;
4326 char filename[64];
4327 int pid = lwpid_of (get_thread_lwp (current_inferior));
4328
4329 /* Try using /proc. Don't bother for transfers shorter than three words. */
4330 if (len >= 3 * sizeof (long))
4331 {
4332 /* We could keep this file open and cache it - possibly one per
4333 thread. That requires some juggling, but is even faster. */
4334 sprintf (filename, "/proc/%d/mem", pid);
4335 fd = open (filename, O_RDONLY | O_LARGEFILE);
4336 if (fd == -1)
4337 goto no_proc;
4338
4339 /* If pread64 is available, use it. It's faster if the kernel
4340 supports it (only one syscall), and it's 64-bit safe even on
4341 32-bit platforms (for instance, SPARC debugging a SPARC64
4342 application). */
4343 #ifdef HAVE_PREAD64
4344 if (pread64 (fd, myaddr, len, memaddr) != len)
4345 #else
4346 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4347 #endif
4348 {
4349 close (fd);
4350 goto no_proc;
4351 }
4352
4353 close (fd);
4354 return 0;
4355 }
4356
4357 no_proc:
4358 /* Read all the longwords */
4359 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4360 {
4361 errno = 0;
4362 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4363 about coercing an 8 byte integer to a 4 byte pointer. */
4364 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4365 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4366 if (errno)
4367 return errno;
4368 }
4369
4370 /* Copy appropriate bytes out of the buffer. */
4371 memcpy (myaddr,
4372 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4373 len);
4374
4375 return 0;
4376 }
4377
4378 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4379 memory at MEMADDR. On failure (cannot write to the inferior)
4380 returns the value of errno. */
4381
4382 static int
4383 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4384 {
4385 register int i;
4386 /* Round starting address down to longword boundary. */
4387 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4388 /* Round ending address up; get number of longwords that makes. */
4389 register int count
4390 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4391 / sizeof (PTRACE_XFER_TYPE);
4392
4393 /* Allocate buffer of that many longwords. */
4394 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4395 alloca (count * sizeof (PTRACE_XFER_TYPE));
4396
4397 int pid = lwpid_of (get_thread_lwp (current_inferior));
4398
4399 if (debug_threads)
4400 {
4401 /* Dump up to four bytes. */
4402 unsigned int val = * (unsigned int *) myaddr;
4403 if (len == 1)
4404 val = val & 0xff;
4405 else if (len == 2)
4406 val = val & 0xffff;
4407 else if (len == 3)
4408 val = val & 0xffffff;
4409 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4410 val, (long)memaddr);
4411 }
4412
4413 /* Fill start and end extra bytes of buffer with existing memory data. */
4414
4415 errno = 0;
4416 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4417 about coercing an 8 byte integer to a 4 byte pointer. */
4418 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4419 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4420 if (errno)
4421 return errno;
4422
4423 if (count > 1)
4424 {
4425 errno = 0;
4426 buffer[count - 1]
4427 = ptrace (PTRACE_PEEKTEXT, pid,
4428 /* Coerce to a uintptr_t first to avoid potential gcc warning
4429 about coercing an 8 byte integer to a 4 byte pointer. */
4430 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4431 * sizeof (PTRACE_XFER_TYPE)),
4432 0);
4433 if (errno)
4434 return errno;
4435 }
4436
4437 /* Copy data to be written over corresponding part of buffer. */
4438
4439 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4440 myaddr, len);
4441
4442 /* Write the entire buffer. */
4443
4444 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4445 {
4446 errno = 0;
4447 ptrace (PTRACE_POKETEXT, pid,
4448 /* Coerce to a uintptr_t first to avoid potential gcc warning
4449 about coercing an 8 byte integer to a 4 byte pointer. */
4450 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4451 (PTRACE_ARG4_TYPE) buffer[i]);
4452 if (errno)
4453 return errno;
4454 }
4455
4456 return 0;
4457 }
4458
4459 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4460 static int linux_supports_tracefork_flag;
4461
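/* Ask the kernel to report clone events for PID. If the tracefork
   test below determined that the kernel lacks these options, this is
   a no-op. */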
4462 static void
4463 linux_enable_event_reporting (int pid)
4464 {
4465 if (!linux_supports_tracefork_flag)
4466 return;
4467
4468 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4469 }
4470
4471 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4472
4473 static int
4474 linux_tracefork_grandchild (void *arg)
4475 {
4476 _exit (0);
4477 }
4478
4479 #define STACK_SIZE 4096
4480
4481 static int
4482 linux_tracefork_child (void *arg)
4483 {
4484 ptrace (PTRACE_TRACEME, 0, 0, 0);
4485 kill (getpid (), SIGSTOP);
4486
4487 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4488
4489 if (fork () == 0)
4490 linux_tracefork_grandchild (NULL);
4491
4492 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4493
4494 #ifdef __ia64__
4495 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4496 CLONE_VM | SIGCHLD, NULL);
4497 #else
4498 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4499 CLONE_VM | SIGCHLD, NULL);
4500 #endif
4501
4502 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4503
4504 _exit (0);
4505 }
4506
4507 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4508 sure that we can enable the option, and that it had the desired
4509 effect. */
4510
4511 static void
4512 linux_test_for_tracefork (void)
4513 {
4514 int child_pid, ret, status;
4515 long second_pid;
4516 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4517 char *stack = xmalloc (STACK_SIZE * 4);
4518 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4519
4520 linux_supports_tracefork_flag = 0;
4521
4522 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4523
4524 child_pid = fork ();
4525 if (child_pid == 0)
4526 linux_tracefork_child (NULL);
4527
4528 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4529
4530 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4531 #ifdef __ia64__
4532 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4533 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4534 #else /* !__ia64__ */
4535 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4536 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4537 #endif /* !__ia64__ */
4538
4539 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4540
4541 if (child_pid == -1)
4542 perror_with_name ("clone");
4543
4544 ret = my_waitpid (child_pid, &status, 0);
4545 if (ret == -1)
4546 perror_with_name ("waitpid");
4547 else if (ret != child_pid)
4548 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4549 if (! WIFSTOPPED (status))
4550 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4551
4552 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4553 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4554 if (ret != 0)
4555 {
4556 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4557 if (ret != 0)
4558 {
4559 warning ("linux_test_for_tracefork: failed to kill child");
4560 return;
4561 }
4562
4563 ret = my_waitpid (child_pid, &status, 0);
4564 if (ret != child_pid)
4565 warning ("linux_test_for_tracefork: failed to wait for killed child");
4566 else if (!WIFSIGNALED (status))
4567 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4568 "killed child", status);
4569
4570 return;
4571 }
4572
4573 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4574 if (ret != 0)
4575 warning ("linux_test_for_tracefork: failed to resume child");
4576
4577 ret = my_waitpid (child_pid, &status, 0);
4578
4579 if (ret == child_pid && WIFSTOPPED (status)
4580 && status >> 16 == PTRACE_EVENT_FORK)
4581 {
4582 second_pid = 0;
4583 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4584 if (ret == 0 && second_pid != 0)
4585 {
4586 int second_status;
4587
4588 linux_supports_tracefork_flag = 1;
4589 my_waitpid (second_pid, &second_status, 0);
4590 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4591 if (ret != 0)
4592 warning ("linux_test_for_tracefork: failed to kill second child");
4593 my_waitpid (second_pid, &status, 0);
4594 }
4595 }
4596 else
4597 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4598 "(%d, status 0x%x)", ret, status);
4599
4600 do
4601 {
4602 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4603 if (ret != 0)
4604 warning ("linux_test_for_tracefork: failed to kill child");
4605 my_waitpid (child_pid, &status, 0);
4606 }
4607 while (WIFSTOPPED (status));
4608
4609 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4610 free (stack);
4611 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4612 }
4613
4614
4615 static void
4616 linux_look_up_symbols (void)
4617 {
4618 #ifdef USE_THREAD_DB
4619 struct process_info *proc = current_process ();
4620
4621 if (proc->private->thread_db != NULL)
4622 return;
4623
4624 /* If the kernel supports tracing forks then it also supports tracing
4625 clones, and then we don't need to use the magic thread event breakpoint
4626 to learn about threads. */
4627 thread_db_init (!linux_supports_tracefork_flag);
4628 #endif
4629 }
4630
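/* Deliver a SIGINT on behalf of an interrupt request from GDB: to the
   current continue-thread if a specific one is selected, otherwise to
   the initial process. */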
4631 static void
4632 linux_request_interrupt (void)
4633 {
4634 extern unsigned long signal_pid;
4635
4636 if (!ptid_equal (cont_thread, null_ptid)
4637 && !ptid_equal (cont_thread, minus_one_ptid))
4638 {
4639 struct lwp_info *lwp;
4640 int lwpid;
4641
4642 lwp = get_thread_lwp (current_inferior);
4643 lwpid = lwpid_of (lwp);
4644 kill_lwp (lwpid, SIGINT);
4645 }
4646 else
4647 kill_lwp (signal_pid, SIGINT);
4648 }
4649
4650 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4651 to debugger memory starting at MYADDR. */
4652
4653 static int
4654 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4655 {
4656 char filename[PATH_MAX];
4657 int fd, n;
4658 int pid = lwpid_of (get_thread_lwp (current_inferior));
4659
4660 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4661
4662 fd = open (filename, O_RDONLY);
4663 if (fd < 0)
4664 return -1;
4665
4666 if (offset != (CORE_ADDR) 0
4667 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4668 n = -1;
4669 else
4670 n = read (fd, myaddr, len);
4671
4672 close (fd);
4673
4674 return n;
4675 }
4676
4677 /* These breakpoint and watchpoint related wrapper functions simply
4678 pass on the function call if the target has registered a
4679 corresponding function. */
4680
4681 static int
4682 linux_insert_point (char type, CORE_ADDR addr, int len)
4683 {
4684 if (the_low_target.insert_point != NULL)
4685 return the_low_target.insert_point (type, addr, len);
4686 else
4687 /* Unsupported (see target.h). */
4688 return 1;
4689 }
4690
4691 static int
4692 linux_remove_point (char type, CORE_ADDR addr, int len)
4693 {
4694 if (the_low_target.remove_point != NULL)
4695 return the_low_target.remove_point (type, addr, len);
4696 else
4697 /* Unsupported (see target.h). */
4698 return 1;
4699 }
4700
4701 static int
4702 linux_stopped_by_watchpoint (void)
4703 {
4704 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4705
4706 return lwp->stopped_by_watchpoint;
4707 }
4708
4709 static CORE_ADDR
4710 linux_stopped_data_address (void)
4711 {
4712 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4713
4714 return lwp->stopped_data_address;
4715 }
4716
4717 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4718 #if defined(__mcoldfire__)
4719 /* These should really be defined in the kernel's ptrace.h header. */
4720 #define PT_TEXT_ADDR 49*4
4721 #define PT_DATA_ADDR 50*4
4722 #define PT_TEXT_END_ADDR 51*4
4723 #elif defined(BFIN)
4724 #define PT_TEXT_ADDR 220
4725 #define PT_TEXT_END_ADDR 224
4726 #define PT_DATA_ADDR 228
4727 #elif defined(__TMS320C6X__)
4728 #define PT_TEXT_ADDR (0x10000*4)
4729 #define PT_DATA_ADDR (0x10004*4)
4730 #define PT_TEXT_END_ADDR (0x10008*4)
4731 #endif
4732
4733 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4734 to tell gdb about. */
4735
4736 static int
4737 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4738 {
4739 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4740 unsigned long text, text_end, data;
4741 int pid = lwpid_of (get_thread_lwp (current_inferior));
4742
4743 errno = 0;
4744
4745 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4746 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4747 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4748
4749 if (errno == 0)
4750 {
4751 /* Both text and data offsets produced at compile-time (and so
4752 used by gdb) are relative to the beginning of the program,
4753 with the data segment immediately following the text segment.
4754 However, the actual runtime layout in memory may put the data
4755 somewhere else, so when we send gdb a data base-address, we
4756 use the real data base address and subtract the compile-time
4757 data base-address from it (which is just the length of the
4758 text segment). BSS immediately follows data in both
4759 cases. */
4760 *text_p = text;
4761 *data_p = data - (text_end - text);
4762
4763 return 1;
4764 }
4765 #endif
4766 return 0;
4767 }
4768 #endif
4769
4770 static int
4771 linux_qxfer_osdata (const char *annex,
4772 unsigned char *readbuf, unsigned const char *writebuf,
4773 CORE_ADDR offset, int len)
4774 {
4775 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4776 }
4777
4778 /* Convert a native/host siginfo object, into/from the siginfo in the
4779 layout of the inferiors' architecture. */
4780
4781 static void
4782 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4783 {
4784 int done = 0;
4785
4786 if (the_low_target.siginfo_fixup != NULL)
4787 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4788
4789 /* If there was no callback, or the callback didn't do anything,
4790 then just do a straight memcpy. */
4791 if (!done)
4792 {
4793 if (direction == 1)
4794 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4795 else
4796 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4797 }
4798 }
4799
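/* Transfer the siginfo of the current LWP between GDB and the kernel
   (qXfer:siginfo reads and writes), converting between the inferior's
   siginfo layout and the native one where necessary. */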
4800 static int
4801 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4802 unsigned const char *writebuf, CORE_ADDR offset, int len)
4803 {
4804 int pid;
4805 siginfo_t siginfo;
4806 char inf_siginfo[sizeof (siginfo_t)];
4807
4808 if (current_inferior == NULL)
4809 return -1;
4810
4811 pid = lwpid_of (get_thread_lwp (current_inferior));
4812
4813 if (debug_threads)
4814 fprintf (stderr, "%s siginfo for lwp %d.\n",
4815 readbuf != NULL ? "Reading" : "Writing",
4816 pid);
4817
4818 if (offset >= sizeof (siginfo))
4819 return -1;
4820
4821 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4822 return -1;
4823
4824 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4825 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4826 inferior with a 64-bit GDBSERVER should look the same as debugging it
4827 with a 32-bit GDBSERVER, we need to convert it. */
4828 siginfo_fixup (&siginfo, inf_siginfo, 0);
4829
4830 if (offset + len > sizeof (siginfo))
4831 len = sizeof (siginfo) - offset;
4832
4833 if (readbuf != NULL)
4834 memcpy (readbuf, inf_siginfo + offset, len);
4835 else
4836 {
4837 memcpy (inf_siginfo + offset, writebuf, len);
4838
4839 /* Convert back to ptrace layout before flushing it out. */
4840 siginfo_fixup (&siginfo, inf_siginfo, 1);
4841
4842 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4843 return -1;
4844 }
4845
4846 return len;
4847 }
4848
4849 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4850 it lets us notice when children change state; and it acts as the
4851 handler for the sigsuspend in my_waitpid. */
4852
4853 static void
4854 sigchld_handler (int signo)
4855 {
4856 int old_errno = errno;
4857
4858 if (debug_threads)
4859 {
4860 do
4861 {
4862 /* fprintf is not async-signal-safe, so call write
4863 directly. */
4864 if (write (2, "sigchld_handler\n",
4865 sizeof ("sigchld_handler\n") - 1) < 0)
4866 break; /* just ignore */
4867 } while (0);
4868 }
4869
4870 if (target_is_async_p ())
4871 async_file_mark (); /* trigger a linux_wait */
4872
4873 errno = old_errno;
4874 }
4875
4876 static int
4877 linux_supports_non_stop (void)
4878 {
4879 return 1;
4880 }
4881
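/* Switch the target in or out of async mode by creating or tearing
   down the event pipe that wakes the event loop on SIGCHLD. Returns
   the previous setting. */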
4882 static int
4883 linux_async (int enable)
4884 {
4885 int previous = (linux_event_pipe[0] != -1);
4886
4887 if (debug_threads)
4888 fprintf (stderr, "linux_async (%d), previous=%d\n",
4889 enable, previous);
4890
4891 if (previous != enable)
4892 {
4893 sigset_t mask;
4894 sigemptyset (&mask);
4895 sigaddset (&mask, SIGCHLD);
4896
4897 sigprocmask (SIG_BLOCK, &mask, NULL);
4898
4899 if (enable)
4900 {
4901 if (pipe (linux_event_pipe) == -1)
4902 fatal ("creating event pipe failed.");
4903
4904 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4905 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4906
4907 /* Register the event loop handler. */
4908 add_file_handler (linux_event_pipe[0],
4909 handle_target_event, NULL);
4910
4911 /* Always trigger a linux_wait. */
4912 async_file_mark ();
4913 }
4914 else
4915 {
4916 delete_file_handler (linux_event_pipe[0]);
4917
4918 close (linux_event_pipe[0]);
4919 close (linux_event_pipe[1]);
4920 linux_event_pipe[0] = -1;
4921 linux_event_pipe[1] = -1;
4922 }
4923
4924 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4925 }
4926
4927 return previous;
4928 }
4929
4930 static int
4931 linux_start_non_stop (int nonstop)
4932 {
4933 /* Register or unregister from event-loop accordingly. */
4934 linux_async (nonstop);
4935 return 0;
4936 }
4937
4938 static int
4939 linux_supports_multi_process (void)
4940 {
4941 return 1;
4942 }
4943
4944 static int
4945 linux_supports_disable_randomization (void)
4946 {
4947 #ifdef HAVE_PERSONALITY
4948 return 1;
4949 #else
4950 return 0;
4951 #endif
4952 }
4953
4954 static int
4955 linux_supports_agent (void)
4956 {
4957 return 1;
4958 }
4959
4960 /* Enumerate spufs IDs for process PID. */
4961 static int
4962 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4963 {
4964 int pos = 0;
4965 int written = 0;
4966 char path[128];
4967 DIR *dir;
4968 struct dirent *entry;
4969
4970 sprintf (path, "/proc/%ld/fd", pid);
4971 dir = opendir (path);
4972 if (!dir)
4973 return -1;
4974
4975 rewinddir (dir);
4976 while ((entry = readdir (dir)) != NULL)
4977 {
4978 struct stat st;
4979 struct statfs stfs;
4980 int fd;
4981
4982 fd = atoi (entry->d_name);
4983 if (!fd)
4984 continue;
4985
4986 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4987 if (stat (path, &st) != 0)
4988 continue;
4989 if (!S_ISDIR (st.st_mode))
4990 continue;
4991
4992 if (statfs (path, &stfs) != 0)
4993 continue;
4994 if (stfs.f_type != SPUFS_MAGIC)
4995 continue;
4996
4997 if (pos >= offset && pos + 4 <= offset + len)
4998 {
4999 *(unsigned int *)(buf + pos - offset) = fd;
5000 written += 4;
5001 }
5002 pos += 4;
5003 }
5004
5005 closedir (dir);
5006 return written;
5007 }
5008
5009 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5010 object type, using the /proc file system. */
5011 static int
5012 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5013 unsigned const char *writebuf,
5014 CORE_ADDR offset, int len)
5015 {
5016 long pid = lwpid_of (get_thread_lwp (current_inferior));
5017 char buf[128];
5018 int fd = 0;
5019 int ret = 0;
5020
5021 if (!writebuf && !readbuf)
5022 return -1;
5023
5024 if (!*annex)
5025 {
5026 if (!readbuf)
5027 return -1;
5028 else
5029 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5030 }
5031
5032 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5033 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5034 if (fd <= 0)
5035 return -1;
5036
5037 if (offset != 0
5038 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5039 {
5040 close (fd);
5041 return 0;
5042 }
5043
5044 if (writebuf)
5045 ret = write (fd, writebuf, (size_t) len);
5046 else
5047 ret = read (fd, readbuf, (size_t) len);
5048
5049 close (fd);
5050 return ret;
5051 }
5052
5053 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5054 struct target_loadseg
5055 {
5056 /* Core address to which the segment is mapped. */
5057 Elf32_Addr addr;
5058 /* VMA recorded in the program header. */
5059 Elf32_Addr p_vaddr;
5060 /* Size of this segment in memory. */
5061 Elf32_Word p_memsz;
5062 };
5063
5064 # if defined PT_GETDSBT
5065 struct target_loadmap
5066 {
5067 /* Protocol version number, must be zero. */
5068 Elf32_Word version;
5069 /* Pointer to the DSBT table, its size, and the DSBT index. */
5070 unsigned *dsbt_table;
5071 unsigned dsbt_size, dsbt_index;
5072 /* Number of segments in this map. */
5073 Elf32_Word nsegs;
5074 /* The actual memory map. */
5075 struct target_loadseg segs[/*nsegs*/];
5076 };
5077 # define LINUX_LOADMAP PT_GETDSBT
5078 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5079 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5080 # else
5081 struct target_loadmap
5082 {
5083 /* Protocol version number, must be zero. */
5084 Elf32_Half version;
5085 /* Number of segments in this map. */
5086 Elf32_Half nsegs;
5087 /* The actual memory map. */
5088 struct target_loadseg segs[/*nsegs*/];
5089 };
5090 # define LINUX_LOADMAP PTRACE_GETFDPIC
5091 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5092 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5093 # endif
5094
5095 static int
5096 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5097 unsigned char *myaddr, unsigned int len)
5098 {
5099 int pid = lwpid_of (get_thread_lwp (current_inferior));
5100 int addr = -1;
5101 struct target_loadmap *data = NULL;
5102 unsigned int actual_length, copy_length;
5103
5104 if (strcmp (annex, "exec") == 0)
5105 addr = (int) LINUX_LOADMAP_EXEC;
5106 else if (strcmp (annex, "interp") == 0)
5107 addr = (int) LINUX_LOADMAP_INTERP;
5108 else
5109 return -1;
5110
5111 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5112 return -1;
5113
5114 if (data == NULL)
5115 return -1;
5116
5117 actual_length = sizeof (struct target_loadmap)
5118 + sizeof (struct target_loadseg) * data->nsegs;
5119
5120 if (offset < 0 || offset > actual_length)
5121 return -1;
5122
5123 copy_length = actual_length - offset < len ? actual_length - offset : len;
5124 memcpy (myaddr, (char *) data + offset, copy_length);
5125 return copy_length;
5126 }
5127 #else
5128 # define linux_read_loadmap NULL
5129 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5130
5131 static void
5132 linux_process_qsupported (const char *query)
5133 {
5134 if (the_low_target.process_qsupported != NULL)
5135 the_low_target.process_qsupported (query);
5136 }
5137
5138 static int
5139 linux_supports_tracepoints (void)
5140 {
5141 if (the_low_target.supports_tracepoints == NULL)
5142 return 0;
5143
5144 return (*the_low_target.supports_tracepoints) ();
5145 }
5146
5147 static CORE_ADDR
5148 linux_read_pc (struct regcache *regcache)
5149 {
5150 if (the_low_target.get_pc == NULL)
5151 return 0;
5152
5153 return (*the_low_target.get_pc) (regcache);
5154 }
5155
5156 static void
5157 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5158 {
5159 gdb_assert (the_low_target.set_pc != NULL);
5160
5161 (*the_low_target.set_pc) (regcache, pc);
5162 }
5163
5164 static int
5165 linux_thread_stopped (struct thread_info *thread)
5166 {
5167 return get_thread_lwp (thread)->stopped;
5168 }
5169
5170 /* This exposes stop-all-threads functionality to other modules. */
5171
5172 static void
5173 linux_pause_all (int freeze)
5174 {
5175 stop_all_lwps (freeze, NULL);
5176 }
5177
5178 /* This exposes unstop-all-threads functionality to other gdbserver
5179 modules. */
5180
5181 static void
5182 linux_unpause_all (int unfreeze)
5183 {
5184 unstop_all_lwps (unfreeze, NULL);
5185 }
5186
5187 static int
5188 linux_prepare_to_access_memory (void)
5189 {
5190 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5191 running LWP. */
5192 if (non_stop)
5193 linux_pause_all (1);
5194 return 0;
5195 }
5196
5197 static void
5198 linux_done_accessing_memory (void)
5199 {
5200 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5201 running LWP. */
5202 if (non_stop)
5203 linux_unpause_all (1);
5204 }
5205
5206 static int
5207 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5208 CORE_ADDR collector,
5209 CORE_ADDR lockaddr,
5210 ULONGEST orig_size,
5211 CORE_ADDR *jump_entry,
5212 CORE_ADDR *trampoline,
5213 ULONGEST *trampoline_size,
5214 unsigned char *jjump_pad_insn,
5215 ULONGEST *jjump_pad_insn_size,
5216 CORE_ADDR *adjusted_insn_addr,
5217 CORE_ADDR *adjusted_insn_addr_end,
5218 char *err)
5219 {
5220 return (*the_low_target.install_fast_tracepoint_jump_pad)
5221 (tpoint, tpaddr, collector, lockaddr, orig_size,
5222 jump_entry, trampoline, trampoline_size,
5223 jjump_pad_insn, jjump_pad_insn_size,
5224 adjusted_insn_addr, adjusted_insn_addr_end,
5225 err);
5226 }
5227
5228 static struct emit_ops *
5229 linux_emit_ops (void)
5230 {
5231 if (the_low_target.emit_ops != NULL)
5232 return (*the_low_target.emit_ops) ();
5233 else
5234 return NULL;
5235 }
5236
5237 static int
5238 linux_get_min_fast_tracepoint_insn_len (void)
5239 {
5240 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5241 }
5242
5243 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5244
5245 static int
5246 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5247 CORE_ADDR *phdr_memaddr, int *num_phdr)
5248 {
5249 char filename[PATH_MAX];
5250 int fd;
5251 const int auxv_size = is_elf64
5252 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5253 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5254
5255 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5256
5257 fd = open (filename, O_RDONLY);
5258 if (fd < 0)
5259 return 1;
5260
5261 *phdr_memaddr = 0;
5262 *num_phdr = 0;
5263 while (read (fd, buf, auxv_size) == auxv_size
5264 && (*phdr_memaddr == 0 || *num_phdr == 0))
5265 {
5266 if (is_elf64)
5267 {
5268 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5269
5270 switch (aux->a_type)
5271 {
5272 case AT_PHDR:
5273 *phdr_memaddr = aux->a_un.a_val;
5274 break;
5275 case AT_PHNUM:
5276 *num_phdr = aux->a_un.a_val;
5277 break;
5278 }
5279 }
5280 else
5281 {
5282 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5283
5284 switch (aux->a_type)
5285 {
5286 case AT_PHDR:
5287 *phdr_memaddr = aux->a_un.a_val;
5288 break;
5289 case AT_PHNUM:
5290 *num_phdr = aux->a_un.a_val;
5291 break;
5292 }
5293 }
5294 }
5295
5296 close (fd);
5297
5298 if (*phdr_memaddr == 0 || *num_phdr == 0)
5299 {
5300 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5301 "phdr_memaddr = %ld, phdr_num = %d",
5302 (long) *phdr_memaddr, *num_phdr);
5303 return 2;
5304 }
5305
5306 return 0;
5307 }
5308
5309 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5310
5311 static CORE_ADDR
5312 get_dynamic (const int pid, const int is_elf64)
5313 {
5314 CORE_ADDR phdr_memaddr, relocation;
5315 int num_phdr, i;
5316 unsigned char *phdr_buf;
5317 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5318
5319 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5320 return 0;
5321
5322 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5323 phdr_buf = alloca (num_phdr * phdr_size);
5324
5325 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5326 return 0;
5327
5328 /* Compute relocation: it is expected to be 0 for "regular" executables,
5329 non-zero for PIE ones. */
5330 relocation = -1;
5331 for (i = 0; relocation == -1 && i < num_phdr; i++)
5332 if (is_elf64)
5333 {
5334 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5335
5336 if (p->p_type == PT_PHDR)
5337 relocation = phdr_memaddr - p->p_vaddr;
5338 }
5339 else
5340 {
5341 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5342
5343 if (p->p_type == PT_PHDR)
5344 relocation = phdr_memaddr - p->p_vaddr;
5345 }
5346
5347 if (relocation == -1)
5348 {
5349 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5350 real world executables, including PIE executables, always have
5351 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5352 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5353 provides DT_DEBUG anyway (fpc binaries are statically linked).
5354
5355 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
5356
5357 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
5358
5359 return 0;
5360 }
5361
5362 for (i = 0; i < num_phdr; i++)
5363 {
5364 if (is_elf64)
5365 {
5366 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5367
5368 if (p->p_type == PT_DYNAMIC)
5369 return p->p_vaddr + relocation;
5370 }
5371 else
5372 {
5373 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5374
5375 if (p->p_type == PT_DYNAMIC)
5376 return p->p_vaddr + relocation;
5377 }
5378 }
5379
5380 return 0;
5381 }
5382
5383 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5384 can be 0 if the inferior does not yet have the library list initialized. */
5385
5386 static CORE_ADDR
5387 get_r_debug (const int pid, const int is_elf64)
5388 {
5389 CORE_ADDR dynamic_memaddr;
5390 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5391 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5392
5393 dynamic_memaddr = get_dynamic (pid, is_elf64);
5394 if (dynamic_memaddr == 0)
5395 return (CORE_ADDR) -1;
5396
5397 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5398 {
5399 if (is_elf64)
5400 {
5401 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5402
5403 if (dyn->d_tag == DT_DEBUG)
5404 return dyn->d_un.d_val;
5405
5406 if (dyn->d_tag == DT_NULL)
5407 break;
5408 }
5409 else
5410 {
5411 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5412
5413 if (dyn->d_tag == DT_DEBUG)
5414 return dyn->d_un.d_val;
5415
5416 if (dyn->d_tag == DT_NULL)
5417 break;
5418 }
5419
5420 dynamic_memaddr += dyn_size;
5421 }
5422
5423 return (CORE_ADDR) -1;
5424 }
5425
5426 /* Read one pointer from MEMADDR in the inferior. */
5427
5428 static int
5429 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5430 {
5431 int ret;
5432
5433 /* Go through a union so this works on either big or little endian
5434 hosts, when the inferior's pointer size is smaller than the size
5435 of CORE_ADDR. It is assumed the inferior's endianness is the
5436 same as the superior's. */
5437 union
5438 {
5439 CORE_ADDR core_addr;
5440 unsigned int ui;
5441 unsigned char uc;
5442 } addr;
5443
5444 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5445 if (ret == 0)
5446 {
5447 if (ptr_size == sizeof (CORE_ADDR))
5448 *ptr = addr.core_addr;
5449 else if (ptr_size == sizeof (unsigned int))
5450 *ptr = addr.ui;
5451 else
5452 gdb_assert_not_reached ("unhandled pointer size");
5453 }
5454 return ret;
5455 }
5456
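/* Field offsets within the inferior's r_debug and link_map structures,
   which differ between 32-bit and 64-bit inferiors. */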
5457 struct link_map_offsets
5458 {
5459 /* Offset and size of r_debug.r_version. */
5460 int r_version_offset;
5461
5462 /* Offset and size of r_debug.r_map. */
5463 int r_map_offset;
5464
5465 /* Offset to l_addr field in struct link_map. */
5466 int l_addr_offset;
5467
5468 /* Offset to l_name field in struct link_map. */
5469 int l_name_offset;
5470
5471 /* Offset to l_ld field in struct link_map. */
5472 int l_ld_offset;
5473
5474 /* Offset to l_next field in struct link_map. */
5475 int l_next_offset;
5476
5477 /* Offset to l_prev field in struct link_map. */
5478 int l_prev_offset;
5479 };
5480
5481 /* Construct qXfer:libraries-svr4:read reply. */
5482
5483 static int
5484 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5485 unsigned const char *writebuf,
5486 CORE_ADDR offset, int len)
5487 {
5488 char *document;
5489 unsigned document_len;
5490 struct process_info_private *const priv = current_process ()->private;
5491 char filename[PATH_MAX];
5492 int pid, is_elf64;
5493
5494 static const struct link_map_offsets lmo_32bit_offsets =
5495 {
5496 0, /* r_version offset. */
5497 4, /* r_debug.r_map offset. */
5498 0, /* l_addr offset in link_map. */
5499 4, /* l_name offset in link_map. */
5500 8, /* l_ld offset in link_map. */
5501 12, /* l_next offset in link_map. */
5502 16 /* l_prev offset in link_map. */
5503 };
5504
5505 static const struct link_map_offsets lmo_64bit_offsets =
5506 {
5507 0, /* r_version offset. */
5508 8, /* r_debug.r_map offset. */
5509 0, /* l_addr offset in link_map. */
5510 8, /* l_name offset in link_map. */
5511 16, /* l_ld offset in link_map. */
5512 24, /* l_next offset in link_map. */
5513 32 /* l_prev offset in link_map. */
5514 };
5515 const struct link_map_offsets *lmo;
5516
5517 if (writebuf != NULL)
5518 return -2;
5519 if (readbuf == NULL)
5520 return -1;
5521
5522 pid = lwpid_of (get_thread_lwp (current_inferior));
5523 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5524 is_elf64 = elf_64_file_p (filename);
5525 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5526
5527 if (priv->r_debug == 0)
5528 priv->r_debug = get_r_debug (pid, is_elf64);
5529
5530 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5531 {
5532 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5533 }
5534 else
5535 {
5536 int allocated = 1024;
5537 char *p;
5538 const int ptr_size = is_elf64 ? 8 : 4;
5539 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5540 int r_version, header_done = 0;
5541
5542 document = xmalloc (allocated);
5543 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5544 p = document + strlen (document);
5545
5546 r_version = 0;
5547 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5548 (unsigned char *) &r_version,
5549 sizeof (r_version)) != 0
5550 || r_version != 1)
5551 {
5552 warning ("unexpected r_debug version %d", r_version);
5553 goto done;
5554 }
5555
5556 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5557 &lm_addr, ptr_size) != 0)
5558 {
5559 warning ("unable to read r_map from 0x%lx",
5560 (long) priv->r_debug + lmo->r_map_offset);
5561 goto done;
5562 }
5563
5564 lm_prev = 0;
5565 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5566 &l_name, ptr_size) == 0
5567 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5568 &l_addr, ptr_size) == 0
5569 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5570 &l_ld, ptr_size) == 0
5571 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5572 &l_prev, ptr_size) == 0
5573 && read_one_ptr (lm_addr + lmo->l_next_offset,
5574 &l_next, ptr_size) == 0)
5575 {
5576 unsigned char libname[PATH_MAX];
5577
5578 if (lm_prev != l_prev)
5579 {
5580 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5581 (long) lm_prev, (long) l_prev);
5582 break;
5583 }
5584
5585 /* Not checking for error because reading may stop before
5586 we've got PATH_MAX worth of characters. */
5587 libname[0] = '\0';
5588 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5589 libname[sizeof (libname) - 1] = '\0';
5590 if (libname[0] != '\0')
5591 {
5592 /* 6x the size for xml_escape_text below. */
5593 size_t len = 6 * strlen ((char *) libname);
5594 char *name;
5595
5596 if (!header_done)
5597 {
5598 /* Terminate `<library-list-svr4'. */
5599 *p++ = '>';
5600 header_done = 1;
5601 }
5602
5603 while (allocated < p - document + len + 200)
5604 {
5605 /* Expand to guarantee sufficient storage. */
5606 uintptr_t document_len = p - document;
5607
5608 document = xrealloc (document, 2 * allocated);
5609 allocated *= 2;
5610 p = document + document_len;
5611 }
5612
5613 name = xml_escape_text ((char *) libname);
5614 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5615 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5616 name, (unsigned long) lm_addr,
5617 (unsigned long) l_addr, (unsigned long) l_ld);
5618 free (name);
5619 }
5620 else if (lm_prev == 0)
5621 {
5622 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5623 p = p + strlen (p);
5624 }
5625
5626 if (l_next == 0)
5627 break;
5628
5629 lm_prev = lm_addr;
5630 lm_addr = l_next;
5631 }
5632 done:
5633 if (!header_done)
5634 {
5635 /* Empty list; terminate `<library-list-svr4'. */
5636 strcpy (p, "/>");
5637 }
5638 else
5639 strcpy (p, "</library-list-svr4>");
5640 }
5641
5642 document_len = strlen (document);
5643 if (offset < document_len)
5644 document_len -= offset;
5645 else
5646 document_len = 0;
5647 if (len > document_len)
5648 len = document_len;
5649
5650 memcpy (readbuf, document + offset, len);
5651 xfree (document);
5652
5653 return len;
5654 }
5655
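/* The Linux target_ops vector. NULL entries are operations this
   target does not implement. */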
5656 static struct target_ops linux_target_ops = {
5657 linux_create_inferior,
5658 linux_attach,
5659 linux_kill,
5660 linux_detach,
5661 linux_mourn,
5662 linux_join,
5663 linux_thread_alive,
5664 linux_resume,
5665 linux_wait,
5666 linux_fetch_registers,
5667 linux_store_registers,
5668 linux_prepare_to_access_memory,
5669 linux_done_accessing_memory,
5670 linux_read_memory,
5671 linux_write_memory,
5672 linux_look_up_symbols,
5673 linux_request_interrupt,
5674 linux_read_auxv,
5675 linux_insert_point,
5676 linux_remove_point,
5677 linux_stopped_by_watchpoint,
5678 linux_stopped_data_address,
5679 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5680 linux_read_offsets,
5681 #else
5682 NULL,
5683 #endif
5684 #ifdef USE_THREAD_DB
5685 thread_db_get_tls_address,
5686 #else
5687 NULL,
5688 #endif
5689 linux_qxfer_spu,
5690 hostio_last_error_from_errno,
5691 linux_qxfer_osdata,
5692 linux_xfer_siginfo,
5693 linux_supports_non_stop,
5694 linux_async,
5695 linux_start_non_stop,
5696 linux_supports_multi_process,
5697 #ifdef USE_THREAD_DB
5698 thread_db_handle_monitor_command,
5699 #else
5700 NULL,
5701 #endif
5702 linux_common_core_of_thread,
5703 linux_read_loadmap,
5704 linux_process_qsupported,
5705 linux_supports_tracepoints,
5706 linux_read_pc,
5707 linux_write_pc,
5708 linux_thread_stopped,
5709 NULL,
5710 linux_pause_all,
5711 linux_unpause_all,
5712 linux_cancel_breakpoints,
5713 linux_stabilize_threads,
5714 linux_install_fast_tracepoint_jump_pad,
5715 linux_emit_ops,
5716 linux_supports_disable_randomization,
5717 linux_get_min_fast_tracepoint_insn_len,
5718 linux_qxfer_libraries_svr4,
5719 linux_supports_agent,
5720 };
5721
5722 static void
5723 linux_init_signals (void)
5724 {
5725 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5726 to find what the cancel signal actually is. */
5727 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5728 signal (__SIGRTMIN+1, SIG_IGN);
5729 #endif
5730 }
5731
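/* One-time initialization of the Linux low target: install the target
   vector, probe the kernel's ptrace capabilities, and set up the
   SIGCHLD handler. */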
5732 void
5733 initialize_low (void)
5734 {
5735 struct sigaction sigchld_action;
5736 memset (&sigchld_action, 0, sizeof (sigchld_action));
5737 set_target_ops (&linux_target_ops);
5738 set_breakpoint_data (the_low_target.breakpoint,
5739 the_low_target.breakpoint_len);
5740 linux_init_signals ();
5741 linux_test_for_tracefork ();
5742 #ifdef HAVE_LINUX_REGSETS
5743 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5744 ;
5745 disabled_regsets = xmalloc (num_regsets);
5746 #endif
5747
5748 sigchld_action.sa_handler = sigchld_handler;
5749 sigemptyset (&sigchld_action.sa_mask);
5750 sigchld_action.sa_flags = SA_RESTART;
5751 sigaction (SIGCHLD, &sigchld_action, NULL);
5752 }