/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
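
/* For example, on most Linux targets SIGSTOP is 19 (0x13), so
   W_STOPCODE (SIGSTOP) yields 0x137f, a status for which WIFSTOPPED
   is true and WSTOPSIG returns SIGSTOP.  */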

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
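
/* These helpers cooperate with linux_wait_for_lwp and
   handle_extended_wait below: a stop reported for a child we do not
   know yet is parked on STOPPED_PIDS via add_to_pid_list, and is
   later claimed with pull_pid_from_list once the PTRACE_EVENT_CLONE
   event naming it arrives.  */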

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}
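
/* The e_ident tests above match the standard ELF magic bytes 0x7f,
   'E', 'L', 'F'; EI_CLASS then distinguishes ELFCLASS32 from
   ELFCLASS64, which is all we care about here.  */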

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}
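
/* Note that /proc/PID/exe is a symbolic link to the executable, and
   the open call in elf_64_file_p follows it, so no explicit readlink
   step is needed here.  */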

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
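
/* The alternation above exists because kernels lacking __WALL only
   let a tracer wait for clone children and for non-clone children
   separately; my_waitpid therefore flips the __WCLONE bit between
   otherwise identical WNOHANG waits, sleeping in sigsuspend between
   rounds when the caller asked for a blocking wait.  */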

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
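
/* The extended-event decoding above relies on clone event reporting
   having been enabled (PTRACE_O_TRACECLONE, see
   linux_enable_event_reporting); the kernel then encodes
   PTRACE_EVENT_* in bits 16 and up of the waitpid status, which is
   why handle_extended_wait extracts the event with "wstat >> 16".  */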

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
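
/* As a concrete example of the above: on i386, decr_pc_after_break
   is 1, because hitting an int3 breakpoint leaves $eip one byte past
   the 0xcc opcode; the adjustment makes the returned stop pc match
   the address GDB set the breakpoint at.  */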

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

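/* Helper state for second_thread_of_pid_p below: COUNT accumulates
   how many threads of PID have been seen during a find_inferior
   walk.  */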
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
                 pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS if the wait actually succeeded; it is
       indeterminate otherwise.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

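/* Callback for find_inferior.  Match ENTRY against the LWP ID taken
   from DATA's ptid, falling back to its PID when no LWP ID was
   given.  */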
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      if (ptid_is_pid (ptid)
          && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
        {
          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
          continue;
        }

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1853
1854 /* Count the LWP's that have had events. */
1855
1856 static int
1857 count_events_callback (struct inferior_list_entry *entry, void *data)
1858 {
1859 struct lwp_info *lp = (struct lwp_info *) entry;
1860 struct thread_info *thread = get_lwp_thread (lp);
1861 int *count = data;
1862
1863 gdb_assert (count != NULL);
1864
1865 /* Count only resumed LWPs that have a SIGTRAP event pending that
1866 should be reported to GDB. */
1867 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1868 && thread->last_resume_kind != resume_stop
1869 && lp->status_pending_p
1870 && WIFSTOPPED (lp->status_pending)
1871 && WSTOPSIG (lp->status_pending) == SIGTRAP
1872 && !breakpoint_inserted_here (lp->stop_pc))
1873 (*count)++;
1874
1875 return 0;
1876 }
1877
1878 /* Select the LWP (if any) that is currently being single-stepped. */
1879
1880 static int
1881 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1882 {
1883 struct lwp_info *lp = (struct lwp_info *) entry;
1884 struct thread_info *thread = get_lwp_thread (lp);
1885
1886 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1887 && thread->last_resume_kind == resume_step
1888 && lp->status_pending_p)
1889 return 1;
1890 else
1891 return 0;
1892 }
1893
1894 /* Select the Nth LWP that has had a SIGTRAP event that should be
1895 reported to GDB. */
1896
1897 static int
1898 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1899 {
1900 struct lwp_info *lp = (struct lwp_info *) entry;
1901 struct thread_info *thread = get_lwp_thread (lp);
1902 int *selector = data;
1903
1904 gdb_assert (selector != NULL);
1905
1906 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1907 if (thread->last_resume_kind != resume_stop
1908 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1909 && lp->status_pending_p
1910 && WIFSTOPPED (lp->status_pending)
1911 && WSTOPSIG (lp->status_pending) == SIGTRAP
1912 && !breakpoint_inserted_here (lp->stop_pc))
1913 if ((*selector)-- == 0)
1914 return 1;
1915
1916 return 0;
1917 }
1918
1919 static int
1920 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1921 {
1922 struct lwp_info *lp = (struct lwp_info *) entry;
1923 struct thread_info *thread = get_lwp_thread (lp);
1924 struct lwp_info *event_lp = data;
1925
1926 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1927 if (lp == event_lp)
1928 return 0;
1929
1930 /* If an LWP other than the one we're reporting an event for has
1931 hit a GDB breakpoint (as opposed to some random trap signal),
1932 then just arrange for it to hit it again later. We don't keep
1933 the SIGTRAP status and don't forward the SIGTRAP signal to the
1934 LWP. We will handle the current event; eventually we will resume
1935 all LWPs, and this one will get its breakpoint trap again.
1936
1937 If we do not do this, then we run the risk that the user will
1938 delete or disable the breakpoint, but the LWP will have already
1939 tripped on it. */
1940
1941 if (thread->last_resume_kind != resume_stop
1942 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1943 && lp->status_pending_p
1944 && WIFSTOPPED (lp->status_pending)
1945 && WSTOPSIG (lp->status_pending) == SIGTRAP
1946 && !lp->stepping
1947 && !lp->stopped_by_watchpoint
1948 && cancel_breakpoint (lp))
1949 /* Throw away the SIGTRAP. */
1950 lp->status_pending_p = 0;
1951
1952 return 0;
1953 }
1954
1955 static void
1956 linux_cancel_breakpoints (void)
1957 {
1958 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1959 }
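
/* Aside (not in the original source): cancel_breakpoint ultimately
   rewinds the thread's PC so that the breakpoint instruction executes
   again later. A minimal sketch of that register-level operation,
   assuming x86-64 Linux only (where the INT3 trap leaves the PC one
   byte past the breakpoint address, i.e. decr_pc_after_break == 1): */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static int
rewind_pc_after_int3 (pid_t lwpid)
{
  struct user_regs_struct regs;

  if (ptrace (PTRACE_GETREGS, lwpid, 0, &regs) != 0)
    return -1;
  regs.rip -= 1;	/* undo decr_pc_after_break */
  return ptrace (PTRACE_SETREGS, lwpid, 0, &regs) == 0 ? 0 : -1;
}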
1960
1961 /* Select one LWP out of those that have events pending. */
1962
1963 static void
1964 select_event_lwp (struct lwp_info **orig_lp)
1965 {
1966 int num_events = 0;
1967 int random_selector;
1968 struct lwp_info *event_lp;
1969
1970 /* Give preference to any LWP that is being single-stepped. */
1971 event_lp
1972 = (struct lwp_info *) find_inferior (&all_lwps,
1973 select_singlestep_lwp_callback, NULL);
1974 if (event_lp != NULL)
1975 {
1976 if (debug_threads)
1977 fprintf (stderr,
1978 "SEL: Select single-step %s\n",
1979 target_pid_to_str (ptid_of (event_lp)));
1980 }
1981 else
1982 {
1983 /* No single-stepping LWP. Select one at random, out of those
1984 which have had SIGTRAP events. */
1985
1986 /* First see how many SIGTRAP events we have. */
1987 find_inferior (&all_lwps, count_events_callback, &num_events);
1988
1989 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1990 random_selector = (int)
1991 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1992
1993 if (debug_threads && num_events > 1)
1994 fprintf (stderr,
1995 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1996 num_events, random_selector);
1997
1998 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1999 select_event_lwp_callback,
2000 &random_selector);
2001 }
2002
2003 if (event_lp != NULL)
2004 {
2005 /* Switch the event LWP. */
2006 *orig_lp = event_lp;
2007 }
2008 }
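
/* Aside (not in the original source): the selector arithmetic above
   maps rand () uniformly onto 0 .. num_events - 1. Dividing by
   RAND_MAX + 1.0 keeps the quotient strictly below num_events, so the
   chosen index can never equal the event count. In isolation: */
#include <stdlib.h>

static int
pick_uniform_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}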
2009
2010 /* Decrement the suspend count of an LWP. */
2011
2012 static int
2013 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2014 {
2015 struct lwp_info *lwp = (struct lwp_info *) entry;
2016
2017 /* Ignore EXCEPT. */
2018 if (lwp == except)
2019 return 0;
2020
2021 lwp->suspended--;
2022
2023 gdb_assert (lwp->suspended >= 0);
2024 return 0;
2025 }
2026
2027 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2028 non-NULL. */
2029
2030 static void
2031 unsuspend_all_lwps (struct lwp_info *except)
2032 {
2033 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2034 }
2035
2036 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2037 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2038 void *data);
2039 static int lwp_running (struct inferior_list_entry *entry, void *data);
2040 static ptid_t linux_wait_1 (ptid_t ptid,
2041 struct target_waitstatus *ourstatus,
2042 int target_options);
2043
2044 /* Stabilize threads (move out of jump pads).
2045
2046 If a thread is midway through collecting a fast tracepoint, we need
2047 to finish the collection and move it out of the jump pad before
2048 reporting the signal.
2049 
2050 This avoids recursion while collecting (when a signal arrives
2051 midway and the signal handler itself collects), which would trash
2052 the trace buffer. In case the user set a breakpoint in a signal
2053 handler, this avoids the backtrace showing the jump pad, etc.
2054 Most importantly, there are certain things we can't do safely if
2055 threads are stopped in a jump pad (or in its callees). For
2056 example:
2057 
2058 - starting a new trace run. A thread still collecting the
2059 previous run could trash the trace buffer when resumed. The trace
2060 buffer control structures would have been reset but the thread had
2061 no way to tell. The thread could even be midway through memcpy'ing
2062 to the buffer, which would mean that when resumed, it would clobber
2063 the trace buffer that had been set up for the new run.
2064 
2065 - we can't rewrite/reuse the jump pads for new tracepoints
2066 safely. Say you do tstart while a thread is stopped midway through
2067 a collection. When the thread is later resumed, it finishes the
2068 collection and returns to the jump pad, to execute the original
2069 instruction that was under the tracepoint jump at the time the
2070 older run had been started. If the jump pad has since been
2071 rewritten for something else in the new run, the thread would now
2072 execute wrong or random instructions. */
2073
2074 static void
2075 linux_stabilize_threads (void)
2076 {
2077 struct thread_info *save_inferior;
2078 struct lwp_info *lwp_stuck;
2079
2080 lwp_stuck
2081 = (struct lwp_info *) find_inferior (&all_lwps,
2082 stuck_in_jump_pad_callback, NULL);
2083 if (lwp_stuck != NULL)
2084 {
2085 if (debug_threads)
2086 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2087 lwpid_of (lwp_stuck));
2088 return;
2089 }
2090
2091 save_inferior = current_inferior;
2092
2093 stabilizing_threads = 1;
2094
2095 /* Kick 'em all. */
2096 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2097
2098 /* Loop until all of them have stopped outside the jump pads. */
2099 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2100 {
2101 struct target_waitstatus ourstatus;
2102 struct lwp_info *lwp;
2103 int wstat;
2104
2105 /* Note that we go through the full wait event loop. While
2106 moving threads out of the jump pad, we need to be able to step
2107 over internal breakpoints and such. */
2108 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2109
2110 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2111 {
2112 lwp = get_thread_lwp (current_inferior);
2113
2114 /* Lock it. */
2115 lwp->suspended++;
2116
2117 if (ourstatus.value.sig != TARGET_SIGNAL_0
2118 || current_inferior->last_resume_kind == resume_stop)
2119 {
2120 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2121 enqueue_one_deferred_signal (lwp, &wstat);
2122 }
2123 }
2124 }
2125
2126 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2127
2128 stabilizing_threads = 0;
2129
2130 current_inferior = save_inferior;
2131
2132 if (debug_threads)
2133 {
2134 lwp_stuck
2135 = (struct lwp_info *) find_inferior (&all_lwps,
2136 stuck_in_jump_pad_callback, NULL);
2137 if (lwp_stuck != NULL)
2138 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2139 lwpid_of (lwp_stuck));
2140 }
2141 }
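
/* Aside (not in the original source): W_STOPCODE, used above to
   synthesize a wait status for the deferred signal, is the inverse of
   the standard WIFSTOPPED/WSTOPSIG macros. A self-contained check
   (the fallback definition mirrors the one near the top of this
   file): */
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

static void
wstopcode_round_trip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
}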
2142
2143 /* Wait for a process event; return its ptid and store the status in OURSTATUS. */
2144
2145 static ptid_t
2146 linux_wait_1 (ptid_t ptid,
2147 struct target_waitstatus *ourstatus, int target_options)
2148 {
2149 int w;
2150 struct lwp_info *event_child;
2151 int options;
2152 int pid;
2153 int step_over_finished;
2154 int bp_explains_trap;
2155 int maybe_internal_trap;
2156 int report_to_gdb;
2157 int trace_event;
2158
2159 /* Translate generic target options into linux options. */
2160 options = __WALL;
2161 if (target_options & TARGET_WNOHANG)
2162 options |= WNOHANG;
2163
2164 retry:
2165 bp_explains_trap = 0;
2166 trace_event = 0;
2167 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2168
2169 /* If we were only supposed to resume one thread, only wait for
2170 that thread - if it's still alive. If it died, however - which
2171 can happen if we're coming from the thread death case below -
2172 then we need to make sure we restart the other threads. We could
2173 pick a thread at random or restart all; restarting all is less
2174 arbitrary. */
2175 if (!non_stop
2176 && !ptid_equal (cont_thread, null_ptid)
2177 && !ptid_equal (cont_thread, minus_one_ptid))
2178 {
2179 struct thread_info *thread;
2180
2181 thread = (struct thread_info *) find_inferior_id (&all_threads,
2182 cont_thread);
2183
2184 /* No stepping, no signal - unless one is pending already, of course. */
2185 if (thread == NULL)
2186 {
2187 struct thread_resume resume_info;
2188 resume_info.thread = minus_one_ptid;
2189 resume_info.kind = resume_continue;
2190 resume_info.sig = 0;
2191 linux_resume (&resume_info, 1);
2192 }
2193 else
2194 ptid = cont_thread;
2195 }
2196
2197 if (ptid_equal (step_over_bkpt, null_ptid))
2198 pid = linux_wait_for_event (ptid, &w, options);
2199 else
2200 {
2201 if (debug_threads)
2202 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2203 target_pid_to_str (step_over_bkpt));
2204 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2205 }
2206
2207 if (pid == 0) /* only if TARGET_WNOHANG */
2208 return null_ptid;
2209
2210 event_child = get_thread_lwp (current_inferior);
2211
2212 /* If we are waiting for a particular child, and it exited,
2213 linux_wait_for_event will return its exit status. Similarly if
2214 the last child exited. If this is not the last child, however,
2215 do not report it as exited until there is a 'thread exited' response
2216 available in the remote protocol. Instead, just wait for another event.
2217 This should be safe, because if the thread crashed we will already
2218 have reported the termination signal to GDB; that should stop any
2219 in-progress stepping operations, etc.
2220
2221 Report the exit status of the last thread to exit. This matches
2222 LinuxThreads' behavior. */
2223
2224 if (last_thread_of_process_p (current_inferior))
2225 {
2226 if (WIFEXITED (w) || WIFSIGNALED (w))
2227 {
2228 if (WIFEXITED (w))
2229 {
2230 ourstatus->kind = TARGET_WAITKIND_EXITED;
2231 ourstatus->value.integer = WEXITSTATUS (w);
2232
2233 if (debug_threads)
2234 fprintf (stderr,
2235 "\nChild exited with retcode = %x \n",
2236 WEXITSTATUS (w));
2237 }
2238 else
2239 {
2240 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2241 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2242
2243 if (debug_threads)
2244 fprintf (stderr,
2245 "\nChild terminated with signal = %x \n",
2246 WTERMSIG (w));
2247
2248 }
2249
2250 return ptid_of (event_child);
2251 }
2252 }
2253 else
2254 {
2255 if (!WIFSTOPPED (w))
2256 goto retry;
2257 }
2258
2259 /* If this event was not handled before, and is not a SIGTRAP, we
2260 report it. SIGILL and SIGSEGV are also treated as traps in case
2261 a breakpoint is inserted at the current PC. If this target does
2262 not support internal breakpoints at all, we also report the
2263 SIGTRAP without further processing; it's of no concern to us. */
2264 maybe_internal_trap
2265 = (supports_breakpoints ()
2266 && (WSTOPSIG (w) == SIGTRAP
2267 || ((WSTOPSIG (w) == SIGILL
2268 || WSTOPSIG (w) == SIGSEGV)
2269 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2270
2271 if (maybe_internal_trap)
2272 {
2273 /* Handle anything that requires bookkeeping before deciding to
2274 report the event or continue waiting. */
2275
2276 /* First check if we can explain the SIGTRAP with an internal
2277 breakpoint, or if we should possibly report the event to GDB.
2278 Do this before anything that may remove or insert a
2279 breakpoint. */
2280 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2281
2282 /* We have a SIGTRAP, possibly a step-over dance has just
2283 finished. If so, tweak the state machine accordingly,
2284 reinsert breakpoints and delete any reinsert (software
2285 single-step) breakpoints. */
2286 step_over_finished = finish_step_over (event_child);
2287
2288 /* Now invoke the callbacks of any internal breakpoints there. */
2289 check_breakpoints (event_child->stop_pc);
2290
2291 /* Handle tracepoint data collecting. This may overflow the
2292 trace buffer, and cause a tracing stop, removing
2293 breakpoints. */
2294 trace_event = handle_tracepoints (event_child);
2295
2296 if (bp_explains_trap)
2297 {
2298 /* If we stepped or ran into an internal breakpoint, we've
2299 already handled it. So next time we resume (from this
2300 PC), we should step over it. */
2301 if (debug_threads)
2302 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2303
2304 if (breakpoint_here (event_child->stop_pc))
2305 event_child->need_step_over = 1;
2306 }
2307 }
2308 else
2309 {
2310 /* We have some other signal, possibly a step-over dance was in
2311 progress, and it should be cancelled too. */
2312 step_over_finished = finish_step_over (event_child);
2313 }
2314
2315 /* We have all the data we need. Either report the event to GDB, or
2316 resume threads and keep waiting for more. */
2317
2318 /* If we're collecting a fast tracepoint, finish the collection and
2319 move out of the jump pad before delivering a signal. See
2320 linux_stabilize_threads. */
2321
2322 if (WIFSTOPPED (w)
2323 && WSTOPSIG (w) != SIGTRAP
2324 && supports_fast_tracepoints ()
2325 && agent_loaded_p ())
2326 {
2327 if (debug_threads)
2328 fprintf (stderr,
2329 "Got signal %d for LWP %ld. Check if we need "
2330 "to defer or adjust it.\n",
2331 WSTOPSIG (w), lwpid_of (event_child));
2332
2333 /* Allow debugging the jump pad itself. */
2334 if (current_inferior->last_resume_kind != resume_step
2335 && maybe_move_out_of_jump_pad (event_child, &w))
2336 {
2337 enqueue_one_deferred_signal (event_child, &w);
2338
2339 if (debug_threads)
2340 fprintf (stderr,
2341 "Signal %d for LWP %ld deferred (in jump pad)\n",
2342 WSTOPSIG (w), lwpid_of (event_child));
2343
2344 linux_resume_one_lwp (event_child, 0, 0, NULL);
2345 goto retry;
2346 }
2347 }
2348
2349 if (event_child->collecting_fast_tracepoint)
2350 {
2351 if (debug_threads)
2352 fprintf (stderr, "\
2353 LWP %ld was trying to move out of the jump pad (%d). \
2354 Check if we're already there.\n",
2355 lwpid_of (event_child),
2356 event_child->collecting_fast_tracepoint);
2357
2358 trace_event = 1;
2359
2360 event_child->collecting_fast_tracepoint
2361 = linux_fast_tracepoint_collecting (event_child, NULL);
2362
2363 if (event_child->collecting_fast_tracepoint != 1)
2364 {
2365 /* No longer need this breakpoint. */
2366 if (event_child->exit_jump_pad_bkpt != NULL)
2367 {
2368 if (debug_threads)
2369 fprintf (stderr,
2370 "No longer need exit-jump-pad bkpt; removing it."
2371 "stopping all threads momentarily.\n");
2372
2373 /* Other running threads could hit this breakpoint.
2374 We don't handle moribund locations like GDB does;
2375 instead we always pause all threads when removing
2376 breakpoints, so that any step-over or
2377 decr_pc_after_break adjustment is always taken
2378 care of while the breakpoint is still
2379 inserted. */
2380 stop_all_lwps (1, event_child);
2381 cancel_breakpoints ();
2382
2383 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2384 event_child->exit_jump_pad_bkpt = NULL;
2385
2386 unstop_all_lwps (1, event_child);
2387
2388 gdb_assert (event_child->suspended >= 0);
2389 }
2390 }
2391
2392 if (event_child->collecting_fast_tracepoint == 0)
2393 {
2394 if (debug_threads)
2395 fprintf (stderr,
2396 "fast tracepoint finished "
2397 "collecting successfully.\n");
2398
2399 /* We may have a deferred signal to report. */
2400 if (dequeue_one_deferred_signal (event_child, &w))
2401 {
2402 if (debug_threads)
2403 fprintf (stderr, "dequeued one signal.\n");
2404 }
2405 else
2406 {
2407 if (debug_threads)
2408 fprintf (stderr, "no deferred signals.\n");
2409
2410 if (stabilizing_threads)
2411 {
2412 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2413 ourstatus->value.sig = TARGET_SIGNAL_0;
2414 return ptid_of (event_child);
2415 }
2416 }
2417 }
2418 }
2419
2420 /* Check whether GDB would be interested in this event. */
2421
2422 /* If GDB is not interested in this signal, don't stop other
2423 threads, and don't report it to GDB. Just resume the inferior
2424 right away. We do this for threading-related signals as well as
2425 any that GDB specifically requested we ignore. But never ignore
2426 SIGSTOP if we sent it ourselves, and do not ignore signals when
2427 stepping - they may require special handling to skip the signal
2428 handler. */
2429 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2430 thread library? */
2431 if (WIFSTOPPED (w)
2432 && current_inferior->last_resume_kind != resume_step
2433 && (
2434 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2435 (current_process ()->private->thread_db != NULL
2436 && (WSTOPSIG (w) == __SIGRTMIN
2437 || WSTOPSIG (w) == __SIGRTMIN + 1))
2438 ||
2439 #endif
2440 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2441 && !(WSTOPSIG (w) == SIGSTOP
2442 && current_inferior->last_resume_kind == resume_stop))))
2443 {
2444 siginfo_t info, *info_p;
2445
2446 if (debug_threads)
2447 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2448 WSTOPSIG (w), lwpid_of (event_child));
2449
2450 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2451 info_p = &info;
2452 else
2453 info_p = NULL;
2454 linux_resume_one_lwp (event_child, event_child->stepping,
2455 WSTOPSIG (w), info_p);
2456 goto retry;
2457 }
2458
2459 /* If GDB wanted this thread to single step, we always want to
2460 report the SIGTRAP, and let GDB handle it. Watchpoints should
2461 always be reported. So should signals we can't explain. A
2462 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2463 may not support Z0 breakpoints. If we do, we'll be able to handle
2464 GDB breakpoints on top of internal breakpoints, by handling the
2465 internal breakpoint and still reporting the event to GDB. If we
2466 don't, we're out of luck; GDB won't see the breakpoint hit. */
2467 report_to_gdb = (!maybe_internal_trap
2468 || current_inferior->last_resume_kind == resume_step
2469 || event_child->stopped_by_watchpoint
2470 || (!step_over_finished
2471 && !bp_explains_trap && !trace_event)
2472 || (gdb_breakpoint_here (event_child->stop_pc)
2473 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
2474
2475 /* We found no reason GDB would want us to stop. We either hit one
2476 of our own breakpoints, or finished an internal step GDB
2477 shouldn't know about. */
2478 if (!report_to_gdb)
2479 {
2480 if (debug_threads)
2481 {
2482 if (bp_explains_trap)
2483 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2484 if (step_over_finished)
2485 fprintf (stderr, "Step-over finished.\n");
2486 if (trace_event)
2487 fprintf (stderr, "Tracepoint event.\n");
2488 }
2489
2490 /* We're not reporting this breakpoint to GDB, so apply the
2491 decr_pc_after_break adjustment to the inferior's regcache
2492 ourselves. */
2493
2494 if (the_low_target.set_pc != NULL)
2495 {
2496 struct regcache *regcache
2497 = get_thread_regcache (get_lwp_thread (event_child), 1);
2498 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2499 }
2500
2501 /* We may have finished stepping over a breakpoint. If so,
2502 we've stopped and suspended all LWPs momentarily except the
2503 stepping one. This is where we resume them all again. We're
2504 going to keep waiting, so use proceed, which handles stepping
2505 over the next breakpoint. */
2506 if (debug_threads)
2507 fprintf (stderr, "proceeding all threads.\n");
2508
2509 if (step_over_finished)
2510 unsuspend_all_lwps (event_child);
2511
2512 proceed_all_lwps ();
2513 goto retry;
2514 }
2515
2516 if (debug_threads)
2517 {
2518 if (current_inferior->last_resume_kind == resume_step)
2519 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2520 if (event_child->stopped_by_watchpoint)
2521 fprintf (stderr, "Stopped by watchpoint.\n");
2522 if (gdb_breakpoint_here (event_child->stop_pc))
2523 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2524 fprintf (stderr,
2525 "Hit a non-gdbserver trap event.\n");
2526 }
2527
2528 /* Alright, we're going to report a stop. */
2529
2530 if (!non_stop && !stabilizing_threads)
2531 {
2532 /* In all-stop, stop all threads. */
2533 stop_all_lwps (0, NULL);
2534
2535 /* If we're not waiting for a specific LWP, choose an event LWP
2536 from among those that have had events. Giving equal priority
2537 to all LWPs that have had events helps prevent
2538 starvation. */
2539 if (ptid_equal (ptid, minus_one_ptid))
2540 {
2541 event_child->status_pending_p = 1;
2542 event_child->status_pending = w;
2543
2544 select_event_lwp (&event_child);
2545
2546 event_child->status_pending_p = 0;
2547 w = event_child->status_pending;
2548 }
2549
2550 /* Now that we've selected our final event LWP, cancel any
2551 breakpoints in other LWPs that have hit a GDB breakpoint.
2552 See the comment in cancel_breakpoints_callback to find out
2553 why. */
2554 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2555
2556 /* If we were doing a step-over, all other threads but the stepping one
2557 had been paused in start_step_over, with their suspend counts
2558 incremented. We don't want to do a full unstop/unpause, because we're
2559 in all-stop mode (so we want threads stopped), but we still need to
2560 unsuspend the other threads, to decrement their `suspended' count
2561 back. */
2562 if (step_over_finished)
2563 unsuspend_all_lwps (event_child);
2564
2565 /* Stabilize threads (move out of jump pads). */
2566 stabilize_threads ();
2567 }
2568 else
2569 {
2570 /* If we just finished a step-over, then all threads had been
2571 momentarily paused. In all-stop, that's fine, we want
2572 threads stopped by now anyway. In non-stop, we need to
2573 re-resume threads that GDB wanted to be running. */
2574 if (step_over_finished)
2575 unstop_all_lwps (1, event_child);
2576 }
2577
2578 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2579
2580 if (current_inferior->last_resume_kind == resume_stop
2581 && WSTOPSIG (w) == SIGSTOP)
2582 {
2583 /* GDB requested this thread to stop with vCont;t, and it
2584 stopped cleanly, so report it as stopped with SIG0. The use of
2585 SIGSTOP is an implementation detail. */
2586 ourstatus->value.sig = TARGET_SIGNAL_0;
2587 }
2588 else if (current_inferior->last_resume_kind == resume_stop
2589 && WSTOPSIG (w) != SIGSTOP)
2590 {
2591 /* GDB requested this thread to stop with vCont;t, but it
2592 stopped for some other reason; report that signal. */
2593 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2594 }
2595 else
2596 {
2597 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2598 }
2599
2600 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2601
2602 if (debug_threads)
2603 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2604 target_pid_to_str (ptid_of (event_child)),
2605 ourstatus->kind,
2606 ourstatus->value.sig);
2607
2608 return ptid_of (event_child);
2609 }
2610
2611 /* Get rid of any pending event in the pipe. */
2612 static void
2613 async_file_flush (void)
2614 {
2615 int ret;
2616 char buf;
2617
2618 do
2619 ret = read (linux_event_pipe[0], &buf, 1);
2620 while (ret >= 0 || (ret == -1 && errno == EINTR));
2621 }
2622
2623 /* Put something in the pipe, so the event loop wakes up. */
2624 static void
2625 async_file_mark (void)
2626 {
2627 int ret;
2628
2629 async_file_flush ();
2630
2631 do
2632 ret = write (linux_event_pipe[1], "+", 1);
2633 while (ret == 0 || (ret == -1 && errno == EINTR));
2634
2635 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2636 be awakened anyway. */
2637 }
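
/* Aside (not in the original source): a sketch of how such an event
   pipe can be created; make_event_pipe is a hypothetical helper, and
   linux_event_pipe itself is set up elsewhere in this file. The
   property async_file_flush and async_file_mark rely on is that both
   ends are non-blocking, so flushing spins until the pipe is empty
   and marking can safely ignore a full pipe. */
#include <fcntl.h>
#include <unistd.h>

static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    {
      close (fds[0]);
      close (fds[1]);
      return -1;
    }

  return 0;
}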
2638
2639 static ptid_t
2640 linux_wait (ptid_t ptid,
2641 struct target_waitstatus *ourstatus, int target_options)
2642 {
2643 ptid_t event_ptid;
2644
2645 if (debug_threads)
2646 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2647
2648 /* Flush the async file first. */
2649 if (target_is_async_p ())
2650 async_file_flush ();
2651
2652 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2653
2654 /* If at least one stop was reported, there may be more. A single
2655 SIGCHLD can signal more than one child stop. */
2656 if (target_is_async_p ()
2657 && (target_options & TARGET_WNOHANG) != 0
2658 && !ptid_equal (event_ptid, null_ptid))
2659 async_file_mark ();
2660
2661 return event_ptid;
2662 }
2663
2664 /* Send a signal to an LWP. */
2665
2666 static int
2667 kill_lwp (unsigned long lwpid, int signo)
2668 {
2669 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2670 is unavailable (ENOSYS), we are not using nptl threads and should use kill. */
2671
2672 #ifdef __NR_tkill
2673 {
2674 static int tkill_failed;
2675
2676 if (!tkill_failed)
2677 {
2678 int ret;
2679
2680 errno = 0;
2681 ret = syscall (__NR_tkill, lwpid, signo);
2682 if (errno != ENOSYS)
2683 return ret;
2684 tkill_failed = 1;
2685 }
2686 }
2687 #endif
2688
2689 return kill (lwpid, signo);
2690 }
2691
2692 void
2693 linux_stop_lwp (struct lwp_info *lwp)
2694 {
2695 send_sigstop (lwp);
2696 }
2697
2698 static void
2699 send_sigstop (struct lwp_info *lwp)
2700 {
2701 int pid;
2702
2703 pid = lwpid_of (lwp);
2704
2705 /* If we already have a pending stop signal for this process, don't
2706 send another. */
2707 if (lwp->stop_expected)
2708 {
2709 if (debug_threads)
2710 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2711
2712 return;
2713 }
2714
2715 if (debug_threads)
2716 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2717
2718 lwp->stop_expected = 1;
2719 kill_lwp (pid, SIGSTOP);
2720 }
2721
2722 static int
2723 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2724 {
2725 struct lwp_info *lwp = (struct lwp_info *) entry;
2726
2727 /* Ignore EXCEPT. */
2728 if (lwp == except)
2729 return 0;
2730
2731 if (lwp->stopped)
2732 return 0;
2733
2734 send_sigstop (lwp);
2735 return 0;
2736 }
2737
2738 /* Increment the suspend count of an LWP, and stop it if it is not
2739 already stopped. */
2740 static int
2741 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2742 void *except)
2743 {
2744 struct lwp_info *lwp = (struct lwp_info *) entry;
2745
2746 /* Ignore EXCEPT. */
2747 if (lwp == except)
2748 return 0;
2749
2750 lwp->suspended++;
2751
2752 return send_sigstop_callback (entry, except);
2753 }
2754
2755 static void
2756 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2757 {
2758 /* It's dead, really. */
2759 lwp->dead = 1;
2760
2761 /* Store the exit status for later. */
2762 lwp->status_pending_p = 1;
2763 lwp->status_pending = wstat;
2764
2765 /* Prevent trying to stop it. */
2766 lwp->stopped = 1;
2767
2768 /* No further stops are expected from a dead lwp. */
2769 lwp->stop_expected = 0;
2770 }
2771
2772 static void
2773 wait_for_sigstop (struct inferior_list_entry *entry)
2774 {
2775 struct lwp_info *lwp = (struct lwp_info *) entry;
2776 struct thread_info *saved_inferior;
2777 int wstat;
2778 ptid_t saved_tid;
2779 ptid_t ptid;
2780 int pid;
2781
2782 if (lwp->stopped)
2783 {
2784 if (debug_threads)
2785 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2786 lwpid_of (lwp));
2787 return;
2788 }
2789
2790 saved_inferior = current_inferior;
2791 if (saved_inferior != NULL)
2792 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2793 else
2794 saved_tid = null_ptid; /* avoid bogus unused warning */
2795
2796 ptid = lwp->head.id;
2797
2798 if (debug_threads)
2799 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2800
2801 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2802
2803 /* If we stopped with a non-SIGSTOP signal, save it for later. If
2804 the process exited instead, mark the LWP dead below and leave its
2805 exit status pending. */
2806 if (WIFSTOPPED (wstat))
2807 {
2808 if (debug_threads)
2809 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2810 lwpid_of (lwp), WSTOPSIG (wstat));
2811
2812 if (WSTOPSIG (wstat) != SIGSTOP)
2813 {
2814 if (debug_threads)
2815 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2816 lwpid_of (lwp), wstat);
2817
2818 lwp->status_pending_p = 1;
2819 lwp->status_pending = wstat;
2820 }
2821 }
2822 else
2823 {
2824 if (debug_threads)
2825 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2826
2827 lwp = find_lwp_pid (pid_to_ptid (pid));
2828 if (lwp)
2829 {
2830 /* Leave this status pending for the next time we're able to
2831 report it. In the meantime, we'll report this lwp as
2832 dead to GDB, so GDB doesn't try to read registers and
2833 memory from it. This can only happen if this was the
2834 last thread of the process; otherwise, PID is removed
2835 from the thread tables before linux_wait_for_event
2836 returns. */
2837 mark_lwp_dead (lwp, wstat);
2838 }
2839 }
2840
2841 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2842 current_inferior = saved_inferior;
2843 else
2844 {
2845 if (debug_threads)
2846 fprintf (stderr, "Previously current thread died.\n");
2847
2848 if (non_stop)
2849 {
2850 /* We can't change the current inferior behind GDB's back;
2851 otherwise, a subsequent command may apply to the wrong
2852 process. */
2853 current_inferior = NULL;
2854 }
2855 else
2856 {
2857 /* Set a valid thread as current. */
2858 set_desired_inferior (0);
2859 }
2860 }
2861 }
2862
2863 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2864 move it out, because we need to report the stop event to GDB. For
2865 example, if the user puts a breakpoint in the jump pad, it's
2866 because she wants to debug it. */
2867
2868 static int
2869 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2870 {
2871 struct lwp_info *lwp = (struct lwp_info *) entry;
2872 struct thread_info *thread = get_lwp_thread (lwp);
2873
2874 gdb_assert (lwp->suspended == 0);
2875 gdb_assert (lwp->stopped);
2876
2877 /* Allow debugging the jump pad, gdb_collect, etc. */
2878 return (supports_fast_tracepoints ()
2879 && agent_loaded_p ()
2880 && (gdb_breakpoint_here (lwp->stop_pc)
2881 || lwp->stopped_by_watchpoint
2882 || thread->last_resume_kind == resume_step)
2883 && linux_fast_tracepoint_collecting (lwp, NULL));
2884 }
2885
2886 static void
2887 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2888 {
2889 struct lwp_info *lwp = (struct lwp_info *) entry;
2890 struct thread_info *thread = get_lwp_thread (lwp);
2891 int *wstat;
2892
2893 gdb_assert (lwp->suspended == 0);
2894 gdb_assert (lwp->stopped);
2895
2896 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2897
2898 /* Allow debugging the jump pad, gdb_collect, etc. */
2899 if (!gdb_breakpoint_here (lwp->stop_pc)
2900 && !lwp->stopped_by_watchpoint
2901 && thread->last_resume_kind != resume_step
2902 && maybe_move_out_of_jump_pad (lwp, wstat))
2903 {
2904 if (debug_threads)
2905 fprintf (stderr,
2906 "LWP %ld needs stabilizing (in jump pad)\n",
2907 lwpid_of (lwp));
2908
2909 if (wstat)
2910 {
2911 lwp->status_pending_p = 0;
2912 enqueue_one_deferred_signal (lwp, wstat);
2913
2914 if (debug_threads)
2915 fprintf (stderr,
2916 "Signal %d for LWP %ld deferred "
2917 "(in jump pad)\n",
2918 WSTOPSIG (*wstat), lwpid_of (lwp));
2919 }
2920
2921 linux_resume_one_lwp (lwp, 0, 0, NULL);
2922 }
2923 else
2924 lwp->suspended++;
2925 }
2926
2927 static int
2928 lwp_running (struct inferior_list_entry *entry, void *data)
2929 {
2930 struct lwp_info *lwp = (struct lwp_info *) entry;
2931
2932 if (lwp->dead)
2933 return 0;
2934 if (lwp->stopped)
2935 return 0;
2936 return 1;
2937 }
2938
2939 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2940 If SUSPEND, then also increase the suspend count of every LWP,
2941 except EXCEPT. */
2942
2943 static void
2944 stop_all_lwps (int suspend, struct lwp_info *except)
2945 {
2946 stopping_threads = 1;
2947
2948 if (suspend)
2949 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2950 else
2951 find_inferior (&all_lwps, send_sigstop_callback, except);
2952 for_each_inferior (&all_lwps, wait_for_sigstop);
2953 stopping_threads = 0;
2954 }
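
/* Aside (not in the original source): stripped of the bookkeeping,
   the stop protocol above pairs a thread-directed SIGSTOP with a
   __WALL waitpid that reaps the resulting stop. A minimal sketch,
   assuming LWPID is an already-attached ptrace tracee: */
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int
stop_one_tracee (pid_t lwpid)
{
  int wstat;

  if (syscall (SYS_tkill, lwpid, SIGSTOP) != 0)
    return -1;
  if (waitpid (lwpid, &wstat, __WALL) != lwpid)
    return -1;
  return WIFSTOPPED (wstat) ? 0 : -1;
}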
2955
2956 /* Resume execution of the inferior process.
2957 If STEP is nonzero, single-step it.
2958 If SIGNAL is nonzero, give it that signal. */
2959
2960 static void
2961 linux_resume_one_lwp (struct lwp_info *lwp,
2962 int step, int signal, siginfo_t *info)
2963 {
2964 struct thread_info *saved_inferior;
2965 int fast_tp_collecting;
2966
2967 if (lwp->stopped == 0)
2968 return;
2969
2970 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2971
2972 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2973
2974 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2975 user used the "jump" command, or "set $pc = foo"). */
2976 if (lwp->stop_pc != get_pc (lwp))
2977 {
2978 /* Collecting 'while-stepping' actions doesn't make sense
2979 anymore. */
2980 release_while_stepping_state_list (get_lwp_thread (lwp));
2981 }
2982
2983 /* If we have pending signals or status, and a new signal, enqueue the
2984 signal. Also enqueue the signal if we are waiting to reinsert a
2985 breakpoint; it will be picked up again below. */
2986 if (signal != 0
2987 && (lwp->status_pending_p
2988 || lwp->pending_signals != NULL
2989 || lwp->bp_reinsert != 0
2990 || fast_tp_collecting))
2991 {
2992 struct pending_signals *p_sig;
2993 p_sig = xmalloc (sizeof (*p_sig));
2994 p_sig->prev = lwp->pending_signals;
2995 p_sig->signal = signal;
2996 if (info == NULL)
2997 memset (&p_sig->info, 0, sizeof (siginfo_t));
2998 else
2999 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3000 lwp->pending_signals = p_sig;
3001 }
3002
3003 if (lwp->status_pending_p)
3004 {
3005 if (debug_threads)
3006 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3007 " has pending status\n",
3008 lwpid_of (lwp), step ? "step" : "continue", signal,
3009 lwp->stop_expected ? "expected" : "not expected");
3010 return;
3011 }
3012
3013 saved_inferior = current_inferior;
3014 current_inferior = get_lwp_thread (lwp);
3015
3016 if (debug_threads)
3017 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3018 lwpid_of (lwp), step ? "step" : "continue", signal,
3019 lwp->stop_expected ? "expected" : "not expected");
3020
3021 /* This bit needs some thinking about. If we get a signal that
3022 we must report while a single-step reinsert is still pending,
3023 we often end up resuming the thread. It might be better to
3024 (ew) allow a stack of pending events; then we could be sure that
3025 the reinsert happened right away and not lose any signals.
3026
3027 Making this stack would also shrink the window in which breakpoints are
3028 uninserted (see comment in linux_wait_for_lwp) but not enough for
3029 complete correctness, so it won't solve that problem. It may be
3030 worthwhile just to solve this one, however. */
3031 if (lwp->bp_reinsert != 0)
3032 {
3033 if (debug_threads)
3034 fprintf (stderr, " pending reinsert at 0x%s\n",
3035 paddress (lwp->bp_reinsert));
3036
3037 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3038 {
3039 if (fast_tp_collecting == 0)
3040 {
3041 if (step == 0)
3042 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3043 if (lwp->suspended)
3044 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3045 lwp->suspended);
3046 }
3047
3048 step = 1;
3049 }
3050
3051 /* Postpone any pending signal. It was enqueued above. */
3052 signal = 0;
3053 }
3054
3055 if (fast_tp_collecting == 1)
3056 {
3057 if (debug_threads)
3058 fprintf (stderr, "\
3059 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3060 lwpid_of (lwp));
3061
3062 /* Postpone any pending signal. It was enqueued above. */
3063 signal = 0;
3064 }
3065 else if (fast_tp_collecting == 2)
3066 {
3067 if (debug_threads)
3068 fprintf (stderr, "\
3069 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3070 lwpid_of (lwp));
3071
3072 if (can_hardware_single_step ())
3073 step = 1;
3074 else
3075 fatal ("moving out of jump pad single-stepping"
3076 " not implemented on this target");
3077
3078 /* Postpone any pending signal. It was enqueued above. */
3079 signal = 0;
3080 }
3081
3082 /* If we have while-stepping actions in this thread, set it stepping.
3083 If we have a signal to deliver, it may or may not be set to
3084 SIG_IGN; we don't know. Assume so, and allow collecting
3085 while-stepping into a signal handler. A possible smart thing to
3086 do would be to set an internal breakpoint at the signal return
3087 address, continue, and carry on catching this while-stepping
3088 action only when that breakpoint is hit. A future
3089 enhancement. */
3090 if (get_lwp_thread (lwp)->while_stepping != NULL
3091 && can_hardware_single_step ())
3092 {
3093 if (debug_threads)
3094 fprintf (stderr,
3095 "lwp %ld has a while-stepping action -> forcing step.\n",
3096 lwpid_of (lwp));
3097 step = 1;
3098 }
3099
3100 if (debug_threads && the_low_target.get_pc != NULL)
3101 {
3102 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3103 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3104 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3105 }
3106
3107 /* If we have pending signals, consume one unless we are trying to
3108 reinsert a breakpoint or we're trying to finish a fast tracepoint
3109 collect. */
3110 if (lwp->pending_signals != NULL
3111 && lwp->bp_reinsert == 0
3112 && fast_tp_collecting == 0)
3113 {
3114 struct pending_signals **p_sig;
3115
3116 p_sig = &lwp->pending_signals;
3117 while ((*p_sig)->prev != NULL)
3118 p_sig = &(*p_sig)->prev;
3119
3120 signal = (*p_sig)->signal;
3121 if ((*p_sig)->info.si_signo != 0)
3122 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3123
3124 free (*p_sig);
3125 *p_sig = NULL;
3126 }
3127
3128 if (the_low_target.prepare_to_resume != NULL)
3129 the_low_target.prepare_to_resume (lwp);
3130
3131 regcache_invalidate_one ((struct inferior_list_entry *)
3132 get_lwp_thread (lwp));
3133 errno = 0;
3134 lwp->stopped = 0;
3135 lwp->stopped_by_watchpoint = 0;
3136 lwp->stepping = step;
3137 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3138 /* Coerce to a uintptr_t first to avoid potential gcc warning
3139 of coercing an 8 byte integer to a 4 byte pointer. */
3140 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3141
3142 current_inferior = saved_inferior;
3143 if (errno)
3144 {
3145 /* ESRCH from ptrace either means that the thread was already
3146 running (an error) or that it is gone (a race condition). If
3147 it's gone, we will get a notification the next time we wait,
3148 so we can ignore the error. We could differentiate these
3149 two, but it's tricky without waiting; the thread still exists
3150 as a zombie, so sending it signal 0 would succeed. So just
3151 ignore ESRCH. */
3152 if (errno == ESRCH)
3153 return;
3154
3155 perror_with_name ("ptrace");
3156 }
3157 }
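
/* Aside (not in the original source): the resume primitive above
   bottoms out in a single ptrace call; the signal to deliver rides in
   the fourth argument, with 0 meaning "no signal". A condensed
   sketch, tolerating ESRCH for the same racy-exit reason explained
   above: */
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
resume_tracee (pid_t lwpid, int step, int signo)
{
  errno = 0;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid, 0,
	  (void *) (uintptr_t) signo);
  if (errno != 0 && errno != ESRCH)
    return -1;
  return 0;
}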
3158
3159 struct thread_resume_array
3160 {
3161 struct thread_resume *resume;
3162 size_t n;
3163 };
3164
3165 /* This function is called once per thread. We look up the thread
3166 in ARG, and mark the thread with a pointer to the appropriate
3167 resume request.
3168
3169 This algorithm is O(threads * resume elements), but the number of
3170 resume elements is small (and will remain small at least until GDB
3171 supports thread suspension). */
3172 static int
3173 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3174 {
3175 struct lwp_info *lwp;
3176 struct thread_info *thread;
3177 int ndx;
3178 struct thread_resume_array *r;
3179
3180 thread = (struct thread_info *) entry;
3181 lwp = get_thread_lwp (thread);
3182 r = arg;
3183
3184 for (ndx = 0; ndx < r->n; ndx++)
3185 {
3186 ptid_t ptid = r->resume[ndx].thread;
3187 if (ptid_equal (ptid, minus_one_ptid)
3188 || ptid_equal (ptid, entry->id)
3189 || (ptid_is_pid (ptid)
3190 && (ptid_get_pid (ptid) == pid_of (lwp)))
3191 || (ptid_get_lwp (ptid) == -1
3192 && (ptid_get_pid (ptid) == pid_of (lwp))))
3193 {
3194 if (r->resume[ndx].kind == resume_stop
3195 && thread->last_resume_kind == resume_stop)
3196 {
3197 if (debug_threads)
3198 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3199 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3200 ? "stopped"
3201 : "stopping",
3202 lwpid_of (lwp));
3203
3204 continue;
3205 }
3206
3207 lwp->resume = &r->resume[ndx];
3208 thread->last_resume_kind = lwp->resume->kind;
3209
3210 /* If we had a deferred signal to report, dequeue one now.
3211 This can happen if LWP gets more than one signal while
3212 trying to get out of a jump pad. */
3213 if (lwp->stopped
3214 && !lwp->status_pending_p
3215 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3216 {
3217 lwp->status_pending_p = 1;
3218
3219 if (debug_threads)
3220 fprintf (stderr,
3221 "Dequeueing deferred signal %d for LWP %ld, "
3222 "leaving status pending.\n",
3223 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3224 }
3225
3226 return 0;
3227 }
3228 }
3229
3230 /* No resume action for this thread. */
3231 lwp->resume = NULL;
3232
3233 return 0;
3234 }
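
/* Aside (not in the original source): the matching rules above, in
   isolation. A request addressed to minus_one_ptid matches every
   thread; a pid-only ptid (or one whose lwp field is -1) matches
   every thread of that process; otherwise the ptid must match
   exactly. A sketch over a simplified, hypothetical ptid with just
   pid/lwp fields: */
struct tiny_ptid { int pid; long lwp; };

static int
resume_request_matches (struct tiny_ptid req, struct tiny_ptid thread)
{
  if (req.pid == -1)			/* minus_one_ptid: all threads */
    return 1;
  if (req.lwp == 0 || req.lwp == -1)	/* process-wide wildcard */
    return req.pid == thread.pid;
  return req.pid == thread.pid && req.lwp == thread.lwp;
}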
3235
3236
3237 /* Set *FLAG_P if this lwp has an interesting status pending. */
3238 static int
3239 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3240 {
3241 struct lwp_info *lwp = (struct lwp_info *) entry;
3242
3243 /* LWPs which will not be resumed are not interesting, because
3244 we might not wait for them next time through linux_wait. */
3245 if (lwp->resume == NULL)
3246 return 0;
3247
3248 if (lwp->status_pending_p)
3249 * (int *) flag_p = 1;
3250
3251 return 0;
3252 }
3253
3254 /* Return 1 if this lwp that GDB wants running is stopped at an
3255 internal breakpoint that we need to step over. It assumes that any
3256 required STOP_PC adjustment has already been propagated to the
3257 inferior's regcache. */
3258
3259 static int
3260 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3261 {
3262 struct lwp_info *lwp = (struct lwp_info *) entry;
3263 struct thread_info *thread;
3264 struct thread_info *saved_inferior;
3265 CORE_ADDR pc;
3266
3267 /* LWPs which will not be resumed are not interesting, because we
3268 might not wait for them next time through linux_wait. */
3269
3270 if (!lwp->stopped)
3271 {
3272 if (debug_threads)
3273 fprintf (stderr,
3274 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3275 lwpid_of (lwp));
3276 return 0;
3277 }
3278
3279 thread = get_lwp_thread (lwp);
3280
3281 if (thread->last_resume_kind == resume_stop)
3282 {
3283 if (debug_threads)
3284 fprintf (stderr,
3285 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3286 lwpid_of (lwp));
3287 return 0;
3288 }
3289
3290 gdb_assert (lwp->suspended >= 0);
3291
3292 if (lwp->suspended)
3293 {
3294 if (debug_threads)
3295 fprintf (stderr,
3296 "Need step over [LWP %ld]? Ignoring, suspended\n",
3297 lwpid_of (lwp));
3298 return 0;
3299 }
3300
3301 if (!lwp->need_step_over)
3302 {
3303 if (debug_threads)
3304 fprintf (stderr,
3305 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3306 }
3307
3308 if (lwp->status_pending_p)
3309 {
3310 if (debug_threads)
3311 fprintf (stderr,
3312 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3313 lwpid_of (lwp));
3314 return 0;
3315 }
3316
3317 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3318 or we have. */
3319 pc = get_pc (lwp);
3320
3321 /* If the PC has changed since we stopped, then don't do anything,
3322 and let the breakpoint/tracepoint be hit. This happens if, for
3323 instance, GDB handled the decr_pc_after_break subtraction itself,
3324 GDB is OOL stepping this thread, or the user has issued a "jump"
3325 command, or poked the thread's registers herself. */
3326 if (pc != lwp->stop_pc)
3327 {
3328 if (debug_threads)
3329 fprintf (stderr,
3330 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3331 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3332 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3333
3334 lwp->need_step_over = 0;
3335 return 0;
3336 }
3337
3338 saved_inferior = current_inferior;
3339 current_inferior = thread;
3340
3341 /* We can only step over breakpoints we know about. */
3342 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3343 {
3344 /* Don't step over a breakpoint that GDB expects to hit
3345 though. If the condition is being evaluated on the target's side
3346 and it evaluates to false, step over this breakpoint as well. */
3347 if (gdb_breakpoint_here (pc)
3348 && gdb_condition_true_at_breakpoint (pc))
3349 {
3350 if (debug_threads)
3351 fprintf (stderr,
3352 "Need step over [LWP %ld]? yes, but found"
3353 " GDB breakpoint at 0x%s; skipping step over\n",
3354 lwpid_of (lwp), paddress (pc));
3355
3356 current_inferior = saved_inferior;
3357 return 0;
3358 }
3359 else
3360 {
3361 if (debug_threads)
3362 fprintf (stderr,
3363 "Need step over [LWP %ld]? yes, "
3364 "found breakpoint at 0x%s\n",
3365 lwpid_of (lwp), paddress (pc));
3366
3367 /* We've found an lwp that needs stepping over --- return 1 so
3368 that find_inferior stops looking. */
3369 current_inferior = saved_inferior;
3370
3371 /* If the step over is cancelled, this is set again. */
3372 lwp->need_step_over = 0;
3373 return 1;
3374 }
3375 }
3376
3377 current_inferior = saved_inferior;
3378
3379 if (debug_threads)
3380 fprintf (stderr,
3381 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3382 lwpid_of (lwp), paddress (pc));
3383
3384 return 0;
3385 }
3386
3387 /* Start a step-over operation on LWP. When LWP is stopped at a
3388 breakpoint, to make progress, we need to move the breakpoint out
3389 of the way. If we let other threads run while we do that, they may
3390 pass by the breakpoint location and miss hitting it. To avoid
3391 that, a step-over momentarily stops all threads while LWP is
3392 single-stepped while the breakpoint is temporarily uninserted from
3393 the inferior. When the single-step finishes, we reinsert the
3394 breakpoint, and let all threads that are supposed to be running,
3395 run again.
3396
3397 On targets that don't support hardware single-step, we don't
3398 currently support full software single-stepping. Instead, we only
3399 support stepping over the thread event breakpoint, by asking the
3400 low target where to place a reinsert breakpoint. Since this
3401 routine assumes the breakpoint being stepped over is a thread event
3402 breakpoint, it usually takes the return address of the current
3403 function as a good enough place to set the reinsert breakpoint. */
3404
3405 static int
3406 start_step_over (struct lwp_info *lwp)
3407 {
3408 struct thread_info *saved_inferior;
3409 CORE_ADDR pc;
3410 int step;
3411
3412 if (debug_threads)
3413 fprintf (stderr,
3414 "Starting step-over on LWP %ld. Stopping all threads\n",
3415 lwpid_of (lwp));
3416
3417 stop_all_lwps (1, lwp);
3418 gdb_assert (lwp->suspended == 0);
3419
3420 if (debug_threads)
3421 fprintf (stderr, "Done stopping all threads for step-over.\n");
3422
3423 /* Note, we should always reach here with an already adjusted PC,
3424 either by GDB (if we're resuming due to GDB's request), or by our
3425 caller, if we just finished handling an internal breakpoint GDB
3426 shouldn't care about. */
3427 pc = get_pc (lwp);
3428
3429 saved_inferior = current_inferior;
3430 current_inferior = get_lwp_thread (lwp);
3431
3432 lwp->bp_reinsert = pc;
3433 uninsert_breakpoints_at (pc);
3434 uninsert_fast_tracepoint_jumps_at (pc);
3435
3436 if (can_hardware_single_step ())
3437 {
3438 step = 1;
3439 }
3440 else
3441 {
3442 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3443 set_reinsert_breakpoint (raddr);
3444 step = 0;
3445 }
3446
3447 current_inferior = saved_inferior;
3448
3449 linux_resume_one_lwp (lwp, step, 0, NULL);
3450
3451 /* Require next event from this LWP. */
3452 step_over_bkpt = lwp->head.id;
3453 return 1;
3454 }
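
/* Aside (not in the original source): a condensed sketch of the
   uninsert/step/reinsert dance that start_step_over and
   finish_step_over implement, specialized to x86 software breakpoints
   (a single 0xCC byte, little-endian) and hardware single-step.
   SAVED_WORD is assumed to hold the text word saved when the
   breakpoint was planted; all other threads are assumed stopped. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
step_over_int3 (pid_t lwpid, unsigned long addr, unsigned long saved_word)
{
  int wstat;
  unsigned long trapped;

  /* Put the original instruction bytes back. */
  if (ptrace (PTRACE_POKETEXT, lwpid, (void *) addr,
	      (void *) saved_word) != 0)
    return -1;

  /* Single-step this one thread over them. */
  if (ptrace (PTRACE_SINGLESTEP, lwpid, 0, 0) != 0)
    return -1;
  if (waitpid (lwpid, &wstat, __WALL) != lwpid || !WIFSTOPPED (wstat))
    return -1;

  /* Re-plant the breakpoint: 0xCC back in the low byte. */
  trapped = (saved_word & ~0xffUL) | 0xcc;
  return ptrace (PTRACE_POKETEXT, lwpid, (void *) addr,
		 (void *) trapped) == 0 ? 0 : -1;
}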
3455
3456 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3457 start_step_over, if still there, and delete any reinsert
3458 breakpoints we've set, on non-hardware single-step targets. */
3459
3460 static int
3461 finish_step_over (struct lwp_info *lwp)
3462 {
3463 if (lwp->bp_reinsert != 0)
3464 {
3465 if (debug_threads)
3466 fprintf (stderr, "Finished step over.\n");
3467
3468 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3469 may be no breakpoint to reinsert there by now. */
3470 reinsert_breakpoints_at (lwp->bp_reinsert);
3471 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3472
3473 lwp->bp_reinsert = 0;
3474
3475 /* Delete any software-single-step reinsert breakpoints. No
3476 longer needed. We don't have to worry about other threads
3477 hitting this trap, and later not being able to explain it,
3478 because we were stepping over a breakpoint, and we hold all
3479 threads but LWP stopped while doing that. */
3480 if (!can_hardware_single_step ())
3481 delete_reinsert_breakpoints ();
3482
3483 step_over_bkpt = null_ptid;
3484 return 1;
3485 }
3486 else
3487 return 0;
3488 }
3489
3490 /* This function is called once per thread. We check the thread's resume
3491 request, which will tell us whether to resume, step, or leave the thread
3492 stopped; and what signal, if any, it should be sent.
3493
3494 For threads which we aren't explicitly told otherwise, we preserve
3495 the stepping flag; this is used for stepping over gdbserver-placed
3496 breakpoints.
3497
3498 If a pending status was set on any thread, we queue any needed
3499 signals, since we won't actually resume. We already have a pending
3500 event to report, so we don't need to preserve any step requests;
3501 they should be re-issued if necessary. */
3502
3503 static int
3504 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3505 {
3506 struct lwp_info *lwp;
3507 struct thread_info *thread;
3508 int step;
3509 int leave_all_stopped = * (int *) arg;
3510 int leave_pending;
3511
3512 thread = (struct thread_info *) entry;
3513 lwp = get_thread_lwp (thread);
3514
3515 if (lwp->resume == NULL)
3516 return 0;
3517
3518 if (lwp->resume->kind == resume_stop)
3519 {
3520 if (debug_threads)
3521 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3522
3523 if (!lwp->stopped)
3524 {
3525 if (debug_threads)
3526 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3527
3528 /* Stop the thread, and wait for the event asynchronously,
3529 through the event loop. */
3530 send_sigstop (lwp);
3531 }
3532 else
3533 {
3534 if (debug_threads)
3535 fprintf (stderr, "already stopped LWP %ld\n",
3536 lwpid_of (lwp));
3537
3538 /* The LWP may have been stopped in an internal event that
3539 was not meant to be notified back to GDB (e.g., gdbserver
3540 breakpoint), so we should be reporting a stop event in
3541 this case too. */
3542
3543 /* If the thread already has a pending SIGSTOP, this is a
3544 no-op. Otherwise, something later will presumably resume
3545 the thread and this will cause it to cancel any pending
3546 operation, due to last_resume_kind == resume_stop. If
3547 the thread already has a pending status to report, we
3548 will still report it the next time we wait - see
3549 status_pending_p_callback. */
3550
3551 /* If we already have a pending signal to report, then
3552 there's no need to queue a SIGSTOP, as this means we're
3553 midway through moving the LWP out of the jump pad, and we
3554 will report the pending signal as soon as that is
3555 finished. */
3556 if (lwp->pending_signals_to_report == NULL)
3557 send_sigstop (lwp);
3558 }
3559
3560 /* For stop requests, we're done. */
3561 lwp->resume = NULL;
3562 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3563 return 0;
3564 }
3565
3566 /* If this thread which is about to be resumed has a pending status,
3567 then don't resume any threads - we can just report the pending
3568 status. Make sure to queue any signals that would otherwise be
3569 sent. In all-stop mode, we make this decision based on whether *any*
3570 thread has a pending status. If there's a thread that needs the
3571 step-over-breakpoint dance, then don't resume any other thread
3572 but that particular one. */
3573 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3574
3575 if (!leave_pending)
3576 {
3577 if (debug_threads)
3578 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3579
3580 step = (lwp->resume->kind == resume_step);
3581 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3582 }
3583 else
3584 {
3585 if (debug_threads)
3586 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3587
3588 /* If we have a new signal, enqueue the signal. */
3589 if (lwp->resume->sig != 0)
3590 {
3591 struct pending_signals *p_sig;
3592 p_sig = xmalloc (sizeof (*p_sig));
3593 p_sig->prev = lwp->pending_signals;
3594 p_sig->signal = lwp->resume->sig;
3595 memset (&p_sig->info, 0, sizeof (siginfo_t));
3596
3597 /* If this is the same signal we were previously stopped by,
3598 make sure to queue its siginfo. We can ignore the return
3599 value of ptrace; if it fails, we'll skip
3600 PTRACE_SETSIGINFO. */
3601 if (WIFSTOPPED (lwp->last_status)
3602 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3603 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3604
3605 lwp->pending_signals = p_sig;
3606 }
3607 }
3608
3609 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3610 lwp->resume = NULL;
3611 return 0;
3612 }
3613
3614 static void
3615 linux_resume (struct thread_resume *resume_info, size_t n)
3616 {
3617 struct thread_resume_array array = { resume_info, n };
3618 struct lwp_info *need_step_over = NULL;
3619 int any_pending;
3620 int leave_all_stopped;
3621
3622 find_inferior (&all_threads, linux_set_resume_request, &array);
3623
3624 /* If there is a thread which would otherwise be resumed, which has
3625 a pending status, then don't resume any threads - we can just
3626 report the pending status. Make sure to queue any signals that
3627 would otherwise be sent. In non-stop mode, we'll apply this
3628 logic to each thread individually. We consume all pending events
3629 before considering starting a step-over (in all-stop). */
3630 any_pending = 0;
3631 if (!non_stop)
3632 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3633
3634 /* If there is a thread which would otherwise be resumed, which is
3635 stopped at a breakpoint that needs stepping over, then don't
3636 resume any threads - have it step over the breakpoint with all
3637 other threads stopped, then resume all threads again. Make sure
3638 to queue any signals that would otherwise be delivered or
3639 queued. */
3640 if (!any_pending && supports_breakpoints ())
3641 need_step_over
3642 = (struct lwp_info *) find_inferior (&all_lwps,
3643 need_step_over_p, NULL);
3644
3645 leave_all_stopped = (need_step_over != NULL || any_pending);
3646
3647 if (debug_threads)
3648 {
3649 if (need_step_over != NULL)
3650 fprintf (stderr, "Not resuming all, need step over\n");
3651 else if (any_pending)
3652 fprintf (stderr,
3653 "Not resuming, all-stop and found "
3654 "an LWP with pending status\n");
3655 else
3656 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3657 }
3658
3659 /* Even if we're leaving threads stopped, queue all signals we'd
3660 otherwise deliver. */
3661 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3662
3663 if (need_step_over)
3664 start_step_over (need_step_over);
3665 }
3666
3667 /* This function is called once per thread. We check the thread's
3668 last resume request, which will tell us whether to resume, step, or
3669 leave the thread stopped. Any signal the client requested to be
3670 delivered has already been enqueued at this point.
3671
3672 If any thread that GDB wants running is stopped at an internal
3673 breakpoint that needs stepping over, we start a step-over operation
3674 on that particular thread, and leave all others stopped. */
3675
3676 static int
3677 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3678 {
3679 struct lwp_info *lwp = (struct lwp_info *) entry;
3680 struct thread_info *thread;
3681 int step;
3682
3683 if (lwp == except)
3684 return 0;
3685
3686 if (debug_threads)
3687 fprintf (stderr,
3688 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3689
3690 if (!lwp->stopped)
3691 {
3692 if (debug_threads)
3693 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3694 return 0;
3695 }
3696
3697 thread = get_lwp_thread (lwp);
3698
3699 if (thread->last_resume_kind == resume_stop
3700 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3701 {
3702 if (debug_threads)
3703 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3704 lwpid_of (lwp));
3705 return 0;
3706 }
3707
3708 if (lwp->status_pending_p)
3709 {
3710 if (debug_threads)
3711 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3712 lwpid_of (lwp));
3713 return 0;
3714 }
3715
3716 gdb_assert (lwp->suspended >= 0);
3717
3718 if (lwp->suspended)
3719 {
3720 if (debug_threads)
3721 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3722 return 0;
3723 }
3724
3725 if (thread->last_resume_kind == resume_stop
3726 && lwp->pending_signals_to_report == NULL
3727 && lwp->collecting_fast_tracepoint == 0)
3728 {
3729 /* We haven't reported this LWP as stopped yet (otherwise, the
3730 last_status.kind check above would catch it, and we wouldn't
3731 reach here). This LWP may have been momentarily paused by a
3732 stop_all_lwps call while handling, for example, another LWP's
3733 step-over. In that case, the pending expected SIGSTOP signal
3734 that was queued at vCont;t handling time will have already
3735 been consumed by wait_for_sigstop, and so we need to requeue
3736 another one here. Note that if the LWP already has a SIGSTOP
3737 pending, this is a no-op. */
3738
3739 if (debug_threads)
3740 fprintf (stderr,
3741 "Client wants LWP %ld to stop. "
3742 "Making sure it has a SIGSTOP pending\n",
3743 lwpid_of (lwp));
3744
3745 send_sigstop (lwp);
3746 }
3747
3748 step = thread->last_resume_kind == resume_step;
3749 linux_resume_one_lwp (lwp, step, 0, NULL);
3750 return 0;
3751 }
3752
3753 static int
3754 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3755 {
3756 struct lwp_info *lwp = (struct lwp_info *) entry;
3757
3758 if (lwp == except)
3759 return 0;
3760
3761 lwp->suspended--;
3762 gdb_assert (lwp->suspended >= 0);
3763
3764 return proceed_one_lwp (entry, except);
3765 }
3766
3767 /* When we finish a step-over, set threads running again. If there's
3768 another thread that may need a step-over, now's the time to start
3769 it. Eventually, we'll move all threads past their breakpoints. */
3770
3771 static void
3772 proceed_all_lwps (void)
3773 {
3774 struct lwp_info *need_step_over;
3775
3776 /* If there is a thread which would otherwise be resumed, which is
3777 stopped at a breakpoint that needs stepping over, then don't
3778 resume any threads - have it step over the breakpoint with all
3779 other threads stopped, then resume all threads again. */
3780
3781 if (supports_breakpoints ())
3782 {
3783 need_step_over
3784 = (struct lwp_info *) find_inferior (&all_lwps,
3785 need_step_over_p, NULL);
3786
3787 if (need_step_over != NULL)
3788 {
3789 if (debug_threads)
3790 fprintf (stderr, "proceed_all_lwps: found "
3791 "thread %ld needing a step-over\n",
3792 lwpid_of (need_step_over));
3793
3794 start_step_over (need_step_over);
3795 return;
3796 }
3797 }
3798
3799 if (debug_threads)
3800 fprintf (stderr, "Proceeding, no step-over needed\n");
3801
3802 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3803 }
3804
3805 /* Stopped LWPs that the client wanted to be running, that don't have
3806 pending statuses, are set to run again, except for EXCEPT, if not
3807 NULL. This undoes a stop_all_lwps call. */
3808
3809 static void
3810 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3811 {
3812 if (debug_threads)
3813 {
3814 if (except)
3815 fprintf (stderr,
3816 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3817 else
3818 fprintf (stderr,
3819 "unstopping all lwps\n");
3820 }
3821
3822 if (unsuspend)
3823 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3824 else
3825 find_inferior (&all_lwps, proceed_one_lwp, except);
3826 }
3827
3828
3829 #ifdef HAVE_LINUX_REGSETS
3830
3831 #define use_linux_regsets 1
3832
3833 static int
3834 regsets_fetch_inferior_registers (struct regcache *regcache)
3835 {
3836 struct regset_info *regset;
3837 int saw_general_regs = 0;
3838 int pid;
3839 struct iovec iov;
3840
3841 regset = target_regsets;
3842
3843 pid = lwpid_of (get_thread_lwp (current_inferior));
3844 while (regset->size >= 0)
3845 {
3846 void *buf, *data;
3847 int nt_type, res;
3848
3849 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3850 {
3851 regset++;
3852 continue;
3853 }
3854
3855 buf = xmalloc (regset->size);
3856
3857 nt_type = regset->nt_type;
3858 if (nt_type)
3859 {
3860 iov.iov_base = buf;
3861 iov.iov_len = regset->size;
3862 data = (void *) &iov;
3863 }
3864 else
3865 data = buf;
3866
3867 #ifndef __sparc__
3868 res = ptrace (regset->get_request, pid, nt_type, data);
3869 #else
3870 res = ptrace (regset->get_request, pid, data, nt_type);
3871 #endif
3872 if (res < 0)
3873 {
3874 if (errno == EIO)
3875 {
3876 /* If we get EIO on a regset, do not try it again for
3877 this process. */
3878 disabled_regsets[regset - target_regsets] = 1;
3879 free (buf);
3880 continue;
3881 }
3882 else
3883 {
3884 char s[256];
3885 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3886 pid);
3887 perror (s);
3888 }
3889 }
3890 else if (regset->type == GENERAL_REGS)
3891 saw_general_regs = 1;
3892 regset->store_function (regcache, buf);
3893 regset++;
3894 free (buf);
3895 }
3896 if (saw_general_regs)
3897 return 0;
3898 else
3899 return 1;
3900 }
3901
3902 static int
3903 regsets_store_inferior_registers (struct regcache *regcache)
3904 {
3905 struct regset_info *regset;
3906 int saw_general_regs = 0;
3907 int pid;
3908 struct iovec iov;
3909
3910 regset = target_regsets;
3911
3912 pid = lwpid_of (get_thread_lwp (current_inferior));
3913 while (regset->size >= 0)
3914 {
3915 void *buf, *data;
3916 int nt_type, res;
3917
3918 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3919 {
3920 regset++;
3921 continue;
3922 }
3923
3924 buf = xmalloc (regset->size);
3925
3926 /* First fill the buffer with the current register set contents,
3927 in case there are any items in the kernel's regset that are
3928 not in gdbserver's regcache. */
3929
3930 nt_type = regset->nt_type;
3931 if (nt_type)
3932 {
3933 iov.iov_base = buf;
3934 iov.iov_len = regset->size;
3935 data = (void *) &iov;
3936 }
3937 else
3938 data = buf;
3939
3940 #ifndef __sparc__
3941 res = ptrace (regset->get_request, pid, nt_type, data);
3942 #else
3943 res = ptrace (regset->get_request, pid, data, nt_type);
3944 #endif
3945
3946 if (res == 0)
3947 {
3948 /* Then overlay our cached registers on that. */
3949 regset->fill_function (regcache, buf);
3950
3951 /* Only now do we write the register set. */
3952 #ifndef __sparc__
3953 res = ptrace (regset->set_request, pid, nt_type, data);
3954 #else
3955 res = ptrace (regset->set_request, pid, data, nt_type);
3956 #endif
3957 }
3958
3959 if (res < 0)
3960 {
3961 if (errno == EIO)
3962 {
3963 /* If we get EIO on a regset, do not try it again for
3964 this process. */
3965 disabled_regsets[regset - target_regsets] = 1;
3966 free (buf);
3967 continue;
3968 }
3969 else if (errno == ESRCH)
3970 {
3971 /* At this point, ESRCH should mean the process is
3972 already gone, in which case we simply ignore attempts
3973 to change its registers. See also the related
3974 comment in linux_resume_one_lwp. */
3975 free (buf);
3976 return 0;
3977 }
3978 else
3979 {
3980 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3981 }
3982 }
3983 else if (regset->type == GENERAL_REGS)
3984 saw_general_regs = 1;
3985 regset++;
3986 free (buf);
3987 }
3988 if (saw_general_regs)
3989 return 0;
3990 else
3991 return 1;
3992 }
3993
3994 #else /* !HAVE_LINUX_REGSETS */
3995
3996 #define use_linux_regsets 0
3997 #define regsets_fetch_inferior_registers(regcache) 1
3998 #define regsets_store_inferior_registers(regcache) 1
3999
4000 #endif
4001
4002 /* Return 1 if register REGNO is supported by one of the regset ptrace
4003 calls or 0 if it has to be transferred individually. */
4004
4005 static int
4006 linux_register_in_regsets (int regno)
4007 {
4008 unsigned char mask = 1 << (regno % 8);
4009 size_t index = regno / 8;
4010
4011 return (use_linux_regsets
4012 && (the_low_target.regset_bitmap == NULL
4013 || (the_low_target.regset_bitmap[index] & mask) != 0));
4014 }
4015
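/* A minimal sketch (hypothetical, kept out of the build) of how a low
   target could populate a regset bitmap so that only registers 0..15
   are served by regsets and all others fall back to the usrregs path;
   EXAMPLE_NUM_REGS and the example_* names are illustrative only.  */
#if 0
#define EXAMPLE_NUM_REGS 24

static unsigned char example_regset_bitmap[(EXAMPLE_NUM_REGS + 7) / 8];

static void
example_init_regset_bitmap (void)
{
  int regno;

  for (regno = 0; regno < 16; regno++)
    example_regset_bitmap[regno / 8] |= 1 << (regno % 8);

  /* the_low_target.regset_bitmap = example_regset_bitmap;  */
}
#endif
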
4016 #ifdef HAVE_LINUX_USRREGS
4017
4018 int
4019 register_addr (int regnum)
4020 {
4021 int addr;
4022
4023 if (regnum < 0 || regnum >= the_low_target.num_regs)
4024 error ("Invalid register number %d.", regnum);
4025
4026 addr = the_low_target.regmap[regnum];
4027
4028 return addr;
4029 }
4030
4031 /* Fetch one register. */
4032 static void
4033 fetch_register (struct regcache *regcache, int regno)
4034 {
4035 CORE_ADDR regaddr;
4036 int i, size;
4037 char *buf;
4038 int pid;
4039
4040 if (regno >= the_low_target.num_regs)
4041 return;
4042 if ((*the_low_target.cannot_fetch_register) (regno))
4043 return;
4044
4045 regaddr = register_addr (regno);
4046 if (regaddr == -1)
4047 return;
4048
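/* E.g., with an 8-byte PTRACE_XFER_TYPE, a 10-byte register rounds up
   to two transfer words: (10 + 8 - 1) & -8 == 16.  The same rounding
   is used by store_register below.  */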
4049 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4050 & -sizeof (PTRACE_XFER_TYPE));
4051 buf = alloca (size);
4052
4053 pid = lwpid_of (get_thread_lwp (current_inferior));
4054 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4055 {
4056 errno = 0;
4057 *(PTRACE_XFER_TYPE *) (buf + i) =
4058 ptrace (PTRACE_PEEKUSER, pid,
4059 /* Coerce to a uintptr_t first to avoid potential gcc warning
4060 of coercing an 8 byte integer to a 4 byte pointer. */
4061 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4062 regaddr += sizeof (PTRACE_XFER_TYPE);
4063 if (errno != 0)
4064 error ("reading register %d: %s", regno, strerror (errno));
4065 }
4066
4067 if (the_low_target.supply_ptrace_register)
4068 the_low_target.supply_ptrace_register (regcache, regno, buf);
4069 else
4070 supply_register (regcache, regno, buf);
4071 }
4072
4073 /* Store one register. */
4074 static void
4075 store_register (struct regcache *regcache, int regno)
4076 {
4077 CORE_ADDR regaddr;
4078 int i, size;
4079 char *buf;
4080 int pid;
4081
4082 if (regno >= the_low_target.num_regs)
4083 return;
4084 if ((*the_low_target.cannot_store_register) (regno))
4085 return;
4086
4087 regaddr = register_addr (regno);
4088 if (regaddr == -1)
4089 return;
4090
4091 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4092 & -sizeof (PTRACE_XFER_TYPE));
4093 buf = alloca (size);
4094 memset (buf, 0, size);
4095
4096 if (the_low_target.collect_ptrace_register)
4097 the_low_target.collect_ptrace_register (regcache, regno, buf);
4098 else
4099 collect_register (regcache, regno, buf);
4100
4101 pid = lwpid_of (get_thread_lwp (current_inferior));
4102 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4103 {
4104 errno = 0;
4105 ptrace (PTRACE_POKEUSER, pid,
4106 /* Coerce to a uintptr_t first to avoid potential gcc warning
4107 about coercing an 8 byte integer to a 4 byte pointer. */
4108 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4109 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4110 if (errno != 0)
4111 {
4112 /* At this point, ESRCH should mean the process is
4113 already gone, in which case we simply ignore attempts
4114 to change its registers. See also the related
4115 comment in linux_resume_one_lwp. */
4116 if (errno == ESRCH)
4117 return;
4118
4119 if ((*the_low_target.cannot_store_register) (regno) == 0)
4120 error ("writing register %d: %s", regno, strerror (errno));
4121 }
4122 regaddr += sizeof (PTRACE_XFER_TYPE);
4123 }
4124 }
4125
4126 /* Fetch all registers, or just one, from the child process.
4127 If REGNO is -1, do this for all registers, skipping any that are
4128 assumed to have been retrieved by regsets_fetch_inferior_registers,
4129 unless ALL is non-zero.
4130 Otherwise, REGNO specifies which register (so we can save time). */
4131 static void
4132 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4133 {
4134 if (regno == -1)
4135 {
4136 for (regno = 0; regno < the_low_target.num_regs; regno++)
4137 if (all || !linux_register_in_regsets (regno))
4138 fetch_register (regcache, regno);
4139 }
4140 else
4141 fetch_register (regcache, regno);
4142 }
4143
4144 /* Store our register values back into the inferior.
4145 If REGNO is -1, do this for all registers, skipping any that are
4146 assumed to have been saved by regsets_store_inferior_registers,
4147 unless ALL is non-zero.
4148 Otherwise, REGNO specifies which register (so we can save time). */
4149 static void
4150 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4151 {
4152 if (regno == -1)
4153 {
4154 for (regno = 0; regno < the_low_target.num_regs; regno++)
4155 if (all || !linux_register_in_regsets (regno))
4156 store_register (regcache, regno);
4157 }
4158 else
4159 store_register (regcache, regno);
4160 }
4161
4162 #else /* !HAVE_LINUX_USRREGS */
4163
4164 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4165 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4166
4167 #endif
4168
4169
4170 void
4171 linux_fetch_registers (struct regcache *regcache, int regno)
4172 {
4173 int use_regsets;
4174 int all = 0;
4175
4176 if (regno == -1)
4177 {
4178 all = regsets_fetch_inferior_registers (regcache);
4179 usr_fetch_inferior_registers (regcache, regno, all);
4180 }
4181 else
4182 {
4183 use_regsets = linux_register_in_regsets (regno);
4184 if (use_regsets)
4185 all = regsets_fetch_inferior_registers (regcache);
4186 if (!use_regsets || all)
4187 usr_fetch_inferior_registers (regcache, regno, 1);
4188 }
4189 }
4190
4191 void
4192 linux_store_registers (struct regcache *regcache, int regno)
4193 {
4194 int use_regsets;
4195 int all = 0;
4196
4197 if (regno == -1)
4198 {
4199 all = regsets_store_inferior_registers (regcache);
4200 usr_store_inferior_registers (regcache, regno, all);
4201 }
4202 else
4203 {
4204 use_regsets = linux_register_in_regsets (regno);
4205 if (use_regsets)
4206 all = regsets_store_inferior_registers (regcache);
4207 if (!use_regsets || all)
4208 usr_store_inferior_registers (regcache, regno, 1);
4209 }
4210 }
4211
4212
4213 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4214 to debugger memory starting at MYADDR. */
4215
4216 static int
4217 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4218 {
4219 register int i;
4220 /* Round starting address down to longword boundary. */
4221 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4222 /* Round ending address up; get number of longwords that makes. */
4223 register int count
4224 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4225 / sizeof (PTRACE_XFER_TYPE);
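/* Worked example of the rounding above, assuming 4-byte transfer
   words: reading LEN == 6 bytes from MEMADDR == 0x1003 gives
   ADDR == 0x1000 and COUNT == (9 + 3) / 4 == 3 peeks, covering
   0x1000..0x100b; the requested bytes start at offset
   (MEMADDR & 3) == 3 within the buffer.  */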
4226 /* Allocate buffer of that many longwords. */
4227 register PTRACE_XFER_TYPE *buffer
4228 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4229 int fd;
4230 char filename[64];
4231 int pid = lwpid_of (get_thread_lwp (current_inferior));
4232
4233 /* Try using /proc. Don't bother for transfers of less than three words. */
4234 if (len >= 3 * sizeof (long))
4235 {
4236 /* We could keep this file open and cache it - possibly one per
4237 thread. That requires some juggling, but is even faster. */
4238 sprintf (filename, "/proc/%d/mem", pid);
4239 fd = open (filename, O_RDONLY | O_LARGEFILE);
4240 if (fd == -1)
4241 goto no_proc;
4242
4243 /* If pread64 is available, use it. It's faster if the kernel
4244 supports it (only one syscall), and it's 64-bit safe even on
4245 32-bit platforms (for instance, SPARC debugging a SPARC64
4246 application). */
4247 #ifdef HAVE_PREAD64
4248 if (pread64 (fd, myaddr, len, memaddr) != len)
4249 #else
4250 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4251 #endif
4252 {
4253 close (fd);
4254 goto no_proc;
4255 }
4256
4257 close (fd);
4258 return 0;
4259 }
4260
4261 no_proc:
4262 /* Read all the longwords */
4263 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4264 {
4265 errno = 0;
4266 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4267 about coercing an 8 byte integer to a 4 byte pointer. */
4268 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4269 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4270 if (errno)
4271 return errno;
4272 }
4273
4274 /* Copy appropriate bytes out of the buffer. */
4275 memcpy (myaddr,
4276 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4277 len);
4278
4279 return 0;
4280 }
4281
4282 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4283 memory at MEMADDR. On failure (cannot write to the inferior)
4284 returns the value of errno. */
4285
4286 static int
4287 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4288 {
4289 register int i;
4290 /* Round starting address down to longword boundary. */
4291 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4292 /* Round ending address up; get number of longwords that makes. */
4293 register int count
4294 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4295 / sizeof (PTRACE_XFER_TYPE);
4296
4297 /* Allocate buffer of that many longwords. */
4298 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4299 alloca (count * sizeof (PTRACE_XFER_TYPE));
4300
4301 int pid = lwpid_of (get_thread_lwp (current_inferior));
4302
4303 if (debug_threads)
4304 {
4305 /* Dump up to four bytes. */
4306 unsigned int val = 0;
4307
4308 /* Use memcpy rather than dereferencing MYADDR directly, so that
4309 we never read past the end of the caller's buffer when LEN is
4310 short, and so that an unaligned MYADDR is safe on
4311 strict-alignment targets. */
4312 memcpy (&val, myaddr, len < 4 ? len : 4);
4313 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4314 val, (long)memaddr);
4315 }
4316
4317 /* Fill start and end extra bytes of buffer with existing memory data. */
4318
4319 errno = 0;
4320 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4321 about coercing an 8 byte integer to a 4 byte pointer. */
4322 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4323 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4324 if (errno)
4325 return errno;
4326
4327 if (count > 1)
4328 {
4329 errno = 0;
4330 buffer[count - 1]
4331 = ptrace (PTRACE_PEEKTEXT, pid,
4332 /* Coerce to a uintptr_t first to avoid potential gcc warning
4333 about coercing an 8 byte integer to a 4 byte pointer. */
4334 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4335 * sizeof (PTRACE_XFER_TYPE)),
4336 0);
4337 if (errno)
4338 return errno;
4339 }
4340
4341 /* Copy data to be written over corresponding part of buffer. */
4342
4343 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4344 myaddr, len);
4345
4346 /* Write the entire buffer. */
4347
4348 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4349 {
4350 errno = 0;
4351 ptrace (PTRACE_POKETEXT, pid,
4352 /* Coerce to a uintptr_t first to avoid potential gcc warning
4353 about coercing an 8 byte integer to a 4 byte pointer. */
4354 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4355 (PTRACE_ARG4_TYPE) buffer[i]);
4356 if (errno)
4357 return errno;
4358 }
4359
4360 return 0;
4361 }
4362
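/* A minimal usage sketch (hypothetical address and byte value, kept
   out of the build): thanks to the read-modify-write above, patching
   a single byte leaves the other bytes of the containing word
   intact.  */
#if 0
static void
example_plant_byte (void)
{
  CORE_ADDR some_addr = 0x8048000;	/* Illustrative address only.  */
  unsigned char byte = 0xcc;
  int err = linux_write_memory (some_addr, &byte, 1);

  if (err != 0)
    fprintf (stderr, "example_plant_byte: %s\n", strerror (err));
}
#endif
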
4363 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4364 static int linux_supports_tracefork_flag;
4365
4366 static void
4367 linux_enable_event_reporting (int pid)
4368 {
4369 if (!linux_supports_tracefork_flag)
4370 return;
4371
4372 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4373 }
4374
4375 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4376
4377 static int
4378 linux_tracefork_grandchild (void *arg)
4379 {
4380 _exit (0);
4381 }
4382
4383 #define STACK_SIZE 4096
4384
4385 static int
4386 linux_tracefork_child (void *arg)
4387 {
4388 ptrace (PTRACE_TRACEME, 0, 0, 0);
4389 kill (getpid (), SIGSTOP);
4390
4391 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4392
4393 if (fork () == 0)
4394 linux_tracefork_grandchild (NULL);
4395
4396 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4397
4398 #ifdef __ia64__
4399 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4400 CLONE_VM | SIGCHLD, NULL);
4401 #else
4402 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4403 CLONE_VM | SIGCHLD, NULL);
4404 #endif
4405
4406 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4407
4408 _exit (0);
4409 }
4410
4411 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4412 sure that we can enable the option, and that it has the desired
4413 effect. */
4414
4415 static void
4416 linux_test_for_tracefork (void)
4417 {
4418 int child_pid, ret, status;
4419 long second_pid;
4420 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4421 char *stack = xmalloc (STACK_SIZE * 4);
4422 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4423
4424 linux_supports_tracefork_flag = 0;
4425
4426 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4427
4428 child_pid = fork ();
4429 if (child_pid == 0)
4430 linux_tracefork_child (NULL);
4431
4432 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4433
4434 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4435 #ifdef __ia64__
4436 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4437 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4438 #else /* !__ia64__ */
4439 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4440 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4441 #endif /* !__ia64__ */
4442
4443 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4444
4445 if (child_pid == -1)
4446 perror_with_name ("clone");
4447
4448 ret = my_waitpid (child_pid, &status, 0);
4449 if (ret == -1)
4450 perror_with_name ("waitpid");
4451 else if (ret != child_pid)
4452 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4453 if (! WIFSTOPPED (status))
4454 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4455
4456 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4457 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4458 if (ret != 0)
4459 {
4460 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4461 if (ret != 0)
4462 {
4463 warning ("linux_test_for_tracefork: failed to kill child");
4464 return;
4465 }
4466
4467 ret = my_waitpid (child_pid, &status, 0);
4468 if (ret != child_pid)
4469 warning ("linux_test_for_tracefork: failed to wait for killed child");
4470 else if (!WIFSIGNALED (status))
4471 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4472 "killed child", status);
4473
4474 return;
4475 }
4476
4477 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4478 if (ret != 0)
4479 warning ("linux_test_for_tracefork: failed to resume child");
4480
4481 ret = my_waitpid (child_pid, &status, 0);
4482
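/* Extended ptrace events are reported as a SIGTRAP stop with the
   event number in the third byte of the waitpid status, i.e.
   W_STOPCODE (SIGTRAP | (PTRACE_EVENT_FORK << 8)), so shifting the
   status right by 16 bits recovers the event number.  */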
4483 if (ret == child_pid && WIFSTOPPED (status)
4484 && status >> 16 == PTRACE_EVENT_FORK)
4485 {
4486 second_pid = 0;
4487 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4488 if (ret == 0 && second_pid != 0)
4489 {
4490 int second_status;
4491
4492 linux_supports_tracefork_flag = 1;
4493 my_waitpid (second_pid, &second_status, 0);
4494 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4495 if (ret != 0)
4496 warning ("linux_test_for_tracefork: failed to kill second child");
4497 my_waitpid (second_pid, &status, 0);
4498 }
4499 }
4500 else
4501 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4502 "(%d, status 0x%x)", ret, status);
4503
4504 do
4505 {
4506 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4507 if (ret != 0)
4508 warning ("linux_test_for_tracefork: failed to kill child");
4509 my_waitpid (child_pid, &status, 0);
4510 }
4511 while (WIFSTOPPED (status));
4512
4513 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4514 free (stack);
4515 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4516 }
4517
4518
4519 static void
4520 linux_look_up_symbols (void)
4521 {
4522 #ifdef USE_THREAD_DB
4523 struct process_info *proc = current_process ();
4524
4525 if (proc->private->thread_db != NULL)
4526 return;
4527
4528 /* If the kernel supports tracing forks then it also supports tracing
4529 clones, and then we don't need to use the magic thread event breakpoint
4530 to learn about threads. */
4531 thread_db_init (!linux_supports_tracefork_flag);
4532 #endif
4533 }
4534
4535 static void
4536 linux_request_interrupt (void)
4537 {
4538 extern unsigned long signal_pid;
4539
4540 if (!ptid_equal (cont_thread, null_ptid)
4541 && !ptid_equal (cont_thread, minus_one_ptid))
4542 {
4543 struct lwp_info *lwp;
4544 int lwpid;
4545
4546 lwp = get_thread_lwp (current_inferior);
4547 lwpid = lwpid_of (lwp);
4548 kill_lwp (lwpid, SIGINT);
4549 }
4550 else
4551 kill_lwp (signal_pid, SIGINT);
4552 }
4553
4554 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4555 to debugger memory starting at MYADDR. */
4556
4557 static int
4558 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4559 {
4560 char filename[PATH_MAX];
4561 int fd, n;
4562 int pid = lwpid_of (get_thread_lwp (current_inferior));
4563
4564 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4565
4566 fd = open (filename, O_RDONLY);
4567 if (fd < 0)
4568 return -1;
4569
4570 if (offset != (CORE_ADDR) 0
4571 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4572 n = -1;
4573 else
4574 n = read (fd, myaddr, len);
4575
4576 close (fd);
4577
4578 return n;
4579 }
4580
4581 /* These breakpoint and watchpoint related wrapper functions simply
4582 pass on the function call if the target has registered a
4583 corresponding function. */
4584
4585 static int
4586 linux_insert_point (char type, CORE_ADDR addr, int len)
4587 {
4588 if (the_low_target.insert_point != NULL)
4589 return the_low_target.insert_point (type, addr, len);
4590 else
4591 /* Unsupported (see target.h). */
4592 return 1;
4593 }
4594
4595 static int
4596 linux_remove_point (char type, CORE_ADDR addr, int len)
4597 {
4598 if (the_low_target.remove_point != NULL)
4599 return the_low_target.remove_point (type, addr, len);
4600 else
4601 /* Unsupported (see target.h). */
4602 return 1;
4603 }
4604
4605 static int
4606 linux_stopped_by_watchpoint (void)
4607 {
4608 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4609
4610 return lwp->stopped_by_watchpoint;
4611 }
4612
4613 static CORE_ADDR
4614 linux_stopped_data_address (void)
4615 {
4616 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4617
4618 return lwp->stopped_data_address;
4619 }
4620
4621 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4622 #if defined(__mcoldfire__)
4623 /* These should really be defined in the kernel's ptrace.h header. */
4624 #define PT_TEXT_ADDR (49*4)
4625 #define PT_DATA_ADDR (50*4)
4626 #define PT_TEXT_END_ADDR (51*4)
4627 #elif defined(BFIN)
4628 #define PT_TEXT_ADDR 220
4629 #define PT_TEXT_END_ADDR 224
4630 #define PT_DATA_ADDR 228
4631 #elif defined(__TMS320C6X__)
4632 #define PT_TEXT_ADDR (0x10000*4)
4633 #define PT_DATA_ADDR (0x10004*4)
4634 #define PT_TEXT_END_ADDR (0x10008*4)
4635 #endif
4636
4637 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4638 to tell gdb about. */
4639
4640 static int
4641 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4642 {
4643 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4644 unsigned long text, text_end, data;
4645 int pid = lwpid_of (get_thread_lwp (current_inferior));
4646
4647 errno = 0;
4648
4649 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4650 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4651 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4652
4653 if (errno == 0)
4654 {
4655 /* Both text and data offsets produced at compile-time (and so
4656 used by gdb) are relative to the beginning of the program,
4657 with the data segment immediately following the text segment.
4658 However, the actual runtime layout in memory may put the data
4659 somewhere else, so when we send gdb a data base-address, we
4660 use the real data base address and subtract the compile-time
4661 data base-address from it (which is just the length of the
4662 text segment). BSS immediately follows data in both
4663 cases. */
4664 *text_p = text;
4665 *data_p = data - (text_end - text);
4666
4667 return 1;
4668 }
4669 #endif
4670 return 0;
4671 }
4672 #endif
4673
4674 static int
4675 linux_qxfer_osdata (const char *annex,
4676 unsigned char *readbuf, unsigned const char *writebuf,
4677 CORE_ADDR offset, int len)
4678 {
4679 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4680 }
4681
4682 /* Convert a native/host siginfo object, into/from the siginfo in the
4683 layout of the inferior's architecture. */
4684
4685 static void
4686 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4687 {
4688 int done = 0;
4689
4690 if (the_low_target.siginfo_fixup != NULL)
4691 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4692
4693 /* If there was no callback, or the callback didn't do anything,
4694 then just do a straight memcpy. */
4695 if (!done)
4696 {
4697 if (direction == 1)
4698 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4699 else
4700 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4701 }
4702 }
4703
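/* A schematic sketch (hypothetical helper names, kept out of the
   build) of what an architecture's siginfo_fixup callback does: a
   64-bit gdbserver debugging a 32-bit inferior converts between the
   two layouts itself instead of letting the caller memcpy.  */
#if 0
static int
example_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
  if (!example_inferior_is_64bit ())
    {
      if (direction == 0)
	example_compat_siginfo_from_siginfo (inf, native);
      else
	example_siginfo_from_compat_siginfo (native, inf);

      return 1;			/* Handled; skip the memcpy.  */
    }

  return 0;			/* Same layout; let the caller memcpy.  */
}
#endif
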
4704 static int
4705 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4706 unsigned const char *writebuf, CORE_ADDR offset, int len)
4707 {
4708 int pid;
4709 struct siginfo siginfo;
4710 char inf_siginfo[sizeof (struct siginfo)];
4711
4712 if (current_inferior == NULL)
4713 return -1;
4714
4715 pid = lwpid_of (get_thread_lwp (current_inferior));
4716
4717 if (debug_threads)
4718 fprintf (stderr, "%s siginfo for lwp %d.\n",
4719 readbuf != NULL ? "Reading" : "Writing",
4720 pid);
4721
4722 if (offset >= sizeof (siginfo))
4723 return -1;
4724
4725 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4726 return -1;
4727
4728 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4729 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4730 inferior with a 64-bit GDBSERVER should look the same as debugging it
4731 with a 32-bit GDBSERVER, we need to convert it. */
4732 siginfo_fixup (&siginfo, inf_siginfo, 0);
4733
4734 if (offset + len > sizeof (siginfo))
4735 len = sizeof (siginfo) - offset;
4736
4737 if (readbuf != NULL)
4738 memcpy (readbuf, inf_siginfo + offset, len);
4739 else
4740 {
4741 memcpy (inf_siginfo + offset, writebuf, len);
4742
4743 /* Convert back to ptrace layout before flushing it out. */
4744 siginfo_fixup (&siginfo, inf_siginfo, 1);
4745
4746 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4747 return -1;
4748 }
4749
4750 return len;
4751 }
4752
4753 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4754 it lets us notice when children change state, and it acts as the
4755 handler for the sigsuspend in my_waitpid. */
4756
4757 static void
4758 sigchld_handler (int signo)
4759 {
4760 int old_errno = errno;
4761
4762 if (debug_threads)
4763 {
4764 do
4765 {
4766 /* fprintf is not async-signal-safe, so call write
4767 directly. */
4768 if (write (2, "sigchld_handler\n",
4769 sizeof ("sigchld_handler\n") - 1) < 0)
4770 break; /* just ignore */
4771 } while (0);
4772 }
4773
4774 if (target_is_async_p ())
4775 async_file_mark (); /* trigger a linux_wait */
4776
4777 errno = old_errno;
4778 }
4779
4780 static int
4781 linux_supports_non_stop (void)
4782 {
4783 return 1;
4784 }
4785
4786 static int
4787 linux_async (int enable)
4788 {
4789 int previous = (linux_event_pipe[0] != -1);
4790
4791 if (debug_threads)
4792 fprintf (stderr, "linux_async (%d), previous=%d\n",
4793 enable, previous);
4794
4795 if (previous != enable)
4796 {
4797 sigset_t mask;
4798 sigemptyset (&mask);
4799 sigaddset (&mask, SIGCHLD);
4800
4801 sigprocmask (SIG_BLOCK, &mask, NULL);
4802
4803 if (enable)
4804 {
4805 if (pipe (linux_event_pipe) == -1)
4806 fatal ("creating event pipe failed.");
4807
4808 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4809 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4810
4811 /* Register the event loop handler. */
4812 add_file_handler (linux_event_pipe[0],
4813 handle_target_event, NULL);
4814
4815 /* Always trigger a linux_wait. */
4816 async_file_mark ();
4817 }
4818 else
4819 {
4820 delete_file_handler (linux_event_pipe[0]);
4821
4822 close (linux_event_pipe[0]);
4823 close (linux_event_pipe[1]);
4824 linux_event_pipe[0] = -1;
4825 linux_event_pipe[1] = -1;
4826 }
4827
4828 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4829 }
4830
4831 return previous;
4832 }
4833
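/* The event pipe above is an instance of the classic self-pipe trick:
   the SIGCHLD handler writes a byte to linux_event_pipe[1] (via
   async_file_mark), which wakes the event loop watching
   linux_event_pipe[0] and eventually leads to a linux_wait.  Both
   ends are O_NONBLOCK, so a burst of signals cannot block the
   handler.  */
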
4834 static int
4835 linux_start_non_stop (int nonstop)
4836 {
4837 /* Register or unregister from event-loop accordingly. */
4838 linux_async (nonstop);
4839 return 0;
4840 }
4841
4842 static int
4843 linux_supports_multi_process (void)
4844 {
4845 return 1;
4846 }
4847
4848 static int
4849 linux_supports_disable_randomization (void)
4850 {
4851 #ifdef HAVE_PERSONALITY
4852 return 1;
4853 #else
4854 return 0;
4855 #endif
4856 }
4857
4858 static int
4859 linux_supports_agent (void)
4860 {
4861 return 1;
4862 }
4863
4864 /* Enumerate spufs IDs for process PID. */
4865 static int
4866 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4867 {
4868 int pos = 0;
4869 int written = 0;
4870 char path[128];
4871 DIR *dir;
4872 struct dirent *entry;
4873
4874 sprintf (path, "/proc/%ld/fd", pid);
4875 dir = opendir (path);
4876 if (!dir)
4877 return -1;
4878
4879 rewinddir (dir);
4880 while ((entry = readdir (dir)) != NULL)
4881 {
4882 struct stat st;
4883 struct statfs stfs;
4884 int fd;
4885
4886 fd = atoi (entry->d_name);
4887 if (!fd)
4888 continue;
4889
4890 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4891 if (stat (path, &st) != 0)
4892 continue;
4893 if (!S_ISDIR (st.st_mode))
4894 continue;
4895
4896 if (statfs (path, &stfs) != 0)
4897 continue;
4898 if (stfs.f_type != SPUFS_MAGIC)
4899 continue;
4900
4901 if (pos >= offset && pos + 4 <= offset + len)
4902 {
4903 *(unsigned int *)(buf + pos - offset) = fd;
4904 written += 4;
4905 }
4906 pos += 4;
4907 }
4908
4909 closedir (dir);
4910 return written;
4911 }
4912
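/* Example of the OFFSET/LEN windowing above: with three SPU contexts
   the whole object is 12 bytes of IDs, so a request with OFFSET == 4
   and LEN == 8 copies out the second and third IDs and returns 8.  */
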
4913 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4914 object type, using the /proc file system. */
4915 static int
4916 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4917 unsigned const char *writebuf,
4918 CORE_ADDR offset, int len)
4919 {
4920 long pid = lwpid_of (get_thread_lwp (current_inferior));
4921 char buf[128];
4922 int fd = 0;
4923 int ret = 0;
4924
4925 if (!writebuf && !readbuf)
4926 return -1;
4927
4928 if (!*annex)
4929 {
4930 if (!readbuf)
4931 return -1;
4932 else
4933 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4934 }
4935
4936 xsnprintf (buf, sizeof buf, "/proc/%ld/fd/%s", pid, annex);
4937 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4938 if (fd <= 0)
4939 return -1;
4940
4941 if (offset != 0
4942 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4943 {
4944 close (fd);
4945 return 0;
4946 }
4947
4948 if (writebuf)
4949 ret = write (fd, writebuf, (size_t) len);
4950 else
4951 ret = read (fd, readbuf, (size_t) len);
4952
4953 close (fd);
4954 return ret;
4955 }
4956
4957 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4958 struct target_loadseg
4959 {
4960 /* Core address to which the segment is mapped. */
4961 Elf32_Addr addr;
4962 /* VMA recorded in the program header. */
4963 Elf32_Addr p_vaddr;
4964 /* Size of this segment in memory. */
4965 Elf32_Word p_memsz;
4966 };
4967
4968 # if defined PT_GETDSBT
4969 struct target_loadmap
4970 {
4971 /* Protocol version number, must be zero. */
4972 Elf32_Word version;
4973 /* Pointer to the DSBT table, its size, and the DSBT index. */
4974 unsigned *dsbt_table;
4975 unsigned dsbt_size, dsbt_index;
4976 /* Number of segments in this map. */
4977 Elf32_Word nsegs;
4978 /* The actual memory map. */
4979 struct target_loadseg segs[/*nsegs*/];
4980 };
4981 # define LINUX_LOADMAP PT_GETDSBT
4982 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4983 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4984 # else
4985 struct target_loadmap
4986 {
4987 /* Protocol version number, must be zero. */
4988 Elf32_Half version;
4989 /* Number of segments in this map. */
4990 Elf32_Half nsegs;
4991 /* The actual memory map. */
4992 struct target_loadseg segs[/*nsegs*/];
4993 };
4994 # define LINUX_LOADMAP PTRACE_GETFDPIC
4995 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4996 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4997 # endif
4998
4999 static int
5000 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5001 unsigned char *myaddr, unsigned int len)
5002 {
5003 int pid = lwpid_of (get_thread_lwp (current_inferior));
5004 int addr = -1;
5005 struct target_loadmap *data = NULL;
5006 unsigned int actual_length, copy_length;
5007
5008 if (strcmp (annex, "exec") == 0)
5009 addr = (int) LINUX_LOADMAP_EXEC;
5010 else if (strcmp (annex, "interp") == 0)
5011 addr = (int) LINUX_LOADMAP_INTERP;
5012 else
5013 return -1;
5014
5015 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5016 return -1;
5017
5018 if (data == NULL)
5019 return -1;
5020
5021 actual_length = sizeof (struct target_loadmap)
5022 + sizeof (struct target_loadseg) * data->nsegs;
5023
5024 if (offset < 0 || offset > actual_length)
5025 return -1;
5026
5027 copy_length = actual_length - offset < len ? actual_length - offset : len;
5028 memcpy (myaddr, (char *) data + offset, copy_length);
5029 return copy_length;
5030 }
5031 #else
5032 # define linux_read_loadmap NULL
5033 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5034
5035 static void
5036 linux_process_qsupported (const char *query)
5037 {
5038 if (the_low_target.process_qsupported != NULL)
5039 the_low_target.process_qsupported (query);
5040 }
5041
5042 static int
5043 linux_supports_tracepoints (void)
5044 {
5045 if (the_low_target.supports_tracepoints == NULL)
5046 return 0;
5047
5048 return (*the_low_target.supports_tracepoints) ();
5049 }
5050
5051 static CORE_ADDR
5052 linux_read_pc (struct regcache *regcache)
5053 {
5054 if (the_low_target.get_pc == NULL)
5055 return 0;
5056
5057 return (*the_low_target.get_pc) (regcache);
5058 }
5059
5060 static void
5061 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5062 {
5063 gdb_assert (the_low_target.set_pc != NULL);
5064
5065 (*the_low_target.set_pc) (regcache, pc);
5066 }
5067
5068 static int
5069 linux_thread_stopped (struct thread_info *thread)
5070 {
5071 return get_thread_lwp (thread)->stopped;
5072 }
5073
5074 /* This exposes stop-all-threads functionality to other modules. */
5075
5076 static void
5077 linux_pause_all (int freeze)
5078 {
5079 stop_all_lwps (freeze, NULL);
5080 }
5081
5082 /* This exposes unstop-all-threads functionality to other gdbserver
5083 modules. */
5084
5085 static void
5086 linux_unpause_all (int unfreeze)
5087 {
5088 unstop_all_lwps (unfreeze, NULL);
5089 }
5090
5091 static int
5092 linux_prepare_to_access_memory (void)
5093 {
5094 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5095 running LWP. */
5096 if (non_stop)
5097 linux_pause_all (1);
5098 return 0;
5099 }
5100
5101 static void
5102 linux_done_accessing_memory (void)
5103 {
5104 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5105 running LWP. */
5106 if (non_stop)
5107 linux_unpause_all (1);
5108 }
5109
5110 static int
5111 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5112 CORE_ADDR collector,
5113 CORE_ADDR lockaddr,
5114 ULONGEST orig_size,
5115 CORE_ADDR *jump_entry,
5116 CORE_ADDR *trampoline,
5117 ULONGEST *trampoline_size,
5118 unsigned char *jjump_pad_insn,
5119 ULONGEST *jjump_pad_insn_size,
5120 CORE_ADDR *adjusted_insn_addr,
5121 CORE_ADDR *adjusted_insn_addr_end,
5122 char *err)
5123 {
5124 return (*the_low_target.install_fast_tracepoint_jump_pad)
5125 (tpoint, tpaddr, collector, lockaddr, orig_size,
5126 jump_entry, trampoline, trampoline_size,
5127 jjump_pad_insn, jjump_pad_insn_size,
5128 adjusted_insn_addr, adjusted_insn_addr_end,
5129 err);
5130 }
5131
5132 static struct emit_ops *
5133 linux_emit_ops (void)
5134 {
5135 if (the_low_target.emit_ops != NULL)
5136 return (*the_low_target.emit_ops) ();
5137 else
5138 return NULL;
5139 }
5140
5141 static int
5142 linux_get_min_fast_tracepoint_insn_len (void)
5143 {
5144 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5145 }
5146
5147 /* Extract the program header address (AT_PHDR) and entry count (AT_PHNUM) from the inferior's auxv. Return 0 on success. */
5148
5149 static int
5150 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5151 CORE_ADDR *phdr_memaddr, int *num_phdr)
5152 {
5153 char filename[PATH_MAX];
5154 int fd;
5155 const int auxv_size = is_elf64
5156 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5157 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5158
5159 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5160
5161 fd = open (filename, O_RDONLY);
5162 if (fd < 0)
5163 return 1;
5164
5165 *phdr_memaddr = 0;
5166 *num_phdr = 0;
5167 while (read (fd, buf, auxv_size) == auxv_size
5168 && (*phdr_memaddr == 0 || *num_phdr == 0))
5169 {
5170 if (is_elf64)
5171 {
5172 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5173
5174 switch (aux->a_type)
5175 {
5176 case AT_PHDR:
5177 *phdr_memaddr = aux->a_un.a_val;
5178 break;
5179 case AT_PHNUM:
5180 *num_phdr = aux->a_un.a_val;
5181 break;
5182 }
5183 }
5184 else
5185 {
5186 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5187
5188 switch (aux->a_type)
5189 {
5190 case AT_PHDR:
5191 *phdr_memaddr = aux->a_un.a_val;
5192 break;
5193 case AT_PHNUM:
5194 *num_phdr = aux->a_un.a_val;
5195 break;
5196 }
5197 }
5198 }
5199
5200 close (fd);
5201
5202 if (*phdr_memaddr == 0 || *num_phdr == 0)
5203 {
5204 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5205 "phdr_memaddr = %ld, phdr_num = %d",
5206 (long) *phdr_memaddr, *num_phdr);
5207 return 2;
5208 }
5209
5210 return 0;
5211 }
5212
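/* Each auxv entry is a fixed-size (type, value) record in the
   inferior's word size; e.g. the ELF64 variant is laid out as

     typedef struct
     {
       uint64_t a_type;
       union { uint64_t a_val; } a_un;
     } Elf64_auxv_t;

   which is why the loop above can read one AUXV_SIZE-byte record per
   iteration until both AT_PHDR and AT_PHNUM have been seen.  */
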
5213 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5214
5215 static CORE_ADDR
5216 get_dynamic (const int pid, const int is_elf64)
5217 {
5218 CORE_ADDR phdr_memaddr, relocation;
5219 int num_phdr, i;
5220 unsigned char *phdr_buf;
5221 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5222
5223 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5224 return 0;
5225
5226 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5227 phdr_buf = alloca (num_phdr * phdr_size);
5228
5229 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5230 return 0;
5231
5232 /* Compute relocation: it is expected to be 0 for "regular" executables,
5233 non-zero for PIE ones. */
5234 relocation = -1;
5235 for (i = 0; relocation == -1 && i < num_phdr; i++)
5236 if (is_elf64)
5237 {
5238 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5239
5240 if (p->p_type == PT_PHDR)
5241 relocation = phdr_memaddr - p->p_vaddr;
5242 }
5243 else
5244 {
5245 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5246
5247 if (p->p_type == PT_PHDR)
5248 relocation = phdr_memaddr - p->p_vaddr;
5249 }
5250
5251 if (relocation == -1)
5252 {
5253 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5254 real-world executables, including PIE executables, always have
5255 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5256 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5257 provides DT_DEBUG anyway (fpc binaries are statically linked).
5258
5259 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5260
5261 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5262
5263 return 0;
5264 }
5265
5266 for (i = 0; i < num_phdr; i++)
5267 {
5268 if (is_elf64)
5269 {
5270 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5271
5272 if (p->p_type == PT_DYNAMIC)
5273 return p->p_vaddr + relocation;
5274 }
5275 else
5276 {
5277 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5278
5279 if (p->p_type == PT_DYNAMIC)
5280 return p->p_vaddr + relocation;
5281 }
5282 }
5283
5284 return 0;
5285 }
5286
5287 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5288 can be 0 if the inferior does not yet have the library list initialized. */
5289
5290 static CORE_ADDR
5291 get_r_debug (const int pid, const int is_elf64)
5292 {
5293 CORE_ADDR dynamic_memaddr;
5294 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5295 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5296
5297 dynamic_memaddr = get_dynamic (pid, is_elf64);
5298 if (dynamic_memaddr == 0)
5299 return (CORE_ADDR) -1;
5300
5301 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5302 {
5303 if (is_elf64)
5304 {
5305 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5306
5307 if (dyn->d_tag == DT_DEBUG)
5308 return dyn->d_un.d_val;
5309
5310 if (dyn->d_tag == DT_NULL)
5311 break;
5312 }
5313 else
5314 {
5315 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5316
5317 if (dyn->d_tag == DT_DEBUG)
5318 return dyn->d_un.d_val;
5319
5320 if (dyn->d_tag == DT_NULL)
5321 break;
5322 }
5323
5324 dynamic_memaddr += dyn_size;
5325 }
5326
5327 return (CORE_ADDR) -1;
5328 }
5329
5330 /* Read one pointer from MEMADDR in the inferior. */
5331
5332 static int
5333 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5334 {
5335 *ptr = 0;
5336 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5337 }
5338
5339 struct link_map_offsets
5340 {
5341 /* Offset and size of r_debug.r_version. */
5342 int r_version_offset;
5343
5344 /* Offset and size of r_debug.r_map. */
5345 int r_map_offset;
5346
5347 /* Offset to l_addr field in struct link_map. */
5348 int l_addr_offset;
5349
5350 /* Offset to l_name field in struct link_map. */
5351 int l_name_offset;
5352
5353 /* Offset to l_ld field in struct link_map. */
5354 int l_ld_offset;
5355
5356 /* Offset to l_next field in struct link_map. */
5357 int l_next_offset;
5358
5359 /* Offset to l_prev field in struct link_map. */
5360 int l_prev_offset;
5361 };
5362
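/* The offset tables below mirror the SVR4/glibc runtime structures,
   roughly (see <link.h>):

     struct r_debug  { int r_version; struct link_map *r_map; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
		       struct link_map *l_next, *l_prev; ... };

   so e.g. on a 32-bit target r_map lives 4 bytes into r_debug, and
   l_next 12 bytes into each link_map entry.  */
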
5363 /* Construct qXfer:libraries-svr4:read reply. */
5364
5365 static int
5366 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5367 unsigned const char *writebuf,
5368 CORE_ADDR offset, int len)
5369 {
5370 char *document;
5371 unsigned document_len;
5372 struct process_info_private *const priv = current_process ()->private;
5373 char filename[PATH_MAX];
5374 int pid, is_elf64;
5375
5376 static const struct link_map_offsets lmo_32bit_offsets =
5377 {
5378 0, /* r_version offset. */
5379 4, /* r_debug.r_map offset. */
5380 0, /* l_addr offset in link_map. */
5381 4, /* l_name offset in link_map. */
5382 8, /* l_ld offset in link_map. */
5383 12, /* l_next offset in link_map. */
5384 16 /* l_prev offset in link_map. */
5385 };
5386
5387 static const struct link_map_offsets lmo_64bit_offsets =
5388 {
5389 0, /* r_version offset. */
5390 8, /* r_debug.r_map offset. */
5391 0, /* l_addr offset in link_map. */
5392 8, /* l_name offset in link_map. */
5393 16, /* l_ld offset in link_map. */
5394 24, /* l_next offset in link_map. */
5395 32 /* l_prev offset in link_map. */
5396 };
5397 const struct link_map_offsets *lmo;
5398
5399 if (writebuf != NULL)
5400 return -2;
5401 if (readbuf == NULL)
5402 return -1;
5403
5404 pid = lwpid_of (get_thread_lwp (current_inferior));
5405 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5406 is_elf64 = elf_64_file_p (filename);
5407 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5408
5409 if (priv->r_debug == 0)
5410 priv->r_debug = get_r_debug (pid, is_elf64);
5411
5412 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5413 {
5414 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5415 }
5416 else
5417 {
5418 int allocated = 1024;
5419 char *p;
5420 const int ptr_size = is_elf64 ? 8 : 4;
5421 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5422 int r_version, header_done = 0;
5423
5424 document = xmalloc (allocated);
5425 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5426 p = document + strlen (document);
5427
5428 r_version = 0;
5429 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5430 (unsigned char *) &r_version,
5431 sizeof (r_version)) != 0
5432 || r_version != 1)
5433 {
5434 warning ("unexpected r_debug version %d", r_version);
5435 goto done;
5436 }
5437
5438 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5439 &lm_addr, ptr_size) != 0)
5440 {
5441 warning ("unable to read r_map from 0x%lx",
5442 (long) priv->r_debug + lmo->r_map_offset);
5443 goto done;
5444 }
5445
5446 lm_prev = 0;
5447 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5448 &l_name, ptr_size) == 0
5449 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5450 &l_addr, ptr_size) == 0
5451 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5452 &l_ld, ptr_size) == 0
5453 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5454 &l_prev, ptr_size) == 0
5455 && read_one_ptr (lm_addr + lmo->l_next_offset,
5456 &l_next, ptr_size) == 0)
5457 {
5458 unsigned char libname[PATH_MAX];
5459
5460 if (lm_prev != l_prev)
5461 {
5462 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5463 (long) lm_prev, (long) l_prev);
5464 break;
5465 }
5466
5467 /* Not checking for error because reading may stop before
5468 we've got PATH_MAX worth of characters. */
5469 libname[0] = '\0';
5470 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5471 libname[sizeof (libname) - 1] = '\0';
5472 if (libname[0] != '\0')
5473 {
5474 /* 6x the size for xml_escape_text below. */
5475 size_t len = 6 * strlen ((char *) libname);
5476 char *name;
5477
5478 if (!header_done)
5479 {
5480 /* Terminate `<library-list-svr4'. */
5481 *p++ = '>';
5482 header_done = 1;
5483 }
5484
5485 while (allocated < p - document + len + 200)
5486 {
5487 /* Expand to guarantee sufficient storage. */
5488 uintptr_t document_len = p - document;
5489
5490 document = xrealloc (document, 2 * allocated);
5491 allocated *= 2;
5492 p = document + document_len;
5493 }
5494
5495 name = xml_escape_text ((char *) libname);
5496 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5497 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5498 name, (unsigned long) lm_addr,
5499 (unsigned long) l_addr, (unsigned long) l_ld);
5500 free (name);
5501 }
5502 else if (lm_prev == 0)
5503 {
5504 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5505 p = p + strlen (p);
5506 }
5507
5508 if (l_next == 0)
5509 break;
5510
5511 lm_prev = lm_addr;
5512 lm_addr = l_next;
5513 }
5514 done:
5515 strcpy (p, header_done ? "</library-list-svr4>" : "/>");
5516 }
5517
5518 document_len = strlen (document);
5519 if (offset < document_len)
5520 document_len -= offset;
5521 else
5522 document_len = 0;
5523 if (len > document_len)
5524 len = document_len;
5525
5526 memcpy (readbuf, document + offset, len);
5527 xfree (document);
5528
5529 return len;
5530 }
5531
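/* An illustrative reply from the function above (addresses made up):

     <library-list-svr4 version="1.0" main-lm="0xf7ffd908">
       <library name="/lib/libc.so.6" lm="0xf7fc13e0"
                l_addr="0xf7de1000" l_ld="0xf7f7ad80"/>
     </library-list-svr4>

   The unnamed first link_map entry (normally the main executable)
   only contributes the main-lm attribute.  */
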
5532 static struct target_ops linux_target_ops = {
5533 linux_create_inferior,
5534 linux_attach,
5535 linux_kill,
5536 linux_detach,
5537 linux_mourn,
5538 linux_join,
5539 linux_thread_alive,
5540 linux_resume,
5541 linux_wait,
5542 linux_fetch_registers,
5543 linux_store_registers,
5544 linux_prepare_to_access_memory,
5545 linux_done_accessing_memory,
5546 linux_read_memory,
5547 linux_write_memory,
5548 linux_look_up_symbols,
5549 linux_request_interrupt,
5550 linux_read_auxv,
5551 linux_insert_point,
5552 linux_remove_point,
5553 linux_stopped_by_watchpoint,
5554 linux_stopped_data_address,
5555 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5556 linux_read_offsets,
5557 #else
5558 NULL,
5559 #endif
5560 #ifdef USE_THREAD_DB
5561 thread_db_get_tls_address,
5562 #else
5563 NULL,
5564 #endif
5565 linux_qxfer_spu,
5566 hostio_last_error_from_errno,
5567 linux_qxfer_osdata,
5568 linux_xfer_siginfo,
5569 linux_supports_non_stop,
5570 linux_async,
5571 linux_start_non_stop,
5572 linux_supports_multi_process,
5573 #ifdef USE_THREAD_DB
5574 thread_db_handle_monitor_command,
5575 #else
5576 NULL,
5577 #endif
5578 linux_common_core_of_thread,
5579 linux_read_loadmap,
5580 linux_process_qsupported,
5581 linux_supports_tracepoints,
5582 linux_read_pc,
5583 linux_write_pc,
5584 linux_thread_stopped,
5585 NULL,
5586 linux_pause_all,
5587 linux_unpause_all,
5588 linux_cancel_breakpoints,
5589 linux_stabilize_threads,
5590 linux_install_fast_tracepoint_jump_pad,
5591 linux_emit_ops,
5592 linux_supports_disable_randomization,
5593 linux_get_min_fast_tracepoint_insn_len,
5594 linux_qxfer_libraries_svr4,
5595 linux_supports_agent,
5596 };
5597
5598 static void
5599 linux_init_signals (void)
5600 {
5601 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5602 to find what the cancel signal actually is. */
5603 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5604 signal (__SIGRTMIN+1, SIG_IGN);
5605 #endif
5606 }
5607
5608 void
5609 initialize_low (void)
5610 {
5611 struct sigaction sigchld_action;
5612 memset (&sigchld_action, 0, sizeof (sigchld_action));
5613 set_target_ops (&linux_target_ops);
5614 set_breakpoint_data (the_low_target.breakpoint,
5615 the_low_target.breakpoint_len);
5616 linux_init_signals ();
5617 linux_test_for_tracefork ();
5618 #ifdef HAVE_LINUX_REGSETS
5619 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5620 ;
5621 disabled_regsets = xmalloc (num_regsets);
5622 #endif
5623
5624 sigchld_action.sa_handler = sigchld_handler;
5625 sigemptyset (&sigchld_action.sa_mask);
5626 sigchld_action.sa_flags = SA_RESTART;
5627 sigaction (SIGCHLD, &sigchld_action, NULL);
5628 }