/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

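/* Reconstruct a waitpid stop status for a child stopped by signal
   SIG, for C libraries that don't provide this macro; e.g., on
   Linux, W_STOPCODE (SIGTRAP) == (5 << 8) | 0x7f == 0x057f.  */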
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

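/* Default types for ptrace's third and fourth arguments, and for the
   unit of data transferred by PTRACE_PEEKTEXT and friends.  */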
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

/* Accepts an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}

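/* Remove LWP from the thread and lwp lists, and free it together
   with its architecture-specific data.  */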
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

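/* Allocate a zero-initialized lwp_info for PTID, set up its
   architecture-specific data, and add it to the ALL_LWPS list.  */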
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

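/* Attach to LWPID, an additional thread of a process we are already
   attached to (hence INITIAL == 0 in the call below).  */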
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

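/* Callback for find_inferior.  Increment COUNTER->count for each
   thread of process COUNTER->pid, and return true as soon as a
   second one is found.  */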
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

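/* Return true if THREAD is the only remaining thread of its
   process.  */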
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

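/* Kill process PID and all of its LWPs, then mourn it.  Return 0 on
   success, or -1 if PID is not a process we know about.  */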
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        /* LWP is NULL here, so don't dereference it for an lwpid.  */
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n", pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

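/* Detach from process PID, letting its LWPs run free.  Return 0 on
   success, or -1 if PID is not a process we know about.  */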
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

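/* Clean up after PROCESS exits or is detached: delete its LWPs, free
   its private data, and remove it from the process list.  */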
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

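/* Callback for find_inferior.  Return true if ENTRY's id matches the
   LWP id of the ptid pointed to by DATA, falling back to DATA's
   process id when no LWP id is set.  */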
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

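/* Find the lwp_info matching PTID in ALL_LWPS; return NULL if there
   is none.  */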
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

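/* Wait (with __WALL implied) for an event from PTID, or from any LWP
   if PTID is minus_one_ptid.  Store the raw waitpid status in *WSTATP
   and return the lwp_info of the child that reported an event, or
   NULL if OPTIONS contains WNOHANG and no event was pending.  */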
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      if (ptid_is_pid (ptid)
          && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
        {
          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
          continue;
        }

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

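/* Callback for find_inferior.  Cancel LP's pending breakpoint hit,
   unless LP is the event LWP passed in DATA; see the comment in the
   body for why.  */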
static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

1954 static void
1955 linux_cancel_breakpoints (void)
1956 {
1957 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1958 }
1959
1960 /* Select one LWP out of those that have events pending. */
1961
1962 static void
1963 select_event_lwp (struct lwp_info **orig_lp)
1964 {
1965 int num_events = 0;
1966 int random_selector;
1967 struct lwp_info *event_lp;
1968
1969 /* Give preference to any LWP that is being single-stepped. */
1970 event_lp
1971 = (struct lwp_info *) find_inferior (&all_lwps,
1972 select_singlestep_lwp_callback, NULL);
1973 if (event_lp != NULL)
1974 {
1975 if (debug_threads)
1976 fprintf (stderr,
1977 "SEL: Select single-step %s\n",
1978 target_pid_to_str (ptid_of (event_lp)));
1979 }
1980 else
1981 {
1982 /* No single-stepping LWP. Select one at random, out of those
1983 which have had SIGTRAP events. */
1984
1985 /* First see how many SIGTRAP events we have. */
1986 find_inferior (&all_lwps, count_events_callback, &num_events);
1987
1988 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1989 random_selector = (int)
1990 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1991
1992 if (debug_threads && num_events > 1)
1993 fprintf (stderr,
1994 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1995 num_events, random_selector);
1996
1997 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1998 select_event_lwp_callback,
1999 &random_selector);
2000 }
2001
2002 if (event_lp != NULL)
2003 {
2004 /* Switch the event LWP. */
2005 *orig_lp = event_lp;
2006 }
2007 }
2008
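/* Illustrative sketch (editor's addition): the scaling expression in
   select_event_lwp above maps rand ()'s full range onto
   [0, NUM_EVENTS), using the generator's high-order bits instead of
   the low-order bits that "rand () % n" would use.  The selector is
   then counted down across the LWP list until it reaches zero.  */

#include <stdlib.h>

static int
pick_random_index (int num_events)
{
  /* Dividing by RAND_MAX + 1.0 keeps the quotient strictly below
     1.0, so the result always lies in 0 .. num_events - 1.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}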
2009 /* Decrement the suspend count of an LWP. */
2010
2011 static int
2012 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2013 {
2014 struct lwp_info *lwp = (struct lwp_info *) entry;
2015
2016 /* Ignore EXCEPT. */
2017 if (lwp == except)
2018 return 0;
2019
2020 lwp->suspended--;
2021
2022 gdb_assert (lwp->suspended >= 0);
2023 return 0;
2024 }
2025
2026 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2027 non-NULL. */
2028
2029 static void
2030 unsuspend_all_lwps (struct lwp_info *except)
2031 {
2032 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2033 }
2034
2035 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2036 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2037 void *data);
2038 static int lwp_running (struct inferior_list_entry *entry, void *data);
2039 static ptid_t linux_wait_1 (ptid_t ptid,
2040 struct target_waitstatus *ourstatus,
2041 int target_options);
2042
2043 /* Stabilize threads (move out of jump pads).
2044
2045 If a thread is midway through collecting a fast tracepoint, we need to
2046 finish the collection and move it out of the jump pad before
2047 reporting the signal.
2048
2049 This avoids recursion while collecting (when a signal arrives
2050 midway, and the signal handler itself collects), which would trash
2051 the trace buffer. In case the user set a breakpoint in a signal
2052 handler, this avoids the backtrace showing the jump pad, etc..
2053 Most importantly, there are certain things we can't do safely if
2054 threads are stopped in a jump pad (or in its callee's). For
2055 example:
2056
2057 - starting a new trace run. A thread still collecting the
2058 previous run could trash the trace buffer when resumed. The trace
2059 buffer control structures would have been reset but the thread had
2060 no way to tell. The thread could even be midway through memcpy'ing
2061 to the buffer, which would mean that when resumed, it would clobber
2062 the trace buffer that had been set up for the new run.
2063
2064 - we can't rewrite/reuse the jump pads for new tracepoints
2065 safely. Say you do tstart while a thread is stopped midway
2066 through collecting. When the thread is later resumed, it finishes
2067 the collection, and returns to the jump pad, to execute the
2068 original instruction that was under the tracepoint jump at the
2069 time the older run had been started. If the jump pad had since
2070 been rewritten for something else in the new run, the thread would
2071 now execute wrong or random instructions. */
2072
2073 static void
2074 linux_stabilize_threads (void)
2075 {
2076 struct thread_info *save_inferior;
2077 struct lwp_info *lwp_stuck;
2078
2079 lwp_stuck
2080 = (struct lwp_info *) find_inferior (&all_lwps,
2081 stuck_in_jump_pad_callback, NULL);
2082 if (lwp_stuck != NULL)
2083 {
2084 if (debug_threads)
2085 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2086 lwpid_of (lwp_stuck));
2087 return;
2088 }
2089
2090 save_inferior = current_inferior;
2091
2092 stabilizing_threads = 1;
2093
2094 /* Kick 'em all. */
2095 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2096
2097 /* Loop until all are stopped out of the jump pads. */
2098 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2099 {
2100 struct target_waitstatus ourstatus;
2101 struct lwp_info *lwp;
2102 int wstat;
2103
2104 /* Note that we go through the full wait event loop. While
2105 moving threads out of jump pad, we need to be able to step
2106 over internal breakpoints and such. */
2107 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2108
2109 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2110 {
2111 lwp = get_thread_lwp (current_inferior);
2112
2113 /* Lock it. */
2114 lwp->suspended++;
2115
2116 if (ourstatus.value.sig != TARGET_SIGNAL_0
2117 || current_inferior->last_resume_kind == resume_stop)
2118 {
2119 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2120 enqueue_one_deferred_signal (lwp, &wstat);
2121 }
2122 }
2123 }
2124
2125 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2126
2127 stabilizing_threads = 0;
2128
2129 current_inferior = save_inferior;
2130
2131 if (debug_threads)
2132 {
2133 lwp_stuck
2134 = (struct lwp_info *) find_inferior (&all_lwps,
2135 stuck_in_jump_pad_callback, NULL);
2136 if (lwp_stuck != NULL)
2137 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2138 lwpid_of (lwp_stuck));
2139 }
2140 }
2141
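/* Illustrative sketch (editor's addition): W_STOPCODE, used in the
   stabilization loop above, synthesizes a raw wait status --
   (sig << 8) | 0x7f -- so that a signal recovered from a
   target_waitstatus can be fed back through code expecting waitpid
   statuses, such as enqueue_one_deferred_signal.  The encoding
   round-trips through the standard macros:  */

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
stopcode_round_trip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);	/* glibc extension */
  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
}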
2142 /* Wait for an event from the inferior; fill *OURSTATUS and return the LWP's ptid. */
2143
2144 static ptid_t
2145 linux_wait_1 (ptid_t ptid,
2146 struct target_waitstatus *ourstatus, int target_options)
2147 {
2148 int w;
2149 struct lwp_info *event_child;
2150 int options;
2151 int pid;
2152 int step_over_finished;
2153 int bp_explains_trap;
2154 int maybe_internal_trap;
2155 int report_to_gdb;
2156 int trace_event;
2157
2158 /* Translate generic target options into linux options. */
2159 options = __WALL;
2160 if (target_options & TARGET_WNOHANG)
2161 options |= WNOHANG;
2162
2163 retry:
2164 bp_explains_trap = 0;
2165 trace_event = 0;
2166 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2167
2168 /* If we were only supposed to resume one thread, only wait for
2169 that thread - if it's still alive. If it died, however - which
2170 can happen if we're coming from the thread death case below -
2171 then we need to make sure we restart the other threads. We could
2172 pick a thread at random or restart all; restarting all is less
2173 arbitrary. */
2174 if (!non_stop
2175 && !ptid_equal (cont_thread, null_ptid)
2176 && !ptid_equal (cont_thread, minus_one_ptid))
2177 {
2178 struct thread_info *thread;
2179
2180 thread = (struct thread_info *) find_inferior_id (&all_threads,
2181 cont_thread);
2182
2183 /* No stepping, no signal - unless one is pending already, of course. */
2184 if (thread == NULL)
2185 {
2186 struct thread_resume resume_info;
2187 resume_info.thread = minus_one_ptid;
2188 resume_info.kind = resume_continue;
2189 resume_info.sig = 0;
2190 linux_resume (&resume_info, 1);
2191 }
2192 else
2193 ptid = cont_thread;
2194 }
2195
2196 if (ptid_equal (step_over_bkpt, null_ptid))
2197 pid = linux_wait_for_event (ptid, &w, options);
2198 else
2199 {
2200 if (debug_threads)
2201 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2202 target_pid_to_str (step_over_bkpt));
2203 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2204 }
2205
2206 if (pid == 0) /* only if TARGET_WNOHANG */
2207 return null_ptid;
2208
2209 event_child = get_thread_lwp (current_inferior);
2210
2211 /* If we are waiting for a particular child, and it exited,
2212 linux_wait_for_event will return its exit status. Similarly if
2213 the last child exited. If this is not the last child, however,
2214 do not report it as exited until there is a 'thread exited' response
2215 available in the remote protocol. Instead, just wait for another event.
2216 This should be safe, because if the thread crashed we will already
2217 have reported the termination signal to GDB; that should stop any
2218 in-progress stepping operations, etc.
2219
2220 Report the exit status of the last thread to exit. This matches
2221 LinuxThreads' behavior. */
2222
2223 if (last_thread_of_process_p (current_inferior))
2224 {
2225 if (WIFEXITED (w) || WIFSIGNALED (w))
2226 {
2227 if (WIFEXITED (w))
2228 {
2229 ourstatus->kind = TARGET_WAITKIND_EXITED;
2230 ourstatus->value.integer = WEXITSTATUS (w);
2231
2232 if (debug_threads)
2233 fprintf (stderr,
2234 "\nChild exited with retcode = %x \n",
2235 WEXITSTATUS (w));
2236 }
2237 else
2238 {
2239 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2240 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2241
2242 if (debug_threads)
2243 fprintf (stderr,
2244 "\nChild terminated with signal = %x \n",
2245 WTERMSIG (w));
2246
2247 }
2248
2249 return ptid_of (event_child);
2250 }
2251 }
2252 else
2253 {
2254 if (!WIFSTOPPED (w))
2255 goto retry;
2256 }
2257
2258 /* If this event was not handled before, and is not a SIGTRAP, we
2259 report it. SIGILL and SIGSEGV are also treated as traps in case
2260 a breakpoint is inserted at the current PC. If this target does
2261 not support internal breakpoints at all, we also report the
2262 SIGTRAP without further processing; it's of no concern to us. */
2263 maybe_internal_trap
2264 = (supports_breakpoints ()
2265 && (WSTOPSIG (w) == SIGTRAP
2266 || ((WSTOPSIG (w) == SIGILL
2267 || WSTOPSIG (w) == SIGSEGV)
2268 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2269
2270 if (maybe_internal_trap)
2271 {
2272 /* Handle anything that requires bookkeeping before deciding to
2273 report the event or continue waiting. */
2274
2275 /* First check if we can explain the SIGTRAP with an internal
2276 breakpoint, or if we should possibly report the event to GDB.
2277 Do this before anything that may remove or insert a
2278 breakpoint. */
2279 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2280
2281 /* We have a SIGTRAP, possibly a step-over dance has just
2282 finished. If so, tweak the state machine accordingly,
2283 reinsert breakpoints and delete any reinsert (software
2284 single-step) breakpoints. */
2285 step_over_finished = finish_step_over (event_child);
2286
2287 /* Now invoke the callbacks of any internal breakpoints there. */
2288 check_breakpoints (event_child->stop_pc);
2289
2290 /* Handle tracepoint data collecting. This may overflow the
2291 trace buffer, and cause a tracing stop, removing
2292 breakpoints. */
2293 trace_event = handle_tracepoints (event_child);
2294
2295 if (bp_explains_trap)
2296 {
2297 /* If we stepped or ran into an internal breakpoint, we've
2298 already handled it. So next time we resume (from this
2299 PC), we should step over it. */
2300 if (debug_threads)
2301 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2302
2303 if (breakpoint_here (event_child->stop_pc))
2304 event_child->need_step_over = 1;
2305 }
2306 }
2307 else
2308 {
2309 /* We have some other signal, possibly a step-over dance was in
2310 progress, and it should be cancelled too. */
2311 step_over_finished = finish_step_over (event_child);
2312 }
2313
2314 /* We have all the data we need. Either report the event to GDB, or
2315 resume threads and keep waiting for more. */
2316
2317 /* If we're collecting a fast tracepoint, finish the collection and
2318 move out of the jump pad before delivering a signal. See
2319 linux_stabilize_threads. */
2320
2321 if (WIFSTOPPED (w)
2322 && WSTOPSIG (w) != SIGTRAP
2323 && supports_fast_tracepoints ()
2324 && in_process_agent_loaded ())
2325 {
2326 if (debug_threads)
2327 fprintf (stderr,
2328 "Got signal %d for LWP %ld. Check if we need "
2329 "to defer or adjust it.\n",
2330 WSTOPSIG (w), lwpid_of (event_child));
2331
2332 /* Allow debugging the jump pad itself. */
2333 if (current_inferior->last_resume_kind != resume_step
2334 && maybe_move_out_of_jump_pad (event_child, &w))
2335 {
2336 enqueue_one_deferred_signal (event_child, &w);
2337
2338 if (debug_threads)
2339 fprintf (stderr,
2340 "Signal %d for LWP %ld deferred (in jump pad)\n",
2341 WSTOPSIG (w), lwpid_of (event_child));
2342
2343 linux_resume_one_lwp (event_child, 0, 0, NULL);
2344 goto retry;
2345 }
2346 }
2347
2348 if (event_child->collecting_fast_tracepoint)
2349 {
2350 if (debug_threads)
2351 fprintf (stderr, "\
2352 LWP %ld was trying to move out of the jump pad (%d). \
2353 Check if we're already there.\n",
2354 lwpid_of (event_child),
2355 event_child->collecting_fast_tracepoint);
2356
2357 trace_event = 1;
2358
2359 event_child->collecting_fast_tracepoint
2360 = linux_fast_tracepoint_collecting (event_child, NULL);
2361
2362 if (event_child->collecting_fast_tracepoint != 1)
2363 {
2364 /* No longer need this breakpoint. */
2365 if (event_child->exit_jump_pad_bkpt != NULL)
2366 {
2367 if (debug_threads)
2368 fprintf (stderr,
2369 "No longer need exit-jump-pad bkpt; removing it."
2370 "stopping all threads momentarily.\n");
2371
2372 /* Other running threads could hit this breakpoint.
2373 We don't handle moribund locations like GDB does,
2374 instead we always pause all threads when removing
2375 breakpoints, so that any step-over or
2376 decr_pc_after_break adjustment is always taken
2377 care of while the breakpoint is still
2378 inserted. */
2379 stop_all_lwps (1, event_child);
2380 cancel_breakpoints ();
2381
2382 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2383 event_child->exit_jump_pad_bkpt = NULL;
2384
2385 unstop_all_lwps (1, event_child);
2386
2387 gdb_assert (event_child->suspended >= 0);
2388 }
2389 }
2390
2391 if (event_child->collecting_fast_tracepoint == 0)
2392 {
2393 if (debug_threads)
2394 fprintf (stderr,
2395 "fast tracepoint finished "
2396 "collecting successfully.\n");
2397
2398 /* We may have a deferred signal to report. */
2399 if (dequeue_one_deferred_signal (event_child, &w))
2400 {
2401 if (debug_threads)
2402 fprintf (stderr, "dequeued one signal.\n");
2403 }
2404 else
2405 {
2406 if (debug_threads)
2407 fprintf (stderr, "no deferred signals.\n");
2408
2409 if (stabilizing_threads)
2410 {
2411 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2412 ourstatus->value.sig = TARGET_SIGNAL_0;
2413 return ptid_of (event_child);
2414 }
2415 }
2416 }
2417 }
2418
2419 /* Check whether GDB would be interested in this event. */
2420
2421 /* If GDB is not interested in this signal, don't stop other
2422 threads, and don't report it to GDB. Just resume the inferior
2423 right away. We do this for threading-related signals as well as
2424 any that GDB specifically requested we ignore. But never ignore
2425 SIGSTOP if we sent it ourselves, and do not ignore signals when
2426 stepping - they may require special handling to skip the signal
2427 handler. */
2428 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2429 thread library? */
2430 if (WIFSTOPPED (w)
2431 && current_inferior->last_resume_kind != resume_step
2432 && (
2433 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2434 (current_process ()->private->thread_db != NULL
2435 && (WSTOPSIG (w) == __SIGRTMIN
2436 || WSTOPSIG (w) == __SIGRTMIN + 1))
2437 ||
2438 #endif
2439 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2440 && !(WSTOPSIG (w) == SIGSTOP
2441 && current_inferior->last_resume_kind == resume_stop))))
2442 {
2443 siginfo_t info, *info_p;
2444
2445 if (debug_threads)
2446 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2447 WSTOPSIG (w), lwpid_of (event_child));
2448
2449 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2450 info_p = &info;
2451 else
2452 info_p = NULL;
2453 linux_resume_one_lwp (event_child, event_child->stepping,
2454 WSTOPSIG (w), info_p);
2455 goto retry;
2456 }
2457
2458 /* If GDB wanted this thread to single step, we always want to
2459 report the SIGTRAP, and let GDB handle it. Watchpoints should
2460 always be reported. So should signals we can't explain. A
2461 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2462 may not support Z0 breakpoints. If we do, we'll be able to handle
2463 GDB breakpoints on top of internal breakpoints, by handling the
2464 internal breakpoint and still reporting the event to GDB. If we
2465 don't, we're out of luck; GDB won't see the breakpoint hit. */
2466 report_to_gdb = (!maybe_internal_trap
2467 || current_inferior->last_resume_kind == resume_step
2468 || event_child->stopped_by_watchpoint
2469 || (!step_over_finished
2470 && !bp_explains_trap && !trace_event)
2471 || (gdb_breakpoint_here (event_child->stop_pc)
2472 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
2473
2474 /* We found no reason GDB would want us to stop. We either hit one
2475 of our own breakpoints, or finished an internal step GDB
2476 shouldn't know about. */
2477 if (!report_to_gdb)
2478 {
2479 if (debug_threads)
2480 {
2481 if (bp_explains_trap)
2482 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2483 if (step_over_finished)
2484 fprintf (stderr, "Step-over finished.\n");
2485 if (trace_event)
2486 fprintf (stderr, "Tracepoint event.\n");
2487 }
2488
2489 /* We're not reporting this breakpoint to GDB, so apply the
2490 decr_pc_after_break adjustment to the inferior's regcache
2491 ourselves. */
2492
2493 if (the_low_target.set_pc != NULL)
2494 {
2495 struct regcache *regcache
2496 = get_thread_regcache (get_lwp_thread (event_child), 1);
2497 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2498 }
2499
2500 /* We may have finished stepping over a breakpoint. If so,
2501 we've stopped and suspended all LWPs momentarily except the
2502 stepping one. This is where we resume them all again. We're
2503 going to keep waiting, so use proceed, which handles stepping
2504 over the next breakpoint. */
2505 if (debug_threads)
2506 fprintf (stderr, "proceeding all threads.\n");
2507
2508 if (step_over_finished)
2509 unsuspend_all_lwps (event_child);
2510
2511 proceed_all_lwps ();
2512 goto retry;
2513 }
2514
2515 if (debug_threads)
2516 {
2517 if (current_inferior->last_resume_kind == resume_step)
2518 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2519 if (event_child->stopped_by_watchpoint)
2520 fprintf (stderr, "Stopped by watchpoint.\n");
2521 if (gdb_breakpoint_here (event_child->stop_pc))
2522 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2523 /* Fallback note for any other reason we decided to report. */
2524 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2525 }
2526
2527 /* Alright, we're going to report a stop. */
2528
2529 if (!non_stop && !stabilizing_threads)
2530 {
2531 /* In all-stop, stop all threads. */
2532 stop_all_lwps (0, NULL);
2533
2534 /* If we're not waiting for a specific LWP, choose an event LWP
2535 from among those that have had events. Giving equal priority
2536 to all LWPs that have had events helps prevent
2537 starvation. */
2538 if (ptid_equal (ptid, minus_one_ptid))
2539 {
2540 event_child->status_pending_p = 1;
2541 event_child->status_pending = w;
2542
2543 select_event_lwp (&event_child);
2544
2545 event_child->status_pending_p = 0;
2546 w = event_child->status_pending;
2547 }
2548
2549 /* Now that we've selected our final event LWP, cancel any
2550 breakpoints in other LWPs that have hit a GDB breakpoint.
2551 See the comment in cancel_breakpoints_callback to find out
2552 why. */
2553 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2554
2555 /* If we were doing a step-over, all other threads but the stepping one
2556 had been paused in start_step_over, with their suspend counts
2557 incremented. We don't want to do a full unstop/unpause, because we're
2558 in all-stop mode (so we want threads stopped), but we still need to
2559 unsuspend the other threads, to decrement their `suspended' count
2560 back. */
2561 if (step_over_finished)
2562 unsuspend_all_lwps (event_child);
2563
2564 /* Stabilize threads (move out of jump pads). */
2565 stabilize_threads ();
2566 }
2567 else
2568 {
2569 /* If we just finished a step-over, then all threads had been
2570 momentarily paused. In all-stop, that's fine, we want
2571 threads stopped by now anyway. In non-stop, we need to
2572 re-resume threads that GDB wanted to be running. */
2573 if (step_over_finished)
2574 unstop_all_lwps (1, event_child);
2575 }
2576
2577 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2578
2579 if (current_inferior->last_resume_kind == resume_stop
2580 && WSTOPSIG (w) == SIGSTOP)
2581 {
2582 /* A thread that has been requested to stop by GDB with vCont;t
2583 stopped cleanly; report that as SIG0. The use of
2584 SIGSTOP is an implementation detail. */
2585 ourstatus->value.sig = TARGET_SIGNAL_0;
2586 }
2587 else if (current_inferior->last_resume_kind == resume_stop
2588 && WSTOPSIG (w) != SIGSTOP)
2589 {
2590 /* A thread that has been requested to stop by GDB with vCont;t,
2591 but it stopped for some other reason. */
2592 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2593 }
2594 else
2595 {
2596 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2597 }
2598
2599 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2600
2601 if (debug_threads)
2602 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2603 target_pid_to_str (ptid_of (event_child)),
2604 ourstatus->kind,
2605 ourstatus->value.sig);
2606
2607 return ptid_of (event_child);
2608 }
2609
2610 /* Get rid of any pending event in the pipe. */
2611 static void
2612 async_file_flush (void)
2613 {
2614 int ret;
2615 char buf;
2616
2617 do
2618 ret = read (linux_event_pipe[0], &buf, 1);
2619 while (ret >= 0 || (ret == -1 && errno == EINTR));
2620 }
2621
2622 /* Put something in the pipe, so the event loop wakes up. */
2623 static void
2624 async_file_mark (void)
2625 {
2626 int ret;
2627
2628 async_file_flush ();
2629
2630 do
2631 ret = write (linux_event_pipe[1], "+", 1);
2632 while (ret == 0 || (ret == -1 && errno == EINTR));
2633
2634 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2635 be awakened anyway. */
2636 }
2637
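/* Illustrative sketch (editor's addition, hypothetical setup helper):
   the flush/mark pair above is the classic self-pipe trick.  For it
   to work, both ends must be non-blocking, so that draining an empty
   pipe or marking a full one returns immediately instead of wedging
   the event loop.  */

#include <fcntl.h>
#include <unistd.h>

static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}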
2638 static ptid_t
2639 linux_wait (ptid_t ptid,
2640 struct target_waitstatus *ourstatus, int target_options)
2641 {
2642 ptid_t event_ptid;
2643
2644 if (debug_threads)
2645 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2646
2647 /* Flush the async file first. */
2648 if (target_is_async_p ())
2649 async_file_flush ();
2650
2651 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2652
2653 /* If at least one stop was reported, there may be more. A single
2654 SIGCHLD can signal more than one child stop. */
2655 if (target_is_async_p ()
2656 && (target_options & TARGET_WNOHANG) != 0
2657 && !ptid_equal (event_ptid, null_ptid))
2658 async_file_mark ();
2659
2660 return event_ptid;
2661 }
2662
2663 /* Send a signal to an LWP. */
2664
2665 static int
2666 kill_lwp (unsigned long lwpid, int signo)
2667 {
2668 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2669 fails, then we are not using nptl threads and we should be using kill. */
2670
2671 #ifdef __NR_tkill
2672 {
2673 static int tkill_failed;
2674
2675 if (!tkill_failed)
2676 {
2677 int ret;
2678
2679 errno = 0;
2680 ret = syscall (__NR_tkill, lwpid, signo);
2681 if (errno != ENOSYS)
2682 return ret;
2683 tkill_failed = 1;
2684 }
2685 }
2686 #endif
2687
2688 return kill (lwpid, signo);
2689 }
2690
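/* Illustrative sketch (editor's addition, hypothetical wrapper):
   kill () addresses a whole process, so with NPTL the kernel may
   deliver the signal to whichever thread has it unblocked, while
   tkill () names one specific thread -- which is what stopping an
   individual LWP requires.  */

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
sigstop_one_thread (pid_t tid)
{
#ifdef __NR_tkill
  syscall (__NR_tkill, tid, SIGSTOP);	/* exactly this LWP */
#else
  kill (tid, SIGSTOP);			/* may reach another thread */
#endif
}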
2691 void
2692 linux_stop_lwp (struct lwp_info *lwp)
2693 {
2694 send_sigstop (lwp);
2695 }
2696
2697 static void
2698 send_sigstop (struct lwp_info *lwp)
2699 {
2700 int pid;
2701
2702 pid = lwpid_of (lwp);
2703
2704 /* If we already have a pending stop signal for this LWP, don't
2705 send another. */
2706 if (lwp->stop_expected)
2707 {
2708 if (debug_threads)
2709 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2710
2711 return;
2712 }
2713
2714 if (debug_threads)
2715 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2716
2717 lwp->stop_expected = 1;
2718 kill_lwp (pid, SIGSTOP);
2719 }
2720
2721 static int
2722 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2723 {
2724 struct lwp_info *lwp = (struct lwp_info *) entry;
2725
2726 /* Ignore EXCEPT. */
2727 if (lwp == except)
2728 return 0;
2729
2730 if (lwp->stopped)
2731 return 0;
2732
2733 send_sigstop (lwp);
2734 return 0;
2735 }
2736
2737 /* Increment the suspend count of an LWP, and stop it, if not stopped
2738 yet. */
2739 static int
2740 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2741 void *except)
2742 {
2743 struct lwp_info *lwp = (struct lwp_info *) entry;
2744
2745 /* Ignore EXCEPT. */
2746 if (lwp == except)
2747 return 0;
2748
2749 lwp->suspended++;
2750
2751 return send_sigstop_callback (entry, except);
2752 }
2753
2754 static void
2755 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2756 {
2757 /* It's dead, really. */
2758 lwp->dead = 1;
2759
2760 /* Store the exit status for later. */
2761 lwp->status_pending_p = 1;
2762 lwp->status_pending = wstat;
2763
2764 /* Prevent trying to stop it. */
2765 lwp->stopped = 1;
2766
2767 /* No further stops are expected from a dead lwp. */
2768 lwp->stop_expected = 0;
2769 }
2770
2771 static void
2772 wait_for_sigstop (struct inferior_list_entry *entry)
2773 {
2774 struct lwp_info *lwp = (struct lwp_info *) entry;
2775 struct thread_info *saved_inferior;
2776 int wstat;
2777 ptid_t saved_tid;
2778 ptid_t ptid;
2779 int pid;
2780
2781 if (lwp->stopped)
2782 {
2783 if (debug_threads)
2784 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2785 lwpid_of (lwp));
2786 return;
2787 }
2788
2789 saved_inferior = current_inferior;
2790 if (saved_inferior != NULL)
2791 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2792 else
2793 saved_tid = null_ptid; /* avoid bogus unused warning */
2794
2795 ptid = lwp->head.id;
2796
2797 if (debug_threads)
2798 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2799
2800 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2801
2802 /* If we stopped with a non-SIGSTOP signal, save it for later
2803 and record the pending SIGSTOP. If the process exited, just
2804 return. */
2805 if (WIFSTOPPED (wstat))
2806 {
2807 if (debug_threads)
2808 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2809 lwpid_of (lwp), WSTOPSIG (wstat));
2810
2811 if (WSTOPSIG (wstat) != SIGSTOP)
2812 {
2813 if (debug_threads)
2814 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2815 lwpid_of (lwp), wstat);
2816
2817 lwp->status_pending_p = 1;
2818 lwp->status_pending = wstat;
2819 }
2820 }
2821 else
2822 {
2823 if (debug_threads)
2824 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2825
2826 lwp = find_lwp_pid (pid_to_ptid (pid));
2827 if (lwp)
2828 {
2829 /* Leave this status pending for the next time we're able to
2830 report it. In the mean time, we'll report this lwp as
2831 dead to GDB, so GDB doesn't try to read registers and
2832 memory from it. This can only happen if this was the
2833 last thread of the process; otherwise, PID is removed
2834 from the thread tables before linux_wait_for_event
2835 returns. */
2836 mark_lwp_dead (lwp, wstat);
2837 }
2838 }
2839
2840 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2841 current_inferior = saved_inferior;
2842 else
2843 {
2844 if (debug_threads)
2845 fprintf (stderr, "Previously current thread died.\n");
2846
2847 if (non_stop)
2848 {
2849 /* We can't change the current inferior behind GDB's back,
2850 otherwise, a subsequent command may apply to the wrong
2851 process. */
2852 current_inferior = NULL;
2853 }
2854 else
2855 {
2856 /* Set a valid thread as current. */
2857 set_desired_inferior (0);
2858 }
2859 }
2860 }
2861
2862 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2863 move it out, because we need to report the stop event to GDB. For
2864 example, if the user puts a breakpoint in the jump pad, it's
2865 because she wants to debug it. */
2866
2867 static int
2868 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2869 {
2870 struct lwp_info *lwp = (struct lwp_info *) entry;
2871 struct thread_info *thread = get_lwp_thread (lwp);
2872
2873 gdb_assert (lwp->suspended == 0);
2874 gdb_assert (lwp->stopped);
2875
2876 /* Allow debugging the jump pad, gdb_collect, etc. */
2877 return (supports_fast_tracepoints ()
2878 && in_process_agent_loaded ()
2879 && (gdb_breakpoint_here (lwp->stop_pc)
2880 || lwp->stopped_by_watchpoint
2881 || thread->last_resume_kind == resume_step)
2882 && linux_fast_tracepoint_collecting (lwp, NULL));
2883 }
2884
2885 static void
2886 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2887 {
2888 struct lwp_info *lwp = (struct lwp_info *) entry;
2889 struct thread_info *thread = get_lwp_thread (lwp);
2890 int *wstat;
2891
2892 gdb_assert (lwp->suspended == 0);
2893 gdb_assert (lwp->stopped);
2894
2895 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2896
2897 /* Allow debugging the jump pad, gdb_collect, etc. */
2898 if (!gdb_breakpoint_here (lwp->stop_pc)
2899 && !lwp->stopped_by_watchpoint
2900 && thread->last_resume_kind != resume_step
2901 && maybe_move_out_of_jump_pad (lwp, wstat))
2902 {
2903 if (debug_threads)
2904 fprintf (stderr,
2905 "LWP %ld needs stabilizing (in jump pad)\n",
2906 lwpid_of (lwp));
2907
2908 if (wstat)
2909 {
2910 lwp->status_pending_p = 0;
2911 enqueue_one_deferred_signal (lwp, wstat);
2912
2913 if (debug_threads)
2914 fprintf (stderr,
2915 "Signal %d for LWP %ld deferred "
2916 "(in jump pad)\n",
2917 WSTOPSIG (*wstat), lwpid_of (lwp));
2918 }
2919
2920 linux_resume_one_lwp (lwp, 0, 0, NULL);
2921 }
2922 else
2923 lwp->suspended++;
2924 }
2925
2926 static int
2927 lwp_running (struct inferior_list_entry *entry, void *data)
2928 {
2929 struct lwp_info *lwp = (struct lwp_info *) entry;
2930
2931 if (lwp->dead)
2932 return 0;
2933 if (lwp->stopped)
2934 return 0;
2935 return 1;
2936 }
2937
2938 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2939 If SUSPEND, then also increase the suspend count of every LWP,
2940 except EXCEPT. */
2941
2942 static void
2943 stop_all_lwps (int suspend, struct lwp_info *except)
2944 {
2945 stopping_threads = 1;
2946
2947 if (suspend)
2948 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2949 else
2950 find_inferior (&all_lwps, send_sigstop_callback, except);
2951 for_each_inferior (&all_lwps, wait_for_sigstop);
2952 stopping_threads = 0;
2953 }
2954
2955 /* Resume execution of LWP.
2956 If STEP is nonzero, single-step it.
2957 If SIGNAL is nonzero, give it that signal. */
2958
2959 static void
2960 linux_resume_one_lwp (struct lwp_info *lwp,
2961 int step, int signal, siginfo_t *info)
2962 {
2963 struct thread_info *saved_inferior;
2964 int fast_tp_collecting;
2965
2966 if (lwp->stopped == 0)
2967 return;
2968
2969 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2970
2971 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2972
2973 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2974 user used the "jump" command, or "set $pc = foo"). */
2975 if (lwp->stop_pc != get_pc (lwp))
2976 {
2977 /* Collecting 'while-stepping' actions doesn't make sense
2978 anymore. */
2979 release_while_stepping_state_list (get_lwp_thread (lwp));
2980 }
2981
2982 /* If we have pending signals or status, and a new signal, enqueue the
2983 signal. Also enqueue the signal if we are waiting to reinsert a
2984 breakpoint; it will be picked up again below. */
2985 if (signal != 0
2986 && (lwp->status_pending_p
2987 || lwp->pending_signals != NULL
2988 || lwp->bp_reinsert != 0
2989 || fast_tp_collecting))
2990 {
2991 struct pending_signals *p_sig;
2992 p_sig = xmalloc (sizeof (*p_sig));
2993 p_sig->prev = lwp->pending_signals;
2994 p_sig->signal = signal;
2995 if (info == NULL)
2996 memset (&p_sig->info, 0, sizeof (siginfo_t));
2997 else
2998 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2999 lwp->pending_signals = p_sig;
3000 }
3001
3002 if (lwp->status_pending_p)
3003 {
3004 if (debug_threads)
3005 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3006 " has pending status\n",
3007 lwpid_of (lwp), step ? "step" : "continue", signal,
3008 lwp->stop_expected ? "expected" : "not expected");
3009 return;
3010 }
3011
3012 saved_inferior = current_inferior;
3013 current_inferior = get_lwp_thread (lwp);
3014
3015 if (debug_threads)
3016 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3017 lwpid_of (lwp), step ? "step" : "continue", signal,
3018 lwp->stop_expected ? "expected" : "not expected");
3019
3020 /* This bit needs some thinking about. If we get a signal that
3021 we must report while a single-step reinsert is still pending,
3022 we often end up resuming the thread. It might be better to
3023 (ew) allow a stack of pending events; then we could be sure that
3024 the reinsert happened right away and not lose any signals.
3025
3026 Making this stack would also shrink the window in which breakpoints are
3027 uninserted (see comment in linux_wait_for_lwp) but not enough for
3028 complete correctness, so it won't solve that problem. It may be
3029 worthwhile just to solve this one, however. */
3030 if (lwp->bp_reinsert != 0)
3031 {
3032 if (debug_threads)
3033 fprintf (stderr, " pending reinsert at 0x%s\n",
3034 paddress (lwp->bp_reinsert));
3035
3036 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3037 {
3038 if (fast_tp_collecting == 0)
3039 {
3040 if (step == 0)
3041 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3042 if (lwp->suspended)
3043 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3044 lwp->suspended);
3045 }
3046
3047 step = 1;
3048 }
3049
3050 /* Postpone any pending signal. It was enqueued above. */
3051 signal = 0;
3052 }
3053
3054 if (fast_tp_collecting == 1)
3055 {
3056 if (debug_threads)
3057 fprintf (stderr, "\
3058 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3059 lwpid_of (lwp));
3060
3061 /* Postpone any pending signal. It was enqueued above. */
3062 signal = 0;
3063 }
3064 else if (fast_tp_collecting == 2)
3065 {
3066 if (debug_threads)
3067 fprintf (stderr, "\
3068 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3069 lwpid_of (lwp));
3070
3071 if (can_hardware_single_step ())
3072 step = 1;
3073 else
3074 fatal ("moving out of jump pad single-stepping"
3075 " not implemented on this target");
3076
3077 /* Postpone any pending signal. It was enqueued above. */
3078 signal = 0;
3079 }
3080
3081 /* If we have while-stepping actions in this thread, set it stepping.
3082 If we have a signal to deliver, it may or may not be set to
3083 SIG_IGN, we don't know. Assume so, and allow collecting
3084 while-stepping into a signal handler. A possible smart thing to
3085 do would be to set an internal breakpoint at the signal return
3086 address, continue, and carry on catching this while-stepping
3087 action only when that breakpoint is hit. A future
3088 enhancement. */
3089 if (get_lwp_thread (lwp)->while_stepping != NULL
3090 && can_hardware_single_step ())
3091 {
3092 if (debug_threads)
3093 fprintf (stderr,
3094 "lwp %ld has a while-stepping action -> forcing step.\n",
3095 lwpid_of (lwp));
3096 step = 1;
3097 }
3098
3099 if (debug_threads && the_low_target.get_pc != NULL)
3100 {
3101 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3102 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3103 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3104 }
3105
3106 /* If we have pending signals, consume one unless we are trying to
3107 reinsert a breakpoint or we're trying to finish a fast tracepoint
3108 collect. */
3109 if (lwp->pending_signals != NULL
3110 && lwp->bp_reinsert == 0
3111 && fast_tp_collecting == 0)
3112 {
3113 struct pending_signals **p_sig;
3114
3115 p_sig = &lwp->pending_signals;
3116 while ((*p_sig)->prev != NULL)
3117 p_sig = &(*p_sig)->prev;
3118
3119 signal = (*p_sig)->signal;
3120 if ((*p_sig)->info.si_signo != 0)
3121 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3122
3123 free (*p_sig);
3124 *p_sig = NULL;
3125 }
3126
3127 if (the_low_target.prepare_to_resume != NULL)
3128 the_low_target.prepare_to_resume (lwp);
3129
3130 regcache_invalidate_one ((struct inferior_list_entry *)
3131 get_lwp_thread (lwp));
3132 errno = 0;
3133 lwp->stopped = 0;
3134 lwp->stopped_by_watchpoint = 0;
3135 lwp->stepping = step;
3136 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3137 /* Coerce to a uintptr_t first to avoid potential gcc warning
3138 of coercing an 8 byte integer to a 4 byte pointer. */
3139 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3140
3141 current_inferior = saved_inferior;
3142 if (errno)
3143 {
3144 /* ESRCH from ptrace either means that the thread was already
3145 running (an error) or that it is gone (a race condition). If
3146 it's gone, we will get a notification the next time we wait,
3147 so we can ignore the error. We could differentiate these
3148 two, but it's tricky without waiting; the thread still exists
3149 as a zombie, so sending it signal 0 would succeed. So just
3150 ignore ESRCH. */
3151 if (errno == ESRCH)
3152 return;
3153
3154 perror_with_name ("ptrace");
3155 }
3156 }
3157
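/* Illustrative sketch (editor's addition, simplified node type): the
   pending_signals list above is pushed at the head but drained from
   the deepest `prev' entry, so deferred signals are re-delivered in
   arrival order -- a FIFO built from a singly linked push stack.  */

#include <stdlib.h>

struct sig_node { int signal; struct sig_node *prev; };

static int
dequeue_oldest_signal (struct sig_node **head)
{
  struct sig_node **p = head;
  int sig;

  /* Assumes the list is non-empty.  Walk down to the oldest entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;		/* unlink the old tail */
  return sig;
}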
3158 struct thread_resume_array
3159 {
3160 struct thread_resume *resume;
3161 size_t n;
3162 };
3163
3164 /* This function is called once per thread. We look up the thread
3165 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3166 resume request.
3167
3168 This algorithm is O(threads * resume elements), but the number of
3169 resume elements is small (and will remain small at least until GDB
3170 supports thread suspension). */
3171 static int
3172 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3173 {
3174 struct lwp_info *lwp;
3175 struct thread_info *thread;
3176 int ndx;
3177 struct thread_resume_array *r;
3178
3179 thread = (struct thread_info *) entry;
3180 lwp = get_thread_lwp (thread);
3181 r = arg;
3182
3183 for (ndx = 0; ndx < r->n; ndx++)
3184 {
3185 ptid_t ptid = r->resume[ndx].thread;
3186 if (ptid_equal (ptid, minus_one_ptid)
3187 || ptid_equal (ptid, entry->id)
3188 || (ptid_is_pid (ptid)
3189 && (ptid_get_pid (ptid) == pid_of (lwp)))
3190 || (ptid_get_lwp (ptid) == -1
3191 && (ptid_get_pid (ptid) == pid_of (lwp))))
3192 {
3193 if (r->resume[ndx].kind == resume_stop
3194 && thread->last_resume_kind == resume_stop)
3195 {
3196 if (debug_threads)
3197 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3198 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3199 ? "stopped"
3200 : "stopping",
3201 lwpid_of (lwp));
3202
3203 continue;
3204 }
3205
3206 lwp->resume = &r->resume[ndx];
3207 thread->last_resume_kind = lwp->resume->kind;
3208
3209 /* If we had a deferred signal to report, dequeue one now.
3210 This can happen if LWP gets more than one signal while
3211 trying to get out of a jump pad. */
3212 if (lwp->stopped
3213 && !lwp->status_pending_p
3214 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3215 {
3216 lwp->status_pending_p = 1;
3217
3218 if (debug_threads)
3219 fprintf (stderr,
3220 "Dequeueing deferred signal %d for LWP %ld, "
3221 "leaving status pending.\n",
3222 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3223 }
3224
3225 return 0;
3226 }
3227 }
3228
3229 /* No resume action for this thread. */
3230 lwp->resume = NULL;
3231
3232 return 0;
3233 }
3234
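/* Illustrative sketch (editor's addition, simplified stand-in type):
   the ptid matching rules applied above, restated.  A resume request
   applies to a thread if it is the global wildcard, names the thread
   exactly, or names the thread's whole process with no specific
   LWP.  */

struct xptid { int pid; long lwp; };	/* hypothetical stand-in */

static int
resume_applies_p (struct xptid req, struct xptid thread)
{
  if (req.pid == -1)
    return 1;				/* minus_one_ptid: everyone */
  if (req.pid == thread.pid && req.lwp == thread.lwp)
    return 1;				/* exact thread */
  if (req.pid == thread.pid && (req.lwp == 0 || req.lwp == -1))
    return 1;				/* whole process */
  return 0;
}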
3235
3236 /* Set *FLAG_P if this lwp has an interesting status pending. */
3237 static int
3238 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3239 {
3240 struct lwp_info *lwp = (struct lwp_info *) entry;
3241
3242 /* LWPs which will not be resumed are not interesting, because
3243 we might not wait for them next time through linux_wait. */
3244 if (lwp->resume == NULL)
3245 return 0;
3246
3247 if (lwp->status_pending_p)
3248 * (int *) flag_p = 1;
3249
3250 return 0;
3251 }
3252
3253 /* Return 1 if this lwp that GDB wants running is stopped at an
3254 internal breakpoint that we need to step over. It assumes that any
3255 required STOP_PC adjustment has already been propagated to the
3256 inferior's regcache. */
3257
3258 static int
3259 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3260 {
3261 struct lwp_info *lwp = (struct lwp_info *) entry;
3262 struct thread_info *thread;
3263 struct thread_info *saved_inferior;
3264 CORE_ADDR pc;
3265
3266 /* LWPs which will not be resumed are not interesting, because we
3267 might not wait for them next time through linux_wait. */
3268
3269 if (!lwp->stopped)
3270 {
3271 if (debug_threads)
3272 fprintf (stderr,
3273 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3274 lwpid_of (lwp));
3275 return 0;
3276 }
3277
3278 thread = get_lwp_thread (lwp);
3279
3280 if (thread->last_resume_kind == resume_stop)
3281 {
3282 if (debug_threads)
3283 fprintf (stderr,
3284 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3285 lwpid_of (lwp));
3286 return 0;
3287 }
3288
3289 gdb_assert (lwp->suspended >= 0);
3290
3291 if (lwp->suspended)
3292 {
3293 if (debug_threads)
3294 fprintf (stderr,
3295 "Need step over [LWP %ld]? Ignoring, suspended\n",
3296 lwpid_of (lwp));
3297 return 0;
3298 }
3299
3300 if (!lwp->need_step_over)
3301 {
3302 if (debug_threads)
3303 fprintf (stderr,
3304 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3305 }
3306
3307 if (lwp->status_pending_p)
3308 {
3309 if (debug_threads)
3310 fprintf (stderr,
3311 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3312 lwpid_of (lwp));
3313 return 0;
3314 }
3315
3316 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3317 or we have. */
3318 pc = get_pc (lwp);
3319
3320 /* If the PC has changed since we stopped, then don't do anything,
3321 and let the breakpoint/tracepoint be hit. This happens if, for
3322 instance, GDB handled the decr_pc_after_break subtraction itself,
3323 GDB is OOL stepping this thread, or the user has issued a "jump"
3324 command, or poked thread's registers herself. */
3325 if (pc != lwp->stop_pc)
3326 {
3327 if (debug_threads)
3328 fprintf (stderr,
3329 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3330 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3331 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3332
3333 lwp->need_step_over = 0;
3334 return 0;
3335 }
3336
3337 saved_inferior = current_inferior;
3338 current_inferior = thread;
3339
3340 /* We can only step over breakpoints we know about. */
3341 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3342 {
3343 /* Don't step over a breakpoint that GDB expects to hit,
3344 though. If the condition is being evaluated on the target's side
3345 and it evaluates to false, step over this breakpoint as well. */
3346 if (gdb_breakpoint_here (pc)
3347 && gdb_condition_true_at_breakpoint (pc))
3348 {
3349 if (debug_threads)
3350 fprintf (stderr,
3351 "Need step over [LWP %ld]? yes, but found"
3352 " GDB breakpoint at 0x%s; skipping step over\n",
3353 lwpid_of (lwp), paddress (pc));
3354
3355 current_inferior = saved_inferior;
3356 return 0;
3357 }
3358 else
3359 {
3360 if (debug_threads)
3361 fprintf (stderr,
3362 "Need step over [LWP %ld]? yes, "
3363 "found breakpoint at 0x%s\n",
3364 lwpid_of (lwp), paddress (pc));
3365
3366 /* We've found an lwp that needs stepping over --- return 1 so
3367 that find_inferior stops looking. */
3368 current_inferior = saved_inferior;
3369
3370 /* If the step over is cancelled, this is set again. */
3371 lwp->need_step_over = 0;
3372 return 1;
3373 }
3374 }
3375
3376 current_inferior = saved_inferior;
3377
3378 if (debug_threads)
3379 fprintf (stderr,
3380 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3381 lwpid_of (lwp), paddress (pc));
3382
3383 return 0;
3384 }
3385
3386 /* Start a step-over operation on LWP. When LWP is stopped at a
3387 breakpoint, to make progress, we need to move the breakpoint out
3388 of the way. If we let other threads run while we do that, they may
3389 pass by the breakpoint location and miss hitting it. To avoid
3390 that, a step-over momentarily stops all threads while LWP is
3391 single-stepped with the breakpoint temporarily uninserted from
3392 the inferior. When the single-step finishes, we reinsert the
3393 breakpoint, and let all threads that are supposed to be running
3394 run again.
3395
3396 On targets that don't support hardware single-step, we don't
3397 currently support full software single-stepping. Instead, we only
3398 support stepping over the thread event breakpoint, by asking the
3399 low target where to place a reinsert breakpoint. Since this
3400 routine assumes the breakpoint being stepped over is a thread event
3401 breakpoint, the return address of the current function is usually
3402 a good enough place to set the reinsert breakpoint. */
3403
3404 static int
3405 start_step_over (struct lwp_info *lwp)
3406 {
3407 struct thread_info *saved_inferior;
3408 CORE_ADDR pc;
3409 int step;
3410
3411 if (debug_threads)
3412 fprintf (stderr,
3413 "Starting step-over on LWP %ld. Stopping all threads\n",
3414 lwpid_of (lwp));
3415
3416 stop_all_lwps (1, lwp);
3417 gdb_assert (lwp->suspended == 0);
3418
3419 if (debug_threads)
3420 fprintf (stderr, "Done stopping all threads for step-over.\n");
3421
3422 /* Note, we should always reach here with an already adjusted PC,
3423 either by GDB (if we're resuming due to GDB's request), or by our
3424 caller, if we just finished handling an internal breakpoint GDB
3425 shouldn't care about. */
3426 pc = get_pc (lwp);
3427
3428 saved_inferior = current_inferior;
3429 current_inferior = get_lwp_thread (lwp);
3430
3431 lwp->bp_reinsert = pc;
3432 uninsert_breakpoints_at (pc);
3433 uninsert_fast_tracepoint_jumps_at (pc);
3434
3435 if (can_hardware_single_step ())
3436 {
3437 step = 1;
3438 }
3439 else
3440 {
3441 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3442 set_reinsert_breakpoint (raddr);
3443 step = 0;
3444 }
3445
3446 current_inferior = saved_inferior;
3447
3448 linux_resume_one_lwp (lwp, step, 0, NULL);
3449
3450 /* Require next event from this LWP. */
3451 step_over_bkpt = lwp->head.id;
3452 return 1;
3453 }
3454
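/* Illustrative sketch (editor's addition, toy state machine): the
   step-over protocol condensed.  start_step_over above performs the
   IDLE -> STEPPING transition (stop everyone, uninsert, single-step,
   set step_over_bkpt); finish_step_over performs STEPPING -> IDLE
   (reinsert, clear step_over_bkpt) once the trap from that one LWP
   arrives.  */

enum step_over_phase { SO_IDLE, SO_STEPPING };

static enum step_over_phase
advance_step_over (enum step_over_phase phase, int got_trap_from_lwp)
{
  if (phase == SO_IDLE)
    return SO_STEPPING;		/* breakpoint pulled out, LWP stepping */
  if (got_trap_from_lwp)
    return SO_IDLE;		/* breakpoint back in, all threads resume */
  return SO_STEPPING;		/* keep waiting for that LWP's trap */
}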
3455 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3456 start_step_over, if still there, and delete any reinsert
3457 breakpoints we've set, on non-hardware single-step targets. */
3458
3459 static int
3460 finish_step_over (struct lwp_info *lwp)
3461 {
3462 if (lwp->bp_reinsert != 0)
3463 {
3464 if (debug_threads)
3465 fprintf (stderr, "Finished step over.\n");
3466
3467 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3468 may be no breakpoint to reinsert there by now. */
3469 reinsert_breakpoints_at (lwp->bp_reinsert);
3470 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3471
3472 lwp->bp_reinsert = 0;
3473
3474 /* Delete any software-single-step reinsert breakpoints. No
3475 longer needed. We don't have to worry about other threads
3476 hitting this trap, and later not being able to explain it,
3477 because we were stepping over a breakpoint, and we hold all
3478 threads but LWP stopped while doing that. */
3479 if (!can_hardware_single_step ())
3480 delete_reinsert_breakpoints ();
3481
3482 step_over_bkpt = null_ptid;
3483 return 1;
3484 }
3485 else
3486 return 0;
3487 }
3488
3489 /* This function is called once per thread. We check the thread's resume
3490 request, which will tell us whether to resume, step, or leave the thread
3491 stopped; and what signal, if any, it should be sent.
3492
3493 For threads which we aren't explicitly told otherwise, we preserve
3494 the stepping flag; this is used for stepping over gdbserver-placed
3495 breakpoints.
3496
3497 If pending_flags was set in any thread, we queue any needed
3498 signals, since we won't actually resume. We already have a pending
3499 event to report, so we don't need to preserve any step requests;
3500 they should be re-issued if necessary. */
3501
3502 static int
3503 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3504 {
3505 struct lwp_info *lwp;
3506 struct thread_info *thread;
3507 int step;
3508 int leave_all_stopped = * (int *) arg;
3509 int leave_pending;
3510
3511 thread = (struct thread_info *) entry;
3512 lwp = get_thread_lwp (thread);
3513
3514 if (lwp->resume == NULL)
3515 return 0;
3516
3517 if (lwp->resume->kind == resume_stop)
3518 {
3519 if (debug_threads)
3520 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3521
3522 if (!lwp->stopped)
3523 {
3524 if (debug_threads)
3525 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3526
3527 /* Stop the thread, and wait for the event asynchronously,
3528 through the event loop. */
3529 send_sigstop (lwp);
3530 }
3531 else
3532 {
3533 if (debug_threads)
3534 fprintf (stderr, "already stopped LWP %ld\n",
3535 lwpid_of (lwp));
3536
3537 /* The LWP may have been stopped in an internal event that
3538 was not meant to be notified back to GDB (e.g., gdbserver
3539 breakpoint), so we should be reporting a stop event in
3540 this case too. */
3541
3542 /* If the thread already has a pending SIGSTOP, this is a
3543 no-op. Otherwise, something later will presumably resume
3544 the thread and this will cause it to cancel any pending
3545 operation, due to last_resume_kind == resume_stop. If
3546 the thread already has a pending status to report, we
3547 will still report it the next time we wait - see
3548 status_pending_p_callback. */
3549
3550 /* If we already have a pending signal to report, then
3551 there's no need to queue a SIGSTOP, as this means we're
3552 midway through moving the LWP out of the jumppad, and we
3553 will report the pending signal as soon as that is
3554 finished. */
3555 if (lwp->pending_signals_to_report == NULL)
3556 send_sigstop (lwp);
3557 }
3558
3559 /* For stop requests, we're done. */
3560 lwp->resume = NULL;
3561 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3562 return 0;
3563 }
3564
3565 /* If this thread which is about to be resumed has a pending status,
3566 then don't resume any threads - we can just report the pending
3567 status. Make sure to queue any signals that would otherwise be
3568 sent. In all-stop mode, we make this decision based on whether *any*
3569 thread has a pending status. If there's a thread that needs the
3570 step-over-breakpoint dance, then don't resume any other thread
3571 but that particular one. */
3572 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3573
3574 if (!leave_pending)
3575 {
3576 if (debug_threads)
3577 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3578
3579 step = (lwp->resume->kind == resume_step);
3580 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3581 }
3582 else
3583 {
3584 if (debug_threads)
3585 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3586
3587 /* If we have a new signal, enqueue the signal. */
3588 if (lwp->resume->sig != 0)
3589 {
3590 struct pending_signals *p_sig;
3591 p_sig = xmalloc (sizeof (*p_sig));
3592 p_sig->prev = lwp->pending_signals;
3593 p_sig->signal = lwp->resume->sig;
3594 memset (&p_sig->info, 0, sizeof (siginfo_t));
3595
3596 /* If this is the same signal we were previously stopped by,
3597 make sure to queue its siginfo. We can ignore the return
3598 value of ptrace; if it fails, we'll skip
3599 PTRACE_SETSIGINFO. */
3600 if (WIFSTOPPED (lwp->last_status)
3601 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3602 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3603
3604 lwp->pending_signals = p_sig;
3605 }
3606 }
3607
3608 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3609 lwp->resume = NULL;
3610 return 0;
3611 }
3612
3613 static void
3614 linux_resume (struct thread_resume *resume_info, size_t n)
3615 {
3616 struct thread_resume_array array = { resume_info, n };
3617 struct lwp_info *need_step_over = NULL;
3618 int any_pending;
3619 int leave_all_stopped;
3620
3621 find_inferior (&all_threads, linux_set_resume_request, &array);
3622
3623 /* If there is a thread which would otherwise be resumed, which has
3624 a pending status, then don't resume any threads - we can just
3625 report the pending status. Make sure to queue any signals that
3626 would otherwise be sent. In non-stop mode, we'll apply this
3627 logic to each thread individually. We consume all pending events
3628 before considering whether to start a step-over (in all-stop). */
3629 any_pending = 0;
3630 if (!non_stop)
3631 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3632
3633 /* If there is a thread which would otherwise be resumed, which is
3634 stopped at a breakpoint that needs stepping over, then don't
3635 resume any threads - have it step over the breakpoint with all
3636 other threads stopped, then resume all threads again. Make sure
3637 to queue any signals that would otherwise be delivered or
3638 queued. */
3639 if (!any_pending && supports_breakpoints ())
3640 need_step_over
3641 = (struct lwp_info *) find_inferior (&all_lwps,
3642 need_step_over_p, NULL);
3643
3644 leave_all_stopped = (need_step_over != NULL || any_pending);
3645
3646 if (debug_threads)
3647 {
3648 if (need_step_over != NULL)
3649 fprintf (stderr, "Not resuming all, need step over\n");
3650 else if (any_pending)
3651 fprintf (stderr,
3652 "Not resuming, all-stop and found "
3653 "an LWP with pending status\n");
3654 else
3655 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3656 }
3657
3658 /* Even if we're leaving threads stopped, queue all signals we'd
3659 otherwise deliver. */
3660 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3661
3662 if (need_step_over)
3663 start_step_over (need_step_over);
3664 }
3665
3666 /* This function is called once per thread. We check the thread's
3667 last resume request, which will tell us whether to resume, step, or
3668 leave the thread stopped. Any signal the client requested to be
3669 delivered has already been enqueued at this point.
3670
3671 If any thread that GDB wants running is stopped at an internal
3672 breakpoint that needs stepping over, we start a step-over operation
3673 on that particular thread, and leave all others stopped. */
3674
3675 static int
3676 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3677 {
3678 struct lwp_info *lwp = (struct lwp_info *) entry;
3679 struct thread_info *thread;
3680 int step;
3681
3682 if (lwp == except)
3683 return 0;
3684
3685 if (debug_threads)
3686 fprintf (stderr,
3687 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3688
3689 if (!lwp->stopped)
3690 {
3691 if (debug_threads)
3692 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3693 return 0;
3694 }
3695
3696 thread = get_lwp_thread (lwp);
3697
3698 if (thread->last_resume_kind == resume_stop
3699 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3700 {
3701 if (debug_threads)
3702 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3703 lwpid_of (lwp));
3704 return 0;
3705 }
3706
3707 if (lwp->status_pending_p)
3708 {
3709 if (debug_threads)
3710 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3711 lwpid_of (lwp));
3712 return 0;
3713 }
3714
3715 gdb_assert (lwp->suspended >= 0);
3716
3717 if (lwp->suspended)
3718 {
3719 if (debug_threads)
3720 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3721 return 0;
3722 }
3723
3724 if (thread->last_resume_kind == resume_stop
3725 && lwp->pending_signals_to_report == NULL
3726 && lwp->collecting_fast_tracepoint == 0)
3727 {
3728 /* We haven't reported this LWP as stopped yet (otherwise, the
3729 last_status.kind check above would catch it, and we wouldn't
3730 reach here). This LWP may have been momentarily paused by a
3731 stop_all_lwps call while handling, for example, another LWP's
3732 step-over. In that case, the pending expected SIGSTOP signal
3733 that was queued at vCont;t handling time will have already
3734 been consumed by wait_for_sigstop, and so we need to requeue
3735 another one here. Note that if the LWP already has a SIGSTOP
3736 pending, this is a no-op. */
3737
3738 if (debug_threads)
3739 fprintf (stderr,
3740 "Client wants LWP %ld to stop. "
3741 "Making sure it has a SIGSTOP pending\n",
3742 lwpid_of (lwp));
3743
3744 send_sigstop (lwp);
3745 }
3746
3747 step = thread->last_resume_kind == resume_step;
3748 linux_resume_one_lwp (lwp, step, 0, NULL);
3749 return 0;
3750 }
3751
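/* Undo one level of LWP suspension, then let the LWP run again as
   proceed_one_lwp does.  proceed_one_lwp itself takes care of
   leaving the LWP stopped if it is still suspended or otherwise
   shouldn't run yet.  */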
3752 static int
3753 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3754 {
3755 struct lwp_info *lwp = (struct lwp_info *) entry;
3756
3757 if (lwp == except)
3758 return 0;
3759
3760 lwp->suspended--;
3761 gdb_assert (lwp->suspended >= 0);
3762
3763 return proceed_one_lwp (entry, except);
3764 }
3765
3766 /* When we finish a step-over, set threads running again. If there's
3767 another thread that may need a step-over, now's the time to start
3768 it. Eventually, we'll move all threads past their breakpoints. */
3769
3770 static void
3771 proceed_all_lwps (void)
3772 {
3773 struct lwp_info *need_step_over;
3774
3775 /* If there is a thread which would otherwise be resumed, which is
3776 stopped at a breakpoint that needs stepping over, then don't
3777 resume any threads - have it step over the breakpoint with all
3778 other threads stopped, then resume all threads again. */
3779
3780 if (supports_breakpoints ())
3781 {
3782 need_step_over
3783 = (struct lwp_info *) find_inferior (&all_lwps,
3784 need_step_over_p, NULL);
3785
3786 if (need_step_over != NULL)
3787 {
3788 if (debug_threads)
3789 fprintf (stderr, "proceed_all_lwps: found "
3790 "thread %ld needing a step-over\n",
3791 lwpid_of (need_step_over));
3792
3793 start_step_over (need_step_over);
3794 return;
3795 }
3796 }
3797
3798 if (debug_threads)
3799 fprintf (stderr, "Proceeding, no step-over needed\n");
3800
3801 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3802 }
3803
3804 /* Stopped LWPs that the client wanted running, and that have no
3805 pending statuses, are set to run again - all except EXCEPT, if it
3806 is not NULL. This undoes a stop_all_lwps call. */
3807
3808 static void
3809 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3810 {
3811 if (debug_threads)
3812 {
3813 if (except)
3814 fprintf (stderr,
3815 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3816 else
3817 fprintf (stderr,
3818 "unstopping all lwps\n");
3819 }
3820
3821 if (unsuspend)
3822 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3823 else
3824 find_inferior (&all_lwps, proceed_one_lwp, except);
3825 }
3826
3827
3828 #ifdef HAVE_LINUX_REGSETS
3829
3830 #define use_linux_regsets 1
3831
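/* Fetch all registers covered by kernel regsets into REGCACHE, for
   the current inferior's LWP.  Returns 0 if the general-purpose
   registers were among the regsets transferred, 1 if the caller
   still needs to fetch them by other means (e.g. PTRACE_PEEKUSER).  */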
3832 static int
3833 regsets_fetch_inferior_registers (struct regcache *regcache)
3834 {
3835 struct regset_info *regset;
3836 int saw_general_regs = 0;
3837 int pid;
3838 struct iovec iov;
3839
3840 regset = target_regsets;
3841
3842 pid = lwpid_of (get_thread_lwp (current_inferior));
3843 while (regset->size >= 0)
3844 {
3845 void *buf, *data;
3846 int nt_type, res;
3847
3848 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3849 {
3850 regset ++;
3851 continue;
3852 }
3853
3854 buf = xmalloc (regset->size);
3855
3856 nt_type = regset->nt_type;
3857 if (nt_type)
3858 {
3859 iov.iov_base = buf;
3860 iov.iov_len = regset->size;
3861 data = (void *) &iov;
3862 }
3863 else
3864 data = buf;
3865
3866 #ifndef __sparc__
3867 res = ptrace (regset->get_request, pid, nt_type, data);
3868 #else
3869 res = ptrace (regset->get_request, pid, data, nt_type);
3870 #endif
3871 if (res < 0)
3872 {
3873 if (errno == EIO)
3874 {
3875 /* If we get EIO on a regset, do not try it again for
3876 this process. */
3877 disabled_regsets[regset - target_regsets] = 1;
3878 free (buf);
3879 continue;
3880 }
3881 else
3882 {
3883 char s[256];
3884 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3885 pid);
3886 perror (s);
3887 }
3888 }
3889 else if (regset->type == GENERAL_REGS)
3890 saw_general_regs = 1;
3891 regset->store_function (regcache, buf);
3892 regset ++;
3893 free (buf);
3894 }
3895 if (saw_general_regs)
3896 return 0;
3897 else
3898 return 1;
3899 }
3900
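/* Counterpart of regsets_fetch_inferior_registers: write the
   regcache contents back to the inferior via kernel regsets.  Same
   return convention: 0 if the general-purpose registers were
   handled, 1 otherwise.  */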
3901 static int
3902 regsets_store_inferior_registers (struct regcache *regcache)
3903 {
3904 struct regset_info *regset;
3905 int saw_general_regs = 0;
3906 int pid;
3907 struct iovec iov;
3908
3909 regset = target_regsets;
3910
3911 pid = lwpid_of (get_thread_lwp (current_inferior));
3912 while (regset->size >= 0)
3913 {
3914 void *buf, *data;
3915 int nt_type, res;
3916
3917 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3918 {
3919 regset ++;
3920 continue;
3921 }
3922
3923 buf = xmalloc (regset->size);
3924
3925 /* First fill the buffer with the current register set contents,
3926 in case there are any items in the kernel's regset that are
3927 not in gdbserver's regcache. */
3928
3929 nt_type = regset->nt_type;
3930 if (nt_type)
3931 {
3932 iov.iov_base = buf;
3933 iov.iov_len = regset->size;
3934 data = (void *) &iov;
3935 }
3936 else
3937 data = buf;
3938
3939 #ifndef __sparc__
3940 res = ptrace (regset->get_request, pid, nt_type, data);
3941 #else
3942 res = ptrace (regset->get_request, pid, data, nt_type);
3943 #endif
3944
3945 if (res == 0)
3946 {
3947 /* Then overlay our cached registers on that. */
3948 regset->fill_function (regcache, buf);
3949
3950 /* Only now do we write the register set. */
3951 #ifndef __sparc__
3952 res = ptrace (regset->set_request, pid, nt_type, data);
3953 #else
3954 res = ptrace (regset->set_request, pid, data, nt_type);
3955 #endif
3956 }
3957
3958 if (res < 0)
3959 {
3960 if (errno == EIO)
3961 {
3962 /* If we get EIO on a regset, do not try it again for
3963 this process. */
3964 disabled_regsets[regset - target_regsets] = 1;
3965 free (buf);
3966 continue;
3967 }
3968 else if (errno == ESRCH)
3969 {
3970 /* At this point, ESRCH should mean the process is
3971 already gone, in which case we simply ignore attempts
3972 to change its registers. See also the related
3973 comment in linux_resume_one_lwp. */
3974 free (buf);
3975 return 0;
3976 }
3977 else
3978 {
3979 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3980 }
3981 }
3982 else if (regset->type == GENERAL_REGS)
3983 saw_general_regs = 1;
3984 regset ++;
3985 free (buf);
3986 }
3987 if (saw_general_regs)
3988 return 0;
3989 else
3990 return 1;
3991 }
3992
3993 #else /* !HAVE_LINUX_REGSETS */
3994
3995 #define use_linux_regsets 0
3996 #define regsets_fetch_inferior_registers(regcache) 1
3997 #define regsets_store_inferior_registers(regcache) 1
3998
3999 #endif
4000
4001 /* Return 1 if register REGNO is supported by one of the regset ptrace
4002 calls or 0 if it has to be transferred individually. */
4003
4004 static int
4005 linux_register_in_regsets (int regno)
4006 {
4007 unsigned char mask = 1 << (regno % 8);
4008 size_t index = regno / 8;
4009
4010 return (use_linux_regsets
4011 && (the_low_target.regset_bitmap == NULL
4012 || (the_low_target.regset_bitmap[index] & mask) != 0));
4013 }
4014
4015 #ifdef HAVE_LINUX_USRREGS
4016
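/* Return the ptrace ``user area'' offset of register REGNUM, as
   given by the low target's regmap.  */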
4017 int
4018 register_addr (int regnum)
4019 {
4020 int addr;
4021
4022 if (regnum < 0 || regnum >= the_low_target.num_regs)
4023 error ("Invalid register number %d.", regnum);
4024
4025 addr = the_low_target.regmap[regnum];
4026
4027 return addr;
4028 }
4029
4030 /* Fetch one register. */
4031 static void
4032 fetch_register (struct regcache *regcache, int regno)
4033 {
4034 CORE_ADDR regaddr;
4035 int i, size;
4036 char *buf;
4037 int pid;
4038
4039 if (regno >= the_low_target.num_regs)
4040 return;
4041 if ((*the_low_target.cannot_fetch_register) (regno))
4042 return;
4043
4044 regaddr = register_addr (regno);
4045 if (regaddr == -1)
4046 return;
4047
4048 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4049 & -sizeof (PTRACE_XFER_TYPE));
4050 buf = alloca (size);
4051
4052 pid = lwpid_of (get_thread_lwp (current_inferior));
4053 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4054 {
4055 errno = 0;
4056 *(PTRACE_XFER_TYPE *) (buf + i) =
4057 ptrace (PTRACE_PEEKUSER, pid,
4058 /* Coerce to a uintptr_t first to avoid potential gcc warning
4059 of coercing an 8 byte integer to a 4 byte pointer. */
4060 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4061 regaddr += sizeof (PTRACE_XFER_TYPE);
4062 if (errno != 0)
4063 error ("reading register %d: %s", regno, strerror (errno));
4064 }
4065
4066 if (the_low_target.supply_ptrace_register)
4067 the_low_target.supply_ptrace_register (regcache, regno, buf);
4068 else
4069 supply_register (regcache, regno, buf);
4070 }
4071
4072 /* Store one register. */
4073 static void
4074 store_register (struct regcache *regcache, int regno)
4075 {
4076 CORE_ADDR regaddr;
4077 int i, size;
4078 char *buf;
4079 int pid;
4080
4081 if (regno >= the_low_target.num_regs)
4082 return;
4083 if ((*the_low_target.cannot_store_register) (regno))
4084 return;
4085
4086 regaddr = register_addr (regno);
4087 if (regaddr == -1)
4088 return;
4089
4090 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4091 & -sizeof (PTRACE_XFER_TYPE));
4092 buf = alloca (size);
4093 memset (buf, 0, size);
4094
4095 if (the_low_target.collect_ptrace_register)
4096 the_low_target.collect_ptrace_register (regcache, regno, buf);
4097 else
4098 collect_register (regcache, regno, buf);
4099
4100 pid = lwpid_of (get_thread_lwp (current_inferior));
4101 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4102 {
4103 errno = 0;
4104 ptrace (PTRACE_POKEUSER, pid,
4105 /* Coerce to a uintptr_t first to avoid potential gcc warning
4106 about coercing an 8 byte integer to a 4 byte pointer. */
4107 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4108 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4109 if (errno != 0)
4110 {
4111 /* At this point, ESRCH should mean the process is
4112 already gone, in which case we simply ignore attempts
4113 to change its registers. See also the related
4114 comment in linux_resume_one_lwp. */
4115 if (errno == ESRCH)
4116 return;
4117
4118 if ((*the_low_target.cannot_store_register) (regno) == 0)
4119 error ("writing register %d: %s", regno, strerror (errno));
4120 }
4121 regaddr += sizeof (PTRACE_XFER_TYPE);
4122 }
4123 }
4124
4125 /* Fetch all registers, or just one, from the child process.
4126 If REGNO is -1, do this for all registers, skipping any that are
4127 assumed to have been retrieved by regsets_fetch_inferior_registers,
4128 unless ALL is non-zero.
4129 Otherwise, REGNO specifies which register (so we can save time). */
4130 static void
4131 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4132 {
4133 if (regno == -1)
4134 {
4135 for (regno = 0; regno < the_low_target.num_regs; regno++)
4136 if (all || !linux_register_in_regsets (regno))
4137 fetch_register (regcache, regno);
4138 }
4139 else
4140 fetch_register (regcache, regno);
4141 }
4142
4143 /* Store our register values back into the inferior.
4144 If REGNO is -1, do this for all registers, skipping any that are
4145 assumed to have been saved by regsets_store_inferior_registers,
4146 unless ALL is non-zero.
4147 Otherwise, REGNO specifies which register (so we can save time). */
4148 static void
4149 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4150 {
4151 if (regno == -1)
4152 {
4153 for (regno = 0; regno < the_low_target.num_regs; regno++)
4154 if (all || !linux_register_in_regsets (regno))
4155 store_register (regcache, regno);
4156 }
4157 else
4158 store_register (regcache, regno);
4159 }
4160
4161 #else /* !HAVE_LINUX_USRREGS */
4162
4163 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4164 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4165
4166 #endif
4167
4168
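/* Fetch registers from the inferior into REGCACHE: all registers if
   REGNO is -1, otherwise just REGNO.  Regsets are preferred; any
   register they do not cover is fetched individually.  */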
4169 void
4170 linux_fetch_registers (struct regcache *regcache, int regno)
4171 {
4172 int use_regsets;
4173 int all = 0;
4174
4175 if (regno == -1)
4176 {
4177 all = regsets_fetch_inferior_registers (regcache);
4178 usr_fetch_inferior_registers (regcache, regno, all);
4179 }
4180 else
4181 {
4182 use_regsets = linux_register_in_regsets (regno);
4183 if (use_regsets)
4184 all = regsets_fetch_inferior_registers (regcache);
4185 if (!use_regsets || all)
4186 usr_fetch_inferior_registers (regcache, regno, 1);
4187 }
4188 }
4189
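/* Counterpart of linux_fetch_registers: write REGCACHE back to the
   inferior, again preferring regsets over individual transfers.  */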
4190 void
4191 linux_store_registers (struct regcache *regcache, int regno)
4192 {
4193 int use_regsets;
4194 int all = 0;
4195
4196 if (regno == -1)
4197 {
4198 all = regsets_store_inferior_registers (regcache);
4199 usr_store_inferior_registers (regcache, regno, all);
4200 }
4201 else
4202 {
4203 use_regsets = linux_register_in_regsets (regno);
4204 if (use_regsets)
4205 all = regsets_store_inferior_registers (regcache);
4206 if (!use_regsets || all)
4207 usr_store_inferior_registers (regcache, regno, 1);
4208 }
4209 }
4210
4211
4212 /* Copy LEN bytes from inferior's memory starting at MEMADDR to
4213 debugger memory starting at MYADDR. Returns 0 on success, errno on failure. */
4214
4215 static int
4216 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4217 {
4218 register int i;
4219 /* Round starting address down to longword boundary. */
4220 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4221 /* Round ending address up; get number of longwords that makes. */
4222 register int count
4223 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4224 / sizeof (PTRACE_XFER_TYPE);
4225 /* Allocate buffer of that many longwords. */
4226 register PTRACE_XFER_TYPE *buffer
4227 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4228 int fd;
4229 char filename[64];
4230 int pid = lwpid_of (get_thread_lwp (current_inferior));
4231
4232 /* Try using /proc. Don't bother for one word. */
4233 if (len >= 3 * sizeof (long))
4234 {
4235 /* We could keep this file open and cache it - possibly one per
4236 thread. That requires some juggling, but is even faster. */
4237 sprintf (filename, "/proc/%d/mem", pid);
4238 fd = open (filename, O_RDONLY | O_LARGEFILE);
4239 if (fd == -1)
4240 goto no_proc;
4241
4242 /* If pread64 is available, use it. It's faster if the kernel
4243 supports it (only one syscall), and it's 64-bit safe even on
4244 32-bit platforms (for instance, SPARC debugging a SPARC64
4245 application). */
4246 #ifdef HAVE_PREAD64
4247 if (pread64 (fd, myaddr, len, memaddr) != len)
4248 #else
4249 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4250 #endif
4251 {
4252 close (fd);
4253 goto no_proc;
4254 }
4255
4256 close (fd);
4257 return 0;
4258 }
4259
4260 no_proc:
4261 /* Read all the longwords */
4262 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4263 {
4264 errno = 0;
4265 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4266 about coercing an 8 byte integer to a 4 byte pointer. */
4267 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4268 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4269 if (errno)
4270 return errno;
4271 }
4272
4273 /* Copy appropriate bytes out of the buffer. */
4274 memcpy (myaddr,
4275 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4276 len);
4277
4278 return 0;
4279 }
4280
4281 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4282 memory at MEMADDR. On failure (cannot write to the inferior)
4283 returns the value of errno. */
4284
4285 static int
4286 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4287 {
4288 register int i;
4289 /* Round starting address down to longword boundary. */
4290 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4291 /* Round ending address up; get number of longwords that makes. */
4292 register int count
4293 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4294 / sizeof (PTRACE_XFER_TYPE);
4295
4296 /* Allocate buffer of that many longwords. */
4297 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4298 alloca (count * sizeof (PTRACE_XFER_TYPE));
4299
4300 int pid = lwpid_of (get_thread_lwp (current_inferior));
4301
4302 if (debug_threads)
4303 {
4304 /* Dump up to four bytes. */
4305 unsigned int val = * (unsigned int *) myaddr;
4306 if (len == 1)
4307 val = val & 0xff;
4308 else if (len == 2)
4309 val = val & 0xffff;
4310 else if (len == 3)
4311 val = val & 0xffffff;
4312 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4313 val, (long)memaddr);
4314 }
4315
4316 /* Fill start and end extra bytes of buffer with existing memory data. */
4317
4318 errno = 0;
4319 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4320 about coercing an 8 byte integer to a 4 byte pointer. */
4321 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4322 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4323 if (errno)
4324 return errno;
4325
4326 if (count > 1)
4327 {
4328 errno = 0;
4329 buffer[count - 1]
4330 = ptrace (PTRACE_PEEKTEXT, pid,
4331 /* Coerce to a uintptr_t first to avoid potential gcc warning
4332 about coercing an 8 byte integer to a 4 byte pointer. */
4333 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4334 * sizeof (PTRACE_XFER_TYPE)),
4335 0);
4336 if (errno)
4337 return errno;
4338 }
4339
4340 /* Copy data to be written over corresponding part of buffer. */
4341
4342 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4343 myaddr, len);
4344
4345 /* Write the entire buffer. */
4346
4347 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4348 {
4349 errno = 0;
4350 ptrace (PTRACE_POKETEXT, pid,
4351 /* Coerce to a uintptr_t first to avoid potential gcc warning
4352 about coercing an 8 byte integer to a 4 byte pointer. */
4353 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4354 (PTRACE_ARG4_TYPE) buffer[i]);
4355 if (errno)
4356 return errno;
4357 }
4358
4359 return 0;
4360 }
4361
4362 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4363 static int linux_supports_tracefork_flag;
4364
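/* If the kernel supports it, ask to be notified when PID's LWPs
   clone themselves, so that new threads are reported as
   PTRACE_EVENT_CLONE stops instead of having to be discovered by
   other means.  */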
4365 static void
4366 linux_enable_event_reporting (int pid)
4367 {
4368 if (!linux_supports_tracefork_flag)
4369 return;
4370
4371 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4372 }
4373
4374 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4375
4376 static int
4377 linux_tracefork_grandchild (void *arg)
4378 {
4379 _exit (0);
4380 }
4381
4382 #define STACK_SIZE 4096
4383
4384 static int
4385 linux_tracefork_child (void *arg)
4386 {
4387 ptrace (PTRACE_TRACEME, 0, 0, 0);
4388 kill (getpid (), SIGSTOP);
4389
4390 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4391
4392 if (fork () == 0)
4393 linux_tracefork_grandchild (NULL);
4394
4395 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4396
4397 #ifdef __ia64__
4398 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4399 CLONE_VM | SIGCHLD, NULL);
4400 #else
4401 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4402 CLONE_VM | SIGCHLD, NULL);
4403 #endif
4404
4405 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4406
4407 _exit (0);
4408 }
4409
4410 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4411 sure that we can enable the option, and that it had the desired
4412 effect. */
4413
4414 static void
4415 linux_test_for_tracefork (void)
4416 {
4417 int child_pid, ret, status;
4418 long second_pid;
4419 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4420 char *stack = xmalloc (STACK_SIZE * 4);
4421 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4422
4423 linux_supports_tracefork_flag = 0;
4424
4425 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4426
4427 child_pid = fork ();
4428 if (child_pid == 0)
4429 linux_tracefork_child (NULL);
4430
4431 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4432
4433 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4434 #ifdef __ia64__
4435 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4436 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4437 #else /* !__ia64__ */
4438 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4439 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4440 #endif /* !__ia64__ */
4441
4442 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4443
4444 if (child_pid == -1)
4445 perror_with_name ("clone");
4446
4447 ret = my_waitpid (child_pid, &status, 0);
4448 if (ret == -1)
4449 perror_with_name ("waitpid");
4450 else if (ret != child_pid)
4451 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4452 if (! WIFSTOPPED (status))
4453 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4454
4455 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4456 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4457 if (ret != 0)
4458 {
4459 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4460 if (ret != 0)
4461 {
4462 warning ("linux_test_for_tracefork: failed to kill child");
4463 return;
4464 }
4465
4466 ret = my_waitpid (child_pid, &status, 0);
4467 if (ret != child_pid)
4468 warning ("linux_test_for_tracefork: failed to wait for killed child");
4469 else if (!WIFSIGNALED (status))
4470 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4471 "killed child", status);
4472
4473 return;
4474 }
4475
4476 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4477 if (ret != 0)
4478 warning ("linux_test_for_tracefork: failed to resume child");
4479
4480 ret = my_waitpid (child_pid, &status, 0);
4481
4482 if (ret == child_pid && WIFSTOPPED (status)
4483 && status >> 16 == PTRACE_EVENT_FORK)
4484 {
4485 second_pid = 0;
4486 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4487 if (ret == 0 && second_pid != 0)
4488 {
4489 int second_status;
4490
4491 linux_supports_tracefork_flag = 1;
4492 my_waitpid (second_pid, &second_status, 0);
4493 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4494 if (ret != 0)
4495 warning ("linux_test_for_tracefork: failed to kill second child");
4496 my_waitpid (second_pid, &status, 0);
4497 }
4498 }
4499 else
4500 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4501 "(%d, status 0x%x)", ret, status);
4502
4503 do
4504 {
4505 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4506 if (ret != 0)
4507 warning ("linux_test_for_tracefork: failed to kill child");
4508 my_waitpid (child_pid, &status, 0);
4509 }
4510 while (WIFSTOPPED (status));
4511
4512 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4513 free (stack);
4514 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4515 }
4516
4517
4518 static void
4519 linux_look_up_symbols (void)
4520 {
4521 #ifdef USE_THREAD_DB
4522 struct process_info *proc = current_process ();
4523
4524 if (proc->private->thread_db != NULL)
4525 return;
4526
4527 /* If the kernel supports tracing forks then it also supports tracing
4528 clones, and then we don't need to use the magic thread event breakpoint
4529 to learn about threads. */
4530 thread_db_init (!linux_supports_tracefork_flag);
4531 #endif
4532 }
4533
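/* Send a SIGINT to interrupt the inferior: to the current LWP if a
   specific thread is being continued, otherwise to the main
   process.  */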
4534 static void
4535 linux_request_interrupt (void)
4536 {
4537 extern unsigned long signal_pid;
4538
4539 if (!ptid_equal (cont_thread, null_ptid)
4540 && !ptid_equal (cont_thread, minus_one_ptid))
4541 {
4542 struct lwp_info *lwp;
4543 int lwpid;
4544
4545 lwp = get_thread_lwp (current_inferior);
4546 lwpid = lwpid_of (lwp);
4547 kill_lwp (lwpid, SIGINT);
4548 }
4549 else
4550 kill_lwp (signal_pid, SIGINT);
4551 }
4552
4553 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4554 to debugger memory starting at MYADDR. */
4555
4556 static int
4557 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4558 {
4559 char filename[PATH_MAX];
4560 int fd, n;
4561 int pid = lwpid_of (get_thread_lwp (current_inferior));
4562
4563 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4564
4565 fd = open (filename, O_RDONLY);
4566 if (fd < 0)
4567 return -1;
4568
4569 if (offset != (CORE_ADDR) 0
4570 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4571 n = -1;
4572 else
4573 n = read (fd, myaddr, len);
4574
4575 close (fd);
4576
4577 return n;
4578 }
4579
4580 /* These breakpoint- and watchpoint-related wrapper functions simply
4581 pass on the function call if the target has registered a
4582 corresponding function. */
4583
4584 static int
4585 linux_insert_point (char type, CORE_ADDR addr, int len)
4586 {
4587 if (the_low_target.insert_point != NULL)
4588 return the_low_target.insert_point (type, addr, len);
4589 else
4590 /* Unsupported (see target.h). */
4591 return 1;
4592 }
4593
4594 static int
4595 linux_remove_point (char type, CORE_ADDR addr, int len)
4596 {
4597 if (the_low_target.remove_point != NULL)
4598 return the_low_target.remove_point (type, addr, len);
4599 else
4600 /* Unsupported (see target.h). */
4601 return 1;
4602 }
4603
4604 static int
4605 linux_stopped_by_watchpoint (void)
4606 {
4607 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4608
4609 return lwp->stopped_by_watchpoint;
4610 }
4611
4612 static CORE_ADDR
4613 linux_stopped_data_address (void)
4614 {
4615 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4616
4617 return lwp->stopped_data_address;
4618 }
4619
4620 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4621 #if defined(__mcoldfire__)
4622 /* These should really be defined in the kernel's ptrace.h header. */
4623 #define PT_TEXT_ADDR 49*4
4624 #define PT_DATA_ADDR 50*4
4625 #define PT_TEXT_END_ADDR 51*4
4626 #elif defined(BFIN)
4627 #define PT_TEXT_ADDR 220
4628 #define PT_TEXT_END_ADDR 224
4629 #define PT_DATA_ADDR 228
4630 #elif defined(__TMS320C6X__)
4631 #define PT_TEXT_ADDR (0x10000*4)
4632 #define PT_DATA_ADDR (0x10004*4)
4633 #define PT_TEXT_END_ADDR (0x10008*4)
4634 #endif
4635
4636 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4637 to tell gdb about. */
4638
4639 static int
4640 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4641 {
4642 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4643 unsigned long text, text_end, data;
4644 int pid = lwpid_of (get_thread_lwp (current_inferior));
4645
4646 errno = 0;
4647
4648 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4649 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4650 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4651
4652 if (errno == 0)
4653 {
4654 /* Both text and data offsets produced at compile-time (and so
4655 used by gdb) are relative to the beginning of the program,
4656 with the data segment immediately following the text segment.
4657 However, the actual runtime layout in memory may put the data
4658 somewhere else, so when we send gdb a data base-address, we
4659 use the real data base address and subtract the compile-time
4660 data base-address from it (which is just the length of the
4661 text segment). BSS immediately follows data in both
4662 cases. */
4663 *text_p = text;
4664 *data_p = data - (text_end - text);
4665
4666 return 1;
4667 }
4668 #endif
4669 return 0;
4670 }
4671 #endif
4672
4673 static int
4674 linux_qxfer_osdata (const char *annex,
4675 unsigned char *readbuf, unsigned const char *writebuf,
4676 CORE_ADDR offset, int len)
4677 {
4678 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4679 }
4680
4681 /* Convert a native/host siginfo object into/from the siginfo in the
4682 layout of the inferior's architecture. */
4683
4684 static void
4685 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4686 {
4687 int done = 0;
4688
4689 if (the_low_target.siginfo_fixup != NULL)
4690 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4691
4692 /* If there was no callback, or the callback didn't do anything,
4693 then just do a straight memcpy. */
4694 if (!done)
4695 {
4696 if (direction == 1)
4697 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4698 else
4699 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4700 }
4701 }
4702
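/* Read or write the siginfo of the current inferior (for the
   qXfer:siginfo packets), converting between the host and inferior
   layouts as needed.  Returns the number of bytes transferred, or
   -1 on error.  */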
4703 static int
4704 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4705 unsigned const char *writebuf, CORE_ADDR offset, int len)
4706 {
4707 int pid;
4708 struct siginfo siginfo;
4709 char inf_siginfo[sizeof (struct siginfo)];
4710
4711 if (current_inferior == NULL)
4712 return -1;
4713
4714 pid = lwpid_of (get_thread_lwp (current_inferior));
4715
4716 if (debug_threads)
4717 fprintf (stderr, "%s siginfo for lwp %d.\n",
4718 readbuf != NULL ? "Reading" : "Writing",
4719 pid);
4720
4721 if (offset >= sizeof (siginfo))
4722 return -1;
4723
4724 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4725 return -1;
4726
4727 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4728 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4729 inferior with a 64-bit GDBSERVER should look the same as debugging it
4730 with a 32-bit GDBSERVER, we need to convert it. */
4731 siginfo_fixup (&siginfo, inf_siginfo, 0);
4732
4733 if (offset + len > sizeof (siginfo))
4734 len = sizeof (siginfo) - offset;
4735
4736 if (readbuf != NULL)
4737 memcpy (readbuf, inf_siginfo + offset, len);
4738 else
4739 {
4740 memcpy (inf_siginfo + offset, writebuf, len);
4741
4742 /* Convert back to ptrace layout before flushing it out. */
4743 siginfo_fixup (&siginfo, inf_siginfo, 1);
4744
4745 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4746 return -1;
4747 }
4748
4749 return len;
4750 }
4751
4752 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4753 it notifies us when children change state; and it acts as the
4754 handler for the sigsuspend in my_waitpid. */
4755
4756 static void
4757 sigchld_handler (int signo)
4758 {
4759 int old_errno = errno;
4760
4761 if (debug_threads)
4762 {
4763 do
4764 {
4765 /* fprintf is not async-signal-safe, so call write
4766 directly. */
4767 if (write (2, "sigchld_handler\n",
4768 sizeof ("sigchld_handler\n") - 1) < 0)
4769 break; /* just ignore */
4770 } while (0);
4771 }
4772
4773 if (target_is_async_p ())
4774 async_file_mark (); /* trigger a linux_wait */
4775
4776 errno = old_errno;
4777 }
4778
4779 static int
4780 linux_supports_non_stop (void)
4781 {
4782 return 1;
4783 }
4784
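/* Switch the target into or out of async (non-blocking) mode: set
   up or tear down the event pipe that wakes the event loop on
   SIGCHLD.  Returns the previous setting.  */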
4785 static int
4786 linux_async (int enable)
4787 {
4788 int previous = (linux_event_pipe[0] != -1);
4789
4790 if (debug_threads)
4791 fprintf (stderr, "linux_async (%d), previous=%d\n",
4792 enable, previous);
4793
4794 if (previous != enable)
4795 {
4796 sigset_t mask;
4797 sigemptyset (&mask);
4798 sigaddset (&mask, SIGCHLD);
4799
4800 sigprocmask (SIG_BLOCK, &mask, NULL);
4801
4802 if (enable)
4803 {
4804 if (pipe (linux_event_pipe) == -1)
4805 fatal ("creating event pipe failed.");
4806
4807 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4808 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4809
4810 /* Register the event loop handler. */
4811 add_file_handler (linux_event_pipe[0],
4812 handle_target_event, NULL);
4813
4814 /* Always trigger a linux_wait. */
4815 async_file_mark ();
4816 }
4817 else
4818 {
4819 delete_file_handler (linux_event_pipe[0]);
4820
4821 close (linux_event_pipe[0]);
4822 close (linux_event_pipe[1]);
4823 linux_event_pipe[0] = -1;
4824 linux_event_pipe[1] = -1;
4825 }
4826
4827 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4828 }
4829
4830 return previous;
4831 }
4832
4833 static int
4834 linux_start_non_stop (int nonstop)
4835 {
4836 /* Register or unregister from event-loop accordingly. */
4837 linux_async (nonstop);
4838 return 0;
4839 }
4840
4841 static int
4842 linux_supports_multi_process (void)
4843 {
4844 return 1;
4845 }
4846
4847 static int
4848 linux_supports_disable_randomization (void)
4849 {
4850 #ifdef HAVE_PERSONALITY
4851 return 1;
4852 #else
4853 return 0;
4854 #endif
4855 }
4856
4857 /* Enumerate spufs IDs for process PID. */
4858 static int
4859 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4860 {
4861 int pos = 0;
4862 int written = 0;
4863 char path[128];
4864 DIR *dir;
4865 struct dirent *entry;
4866
4867 sprintf (path, "/proc/%ld/fd", pid);
4868 dir = opendir (path);
4869 if (!dir)
4870 return -1;
4871
4872 rewinddir (dir);
4873 while ((entry = readdir (dir)) != NULL)
4874 {
4875 struct stat st;
4876 struct statfs stfs;
4877 int fd;
4878
4879 fd = atoi (entry->d_name);
4880 if (!fd)
4881 continue;
4882
4883 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4884 if (stat (path, &st) != 0)
4885 continue;
4886 if (!S_ISDIR (st.st_mode))
4887 continue;
4888
4889 if (statfs (path, &stfs) != 0)
4890 continue;
4891 if (stfs.f_type != SPUFS_MAGIC)
4892 continue;
4893
4894 if (pos >= offset && pos + 4 <= offset + len)
4895 {
4896 *(unsigned int *)(buf + pos - offset) = fd;
4897 written += 4;
4898 }
4899 pos += 4;
4900 }
4901
4902 closedir (dir);
4903 return written;
4904 }
4905
4906 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4907 object type, using the /proc file system. */
4908 static int
4909 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4910 unsigned const char *writebuf,
4911 CORE_ADDR offset, int len)
4912 {
4913 long pid = lwpid_of (get_thread_lwp (current_inferior));
4914 char buf[128];
4915 int fd = 0;
4916 int ret = 0;
4917
4918 if (!writebuf && !readbuf)
4919 return -1;
4920
4921 if (!*annex)
4922 {
4923 if (!readbuf)
4924 return -1;
4925 else
4926 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4927 }
4928
4929 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4930 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4931 if (fd <= 0)
4932 return -1;
4933
4934 if (offset != 0
4935 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4936 {
4937 close (fd);
4938 return 0;
4939 }
4940
4941 if (writebuf)
4942 ret = write (fd, writebuf, (size_t) len);
4943 else
4944 ret = read (fd, readbuf, (size_t) len);
4945
4946 close (fd);
4947 return ret;
4948 }
4949
4950 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4951 struct target_loadseg
4952 {
4953 /* Core address to which the segment is mapped. */
4954 Elf32_Addr addr;
4955 /* VMA recorded in the program header. */
4956 Elf32_Addr p_vaddr;
4957 /* Size of this segment in memory. */
4958 Elf32_Word p_memsz;
4959 };
4960
4961 # if defined PT_GETDSBT
4962 struct target_loadmap
4963 {
4964 /* Protocol version number, must be zero. */
4965 Elf32_Word version;
4966 /* Pointer to the DSBT table, its size, and the DSBT index. */
4967 unsigned *dsbt_table;
4968 unsigned dsbt_size, dsbt_index;
4969 /* Number of segments in this map. */
4970 Elf32_Word nsegs;
4971 /* The actual memory map. */
4972 struct target_loadseg segs[/*nsegs*/];
4973 };
4974 # define LINUX_LOADMAP PT_GETDSBT
4975 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4976 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4977 # else
4978 struct target_loadmap
4979 {
4980 /* Protocol version number, must be zero. */
4981 Elf32_Half version;
4982 /* Number of segments in this map. */
4983 Elf32_Half nsegs;
4984 /* The actual memory map. */
4985 struct target_loadseg segs[/*nsegs*/];
4986 };
4987 # define LINUX_LOADMAP PTRACE_GETFDPIC
4988 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4989 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4990 # endif
4991
4992 static int
4993 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4994 unsigned char *myaddr, unsigned int len)
4995 {
4996 int pid = lwpid_of (get_thread_lwp (current_inferior));
4997 int addr = -1;
4998 struct target_loadmap *data = NULL;
4999 unsigned int actual_length, copy_length;
5000
5001 if (strcmp (annex, "exec") == 0)
5002 addr = (int) LINUX_LOADMAP_EXEC;
5003 else if (strcmp (annex, "interp") == 0)
5004 addr = (int) LINUX_LOADMAP_INTERP;
5005 else
5006 return -1;
5007
5008 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5009 return -1;
5010
5011 if (data == NULL)
5012 return -1;
5013
5014 actual_length = sizeof (struct target_loadmap)
5015 + sizeof (struct target_loadseg) * data->nsegs;
5016
5017 if (offset < 0 || offset > actual_length)
5018 return -1;
5019
5020 copy_length = actual_length - offset < len ? actual_length - offset : len;
5021 memcpy (myaddr, (char *) data + offset, copy_length);
5022 return copy_length;
5023 }
5024 #else
5025 # define linux_read_loadmap NULL
5026 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5027
5028 static void
5029 linux_process_qsupported (const char *query)
5030 {
5031 if (the_low_target.process_qsupported != NULL)
5032 the_low_target.process_qsupported (query);
5033 }
5034
5035 static int
5036 linux_supports_tracepoints (void)
5037 {
5038 if (the_low_target.supports_tracepoints == NULL)
5039 return 0;
5040
5041 return (*the_low_target.supports_tracepoints) ();
5042 }
5043
5044 static CORE_ADDR
5045 linux_read_pc (struct regcache *regcache)
5046 {
5047 if (the_low_target.get_pc == NULL)
5048 return 0;
5049
5050 return (*the_low_target.get_pc) (regcache);
5051 }
5052
5053 static void
5054 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5055 {
5056 gdb_assert (the_low_target.set_pc != NULL);
5057
5058 (*the_low_target.set_pc) (regcache, pc);
5059 }
5060
5061 static int
5062 linux_thread_stopped (struct thread_info *thread)
5063 {
5064 return get_thread_lwp (thread)->stopped;
5065 }
5066
5067 /* This exposes stop-all-threads functionality to other modules. */
5068
5069 static void
5070 linux_pause_all (int freeze)
5071 {
5072 stop_all_lwps (freeze, NULL);
5073 }
5074
5075 /* This exposes unstop-all-threads functionality to other gdbserver
5076 modules. */
5077
5078 static void
5079 linux_unpause_all (int unfreeze)
5080 {
5081 unstop_all_lwps (unfreeze, NULL);
5082 }
5083
5084 static int
5085 linux_prepare_to_access_memory (void)
5086 {
5087 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5088 running LWP. */
5089 if (non_stop)
5090 linux_pause_all (1);
5091 return 0;
5092 }
5093
5094 static void
5095 linux_done_accessing_memory (void)
5096 {
5097 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5098 running LWP. */
5099 if (non_stop)
5100 linux_unpause_all (1);
5101 }
5102
5103 static int
5104 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5105 CORE_ADDR collector,
5106 CORE_ADDR lockaddr,
5107 ULONGEST orig_size,
5108 CORE_ADDR *jump_entry,
5109 CORE_ADDR *trampoline,
5110 ULONGEST *trampoline_size,
5111 unsigned char *jjump_pad_insn,
5112 ULONGEST *jjump_pad_insn_size,
5113 CORE_ADDR *adjusted_insn_addr,
5114 CORE_ADDR *adjusted_insn_addr_end,
5115 char *err)
5116 {
5117 return (*the_low_target.install_fast_tracepoint_jump_pad)
5118 (tpoint, tpaddr, collector, lockaddr, orig_size,
5119 jump_entry, trampoline, trampoline_size,
5120 jjump_pad_insn, jjump_pad_insn_size,
5121 adjusted_insn_addr, adjusted_insn_addr_end,
5122 err);
5123 }
5124
5125 static struct emit_ops *
5126 linux_emit_ops (void)
5127 {
5128 if (the_low_target.emit_ops != NULL)
5129 return (*the_low_target.emit_ops) ();
5130 else
5131 return NULL;
5132 }
5133
5134 static int
5135 linux_get_min_fast_tracepoint_insn_len (void)
5136 {
5137 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5138 }
5139
5140 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5141
5142 static int
5143 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5144 CORE_ADDR *phdr_memaddr, int *num_phdr)
5145 {
5146 char filename[PATH_MAX];
5147 int fd;
5148 const int auxv_size = is_elf64
5149 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5150 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5151
5152 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5153
5154 fd = open (filename, O_RDONLY);
5155 if (fd < 0)
5156 return 1;
5157
5158 *phdr_memaddr = 0;
5159 *num_phdr = 0;
5160 while (read (fd, buf, auxv_size) == auxv_size
5161 && (*phdr_memaddr == 0 || *num_phdr == 0))
5162 {
5163 if (is_elf64)
5164 {
5165 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5166
5167 switch (aux->a_type)
5168 {
5169 case AT_PHDR:
5170 *phdr_memaddr = aux->a_un.a_val;
5171 break;
5172 case AT_PHNUM:
5173 *num_phdr = aux->a_un.a_val;
5174 break;
5175 }
5176 }
5177 else
5178 {
5179 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5180
5181 switch (aux->a_type)
5182 {
5183 case AT_PHDR:
5184 *phdr_memaddr = aux->a_un.a_val;
5185 break;
5186 case AT_PHNUM:
5187 *num_phdr = aux->a_un.a_val;
5188 break;
5189 }
5190 }
5191 }
5192
5193 close (fd);
5194
5195 if (*phdr_memaddr == 0 || *num_phdr == 0)
5196 {
5197 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5198 "phdr_memaddr = %ld, phdr_num = %d",
5199 (long) *phdr_memaddr, *num_phdr);
5200 return 2;
5201 }
5202
5203 return 0;
5204 }
5205
5206 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5207
5208 static CORE_ADDR
5209 get_dynamic (const int pid, const int is_elf64)
5210 {
5211 CORE_ADDR phdr_memaddr, relocation;
5212 int num_phdr, i;
5213 unsigned char *phdr_buf;
5214 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5215
5216 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5217 return 0;
5218
5219 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5220 phdr_buf = alloca (num_phdr * phdr_size);
5221
5222 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5223 return 0;
5224
5225 /* Compute relocation: it is expected to be 0 for "regular" executables,
5226 non-zero for PIE ones. */
5227 relocation = -1;
5228 for (i = 0; relocation == -1 && i < num_phdr; i++)
5229 if (is_elf64)
5230 {
5231 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5232
5233 if (p->p_type == PT_PHDR)
5234 relocation = phdr_memaddr - p->p_vaddr;
5235 }
5236 else
5237 {
5238 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5239
5240 if (p->p_type == PT_PHDR)
5241 relocation = phdr_memaddr - p->p_vaddr;
5242 }
5243
5244 if (relocation == -1)
5245 {
5246 warning ("Unexpected missing PT_PHDR");
5247 return 0;
5248 }
5249
5250 for (i = 0; i < num_phdr; i++)
5251 {
5252 if (is_elf64)
5253 {
5254 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5255
5256 if (p->p_type == PT_DYNAMIC)
5257 return p->p_vaddr + relocation;
5258 }
5259 else
5260 {
5261 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5262
5263 if (p->p_type == PT_DYNAMIC)
5264 return p->p_vaddr + relocation;
5265 }
5266 }
5267
5268 return 0;
5269 }
5270
5271 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5272 can be 0 if the inferior does not yet have the library list initialized. */
5273
5274 static CORE_ADDR
5275 get_r_debug (const int pid, const int is_elf64)
5276 {
5277 CORE_ADDR dynamic_memaddr;
5278 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5279 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5280
5281 dynamic_memaddr = get_dynamic (pid, is_elf64);
5282 if (dynamic_memaddr == 0)
5283 return (CORE_ADDR) -1;
5284
5285 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5286 {
5287 if (is_elf64)
5288 {
5289 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5290
5291 if (dyn->d_tag == DT_DEBUG)
5292 return dyn->d_un.d_val;
5293
5294 if (dyn->d_tag == DT_NULL)
5295 break;
5296 }
5297 else
5298 {
5299 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5300
5301 if (dyn->d_tag == DT_DEBUG)
5302 return dyn->d_un.d_val;
5303
5304 if (dyn->d_tag == DT_NULL)
5305 break;
5306 }
5307
5308 dynamic_memaddr += dyn_size;
5309 }
5310
5311 return (CORE_ADDR) -1;
5312 }
5313
5314 /* Read one pointer from MEMADDR in the inferior. */
5315
5316 static int
5317 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5318 {
5319 *ptr = 0;
5320 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5321 }
5322
5323 struct link_map_offsets
5324 {
5325 /* Offset and size of r_debug.r_version. */
5326 int r_version_offset;
5327
5328 /* Offset and size of r_debug.r_map. */
5329 int r_map_offset;
5330
5331 /* Offset to l_addr field in struct link_map. */
5332 int l_addr_offset;
5333
5334 /* Offset to l_name field in struct link_map. */
5335 int l_name_offset;
5336
5337 /* Offset to l_ld field in struct link_map. */
5338 int l_ld_offset;
5339
5340 /* Offset to l_next field in struct link_map. */
5341 int l_next_offset;
5342
5343 /* Offset to l_prev field in struct link_map. */
5344 int l_prev_offset;
5345 };
5346
5347 /* Construct qXfer:libraries:read reply. */
5348
5349 static int
5350 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5351 unsigned const char *writebuf,
5352 CORE_ADDR offset, int len)
5353 {
5354 char *document;
5355 unsigned document_len;
5356 struct process_info_private *const priv = current_process ()->private;
5357 char filename[PATH_MAX];
5358 int pid, is_elf64;
5359
5360 static const struct link_map_offsets lmo_32bit_offsets =
5361 {
5362 0, /* r_version offset. */
5363 4, /* r_debug.r_map offset. */
5364 0, /* l_addr offset in link_map. */
5365 4, /* l_name offset in link_map. */
5366 8, /* l_ld offset in link_map. */
5367 12, /* l_next offset in link_map. */
5368 16 /* l_prev offset in link_map. */
5369 };
5370
5371 static const struct link_map_offsets lmo_64bit_offsets =
5372 {
5373 0, /* r_version offset. */
5374 8, /* r_debug.r_map offset. */
5375 0, /* l_addr offset in link_map. */
5376 8, /* l_name offset in link_map. */
5377 16, /* l_ld offset in link_map. */
5378 24, /* l_next offset in link_map. */
5379 32 /* l_prev offset in link_map. */
5380 };
5381 const struct link_map_offsets *lmo;
5382
5383 if (writebuf != NULL)
5384 return -2;
5385 if (readbuf == NULL)
5386 return -1;
5387
5388 pid = lwpid_of (get_thread_lwp (current_inferior));
5389 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5390 is_elf64 = elf_64_file_p (filename);
5391 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5392
5393 if (priv->r_debug == 0)
5394 priv->r_debug = get_r_debug (pid, is_elf64);
5395
5396 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5397 {
5398 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5399 }
5400 else
5401 {
5402 int allocated = 1024;
5403 char *p;
5404 const int ptr_size = is_elf64 ? 8 : 4;
5405 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5406 int r_version, header_done = 0;
5407
5408 document = xmalloc (allocated);
5409 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5410 p = document + strlen (document);
5411
5412 r_version = 0;
5413 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5414 (unsigned char *) &r_version,
5415 sizeof (r_version)) != 0
5416 || r_version != 1)
5417 {
5418 warning ("unexpected r_debug version %d", r_version);
5419 goto done;
5420 }
5421
5422 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5423 &lm_addr, ptr_size) != 0)
5424 {
5425 warning ("unable to read r_map from 0x%lx",
5426 (long) priv->r_debug + lmo->r_map_offset);
5427 goto done;
5428 }
5429
5430 lm_prev = 0;
5431 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5432 &l_name, ptr_size) == 0
5433 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5434 &l_addr, ptr_size) == 0
5435 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5436 &l_ld, ptr_size) == 0
5437 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5438 &l_prev, ptr_size) == 0
5439 && read_one_ptr (lm_addr + lmo->l_next_offset,
5440 &l_next, ptr_size) == 0)
5441 {
5442 unsigned char libname[PATH_MAX];
5443
5444 if (lm_prev != l_prev)
5445 {
5446 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5447 (long) lm_prev, (long) l_prev);
5448 break;
5449 }
5450
5451 /* Not checking for error because reading may stop before
5452 we've got PATH_MAX worth of characters. */
5453 libname[0] = '\0';
5454 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5455 libname[sizeof (libname) - 1] = '\0';
5456 if (libname[0] != '\0')
5457 {
5458 /* 6x the size for xml_escape_text below. */
5459 size_t len = 6 * strlen ((char *) libname);
5460 char *name;
5461
5462 if (!header_done)
5463 {
5464 /* Terminate `<library-list-svr4'. */
5465 *p++ = '>';
5466 header_done = 1;
5467 }
5468
5469 while (allocated < p - document + len + 200)
5470 {
5471 /* Expand to guarantee sufficient storage. */
5472 uintptr_t document_len = p - document;
5473
5474 document = xrealloc (document, 2 * allocated);
5475 allocated *= 2;
5476 p = document + document_len;
5477 }
5478
5479 name = xml_escape_text ((char *) libname);
5480 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5481 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5482 name, (unsigned long) lm_addr,
5483 (unsigned long) l_addr, (unsigned long) l_ld);
5484 free (name);
5485 }
5486 else if (lm_prev == 0)
5487 {
5488 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5489 p = p + strlen (p);
5490 }
5491
5492 if (l_next == 0)
5493 break;
5494
5495 lm_prev = lm_addr;
5496 lm_addr = l_next;
5497 }
5498 done:
5499 strcpy (p, "</library-list-svr4>");
5500 }
5501
5502 document_len = strlen (document);
5503 if (offset < document_len)
5504 document_len -= offset;
5505 else
5506 document_len = 0;
5507 if (len > document_len)
5508 len = document_len;
5509
5510 memcpy (readbuf, document + offset, len);
5511 xfree (document);
5512
5513 return len;
5514 }
5515
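/* The Linux target_ops vector.  Entries must appear in the same
   order as the callback slots declared in struct target_ops.  */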
5516 static struct target_ops linux_target_ops = {
5517 linux_create_inferior,
5518 linux_attach,
5519 linux_kill,
5520 linux_detach,
5521 linux_mourn,
5522 linux_join,
5523 linux_thread_alive,
5524 linux_resume,
5525 linux_wait,
5526 linux_fetch_registers,
5527 linux_store_registers,
5528 linux_prepare_to_access_memory,
5529 linux_done_accessing_memory,
5530 linux_read_memory,
5531 linux_write_memory,
5532 linux_look_up_symbols,
5533 linux_request_interrupt,
5534 linux_read_auxv,
5535 linux_insert_point,
5536 linux_remove_point,
5537 linux_stopped_by_watchpoint,
5538 linux_stopped_data_address,
5539 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5540 linux_read_offsets,
5541 #else
5542 NULL,
5543 #endif
5544 #ifdef USE_THREAD_DB
5545 thread_db_get_tls_address,
5546 #else
5547 NULL,
5548 #endif
5549 linux_qxfer_spu,
5550 hostio_last_error_from_errno,
5551 linux_qxfer_osdata,
5552 linux_xfer_siginfo,
5553 linux_supports_non_stop,
5554 linux_async,
5555 linux_start_non_stop,
5556 linux_supports_multi_process,
5557 #ifdef USE_THREAD_DB
5558 thread_db_handle_monitor_command,
5559 #else
5560 NULL,
5561 #endif
5562 linux_common_core_of_thread,
5563 linux_read_loadmap,
5564 linux_process_qsupported,
5565 linux_supports_tracepoints,
5566 linux_read_pc,
5567 linux_write_pc,
5568 linux_thread_stopped,
5569 NULL,
5570 linux_pause_all,
5571 linux_unpause_all,
5572 linux_cancel_breakpoints,
5573 linux_stabilize_threads,
5574 linux_install_fast_tracepoint_jump_pad,
5575 linux_emit_ops,
5576 linux_supports_disable_randomization,
5577 linux_get_min_fast_tracepoint_insn_len,
5578 linux_qxfer_libraries_svr4,
5579 };
5580
5581 static void
5582 linux_init_signals (void)
5583 {
5584 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5585 to find what the cancel signal actually is. */
5586 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5587 signal (__SIGRTMIN+1, SIG_IGN);
5588 #endif
5589 }
5590
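/* One-time initialization of the Linux low-level target: install the
   target vector and breakpoint data, probe the kernel's ptrace fork
   event support, and install the SIGCHLD handler.  */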
5591 void
5592 initialize_low (void)
5593 {
5594 struct sigaction sigchld_action;
5595 memset (&sigchld_action, 0, sizeof (sigchld_action));
5596 set_target_ops (&linux_target_ops);
5597 set_breakpoint_data (the_low_target.breakpoint,
5598 the_low_target.breakpoint_len);
5599 linux_init_signals ();
5600 linux_test_for_tracefork ();
5601 #ifdef HAVE_LINUX_REGSETS
5602 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5603 ;
5604 disabled_regsets = xmalloc (num_regsets);
5605 #endif
5606
5607 sigchld_action.sa_handler = sigchld_handler;
5608 sigemptyset (&sigchld_action.sa_mask);
5609 sigchld_action.sa_flags = SA_RESTART;
5610 sigaction (SIGCHLD, &sigchld_action, NULL);
5611 }