gdb/gdbserver/linux-low.c (thirdparty/binutils-gdb.git)
PR gdb/15236: gdbserver write to linux memory with zero length corrupts stack
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "gdb_wait.h"
25 #include <stdio.h>
26 #include <sys/param.h>
27 #include <sys/ptrace.h>
28 #include "linux-ptrace.h"
29 #include "linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include "gdb_stat.h"
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
54 #ifndef SPUFS_MAGIC
55 #define SPUFS_MAGIC 0x23c9b64e
56 #endif
57
58 #ifdef HAVE_PERSONALITY
59 # include <sys/personality.h>
60 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
61 # define ADDR_NO_RANDOMIZE 0x0040000
62 # endif
63 #endif
64
65 #ifndef O_LARGEFILE
66 #define O_LARGEFILE 0
67 #endif
68
69 #ifndef W_STOPCODE
70 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71 #endif
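/* Note: W_STOPCODE builds a waitpid-style status word from a signal
   number. For example, W_STOPCODE (SIGUSR1) yields (SIGUSR1 << 8) | 0x7f,
   a value for which WIFSTOPPED is true and WSTOPSIG returns SIGUSR1.
   It is used below by dequeue_one_deferred_signal to synthesize a stop
   status for a signal that was deferred earlier. */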
72
73 /* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75 #ifndef __SIGRTMIN
76 #define __SIGRTMIN 32
77 #endif
78
79 #ifdef __UCLIBC__
80 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81 /* PTRACE_TEXT_ADDR and friends. */
82 #include <asm/ptrace.h>
83 #define HAS_NOMMU
84 #endif
85 #endif
86
87 #ifndef HAVE_ELF32_AUXV_T
88 /* Copied from glibc's elf.h. */
89 typedef struct
90 {
91 uint32_t a_type; /* Entry type */
92 union
93 {
94 uint32_t a_val; /* Integer value */
95 /* We used to have pointer elements added here. We cannot do that,
96 though, since it does not work when using 32-bit definitions
97 on 64-bit platforms and vice versa. */
98 } a_un;
99 } Elf32_auxv_t;
100 #endif
101
102 #ifndef HAVE_ELF64_AUXV_T
103 /* Copied from glibc's elf.h. */
104 typedef struct
105 {
106 uint64_t a_type; /* Entry type */
107 union
108 {
109 uint64_t a_val; /* Integer value */
110 /* We used to have pointer elements added here. We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114 } Elf64_auxv_t;
115 #endif
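/* Either way, each auxv entry is a fixed-size (a_type, a_un.a_val)
   pair; the vector read from /proc/PID/auxv is a flat array of such
   pairs terminated by an entry whose a_type is AT_NULL (0). */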
116
117 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
118 representation of the thread ID.
119
120 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
121 the same as the LWP ID.
122
123 ``all_processes'' is keyed by the "overall process ID", which
124 GNU/Linux calls tgid, "thread group ID". */
125
126 struct inferior_list all_lwps;
127
128 /* A list of all unknown processes which receive stop signals. Some
129 other process will presumably claim each of these as forked
130 children momentarily. */
131
132 struct simple_pid_list
133 {
134 /* The process ID. */
135 int pid;
136
137 /* The status as reported by waitpid. */
138 int status;
139
140 /* Next in chain. */
141 struct simple_pid_list *next;
142 };
143 struct simple_pid_list *stopped_pids;
144
145 /* Trivial list manipulation functions to keep track of a list of new
146 stopped processes. */
147
148 static void
149 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
150 {
151 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
152
153 new_pid->pid = pid;
154 new_pid->status = status;
155 new_pid->next = *listp;
156 *listp = new_pid;
157 }
158
159 static int
160 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
161 {
162 struct simple_pid_list **p;
163
164 for (p = listp; *p != NULL; p = &(*p)->next)
165 if ((*p)->pid == pid)
166 {
167 struct simple_pid_list *next = (*p)->next;
168
169 *statusp = (*p)->status;
170 xfree (*p);
171 *p = next;
172 return 1;
173 }
174 return 0;
175 }
176
177 enum stopping_threads_kind
178 {
179 /* Not stopping threads presently. */
180 NOT_STOPPING_THREADS,
181
182 /* Stopping threads. */
183 STOPPING_THREADS,
184
185 /* Stopping and suspending threads. */
186 STOPPING_AND_SUSPENDING_THREADS
187 };
188
189 /* This is set while stop_all_lwps is in effect. */
190 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
191
192 /* FIXME make into a target method? */
193 int using_threads = 1;
194
195 /* True if we're presently stabilizing threads (moving them out of
196 jump pads). */
197 static int stabilizing_threads;
198
199 /* This flag is true iff we've just created or attached to our first
200 inferior but it has not stopped yet. As soon as it does, we need
201 to call the low target's arch_setup callback. Doing this only on
202 the first inferior avoids reinitializing the architecture on every
203 inferior, and avoids messing with the register caches of the
204 already running inferiors. NOTE: this assumes all inferiors under
205 control of gdbserver have the same architecture. */
206 static int new_inferior;
207
208 static void linux_resume_one_lwp (struct lwp_info *lwp,
209 int step, int signal, siginfo_t *info);
210 static void linux_resume (struct thread_resume *resume_info, size_t n);
211 static void stop_all_lwps (int suspend, struct lwp_info *except);
212 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
213 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
214 static void *add_lwp (ptid_t ptid);
215 static int linux_stopped_by_watchpoint (void);
216 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
217 static void proceed_all_lwps (void);
218 static int finish_step_over (struct lwp_info *lwp);
219 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
220 static int kill_lwp (unsigned long lwpid, int signo);
221 static void linux_enable_event_reporting (int pid);
222
223 /* True if the low target can hardware single-step. Such targets
224 don't need a BREAKPOINT_REINSERT_ADDR callback. */
225
226 static int
227 can_hardware_single_step (void)
228 {
229 return (the_low_target.breakpoint_reinsert_addr == NULL);
230 }
231
232 /* True if the low target supports memory breakpoints. If so, we'll
233 have a GET_PC implementation. */
234
235 static int
236 supports_breakpoints (void)
237 {
238 return (the_low_target.get_pc != NULL);
239 }
240
241 /* Returns true if this target can support fast tracepoints. This
242 does not mean that the in-process agent has been loaded in the
243 inferior. */
244
245 static int
246 supports_fast_tracepoints (void)
247 {
248 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
249 }
250
251 struct pending_signals
252 {
253 int signal;
254 siginfo_t info;
255 struct pending_signals *prev;
256 };
257
258 #ifdef HAVE_LINUX_REGSETS
259 static char *disabled_regsets;
260 static int num_regsets;
261 #endif
262
263 /* The read/write ends of the pipe registered as waitable file in the
264 event loop. */
265 static int linux_event_pipe[2] = { -1, -1 };
266
267 /* True if we're currently in async mode. */
268 #define target_is_async_p() (linux_event_pipe[0] != -1)
269
270 static void send_sigstop (struct lwp_info *lwp);
271 static void wait_for_sigstop (struct inferior_list_entry *entry);
272
273 /* Return non-zero if HEADER is a 64-bit ELF file. */
274
275 static int
276 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
277 {
278 if (header->e_ident[EI_MAG0] == ELFMAG0
279 && header->e_ident[EI_MAG1] == ELFMAG1
280 && header->e_ident[EI_MAG2] == ELFMAG2
281 && header->e_ident[EI_MAG3] == ELFMAG3)
282 {
283 *machine = header->e_machine;
284 return header->e_ident[EI_CLASS] == ELFCLASS64;
285
286 }
287 *machine = EM_NONE;
288 return -1;
289 }
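/* For reference, e_ident[EI_MAG0..EI_MAG3] of a valid ELF file holds
   the four magic bytes 0x7f, 'E', 'L', 'F', and EI_CLASS is ELFCLASS64
   for 64-bit objects and ELFCLASS32 for 32-bit ones. */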
290
291 /* Return non-zero if FILE is a 64-bit ELF file,
292 zero if the file is not a 64-bit ELF file,
293 and -1 if the file is not accessible or doesn't exist. */
294
295 static int
296 elf_64_file_p (const char *file, unsigned int *machine)
297 {
298 Elf64_Ehdr header;
299 int fd;
300
301 fd = open (file, O_RDONLY);
302 if (fd < 0)
303 return -1;
304
305 if (read (fd, &header, sizeof (header)) != sizeof (header))
306 {
307 close (fd);
308 return 0;
309 }
310 close (fd);
311
312 return elf_64_header_p (&header, machine);
313 }
314
315 /* Accepts an integer PID; returns true if the executable that PID
316 is running is a 64-bit ELF file. */
317
318 int
319 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
320 {
321 char file[MAXPATHLEN];
322
323 sprintf (file, "/proc/%d/exe", pid);
324 return elf_64_file_p (file, machine);
325 }
326
327 static void
328 delete_lwp (struct lwp_info *lwp)
329 {
330 remove_thread (get_lwp_thread (lwp));
331 remove_inferior (&all_lwps, &lwp->head);
332 free (lwp->arch_private);
333 free (lwp);
334 }
335
336 /* Add a process to the common process list, and set its private
337 data. */
338
339 static struct process_info *
340 linux_add_process (int pid, int attached)
341 {
342 struct process_info *proc;
343
344 /* Is this the first process? If so, then set the arch. */
345 if (all_processes.head == NULL)
346 new_inferior = 1;
347
348 proc = add_process (pid, attached);
349 proc->private = xcalloc (1, sizeof (*proc->private));
350
351 if (the_low_target.new_process != NULL)
352 proc->private->arch_private = the_low_target.new_process ();
353
354 return proc;
355 }
356
357 /* Wrapper function for waitpid which handles EINTR, and emulates
358 __WALL for systems where that is not available. */
359
360 static int
361 my_waitpid (int pid, int *status, int flags)
362 {
363 int ret, out_errno;
364
365 if (debug_threads)
366 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
367
368 if (flags & __WALL)
369 {
370 sigset_t block_mask, org_mask, wake_mask;
371 int wnohang;
372
373 wnohang = (flags & WNOHANG) != 0;
374 flags &= ~(__WALL | __WCLONE);
375 flags |= WNOHANG;
376
377 /* Block all signals while here. This avoids knowing about
378 LinuxThreads' signals. */
379 sigfillset (&block_mask);
380 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
381
382 /* ... except during the sigsuspend below. */
383 sigemptyset (&wake_mask);
384
385 while (1)
386 {
387 /* Since all signals are blocked, there's no need to check
388 for EINTR here. */
389 ret = waitpid (pid, status, flags);
390 out_errno = errno;
391
392 if (ret == -1 && out_errno != ECHILD)
393 break;
394 else if (ret > 0)
395 break;
396
397 if (flags & __WCLONE)
398 {
399 /* We've tried both flavors now. If WNOHANG is set,
400 there's nothing else to do, just bail out. */
401 if (wnohang)
402 break;
403
404 if (debug_threads)
405 fprintf (stderr, "blocking\n");
406
407 /* Block waiting for signals. */
408 sigsuspend (&wake_mask);
409 }
410
411 flags ^= __WCLONE;
412 }
413
414 sigprocmask (SIG_SETMASK, &org_mask, NULL);
415 }
416 else
417 {
418 do
419 ret = waitpid (pid, status, flags);
420 while (ret == -1 && errno == EINTR);
421 out_errno = errno;
422 }
423
424 if (debug_threads)
425 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
426 pid, flags, status ? *status : -1, ret);
427
428 errno = out_errno;
429 return ret;
430 }
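/* For example, my_waitpid (-1, &status, __WALL) waits for any child,
   cloned or not, even where waitpid itself rejects __WALL: the loop
   above polls with WNOHANG, toggling __WCLONE between rounds, and,
   when neither flavor has anything to report, blocks in sigsuspend
   (with an empty mask) until a signal, typically SIGCHLD, arrives. */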
431
432 /* Handle a GNU/Linux extended wait response. If we see a clone
433 event, we need to add the new LWP to our list (and not report the
434 trap to higher layers). */
435
436 static void
437 handle_extended_wait (struct lwp_info *event_child, int wstat)
438 {
439 int event = wstat >> 16;
440 struct lwp_info *new_lwp;
441
442 if (event == PTRACE_EVENT_CLONE)
443 {
444 ptid_t ptid;
445 unsigned long new_pid;
446 int ret, status;
447
448 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_ARG3_TYPE) 0,
449 &new_pid);
450
451 /* If we haven't already seen the new PID stop, wait for it now. */
452 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
453 {
454 /* The new child has a pending SIGSTOP. We can't affect it until it
455 hits the SIGSTOP, but we're already attached. */
456
457 ret = my_waitpid (new_pid, &status, __WALL);
458
459 if (ret == -1)
460 perror_with_name ("waiting for new child");
461 else if (ret != new_pid)
462 warning ("wait returned unexpected PID %d", ret);
463 else if (!WIFSTOPPED (status))
464 warning ("wait returned unexpected status 0x%x", status);
465 }
466
467 linux_enable_event_reporting (new_pid);
468
469 ptid = ptid_build (pid_of (event_child), new_pid, 0);
470 new_lwp = (struct lwp_info *) add_lwp (ptid);
471 add_thread (ptid, new_lwp);
472
473 /* Either we're going to immediately resume the new thread
474 or leave it stopped. linux_resume_one_lwp is a nop if it
475 thinks the thread is currently running, so set this first
476 before calling linux_resume_one_lwp. */
477 new_lwp->stopped = 1;
478
479 /* If we're suspending all threads, leave this one suspended
480 too. */
481 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
482 new_lwp->suspended = 1;
483
484 /* Normally we will get the pending SIGSTOP. But in some cases
485 we might get another signal delivered to the group first.
486 If we do get another signal, be sure not to lose it. */
487 if (WSTOPSIG (status) == SIGSTOP)
488 {
489 if (stopping_threads != NOT_STOPPING_THREADS)
490 new_lwp->stop_pc = get_stop_pc (new_lwp);
491 else
492 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
493 }
494 else
495 {
496 new_lwp->stop_expected = 1;
497
498 if (stopping_threads != NOT_STOPPING_THREADS)
499 {
500 new_lwp->stop_pc = get_stop_pc (new_lwp);
501 new_lwp->status_pending_p = 1;
502 new_lwp->status_pending = status;
503 }
504 else
505 /* Pass the signal on. This is what GDB does - except
506 shouldn't we really report it instead? */
507 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
508 }
509
510 /* Always resume the current thread. If we are stopping
511 threads, it will have a pending SIGSTOP; we may as well
512 collect it now. */
513 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
514 }
515 }
516
517 /* Return the PC as read from the regcache of LWP, without any
518 adjustment. */
519
520 static CORE_ADDR
521 get_pc (struct lwp_info *lwp)
522 {
523 struct thread_info *saved_inferior;
524 struct regcache *regcache;
525 CORE_ADDR pc;
526
527 if (the_low_target.get_pc == NULL)
528 return 0;
529
530 saved_inferior = current_inferior;
531 current_inferior = get_lwp_thread (lwp);
532
533 regcache = get_thread_regcache (current_inferior, 1);
534 pc = (*the_low_target.get_pc) (regcache);
535
536 if (debug_threads)
537 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
538
539 current_inferior = saved_inferior;
540 return pc;
541 }
542
543 /* This function should only be called if LWP got a SIGTRAP.
544 The SIGTRAP could mean several things.
545
546 On i386, where decr_pc_after_break is non-zero:
547 If we were single-stepping this process using PTRACE_SINGLESTEP,
548 we will get only the one SIGTRAP (even if the instruction we
549 stepped over was a breakpoint). The value of $eip will be the
550 next instruction.
551 If we continue the process using PTRACE_CONT, we will get a
552 SIGTRAP when we hit a breakpoint. The value of $eip will be
553 the instruction after the breakpoint (i.e. needs to be
554 decremented). If we report the SIGTRAP to GDB, we must also
555 report the undecremented PC. If we cancel the SIGTRAP, we
556 must resume at the decremented PC.
557
558 (Presumably, not yet tested) On a non-decr_pc_after_break machine
559 with hardware or kernel single-step:
560 If we single-step over a breakpoint instruction, our PC will
561 point at the following instruction. If we continue and hit a
562 breakpoint instruction, our PC will point at the breakpoint
563 instruction. */
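/* For example, on i386 the breakpoint instruction (int3) is one byte
   and decr_pc_after_break is 1: if a breakpoint planted at 0x0804850a
   is hit under PTRACE_CONT, $eip reads back as 0x0804850b, and
   get_stop_pc below subtracts 1 so that stop_pc is the breakpoint
   address itself. */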
564
565 static CORE_ADDR
566 get_stop_pc (struct lwp_info *lwp)
567 {
568 CORE_ADDR stop_pc;
569
570 if (the_low_target.get_pc == NULL)
571 return 0;
572
573 stop_pc = get_pc (lwp);
574
575 if (WSTOPSIG (lwp->last_status) == SIGTRAP
576 && !lwp->stepping
577 && !lwp->stopped_by_watchpoint
578 && lwp->last_status >> 16 == 0)
579 stop_pc -= the_low_target.decr_pc_after_break;
580
581 if (debug_threads)
582 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
583
584 return stop_pc;
585 }
586
587 static void *
588 add_lwp (ptid_t ptid)
589 {
590 struct lwp_info *lwp;
591
592 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
593 memset (lwp, 0, sizeof (*lwp));
594
595 lwp->head.id = ptid;
596
597 if (the_low_target.new_thread != NULL)
598 lwp->arch_private = the_low_target.new_thread ();
599
600 add_inferior_to_list (&all_lwps, &lwp->head);
601
602 return lwp;
603 }
604
605 /* Start an inferior process and return its pid.
606 ALLARGS is a vector of program-name and args. */
607
608 static int
609 linux_create_inferior (char *program, char **allargs)
610 {
611 #ifdef HAVE_PERSONALITY
612 int personality_orig = 0, personality_set = 0;
613 #endif
614 struct lwp_info *new_lwp;
615 int pid;
616 ptid_t ptid;
617
618 #ifdef HAVE_PERSONALITY
619 if (disable_randomization)
620 {
621 errno = 0;
622 personality_orig = personality (0xffffffff);
623 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
624 {
625 personality_set = 1;
626 personality (personality_orig | ADDR_NO_RANDOMIZE);
627 }
628 if (errno != 0 || (personality_set
629 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
630 warning ("Error disabling address space randomization: %s",
631 strerror (errno));
632 }
633 #endif
634
635 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
636 pid = vfork ();
637 #else
638 pid = fork ();
639 #endif
640 if (pid < 0)
641 perror_with_name ("fork");
642
643 if (pid == 0)
644 {
645 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
646
647 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
648 signal (__SIGRTMIN + 1, SIG_DFL);
649 #endif
650
651 setpgid (0, 0);
652
653 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
654 stdout to stderr so that inferior i/o doesn't corrupt the connection.
655 Also, redirect stdin to /dev/null. */
656 if (remote_connection_is_stdio ())
657 {
658 close (0);
659 open ("/dev/null", O_RDONLY);
660 dup2 (2, 1);
661 if (write (2, "stdin/stdout redirected\n",
662 sizeof ("stdin/stdout redirected\n") - 1) < 0)
663 {
664 /* Errors ignored. */;
665 }
666 }
667
668 execv (program, allargs);
669 if (errno == ENOENT)
670 execvp (program, allargs);
671
672 fprintf (stderr, "Cannot exec %s: %s.\n", program,
673 strerror (errno));
674 fflush (stderr);
675 _exit (0177);
676 }
677
678 #ifdef HAVE_PERSONALITY
679 if (personality_set)
680 {
681 errno = 0;
682 personality (personality_orig);
683 if (errno != 0)
684 warning ("Error restoring address space randomization: %s",
685 strerror (errno));
686 }
687 #endif
688
689 linux_add_process (pid, 0);
690
691 ptid = ptid_build (pid, pid, 0);
692 new_lwp = add_lwp (ptid);
693 add_thread (ptid, new_lwp);
694 new_lwp->must_set_ptrace_flags = 1;
695
696 return pid;
697 }
698
699 /* Attach to an inferior process. */
700
701 static void
702 linux_attach_lwp_1 (unsigned long lwpid, int initial)
703 {
704 ptid_t ptid;
705 struct lwp_info *new_lwp;
706
707 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0)
708 != 0)
709 {
710 struct buffer buffer;
711
712 if (!initial)
713 {
714 /* If we fail to attach to an LWP, just warn. */
715 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
716 strerror (errno), errno);
717 fflush (stderr);
718 return;
719 }
720
721 /* If we fail to attach to a process, report an error. */
722 buffer_init (&buffer);
723 linux_ptrace_attach_warnings (lwpid, &buffer);
724 buffer_grow_str0 (&buffer, "");
725 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
726 lwpid, strerror (errno), errno);
727 }
728
729 if (initial)
730 /* If lwp is the tgid, we handle adding existing threads later.
731 Otherwise we just add lwp without bothering about any other
732 threads. */
733 ptid = ptid_build (lwpid, lwpid, 0);
734 else
735 {
736 /* Note that extracting the pid from the current inferior is
737 safe, since we're always called in the context of the same
738 process as this new thread. */
739 int pid = pid_of (get_thread_lwp (current_inferior));
740 ptid = ptid_build (pid, lwpid, 0);
741 }
742
743 new_lwp = (struct lwp_info *) add_lwp (ptid);
744 add_thread (ptid, new_lwp);
745
746 /* We need to wait for SIGSTOP before being able to make the next
747 ptrace call on this LWP. */
748 new_lwp->must_set_ptrace_flags = 1;
749
750 if (linux_proc_pid_is_stopped (lwpid))
751 {
752 if (debug_threads)
753 fprintf (stderr,
754 "Attached to a stopped process\n");
755
756 /* The process is definitely stopped. It is in a job control
757 stop, unless the kernel predates the TASK_STOPPED /
758 TASK_TRACED distinction, in which case it might be in a
759 ptrace stop. Make sure it is in a ptrace stop; from there we
760 can kill it, signal it, et cetera.
761
762 First make sure there is a pending SIGSTOP. Since we are
763 already attached, the process can not transition from stopped
764 to running without a PTRACE_CONT; so we know this signal will
765 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
766 probably already in the queue (unless this kernel is old
767 enough to use TASK_STOPPED for ptrace stops); but since
768 SIGSTOP is not an RT signal, it can only be queued once. */
769 kill_lwp (lwpid, SIGSTOP);
770
771 /* Finally, resume the stopped process. This will deliver the
772 SIGSTOP (or a higher priority signal, just like normal
773 PTRACE_ATTACH), which we'll catch later on. */
774 ptrace (PTRACE_CONT, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
775 }
776
777 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
778 brings it to a halt.
779
780 There are several cases to consider here:
781
782 1) gdbserver has already attached to the process and is being notified
783 of a new thread that is being created.
784 In this case we should ignore that SIGSTOP and resume the
785 process. This is handled below by setting stop_expected = 1,
786 and the fact that add_thread sets last_resume_kind ==
787 resume_continue.
788
789 2) This is the first thread (the process thread), and we're attaching
790 to it via attach_inferior.
791 In this case we want the process thread to stop.
792 This is handled by having linux_attach set last_resume_kind ==
793 resume_stop after we return.
794
795 If the pid we are attaching to is also the tgid, we attach to and
796 stop all the existing threads. Otherwise, we attach to pid and
797 ignore any other threads in the same group as this pid.
798
799 3) GDB is connecting to gdbserver and is requesting an enumeration of all
800 existing threads.
801 In this case we want the thread to stop.
802 FIXME: This case is currently not properly handled.
803 We should wait for the SIGSTOP but don't. Things work apparently
804 because enough time passes between when we ptrace (ATTACH) and when
805 gdb makes the next ptrace call on the thread.
806
807 On the other hand, if we are currently trying to stop all threads, we
808 should treat the new thread as if we had sent it a SIGSTOP. This works
809 because we are guaranteed that the add_lwp call above added us to the
810 end of the list, and so the new thread has not yet reached
811 wait_for_sigstop (but will). */
812 new_lwp->stop_expected = 1;
813 }
814
815 void
816 linux_attach_lwp (unsigned long lwpid)
817 {
818 linux_attach_lwp_1 (lwpid, 0);
819 }
820
821 /* Attach to PID. If PID is the tgid, attach to it and all
822 of its threads. */
823
824 static int
825 linux_attach (unsigned long pid)
826 {
827 /* Attach to PID. We will check for other threads
828 soon. */
829 linux_attach_lwp_1 (pid, 1);
830 linux_add_process (pid, 1);
831
832 if (!non_stop)
833 {
834 struct thread_info *thread;
835
836 /* Don't ignore the initial SIGSTOP if we just attached to this
837 process. It will be collected by wait shortly. */
838 thread = find_thread_ptid (ptid_build (pid, pid, 0));
839 thread->last_resume_kind = resume_stop;
840 }
841
842 if (linux_proc_get_tgid (pid) == pid)
843 {
844 DIR *dir;
845 char pathname[128];
846
847 sprintf (pathname, "/proc/%ld/task", pid);
848
849 dir = opendir (pathname);
850
851 if (!dir)
852 {
853 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
854 fflush (stderr);
855 }
856 else
857 {
858 /* At this point we attached to the tgid. Scan the task for
859 existing threads. */
860 unsigned long lwp;
861 int new_threads_found;
862 int iterations = 0;
863 struct dirent *dp;
864
865 while (iterations < 2)
866 {
867 new_threads_found = 0;
868 /* Add all the other threads. While we go through the
869 threads, new threads may be spawned. Cycle through
870 the list of threads until we have done two iterations without
871 finding new threads. */
872 while ((dp = readdir (dir)) != NULL)
873 {
874 /* Fetch one lwp. */
875 lwp = strtoul (dp->d_name, NULL, 10);
876
877 /* Is this a new thread? */
878 if (lwp
879 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
880 {
881 linux_attach_lwp_1 (lwp, 0);
882 new_threads_found++;
883
884 if (debug_threads)
885 fprintf (stderr, "\
886 Found and attached to new lwp %ld\n", lwp);
887 }
888 }
889
890 if (!new_threads_found)
891 iterations++;
892 else
893 iterations = 0;
894
895 rewinddir (dir);
896 }
897 closedir (dir);
898 }
899 }
900
901 return 0;
902 }
903
904 struct counter
905 {
906 int pid;
907 int count;
908 };
909
910 static int
911 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
912 {
913 struct counter *counter = args;
914
915 if (ptid_get_pid (entry->id) == counter->pid)
916 {
917 if (++counter->count > 1)
918 return 1;
919 }
920
921 return 0;
922 }
923
924 static int
925 last_thread_of_process_p (struct thread_info *thread)
926 {
927 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
928 int pid = ptid_get_pid (ptid);
929 struct counter counter = { pid , 0 };
930
931 return (find_inferior (&all_threads,
932 second_thread_of_pid_p, &counter) == NULL);
933 }
934
935 /* Kill LWP. */
936
937 static void
938 linux_kill_one_lwp (struct lwp_info *lwp)
939 {
940 int pid = lwpid_of (lwp);
941
942 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
943 there is no signal context, and ptrace(PTRACE_KILL) (or
944 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
945 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
946 alternative is to kill with SIGKILL. We only need one SIGKILL
947 per process, not one for each thread. But since we still support
948 linuxthreads, and we also support debugging programs using raw
949 clone without CLONE_THREAD, we send one for each thread. For
950 years, we used PTRACE_KILL only, so we're being a bit paranoid
951 about some old kernels where PTRACE_KILL might work better
952 (dubious if there are any such, but that's why it's paranoia), so
953 we try SIGKILL first, PTRACE_KILL second, and so we're fine
954 everywhere. */
955
956 errno = 0;
957 kill (pid, SIGKILL);
958 if (debug_threads)
959 fprintf (stderr,
960 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
961 target_pid_to_str (ptid_of (lwp)),
962 errno ? strerror (errno) : "OK");
963
964 errno = 0;
965 ptrace (PTRACE_KILL, pid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
966 if (debug_threads)
967 fprintf (stderr,
968 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
969 target_pid_to_str (ptid_of (lwp)),
970 errno ? strerror (errno) : "OK");
971 }
972
973 /* Callback for `find_inferior'. Kills an lwp of a given process,
974 except the leader. */
975
976 static int
977 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
978 {
979 struct thread_info *thread = (struct thread_info *) entry;
980 struct lwp_info *lwp = get_thread_lwp (thread);
981 int wstat;
982 int pid = * (int *) args;
983
984 if (ptid_get_pid (entry->id) != pid)
985 return 0;
986
987 /* We avoid killing the first thread here, because of a Linux kernel (at
988 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
989 the children get a chance to be reaped, it will remain a zombie
990 forever. */
991
992 if (lwpid_of (lwp) == pid)
993 {
994 if (debug_threads)
995 fprintf (stderr, "lkop: is last of process %s\n",
996 target_pid_to_str (entry->id));
997 return 0;
998 }
999
1000 do
1001 {
1002 linux_kill_one_lwp (lwp);
1003
1004 /* Make sure it died. The loop is most likely unnecessary. */
1005 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1006 } while (pid > 0 && WIFSTOPPED (wstat));
1007
1008 return 0;
1009 }
1010
1011 static int
1012 linux_kill (int pid)
1013 {
1014 struct process_info *process;
1015 struct lwp_info *lwp;
1016 int wstat;
1017 int lwpid;
1018
1019 process = find_process_pid (pid);
1020 if (process == NULL)
1021 return -1;
1022
1023 /* If we're killing a running inferior, make sure it is stopped
1024 first, as PTRACE_KILL will not work otherwise. */
1025 stop_all_lwps (0, NULL);
1026
1027 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1028
1029 /* See the comment in linux_kill_one_lwp. We did not kill the first
1030 thread in the list, so do so now. */
1031 lwp = find_lwp_pid (pid_to_ptid (pid));
1032
1033 if (lwp == NULL)
1034 {
1035 if (debug_threads)
1036 fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
1037 pid);
1038 }
1039 else
1040 {
1041 if (debug_threads)
1042 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
1043 lwpid_of (lwp), pid);
1044
1045 do
1046 {
1047 linux_kill_one_lwp (lwp);
1048
1049 /* Make sure it died. The loop is most likely unnecessary. */
1050 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1051 } while (lwpid > 0 && WIFSTOPPED (wstat));
1052 }
1053
1054 the_target->mourn (process);
1055
1056 /* Since we presently can only stop all lwps of all processes, we
1057 need to unstop lwps of other processes. */
1058 unstop_all_lwps (0, NULL);
1059 return 0;
1060 }
1061
1062 /* Get pending signal of THREAD, for detaching purposes. This is the
1063 signal the thread last stopped for, which we need to deliver to the
1064 thread when detaching; otherwise, it'd be suppressed/lost. */
1065
1066 static int
1067 get_detach_signal (struct thread_info *thread)
1068 {
1069 enum gdb_signal signo = GDB_SIGNAL_0;
1070 int status;
1071 struct lwp_info *lp = get_thread_lwp (thread);
1072
1073 if (lp->status_pending_p)
1074 status = lp->status_pending;
1075 else
1076 {
1077 /* If the thread had been suspended by gdbserver, and it stopped
1078 cleanly, then it'll have stopped with SIGSTOP. But we don't
1079 want to deliver that SIGSTOP. */
1080 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1081 || thread->last_status.value.sig == GDB_SIGNAL_0)
1082 return 0;
1083
1084 /* Otherwise, we may need to deliver the signal we
1085 intercepted. */
1086 status = lp->last_status;
1087 }
1088
1089 if (!WIFSTOPPED (status))
1090 {
1091 if (debug_threads)
1092 fprintf (stderr,
1093 "GPS: lwp %s hasn't stopped: no pending signal\n",
1094 target_pid_to_str (ptid_of (lp)));
1095 return 0;
1096 }
1097
1098 /* Extended wait statuses aren't real SIGTRAPs. */
1099 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1100 {
1101 if (debug_threads)
1102 fprintf (stderr,
1103 "GPS: lwp %s had stopped with extended "
1104 "status: no pending signal\n",
1105 target_pid_to_str (ptid_of (lp)));
1106 return 0;
1107 }
1108
1109 signo = gdb_signal_from_host (WSTOPSIG (status));
1110
1111 if (program_signals_p && !program_signals[signo])
1112 {
1113 if (debug_threads)
1114 fprintf (stderr,
1115 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1116 target_pid_to_str (ptid_of (lp)),
1117 gdb_signal_to_string (signo));
1118 return 0;
1119 }
1120 else if (!program_signals_p
1121 /* If we have no way to know which signals GDB does not
1122 want to have passed to the program, assume
1123 SIGTRAP/SIGINT, which is GDB's default. */
1124 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1125 {
1126 if (debug_threads)
1127 fprintf (stderr,
1128 "GPS: lwp %s had signal %s, "
1129 "but we don't know if we should pass it. Default to not.\n",
1130 target_pid_to_str (ptid_of (lp)),
1131 gdb_signal_to_string (signo));
1132 return 0;
1133 }
1134 else
1135 {
1136 if (debug_threads)
1137 fprintf (stderr,
1138 "GPS: lwp %s has pending signal %s: delivering it.\n",
1139 target_pid_to_str (ptid_of (lp)),
1140 gdb_signal_to_string (signo));
1141
1142 return WSTOPSIG (status);
1143 }
1144 }
1145
1146 static int
1147 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1148 {
1149 struct thread_info *thread = (struct thread_info *) entry;
1150 struct lwp_info *lwp = get_thread_lwp (thread);
1151 int pid = * (int *) args;
1152 int sig;
1153
1154 if (ptid_get_pid (entry->id) != pid)
1155 return 0;
1156
1157 /* If there is a pending SIGSTOP, get rid of it. */
1158 if (lwp->stop_expected)
1159 {
1160 if (debug_threads)
1161 fprintf (stderr,
1162 "Sending SIGCONT to %s\n",
1163 target_pid_to_str (ptid_of (lwp)));
1164
1165 kill_lwp (lwpid_of (lwp), SIGCONT);
1166 lwp->stop_expected = 0;
1167 }
1168
1169 /* Flush any pending changes to the process's registers. */
1170 regcache_invalidate_one ((struct inferior_list_entry *)
1171 get_lwp_thread (lwp));
1172
1173 /* Pass on any pending signal for this thread. */
1174 sig = get_detach_signal (thread);
1175
1176 /* Finally, let it resume. */
1177 if (the_low_target.prepare_to_resume != NULL)
1178 the_low_target.prepare_to_resume (lwp);
1179 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1180 (PTRACE_ARG4_TYPE) (long) sig) < 0)
1181 error (_("Can't detach %s: %s"),
1182 target_pid_to_str (ptid_of (lwp)),
1183 strerror (errno));
1184
1185 delete_lwp (lwp);
1186 return 0;
1187 }
1188
1189 static int
1190 linux_detach (int pid)
1191 {
1192 struct process_info *process;
1193
1194 process = find_process_pid (pid);
1195 if (process == NULL)
1196 return -1;
1197
1198 /* Stop all threads before detaching. First, ptrace requires that
1199 the thread is stopped to successfully detach. Second, thread_db
1200 may need to uninstall thread event breakpoints from memory, which
1201 only works with a stopped process anyway. */
1202 stop_all_lwps (0, NULL);
1203
1204 #ifdef USE_THREAD_DB
1205 thread_db_detach (process);
1206 #endif
1207
1208 /* Stabilize threads (move out of jump pads). */
1209 stabilize_threads ();
1210
1211 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1212
1213 the_target->mourn (process);
1214
1215 /* Since we presently can only stop all lwps of all processes, we
1216 need to unstop lwps of other processes. */
1217 unstop_all_lwps (0, NULL);
1218 return 0;
1219 }
1220
1221 /* Remove all LWPs that belong to process PROC from the lwp list. */
1222
1223 static int
1224 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1225 {
1226 struct lwp_info *lwp = (struct lwp_info *) entry;
1227 struct process_info *process = proc;
1228
1229 if (pid_of (lwp) == pid_of (process))
1230 delete_lwp (lwp);
1231
1232 return 0;
1233 }
1234
1235 static void
1236 linux_mourn (struct process_info *process)
1237 {
1238 struct process_info_private *priv;
1239
1240 #ifdef USE_THREAD_DB
1241 thread_db_mourn (process);
1242 #endif
1243
1244 find_inferior (&all_lwps, delete_lwp_callback, process);
1245
1246 /* Freeing all private data. */
1247 priv = process->private;
1248 free (priv->arch_private);
1249 free (priv);
1250 process->private = NULL;
1251
1252 remove_process (process);
1253 }
1254
1255 static void
1256 linux_join (int pid)
1257 {
1258 int status, ret;
1259
1260 do {
1261 ret = my_waitpid (pid, &status, 0);
1262 if (WIFEXITED (status) || WIFSIGNALED (status))
1263 break;
1264 } while (ret != -1 || errno != ECHILD);
1265 }
1266
1267 /* Return nonzero if the given thread is still alive. */
1268 static int
1269 linux_thread_alive (ptid_t ptid)
1270 {
1271 struct lwp_info *lwp = find_lwp_pid (ptid);
1272
1273 /* We assume we always know if a thread exits. If a whole process
1274 exited but we still haven't been able to report it to GDB, we'll
1275 hold on to the last lwp of the dead process. */
1276 if (lwp != NULL)
1277 return !lwp->dead;
1278 else
1279 return 0;
1280 }
1281
1282 /* Return 1 if this lwp has an interesting status pending. */
1283 static int
1284 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1285 {
1286 struct lwp_info *lwp = (struct lwp_info *) entry;
1287 ptid_t ptid = * (ptid_t *) arg;
1288 struct thread_info *thread;
1289
1290 /* Check if we're only interested in events from a specific process
1291 or its lwps. */
1292 if (!ptid_equal (minus_one_ptid, ptid)
1293 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1294 return 0;
1295
1296 thread = get_lwp_thread (lwp);
1297
1298 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1299 report any status pending the LWP may have. */
1300 if (thread->last_resume_kind == resume_stop
1301 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1302 return 0;
1303
1304 return lwp->status_pending_p;
1305 }
1306
1307 static int
1308 same_lwp (struct inferior_list_entry *entry, void *data)
1309 {
1310 ptid_t ptid = *(ptid_t *) data;
1311 int lwp;
1312
1313 if (ptid_get_lwp (ptid) != 0)
1314 lwp = ptid_get_lwp (ptid);
1315 else
1316 lwp = ptid_get_pid (ptid);
1317
1318 if (ptid_get_lwp (entry->id) == lwp)
1319 return 1;
1320
1321 return 0;
1322 }
1323
1324 struct lwp_info *
1325 find_lwp_pid (ptid_t ptid)
1326 {
1327 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1328 }
1329
1330 static struct lwp_info *
1331 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1332 {
1333 int ret;
1334 int to_wait_for = -1;
1335 struct lwp_info *child = NULL;
1336
1337 if (debug_threads)
1338 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1339
1340 if (ptid_equal (ptid, minus_one_ptid))
1341 to_wait_for = -1; /* any child */
1342 else
1343 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1344
1345 options |= __WALL;
1346
1347 retry:
1348
1349 ret = my_waitpid (to_wait_for, wstatp, options);
1350 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1351 return NULL;
1352 else if (ret == -1)
1353 perror_with_name ("waitpid");
1354
1355 if (debug_threads
1356 && (!WIFSTOPPED (*wstatp)
1357 || (WSTOPSIG (*wstatp) != 32
1358 && WSTOPSIG (*wstatp) != 33)))
1359 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1360
1361 child = find_lwp_pid (pid_to_ptid (ret));
1362
1363 /* If we didn't find a process, one of two things presumably happened:
1364 - A process we started and then detached from has exited. Ignore it.
1365 - A process we are controlling has forked and the new child's stop
1366 was reported to us by the kernel. Save its PID. */
1367 if (child == NULL && WIFSTOPPED (*wstatp))
1368 {
1369 add_to_pid_list (&stopped_pids, ret, *wstatp);
1370 goto retry;
1371 }
1372 else if (child == NULL)
1373 goto retry;
1374
1375 child->stopped = 1;
1376
1377 child->last_status = *wstatp;
1378
1379 /* Architecture-specific setup after inferior is running.
1380 This needs to happen after we have attached to the inferior
1381 and it is stopped for the first time, but before we access
1382 any inferior registers. */
1383 if (new_inferior)
1384 {
1385 the_low_target.arch_setup ();
1386 #ifdef HAVE_LINUX_REGSETS
1387 memset (disabled_regsets, 0, num_regsets);
1388 #endif
1389 new_inferior = 0;
1390 }
1391
1392 /* Fetch the possibly triggered data watchpoint info and store it in
1393 CHILD.
1394
1395 On some archs, like x86, that use debug registers to set
1396 watchpoints, it's possible that the way to know which watched
1397 address trapped, is to check the register that is used to select
1398 which address to watch. Problem is, between setting the
1399 watchpoint and reading back which data address trapped, the user
1400 may change the set of watchpoints, and, as a consequence, GDB
1401 changes the debug registers in the inferior. To avoid reading
1402 back a stale stopped-data-address when that happens, we cache in
1403 LP the fact that a watchpoint trapped, and the corresponding data
1404 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1405 changes the debug registers meanwhile, we have the cached data we
1406 can rely on. */
1407
1408 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1409 {
1410 if (the_low_target.stopped_by_watchpoint == NULL)
1411 {
1412 child->stopped_by_watchpoint = 0;
1413 }
1414 else
1415 {
1416 struct thread_info *saved_inferior;
1417
1418 saved_inferior = current_inferior;
1419 current_inferior = get_lwp_thread (child);
1420
1421 child->stopped_by_watchpoint
1422 = the_low_target.stopped_by_watchpoint ();
1423
1424 if (child->stopped_by_watchpoint)
1425 {
1426 if (the_low_target.stopped_data_address != NULL)
1427 child->stopped_data_address
1428 = the_low_target.stopped_data_address ();
1429 else
1430 child->stopped_data_address = 0;
1431 }
1432
1433 current_inferior = saved_inferior;
1434 }
1435 }
1436
1437 /* Store the STOP_PC, with adjustment applied. This depends on the
1438 architecture being defined already (so that CHILD has a valid
1439 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1440 not). */
1441 if (WIFSTOPPED (*wstatp))
1442 child->stop_pc = get_stop_pc (child);
1443
1444 if (debug_threads
1445 && WIFSTOPPED (*wstatp)
1446 && the_low_target.get_pc != NULL)
1447 {
1448 struct thread_info *saved_inferior = current_inferior;
1449 struct regcache *regcache;
1450 CORE_ADDR pc;
1451
1452 current_inferior = get_lwp_thread (child);
1453 regcache = get_thread_regcache (current_inferior, 1);
1454 pc = (*the_low_target.get_pc) (regcache);
1455 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1456 current_inferior = saved_inferior;
1457 }
1458
1459 return child;
1460 }
1461
1462 /* This function should only be called if the LWP got a SIGTRAP.
1463
1464 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1465 event was handled, 0 otherwise. */
1466
1467 static int
1468 handle_tracepoints (struct lwp_info *lwp)
1469 {
1470 struct thread_info *tinfo = get_lwp_thread (lwp);
1471 int tpoint_related_event = 0;
1472
1473 /* If this tracepoint hit causes a tracing stop, we'll immediately
1474 uninsert tracepoints. To do this, we temporarily pause all
1475 threads, unpatch away, and then unpause threads. We need to make
1476 sure the unpausing doesn't resume LWP too. */
1477 lwp->suspended++;
1478
1479 /* And we need to be sure that any all-threads-stopping doesn't try
1480 to move threads out of the jump pads, as it could deadlock the
1481 inferior (LWP could be in the jump pad, maybe even holding the
1482 lock.) */
1483
1484 /* Do any necessary step collect actions. */
1485 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1486
1487 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1488
1489 /* See if we just hit a tracepoint and do its main collect
1490 actions. */
1491 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1492
1493 lwp->suspended--;
1494
1495 gdb_assert (lwp->suspended == 0);
1496 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1497
1498 if (tpoint_related_event)
1499 {
1500 if (debug_threads)
1501 fprintf (stderr, "got a tracepoint event\n");
1502 return 1;
1503 }
1504
1505 return 0;
1506 }
1507
1508 /* Convenience wrapper. Returns true if LWP is presently collecting a
1509 fast tracepoint. */
1510
1511 static int
1512 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1513 struct fast_tpoint_collect_status *status)
1514 {
1515 CORE_ADDR thread_area;
1516
1517 if (the_low_target.get_thread_area == NULL)
1518 return 0;
1519
1520 /* Get the thread area address. This is used to recognize which
1521 thread is which when tracing with the in-process agent library.
1522 We don't read anything from the address, and treat it as opaque;
1523 it's the address itself that we assume is unique per-thread. */
1524 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1525 return 0;
1526
1527 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1528 }
1529
1530 /* The reason we resume in the caller is that we want to be able
1531 to pass lwp->status_pending as WSTAT, and we need to clear
1532 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1533 refuses to resume. */
1534
1535 static int
1536 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1537 {
1538 struct thread_info *saved_inferior;
1539
1540 saved_inferior = current_inferior;
1541 current_inferior = get_lwp_thread (lwp);
1542
1543 if ((wstat == NULL
1544 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1545 && supports_fast_tracepoints ()
1546 && agent_loaded_p ())
1547 {
1548 struct fast_tpoint_collect_status status;
1549 int r;
1550
1551 if (debug_threads)
1552 fprintf (stderr, "\
1553 Checking whether LWP %ld needs to move out of the jump pad.\n",
1554 lwpid_of (lwp));
1555
1556 r = linux_fast_tracepoint_collecting (lwp, &status);
1557
1558 if (wstat == NULL
1559 || (WSTOPSIG (*wstat) != SIGILL
1560 && WSTOPSIG (*wstat) != SIGFPE
1561 && WSTOPSIG (*wstat) != SIGSEGV
1562 && WSTOPSIG (*wstat) != SIGBUS))
1563 {
1564 lwp->collecting_fast_tracepoint = r;
1565
1566 if (r != 0)
1567 {
1568 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1569 {
1570 /* Haven't executed the original instruction yet.
1571 Set breakpoint there, and wait till it's hit,
1572 then single-step until exiting the jump pad. */
1573 lwp->exit_jump_pad_bkpt
1574 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1575 }
1576
1577 if (debug_threads)
1578 fprintf (stderr, "\
1579 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1580 lwpid_of (lwp));
1581 current_inferior = saved_inferior;
1582
1583 return 1;
1584 }
1585 }
1586 else
1587 {
1588 /* If we get a synchronous signal while collecting, *and*
1589 while executing the (relocated) original instruction,
1590 reset the PC to point at the tpoint address, before
1591 reporting to GDB. Otherwise, it's an IPA lib bug: just
1592 report the signal to GDB, and pray for the best. */
1593
1594 lwp->collecting_fast_tracepoint = 0;
1595
1596 if (r != 0
1597 && (status.adjusted_insn_addr <= lwp->stop_pc
1598 && lwp->stop_pc < status.adjusted_insn_addr_end))
1599 {
1600 siginfo_t info;
1601 struct regcache *regcache;
1602
1603 /* The si_addr on a few signals references the address
1604 of the faulting instruction. Adjust that as
1605 well. */
1606 if ((WSTOPSIG (*wstat) == SIGILL
1607 || WSTOPSIG (*wstat) == SIGFPE
1608 || WSTOPSIG (*wstat) == SIGBUS
1609 || WSTOPSIG (*wstat) == SIGSEGV)
1610 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1611 (PTRACE_ARG3_TYPE) 0, &info) == 0
1612 /* Final check just to make sure we don't clobber
1613 the siginfo of non-kernel-sent signals. */
1614 && (uintptr_t) info.si_addr == lwp->stop_pc)
1615 {
1616 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1617 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1618 (PTRACE_ARG3_TYPE) 0, &info);
1619 }
1620
1621 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1622 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1623 lwp->stop_pc = status.tpoint_addr;
1624
1625 /* Cancel any fast tracepoint lock this thread was
1626 holding. */
1627 force_unlock_trace_buffer ();
1628 }
1629
1630 if (lwp->exit_jump_pad_bkpt != NULL)
1631 {
1632 if (debug_threads)
1633 fprintf (stderr,
1634 "Cancelling fast exit-jump-pad: removing bkpt. "
1635 "stopping all threads momentarily.\n");
1636
1637 stop_all_lwps (1, lwp);
1638 cancel_breakpoints ();
1639
1640 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1641 lwp->exit_jump_pad_bkpt = NULL;
1642
1643 unstop_all_lwps (1, lwp);
1644
1645 gdb_assert (lwp->suspended >= 0);
1646 }
1647 }
1648 }
1649
1650 if (debug_threads)
1651 fprintf (stderr, "\
1652 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1653 lwpid_of (lwp));
1654
1655 current_inferior = saved_inferior;
1656 return 0;
1657 }
1658
1659 /* Enqueue one signal in the "signals to report later when out of the
1660 jump pad" list. */
1661
1662 static void
1663 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1664 {
1665 struct pending_signals *p_sig;
1666
1667 if (debug_threads)
1668 fprintf (stderr, "\
1669 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1670
1671 if (debug_threads)
1672 {
1673 struct pending_signals *sig;
1674
1675 for (sig = lwp->pending_signals_to_report;
1676 sig != NULL;
1677 sig = sig->prev)
1678 fprintf (stderr,
1679 " Already queued %d\n",
1680 sig->signal);
1681
1682 fprintf (stderr, " (no more currently queued signals)\n");
1683 }
1684
1685 /* Don't enqueue non-RT signals if they are already in the deferred
1686 queue. (SIGSTOP being the easiest signal to see ending up here
1687 twice) */
1688 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1689 {
1690 struct pending_signals *sig;
1691
1692 for (sig = lwp->pending_signals_to_report;
1693 sig != NULL;
1694 sig = sig->prev)
1695 {
1696 if (sig->signal == WSTOPSIG (*wstat))
1697 {
1698 if (debug_threads)
1699 fprintf (stderr,
1700 "Not requeuing already queued non-RT signal %d"
1701 " for LWP %ld\n",
1702 sig->signal,
1703 lwpid_of (lwp));
1704 return;
1705 }
1706 }
1707 }
1708
1709 p_sig = xmalloc (sizeof (*p_sig));
1710 p_sig->prev = lwp->pending_signals_to_report;
1711 p_sig->signal = WSTOPSIG (*wstat);
1712 memset (&p_sig->info, 0, sizeof (siginfo_t));
1713 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1714 &p_sig->info);
1715
1716 lwp->pending_signals_to_report = p_sig;
1717 }
1718
1719 /* Dequeue one signal from the "signals to report later when out of
1720 the jump pad" list. */
1721
1722 static int
1723 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1724 {
1725 if (lwp->pending_signals_to_report != NULL)
1726 {
1727 struct pending_signals **p_sig;
1728
1729 p_sig = &lwp->pending_signals_to_report;
1730 while ((*p_sig)->prev != NULL)
1731 p_sig = &(*p_sig)->prev;
1732
1733 *wstat = W_STOPCODE ((*p_sig)->signal);
1734 if ((*p_sig)->info.si_signo != 0)
1735 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1736 &(*p_sig)->info);
1737 free (*p_sig);
1738 *p_sig = NULL;
1739
1740 if (debug_threads)
1741 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1742 WSTOPSIG (*wstat), lwpid_of (lwp));
1743
1744 if (debug_threads)
1745 {
1746 struct pending_signals *sig;
1747
1748 for (sig = lwp->pending_signals_to_report;
1749 sig != NULL;
1750 sig = sig->prev)
1751 fprintf (stderr,
1752 " Still queued %d\n",
1753 sig->signal);
1754
1755 fprintf (stderr, " (no more queued signals)\n");
1756 }
1757
1758 return 1;
1759 }
1760
1761 return 0;
1762 }
1763
1764 /* Arrange for a breakpoint to be hit again later. We don't keep the
1765 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1766 will handle the current event, eventually we will resume this LWP,
1767 and this breakpoint will trap again. */
1768
1769 static int
1770 cancel_breakpoint (struct lwp_info *lwp)
1771 {
1772 struct thread_info *saved_inferior;
1773
1774 /* There's nothing to do if we don't support breakpoints. */
1775 if (!supports_breakpoints ())
1776 return 0;
1777
1778 /* breakpoint_at reads from current inferior. */
1779 saved_inferior = current_inferior;
1780 current_inferior = get_lwp_thread (lwp);
1781
1782 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1783 {
1784 if (debug_threads)
1785 fprintf (stderr,
1786 "CB: Push back breakpoint for %s\n",
1787 target_pid_to_str (ptid_of (lwp)));
1788
1789 /* Back up the PC if necessary. */
1790 if (the_low_target.decr_pc_after_break)
1791 {
1792 struct regcache *regcache
1793 = get_thread_regcache (current_inferior, 1);
1794 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1795 }
1796
1797 current_inferior = saved_inferior;
1798 return 1;
1799 }
1800 else
1801 {
1802 if (debug_threads)
1803 fprintf (stderr,
1804 "CB: No breakpoint found at %s for [%s]\n",
1805 paddress (lwp->stop_pc),
1806 target_pid_to_str (ptid_of (lwp)));
1807 }
1808
1809 current_inferior = saved_inferior;
1810 return 0;
1811 }
1812
1813 /* When the event-loop is doing a step-over, this points at the thread
1814 being stepped. */
1815 ptid_t step_over_bkpt;
1816
1817 /* Wait for an event from child PID. If PID is -1, wait for any
1818 child. Store the stop status through the status pointer WSTAT.
1819 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1820 event was found and OPTIONS contains WNOHANG. Return the PID of
1821 the stopped child otherwise. */
1822
1823 static int
1824 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1825 {
1826 struct lwp_info *event_child, *requested_child;
1827 ptid_t wait_ptid;
1828
1829 event_child = NULL;
1830 requested_child = NULL;
1831
1832 /* Check for a lwp with a pending status. */
1833
1834 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1835 {
1836 event_child = (struct lwp_info *)
1837 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1838 if (debug_threads && event_child)
1839 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1840 }
1841 else
1842 {
1843 requested_child = find_lwp_pid (ptid);
1844
1845 if (stopping_threads == NOT_STOPPING_THREADS
1846 && requested_child->status_pending_p
1847 && requested_child->collecting_fast_tracepoint)
1848 {
1849 enqueue_one_deferred_signal (requested_child,
1850 &requested_child->status_pending);
1851 requested_child->status_pending_p = 0;
1852 requested_child->status_pending = 0;
1853 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1854 }
1855
1856 if (requested_child->suspended
1857 && requested_child->status_pending_p)
1858 fatal ("requesting an event out of a suspended child?");
1859
1860 if (requested_child->status_pending_p)
1861 event_child = requested_child;
1862 }
1863
1864 if (event_child != NULL)
1865 {
1866 if (debug_threads)
1867 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1868 lwpid_of (event_child), event_child->status_pending);
1869 *wstat = event_child->status_pending;
1870 event_child->status_pending_p = 0;
1871 event_child->status_pending = 0;
1872 current_inferior = get_lwp_thread (event_child);
1873 return lwpid_of (event_child);
1874 }
1875
1876 if (ptid_is_pid (ptid))
1877 {
1878 /* A request to wait for a specific tgid. This is not possible
1879 with waitpid, so instead, we wait for any child, and leave
1880 children we're not interested in right now with a pending
1881 status to report later. */
1882 wait_ptid = minus_one_ptid;
1883 }
1884 else
1885 wait_ptid = ptid;
1886
1887 /* We only enter this loop if no process has a pending wait status. Thus
1888 any action taken in response to a wait status inside this loop is
1889 responding as soon as we detect the status, not after any pending
1890 events. */
1891 while (1)
1892 {
1893 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1894
1895 if ((options & WNOHANG) && event_child == NULL)
1896 {
1897 if (debug_threads)
1898 fprintf (stderr, "WNOHANG set, no event found\n");
1899 return 0;
1900 }
1901
1902 if (event_child == NULL)
1903 error ("event from unknown child");
1904
1905 if (ptid_is_pid (ptid)
1906 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1907 {
1908 if (! WIFSTOPPED (*wstat))
1909 mark_lwp_dead (event_child, *wstat);
1910 else
1911 {
1912 event_child->status_pending_p = 1;
1913 event_child->status_pending = *wstat;
1914 }
1915 continue;
1916 }
1917
1918 current_inferior = get_lwp_thread (event_child);
1919
1920 /* Check for thread exit. */
1921 if (! WIFSTOPPED (*wstat))
1922 {
1923 if (debug_threads)
1924 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1925
1926 /* If the last thread is exiting, just return. */
1927 if (last_thread_of_process_p (current_inferior))
1928 {
1929 if (debug_threads)
1930 fprintf (stderr, "LWP %ld is last lwp of process\n",
1931 lwpid_of (event_child));
1932 return lwpid_of (event_child);
1933 }
1934
1935 if (!non_stop)
1936 {
1937 current_inferior = (struct thread_info *) all_threads.head;
1938 if (debug_threads)
1939 fprintf (stderr, "Current inferior is now %ld\n",
1940 lwpid_of (get_thread_lwp (current_inferior)));
1941 }
1942 else
1943 {
1944 current_inferior = NULL;
1945 if (debug_threads)
1946 fprintf (stderr, "Current inferior is now <NULL>\n");
1947 }
1948
1949 /* If we were waiting for this particular child to do something...
1950 well, it did something. */
1951 if (requested_child != NULL)
1952 {
1953 int lwpid = lwpid_of (event_child);
1954
1955 /* Cancel the step-over operation --- the thread that
1956 started it is gone. */
1957 if (finish_step_over (event_child))
1958 unstop_all_lwps (1, event_child);
1959 delete_lwp (event_child);
1960 return lwpid;
1961 }
1962
1963 delete_lwp (event_child);
1964
1965 /* Wait for a more interesting event. */
1966 continue;
1967 }
1968
1969 if (event_child->must_set_ptrace_flags)
1970 {
1971 linux_enable_event_reporting (lwpid_of (event_child));
1972 event_child->must_set_ptrace_flags = 0;
1973 }
1974
1975 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1976 && *wstat >> 16 != 0)
1977 {
1978 handle_extended_wait (event_child, *wstat);
1979 continue;
1980 }
1981
1982 if (WIFSTOPPED (*wstat)
1983 && WSTOPSIG (*wstat) == SIGSTOP
1984 && event_child->stop_expected)
1985 {
1986 int should_stop;
1987
1988 if (debug_threads)
1989 fprintf (stderr, "Expected stop.\n");
1990 event_child->stop_expected = 0;
1991
1992 should_stop = (current_inferior->last_resume_kind == resume_stop
1993 || stopping_threads != NOT_STOPPING_THREADS);
1994
1995 if (!should_stop)
1996 {
1997 linux_resume_one_lwp (event_child,
1998 event_child->stepping, 0, NULL);
1999 continue;
2000 }
2001 }
2002
2003 return lwpid_of (event_child);
2004 }
2005
2006 /* NOTREACHED */
2007 return 0;
2008 }
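
The loop above layers gdbserver's bookkeeping over a plain waitpid: poll with WNOHANG when the caller asked for a non-blocking check, otherwise block until some child reports a stop. A minimal standalone sketch of that primitive follows (ordinary child processes stand in for LWPs; no ptrace is used, so the Linux-specific __WALL flag is omitted). This is an illustrative demo, not gdbserver code.

#include <stdio.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  if (child == 0)
    {
      raise (SIGSTOP);          /* Child stops itself, like a SIGSTOP'd LWP.  */
      _exit (0);
    }

  int status;

  /* Non-blocking poll, as when OPTIONS contains WNOHANG: may return 0
     if the stop has not been reported yet.  */
  pid_t ret = waitpid (child, &status, WUNTRACED | WNOHANG);
  if (ret == 0)
    {
      printf ("WNOHANG: no event yet, falling back to a blocking wait\n");
      ret = waitpid (child, &status, WUNTRACED);
    }

  if (ret == child && WIFSTOPPED (status))
    printf ("child %d stopped by signal %d\n", (int) ret, WSTOPSIG (status));

  kill (child, SIGCONT);
  waitpid (child, &status, 0);  /* Reap the exit.  */
  return 0;
}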
2009
2010 /* Count the LWP's that have had events. */
2011
2012 static int
2013 count_events_callback (struct inferior_list_entry *entry, void *data)
2014 {
2015 struct lwp_info *lp = (struct lwp_info *) entry;
2016 struct thread_info *thread = get_lwp_thread (lp);
2017 int *count = data;
2018
2019 gdb_assert (count != NULL);
2020
2021 /* Count only resumed LWPs that have a SIGTRAP event pending that
2022 should be reported to GDB. */
2023 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2024 && thread->last_resume_kind != resume_stop
2025 && lp->status_pending_p
2026 && WIFSTOPPED (lp->status_pending)
2027 && WSTOPSIG (lp->status_pending) == SIGTRAP
2028 && !breakpoint_inserted_here (lp->stop_pc))
2029 (*count)++;
2030
2031 return 0;
2032 }
2033
2034 /* Select the LWP (if any) that is currently being single-stepped. */
2035
2036 static int
2037 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2038 {
2039 struct lwp_info *lp = (struct lwp_info *) entry;
2040 struct thread_info *thread = get_lwp_thread (lp);
2041
2042 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2043 && thread->last_resume_kind == resume_step
2044 && lp->status_pending_p)
2045 return 1;
2046 else
2047 return 0;
2048 }
2049
2050 /* Select the Nth LWP that has had a SIGTRAP event that should be
2051 reported to GDB. */
2052
2053 static int
2054 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2055 {
2056 struct lwp_info *lp = (struct lwp_info *) entry;
2057 struct thread_info *thread = get_lwp_thread (lp);
2058 int *selector = data;
2059
2060 gdb_assert (selector != NULL);
2061
2062 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2063 if (thread->last_resume_kind != resume_stop
2064 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2065 && lp->status_pending_p
2066 && WIFSTOPPED (lp->status_pending)
2067 && WSTOPSIG (lp->status_pending) == SIGTRAP
2068 && !breakpoint_inserted_here (lp->stop_pc))
2069 if ((*selector)-- == 0)
2070 return 1;
2071
2072 return 0;
2073 }
2074
2075 static int
2076 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2077 {
2078 struct lwp_info *lp = (struct lwp_info *) entry;
2079 struct thread_info *thread = get_lwp_thread (lp);
2080 struct lwp_info *event_lp = data;
2081
2082 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2083 if (lp == event_lp)
2084 return 0;
2085
2086 /* If a LWP other than the LWP that we're reporting an event for has
2087 hit a GDB breakpoint (as opposed to some random trap signal),
2088 then just arrange for it to hit it again later. We don't keep
2089 the SIGTRAP status and don't forward the SIGTRAP signal to the
2090 LWP. We will handle the current event; eventually we will resume
2091 all LWPs, and this one will get its breakpoint trap again.
2092
2093 If we do not do this, then we run the risk that the user will
2094 delete or disable the breakpoint, but the LWP will have already
2095 tripped on it. */
2096
2097 if (thread->last_resume_kind != resume_stop
2098 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2099 && lp->status_pending_p
2100 && WIFSTOPPED (lp->status_pending)
2101 && WSTOPSIG (lp->status_pending) == SIGTRAP
2102 && !lp->stepping
2103 && !lp->stopped_by_watchpoint
2104 && cancel_breakpoint (lp))
2105 /* Throw away the SIGTRAP. */
2106 lp->status_pending_p = 0;
2107
2108 return 0;
2109 }
2110
2111 static void
2112 linux_cancel_breakpoints (void)
2113 {
2114 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2115 }
2116
2117 /* Select one LWP out of those that have events pending. */
2118
2119 static void
2120 select_event_lwp (struct lwp_info **orig_lp)
2121 {
2122 int num_events = 0;
2123 int random_selector;
2124 struct lwp_info *event_lp;
2125
2126 /* Give preference to any LWP that is being single-stepped. */
2127 event_lp
2128 = (struct lwp_info *) find_inferior (&all_lwps,
2129 select_singlestep_lwp_callback, NULL);
2130 if (event_lp != NULL)
2131 {
2132 if (debug_threads)
2133 fprintf (stderr,
2134 "SEL: Select single-step %s\n",
2135 target_pid_to_str (ptid_of (event_lp)));
2136 }
2137 else
2138 {
2139 /* No single-stepping LWP. Select one at random, out of those
2140 which have had SIGTRAP events. */
2141
2142 /* First see how many SIGTRAP events we have. */
2143 find_inferior (&all_lwps, count_events_callback, &num_events);
2144
2145 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2146 random_selector = (int)
2147 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2148
2149 if (debug_threads && num_events > 1)
2150 fprintf (stderr,
2151 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2152 num_events, random_selector);
2153
2154 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2155 select_event_lwp_callback,
2156 &random_selector);
2157 }
2158
2159 if (event_lp != NULL)
2160 {
2161 /* Switch the event LWP. */
2162 *orig_lp = event_lp;
2163 }
2164 }
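
The random_selector formula above maps rand () into a uniform index in [0, num_events), and select_event_lwp_callback then decrements that selector across the candidates until it reaches zero. A standalone sketch of the same idiom, with a plain string array standing in for the LWP list (names are purely illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main (void)
{
  const char *candidates[] = { "lwp-101", "lwp-102", "lwp-103" };
  int n = sizeof (candidates) / sizeof (candidates[0]);

  srand (time (NULL));

  /* Same formula as the code above: a uniform value in [0, n).  */
  int selector = (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));

  /* Walk the candidates and decrement the selector, as the
     select_event_lwp_callback above does.  */
  for (int i = 0; i < n; i++)
    if (selector-- == 0)
      {
        printf ("selected %s\n", candidates[i]);
        break;
      }
  return 0;
}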
2165
2166 /* Decrement the suspend count of an LWP. */
2167
2168 static int
2169 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2170 {
2171 struct lwp_info *lwp = (struct lwp_info *) entry;
2172
2173 /* Ignore EXCEPT. */
2174 if (lwp == except)
2175 return 0;
2176
2177 lwp->suspended--;
2178
2179 gdb_assert (lwp->suspended >= 0);
2180 return 0;
2181 }
2182
2183 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2184 non-NULL. */
2185
2186 static void
2187 unsuspend_all_lwps (struct lwp_info *except)
2188 {
2189 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2190 }
2191
2192 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2193 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2194 void *data);
2195 static int lwp_running (struct inferior_list_entry *entry, void *data);
2196 static ptid_t linux_wait_1 (ptid_t ptid,
2197 struct target_waitstatus *ourstatus,
2198 int target_options);
2199
2200 /* Stabilize threads (move out of jump pads).
2201
2202 If a thread is midway collecting a fast tracepoint, we need to
2203 finish the collection and move it out of the jump pad before
2204 reporting the signal.
2205
2206 This avoids recursion while collecting (when a signal arrives
2207 midway, and the signal handler itself collects), which would trash
2208 the trace buffer. In case the user set a breakpoint in a signal
2209 handler, this avoids the backtrace showing the jump pad, etc..
2210 Most importantly, there are certain things we can't do safely if
2211 threads are stopped in a jump pad (or in its callees). For
2212 example:
2213
2214 - starting a new trace run. A thread still collecting the
2215 previous run could trash the trace buffer when resumed. The trace
2216 buffer control structures would have been reset but the thread had
2217 no way to tell. The thread could even be midway through memcpy'ing
2218 to the buffer, which would mean that when resumed, it would clobber
2219 the trace buffer that had been set for a new run.
2220
2221 - we can't rewrite/reuse the jump pads for new tracepoints
2222 safely. Say you do tstart while a thread is stopped midway while
2223 collecting. When the thread is later resumed, it finishes the
2224 collection, and returns to the jump pad, to execute the original
2225 instruction that was under the tracepoint jump at the time the
2226 older run had been started. If the jump pad had since been
2227 rewritten for something else in the new run, the thread would now
2228 execute the wrong / random instructions. */
2229
2230 static void
2231 linux_stabilize_threads (void)
2232 {
2233 struct thread_info *save_inferior;
2234 struct lwp_info *lwp_stuck;
2235
2236 lwp_stuck
2237 = (struct lwp_info *) find_inferior (&all_lwps,
2238 stuck_in_jump_pad_callback, NULL);
2239 if (lwp_stuck != NULL)
2240 {
2241 if (debug_threads)
2242 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2243 lwpid_of (lwp_stuck));
2244 return;
2245 }
2246
2247 save_inferior = current_inferior;
2248
2249 stabilizing_threads = 1;
2250
2251 /* Kick 'em all. */
2252 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2253
2254 /* Loop until all are stopped out of the jump pads. */
2255 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2256 {
2257 struct target_waitstatus ourstatus;
2258 struct lwp_info *lwp;
2259 int wstat;
2260
2261 /* Note that we go through the full wait event loop. While
2262 moving threads out of the jump pad, we need to be able to step
2263 over internal breakpoints and such. */
2264 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2265
2266 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2267 {
2268 lwp = get_thread_lwp (current_inferior);
2269
2270 /* Lock it. */
2271 lwp->suspended++;
2272
2273 if (ourstatus.value.sig != GDB_SIGNAL_0
2274 || current_inferior->last_resume_kind == resume_stop)
2275 {
2276 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2277 enqueue_one_deferred_signal (lwp, &wstat);
2278 }
2279 }
2280 }
2281
2282 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2283
2284 stabilizing_threads = 0;
2285
2286 current_inferior = save_inferior;
2287
2288 if (debug_threads)
2289 {
2290 lwp_stuck
2291 = (struct lwp_info *) find_inferior (&all_lwps,
2292 stuck_in_jump_pad_callback, NULL);
2293 if (lwp_stuck != NULL)
2294 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2295 lwpid_of (lwp_stuck));
2296 }
2297 }
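
The W_STOPCODE call above manufactures a wait status that looks exactly like a real signal stop, so the deferred signal can later be handled by the normal WIFSTOPPED/WSTOPSIG path. A tiny standalone demonstration of that encoding (SIGUSR1 is chosen arbitrarily for the example):

#include <stdio.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  /* Build a synthetic "stopped by SIGUSR1" wait status.  */
  int wstat = W_STOPCODE (SIGUSR1);   /* e.g. (10 << 8) | 0x7f on x86 Linux.  */

  printf ("wstat = %#06x\n", wstat);
  printf ("WIFSTOPPED = %d\n", WIFSTOPPED (wstat));   /* 1 */
  printf ("WSTOPSIG   = %d\n", WSTOPSIG (wstat));     /* SIGUSR1 */
  return 0;
}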
2298
2299 /* Wait for process, returns status. */
2300
2301 static ptid_t
2302 linux_wait_1 (ptid_t ptid,
2303 struct target_waitstatus *ourstatus, int target_options)
2304 {
2305 int w;
2306 struct lwp_info *event_child;
2307 int options;
2308 int pid;
2309 int step_over_finished;
2310 int bp_explains_trap;
2311 int maybe_internal_trap;
2312 int report_to_gdb;
2313 int trace_event;
2314
2315 /* Translate generic target options into linux options. */
2316 options = __WALL;
2317 if (target_options & TARGET_WNOHANG)
2318 options |= WNOHANG;
2319
2320 retry:
2321 bp_explains_trap = 0;
2322 trace_event = 0;
2323 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2324
2325 /* If we were only supposed to resume one thread, only wait for
2326 that thread - if it's still alive. If it died, however - which
2327 can happen if we're coming from the thread death case below -
2328 then we need to make sure we restart the other threads. We could
2329 pick a thread at random or restart all; restarting all is less
2330 arbitrary. */
2331 if (!non_stop
2332 && !ptid_equal (cont_thread, null_ptid)
2333 && !ptid_equal (cont_thread, minus_one_ptid))
2334 {
2335 struct thread_info *thread;
2336
2337 thread = (struct thread_info *) find_inferior_id (&all_threads,
2338 cont_thread);
2339
2340 /* No stepping, no signal - unless one is pending already, of course. */
2341 if (thread == NULL)
2342 {
2343 struct thread_resume resume_info;
2344 resume_info.thread = minus_one_ptid;
2345 resume_info.kind = resume_continue;
2346 resume_info.sig = 0;
2347 linux_resume (&resume_info, 1);
2348 }
2349 else
2350 ptid = cont_thread;
2351 }
2352
2353 if (ptid_equal (step_over_bkpt, null_ptid))
2354 pid = linux_wait_for_event (ptid, &w, options);
2355 else
2356 {
2357 if (debug_threads)
2358 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2359 target_pid_to_str (step_over_bkpt));
2360 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2361 }
2362
2363 if (pid == 0) /* only if TARGET_WNOHANG */
2364 return null_ptid;
2365
2366 event_child = get_thread_lwp (current_inferior);
2367
2368 /* If we are waiting for a particular child, and it exited,
2369 linux_wait_for_event will return its exit status. Similarly if
2370 the last child exited. If this is not the last child, however,
2371 do not report it as exited until there is a 'thread exited' response
2372 available in the remote protocol. Instead, just wait for another event.
2373 This should be safe, because if the thread crashed we will already
2374 have reported the termination signal to GDB; that should stop any
2375 in-progress stepping operations, etc.
2376
2377 Report the exit status of the last thread to exit. This matches
2378 LinuxThreads' behavior. */
2379
2380 if (last_thread_of_process_p (current_inferior))
2381 {
2382 if (WIFEXITED (w) || WIFSIGNALED (w))
2383 {
2384 if (WIFEXITED (w))
2385 {
2386 ourstatus->kind = TARGET_WAITKIND_EXITED;
2387 ourstatus->value.integer = WEXITSTATUS (w);
2388
2389 if (debug_threads)
2390 fprintf (stderr,
2391 "\nChild exited with retcode = %x \n",
2392 WEXITSTATUS (w));
2393 }
2394 else
2395 {
2396 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2397 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2398
2399 if (debug_threads)
2400 fprintf (stderr,
2401 "\nChild terminated with signal = %x \n",
2402 WTERMSIG (w));
2403
2404 }
2405
2406 return ptid_of (event_child);
2407 }
2408 }
2409 else
2410 {
2411 if (!WIFSTOPPED (w))
2412 goto retry;
2413 }
2414
2415 /* If this event was not handled before, and is not a SIGTRAP, we
2416 report it. SIGILL and SIGSEGV are also treated as traps in case
2417 a breakpoint is inserted at the current PC. If this target does
2418 not support internal breakpoints at all, we also report the
2419 SIGTRAP without further processing; it's of no concern to us. */
2420 maybe_internal_trap
2421 = (supports_breakpoints ()
2422 && (WSTOPSIG (w) == SIGTRAP
2423 || ((WSTOPSIG (w) == SIGILL
2424 || WSTOPSIG (w) == SIGSEGV)
2425 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2426
2427 if (maybe_internal_trap)
2428 {
2429 /* Handle anything that requires bookkeeping before deciding to
2430 report the event or continue waiting. */
2431
2432 /* First check if we can explain the SIGTRAP with an internal
2433 breakpoint, or if we should possibly report the event to GDB.
2434 Do this before anything that may remove or insert a
2435 breakpoint. */
2436 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2437
2438 /* We have a SIGTRAP, possibly a step-over dance has just
2439 finished. If so, tweak the state machine accordingly,
2440 reinsert breakpoints and delete any reinsert (software
2441 single-step) breakpoints. */
2442 step_over_finished = finish_step_over (event_child);
2443
2444 /* Now invoke the callbacks of any internal breakpoints there. */
2445 check_breakpoints (event_child->stop_pc);
2446
2447 /* Handle tracepoint data collecting. This may overflow the
2448 trace buffer, and cause a tracing stop, removing
2449 breakpoints. */
2450 trace_event = handle_tracepoints (event_child);
2451
2452 if (bp_explains_trap)
2453 {
2454 /* If we stepped or ran into an internal breakpoint, we've
2455 already handled it. So next time we resume (from this
2456 PC), we should step over it. */
2457 if (debug_threads)
2458 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2459
2460 if (breakpoint_here (event_child->stop_pc))
2461 event_child->need_step_over = 1;
2462 }
2463 }
2464 else
2465 {
2466 /* We have some other signal, possibly a step-over dance was in
2467 progress, and it should be cancelled too. */
2468 step_over_finished = finish_step_over (event_child);
2469 }
2470
2471 /* We have all the data we need. Either report the event to GDB, or
2472 resume threads and keep waiting for more. */
2473
2474 /* If we're collecting a fast tracepoint, finish the collection and
2475 move out of the jump pad before delivering a signal. See
2476 linux_stabilize_threads. */
2477
2478 if (WIFSTOPPED (w)
2479 && WSTOPSIG (w) != SIGTRAP
2480 && supports_fast_tracepoints ()
2481 && agent_loaded_p ())
2482 {
2483 if (debug_threads)
2484 fprintf (stderr,
2485 "Got signal %d for LWP %ld. Check if we need "
2486 "to defer or adjust it.\n",
2487 WSTOPSIG (w), lwpid_of (event_child));
2488
2489 /* Allow debugging the jump pad itself. */
2490 if (current_inferior->last_resume_kind != resume_step
2491 && maybe_move_out_of_jump_pad (event_child, &w))
2492 {
2493 enqueue_one_deferred_signal (event_child, &w);
2494
2495 if (debug_threads)
2496 fprintf (stderr,
2497 "Signal %d for LWP %ld deferred (in jump pad)\n",
2498 WSTOPSIG (w), lwpid_of (event_child));
2499
2500 linux_resume_one_lwp (event_child, 0, 0, NULL);
2501 goto retry;
2502 }
2503 }
2504
2505 if (event_child->collecting_fast_tracepoint)
2506 {
2507 if (debug_threads)
2508 fprintf (stderr, "\
2509 LWP %ld was trying to move out of the jump pad (%d). \
2510 Check if we're already there.\n",
2511 lwpid_of (event_child),
2512 event_child->collecting_fast_tracepoint);
2513
2514 trace_event = 1;
2515
2516 event_child->collecting_fast_tracepoint
2517 = linux_fast_tracepoint_collecting (event_child, NULL);
2518
2519 if (event_child->collecting_fast_tracepoint != 1)
2520 {
2521 /* No longer need this breakpoint. */
2522 if (event_child->exit_jump_pad_bkpt != NULL)
2523 {
2524 if (debug_threads)
2525 fprintf (stderr,
2526 "No longer need exit-jump-pad bkpt; removing it."
2527 "stopping all threads momentarily.\n");
2528
2529 /* Other running threads could hit this breakpoint.
2530 We don't handle moribund locations like GDB does;
2531 instead we always pause all threads when removing
2532 breakpoints, so that any step-over or
2533 decr_pc_after_break adjustment is always taken
2534 care of while the breakpoint is still
2535 inserted. */
2536 stop_all_lwps (1, event_child);
2537 cancel_breakpoints ();
2538
2539 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2540 event_child->exit_jump_pad_bkpt = NULL;
2541
2542 unstop_all_lwps (1, event_child);
2543
2544 gdb_assert (event_child->suspended >= 0);
2545 }
2546 }
2547
2548 if (event_child->collecting_fast_tracepoint == 0)
2549 {
2550 if (debug_threads)
2551 fprintf (stderr,
2552 "fast tracepoint finished "
2553 "collecting successfully.\n");
2554
2555 /* We may have a deferred signal to report. */
2556 if (dequeue_one_deferred_signal (event_child, &w))
2557 {
2558 if (debug_threads)
2559 fprintf (stderr, "dequeued one signal.\n");
2560 }
2561 else
2562 {
2563 if (debug_threads)
2564 fprintf (stderr, "no deferred signals.\n");
2565
2566 if (stabilizing_threads)
2567 {
2568 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2569 ourstatus->value.sig = GDB_SIGNAL_0;
2570 return ptid_of (event_child);
2571 }
2572 }
2573 }
2574 }
2575
2576 /* Check whether GDB would be interested in this event. */
2577
2578 /* If GDB is not interested in this signal, don't stop other
2579 threads, and don't report it to GDB. Just resume the inferior
2580 right away. We do this for threading-related signals as well as
2581 any that GDB specifically requested we ignore. But never ignore
2582 SIGSTOP if we sent it ourselves, and do not ignore signals when
2583 stepping - they may require special handling to skip the signal
2584 handler. */
2585 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2586 thread library? */
2587 if (WIFSTOPPED (w)
2588 && current_inferior->last_resume_kind != resume_step
2589 && (
2590 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2591 (current_process ()->private->thread_db != NULL
2592 && (WSTOPSIG (w) == __SIGRTMIN
2593 || WSTOPSIG (w) == __SIGRTMIN + 1))
2594 ||
2595 #endif
2596 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2597 && !(WSTOPSIG (w) == SIGSTOP
2598 && current_inferior->last_resume_kind == resume_stop))))
2599 {
2600 siginfo_t info, *info_p;
2601
2602 if (debug_threads)
2603 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2604 WSTOPSIG (w), lwpid_of (event_child));
2605
2606 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2607 (PTRACE_ARG3_TYPE) 0, &info) == 0)
2608 info_p = &info;
2609 else
2610 info_p = NULL;
2611 linux_resume_one_lwp (event_child, event_child->stepping,
2612 WSTOPSIG (w), info_p);
2613 goto retry;
2614 }
2615
2616 /* If GDB wanted this thread to single step, we always want to
2617 report the SIGTRAP, and let GDB handle it. Watchpoints should
2618 always be reported. So should signals we can't explain. A
2619 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2620 may not support Z0 breakpoints. If we do, we'll be able to handle
2621 GDB breakpoints on top of internal breakpoints, by handling the
2622 internal breakpoint and still reporting the event to GDB. If we
2623 don't, we're out of luck; GDB won't see the breakpoint hit. */
2624 report_to_gdb = (!maybe_internal_trap
2625 || current_inferior->last_resume_kind == resume_step
2626 || event_child->stopped_by_watchpoint
2627 || (!step_over_finished
2628 && !bp_explains_trap && !trace_event)
2629 || (gdb_breakpoint_here (event_child->stop_pc)
2630 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2631 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2632
2633 run_breakpoint_commands (event_child->stop_pc);
2634
2635 /* We found no reason GDB would want us to stop. We either hit one
2636 of our own breakpoints, or finished an internal step GDB
2637 shouldn't know about. */
2638 if (!report_to_gdb)
2639 {
2640 if (debug_threads)
2641 {
2642 if (bp_explains_trap)
2643 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2644 if (step_over_finished)
2645 fprintf (stderr, "Step-over finished.\n");
2646 if (trace_event)
2647 fprintf (stderr, "Tracepoint event.\n");
2648 }
2649
2650 /* We're not reporting this breakpoint to GDB, so apply the
2651 decr_pc_after_break adjustment to the inferior's regcache
2652 ourselves. */
2653
2654 if (the_low_target.set_pc != NULL)
2655 {
2656 struct regcache *regcache
2657 = get_thread_regcache (get_lwp_thread (event_child), 1);
2658 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2659 }
2660
2661 /* We may have finished stepping over a breakpoint. If so,
2662 we've stopped and suspended all LWPs momentarily except the
2663 stepping one. This is where we resume them all again. We're
2664 going to keep waiting, so use proceed, which handles stepping
2665 over the next breakpoint. */
2666 if (debug_threads)
2667 fprintf (stderr, "proceeding all threads.\n");
2668
2669 if (step_over_finished)
2670 unsuspend_all_lwps (event_child);
2671
2672 proceed_all_lwps ();
2673 goto retry;
2674 }
2675
2676 if (debug_threads)
2677 {
2678 if (current_inferior->last_resume_kind == resume_step)
2679 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2680 if (event_child->stopped_by_watchpoint)
2681 fprintf (stderr, "Stopped by watchpoint.\n");
2682 if (gdb_breakpoint_here (event_child->stop_pc))
2683 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2684 if (debug_threads)
2685 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2686 }
2687
2688 /* Alright, we're going to report a stop. */
2689
2690 if (!non_stop && !stabilizing_threads)
2691 {
2692 /* In all-stop, stop all threads. */
2693 stop_all_lwps (0, NULL);
2694
2695 /* If we're not waiting for a specific LWP, choose an event LWP
2696 from among those that have had events. Giving equal priority
2697 to all LWPs that have had events helps prevent
2698 starvation. */
2699 if (ptid_equal (ptid, minus_one_ptid))
2700 {
2701 event_child->status_pending_p = 1;
2702 event_child->status_pending = w;
2703
2704 select_event_lwp (&event_child);
2705
2706 event_child->status_pending_p = 0;
2707 w = event_child->status_pending;
2708 }
2709
2710 /* Now that we've selected our final event LWP, cancel any
2711 breakpoints in other LWPs that have hit a GDB breakpoint.
2712 See the comment in cancel_breakpoints_callback to find out
2713 why. */
2714 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2715
2716 /* If we were doing a step-over, all other threads but the stepping one
2717 had been paused in start_step_over, with their suspend counts
2718 incremented. We don't want to do a full unstop/unpause, because we're
2719 in all-stop mode (so we want threads stopped), but we still need to
2720 unsuspend the other threads, to decrement their `suspended' count
2721 back. */
2722 if (step_over_finished)
2723 unsuspend_all_lwps (event_child);
2724
2725 /* Stabilize threads (move out of jump pads). */
2726 stabilize_threads ();
2727 }
2728 else
2729 {
2730 /* If we just finished a step-over, then all threads had been
2731 momentarily paused. In all-stop, that's fine, we want
2732 threads stopped by now anyway. In non-stop, we need to
2733 re-resume threads that GDB wanted to be running. */
2734 if (step_over_finished)
2735 unstop_all_lwps (1, event_child);
2736 }
2737
2738 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2739
2740 if (current_inferior->last_resume_kind == resume_stop
2741 && WSTOPSIG (w) == SIGSTOP)
2742 {
2743 /* A thread that has been requested to stop by GDB with vCont;t
2744 stopped cleanly, so report it as SIG0. The use of
2745 SIGSTOP is an implementation detail. */
2746 ourstatus->value.sig = GDB_SIGNAL_0;
2747 }
2748 else if (current_inferior->last_resume_kind == resume_stop
2749 && WSTOPSIG (w) != SIGSTOP)
2750 {
2751 /* A thread that has been requested to stop by GDB with vCont;t
2752 stopped for some other reason. */
2753 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2754 }
2755 else
2756 {
2757 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2758 }
2759
2760 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2761
2762 if (debug_threads)
2763 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2764 target_pid_to_str (ptid_of (event_child)),
2765 ourstatus->kind,
2766 ourstatus->value.sig);
2767
2768 return ptid_of (event_child);
2769 }
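
When linux_wait_1 decides GDB is not interested in a signal, it reads the siginfo and hands the signal straight back to the inferior on resume. A minimal standalone sketch of that pass-through using raw ptrace on a forked child follows; it is illustrative only (the handler and message are part of the demo, not gdbserver), and it assumes an ordinary Linux ptrace setup.

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static void
handler (int sig)
{
  /* The child only gets here if the tracer re-injected the signal.  */
  const char msg[] = "child: got re-injected signal\n";
  write (1, msg, sizeof msg - 1);
}

int
main (void)
{
  pid_t child = fork ();
  if (child == 0)
    {
      signal (SIGUSR1, handler);
      ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      raise (SIGUSR1);          /* Signal-delivery-stop reported to the tracer.  */
      _exit (0);
    }

  int status;
  waitpid (child, &status, 0);
  if (WIFSTOPPED (status))
    {
      siginfo_t info;
      ptrace (PTRACE_GETSIGINFO, child, NULL, &info);
      printf ("tracer: child stopped by signal %d (si_signo %d)\n",
              WSTOPSIG (status), info.si_signo);

      /* Resume and deliver the same signal, the way linux_resume_one_lwp
         passes its SIGNAL argument to PTRACE_CONT.  */
      ptrace (PTRACE_CONT, child, NULL, (void *) (long) WSTOPSIG (status));
    }

  waitpid (child, &status, 0);  /* Reap the exit.  */
  return 0;
}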
2770
2771 /* Get rid of any pending event in the pipe. */
2772 static void
2773 async_file_flush (void)
2774 {
2775 int ret;
2776 char buf;
2777
2778 do
2779 ret = read (linux_event_pipe[0], &buf, 1);
2780 while (ret >= 0 || (ret == -1 && errno == EINTR));
2781 }
2782
2783 /* Put something in the pipe, so the event loop wakes up. */
2784 static void
2785 async_file_mark (void)
2786 {
2787 int ret;
2788
2789 async_file_flush ();
2790
2791 do
2792 ret = write (linux_event_pipe[1], "+", 1);
2793 while (ret == 0 || (ret == -1 && errno == EINTR));
2794
2795 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2796 be awakened anyway. */
2797 }
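
async_file_flush and async_file_mark implement the usual self-pipe wakeup: writing one byte makes the event loop's select/poll fire, and draining the pipe clears the mark. A standalone sketch of the pattern, with a locally created non-blocking pipe standing in for linux_event_pipe:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int
main (void)
{
  int fds[2];
  pipe (fds);
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);

  /* "async_file_mark": make the read end readable.  */
  write (fds[1], "+", 1);

  /* The event loop notices the mark.  */
  fd_set rset;
  FD_ZERO (&rset);
  FD_SET (fds[0], &rset);
  struct timeval tv = { 0, 0 };
  if (select (fds[0] + 1, &rset, NULL, NULL, &tv) > 0)
    printf ("event loop woken up\n");

  /* "async_file_flush": drain every pending byte.  */
  char buf;
  while (read (fds[0], &buf, 1) > 0)
    ;
  if (errno == EAGAIN)
    printf ("pipe drained\n");

  return 0;
}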
2798
2799 static ptid_t
2800 linux_wait (ptid_t ptid,
2801 struct target_waitstatus *ourstatus, int target_options)
2802 {
2803 ptid_t event_ptid;
2804
2805 if (debug_threads)
2806 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2807
2808 /* Flush the async file first. */
2809 if (target_is_async_p ())
2810 async_file_flush ();
2811
2812 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2813
2814 /* If at least one stop was reported, there may be more. A single
2815 SIGCHLD can signal more than one child stop. */
2816 if (target_is_async_p ()
2817 && (target_options & TARGET_WNOHANG) != 0
2818 && !ptid_equal (event_ptid, null_ptid))
2819 async_file_mark ();
2820
2821 return event_ptid;
2822 }
2823
2824 /* Send a signal to an LWP. */
2825
2826 static int
2827 kill_lwp (unsigned long lwpid, int signo)
2828 {
2829 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2830 fails, then we are not using nptl threads and we should be using kill. */
2831
2832 #ifdef __NR_tkill
2833 {
2834 static int tkill_failed;
2835
2836 if (!tkill_failed)
2837 {
2838 int ret;
2839
2840 errno = 0;
2841 ret = syscall (__NR_tkill, lwpid, signo);
2842 if (errno != ENOSYS)
2843 return ret;
2844 tkill_failed = 1;
2845 }
2846 }
2847 #endif
2848
2849 return kill (lwpid, signo);
2850 }
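
kill_lwp prefers the thread-directed tkill syscall and falls back to process-wide kill if the kernel lacks it. A standalone sketch of the same fallback, signalling the calling thread; send_to_lwp, the handler and the SYS_gettid call are demo scaffolding, not gdbserver names.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static volatile sig_atomic_t got_signal;

static void
handler (int sig)
{
  got_signal = sig;
}

static int
send_to_lwp (pid_t lwpid, int signo)
{
#ifdef __NR_tkill
  errno = 0;
  int ret = syscall (__NR_tkill, lwpid, signo);
  if (errno != ENOSYS)
    return ret;
#endif
  return kill (lwpid, signo);   /* Fallback: no thread-directed syscall.  */
}

int
main (void)
{
  signal (SIGUSR1, handler);
  pid_t tid = syscall (SYS_gettid);   /* Main thread: tid == pid.  */
  send_to_lwp (tid, SIGUSR1);
  printf ("got signal %d via tkill/kill\n", (int) got_signal);
  return 0;
}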
2851
2852 void
2853 linux_stop_lwp (struct lwp_info *lwp)
2854 {
2855 send_sigstop (lwp);
2856 }
2857
2858 static void
2859 send_sigstop (struct lwp_info *lwp)
2860 {
2861 int pid;
2862
2863 pid = lwpid_of (lwp);
2864
2865 /* If we already have a pending stop signal for this process, don't
2866 send another. */
2867 if (lwp->stop_expected)
2868 {
2869 if (debug_threads)
2870 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2871
2872 return;
2873 }
2874
2875 if (debug_threads)
2876 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2877
2878 lwp->stop_expected = 1;
2879 kill_lwp (pid, SIGSTOP);
2880 }
2881
2882 static int
2883 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2884 {
2885 struct lwp_info *lwp = (struct lwp_info *) entry;
2886
2887 /* Ignore EXCEPT. */
2888 if (lwp == except)
2889 return 0;
2890
2891 if (lwp->stopped)
2892 return 0;
2893
2894 send_sigstop (lwp);
2895 return 0;
2896 }
2897
2898 /* Increment the suspend count of an LWP, and stop it, if not stopped
2899 yet. */
2900 static int
2901 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2902 void *except)
2903 {
2904 struct lwp_info *lwp = (struct lwp_info *) entry;
2905
2906 /* Ignore EXCEPT. */
2907 if (lwp == except)
2908 return 0;
2909
2910 lwp->suspended++;
2911
2912 return send_sigstop_callback (entry, except);
2913 }
2914
2915 static void
2916 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2917 {
2918 /* It's dead, really. */
2919 lwp->dead = 1;
2920
2921 /* Store the exit status for later. */
2922 lwp->status_pending_p = 1;
2923 lwp->status_pending = wstat;
2924
2925 /* Prevent trying to stop it. */
2926 lwp->stopped = 1;
2927
2928 /* No further stops are expected from a dead lwp. */
2929 lwp->stop_expected = 0;
2930 }
2931
2932 static void
2933 wait_for_sigstop (struct inferior_list_entry *entry)
2934 {
2935 struct lwp_info *lwp = (struct lwp_info *) entry;
2936 struct thread_info *saved_inferior;
2937 int wstat;
2938 ptid_t saved_tid;
2939 ptid_t ptid;
2940 int pid;
2941
2942 if (lwp->stopped)
2943 {
2944 if (debug_threads)
2945 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2946 lwpid_of (lwp));
2947 return;
2948 }
2949
2950 saved_inferior = current_inferior;
2951 if (saved_inferior != NULL)
2952 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2953 else
2954 saved_tid = null_ptid; /* avoid bogus unused warning */
2955
2956 ptid = lwp->head.id;
2957
2958 if (debug_threads)
2959 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2960
2961 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2962
2963 /* If we stopped with a non-SIGSTOP signal, save it for later
2964 and record the pending SIGSTOP. If the process exited, just
2965 return. */
2966 if (WIFSTOPPED (wstat))
2967 {
2968 if (debug_threads)
2969 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2970 lwpid_of (lwp), WSTOPSIG (wstat));
2971
2972 if (WSTOPSIG (wstat) != SIGSTOP)
2973 {
2974 if (debug_threads)
2975 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2976 lwpid_of (lwp), wstat);
2977
2978 lwp->status_pending_p = 1;
2979 lwp->status_pending = wstat;
2980 }
2981 }
2982 else
2983 {
2984 if (debug_threads)
2985 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2986
2987 lwp = find_lwp_pid (pid_to_ptid (pid));
2988 if (lwp)
2989 {
2990 /* Leave this status pending for the next time we're able to
2991 report it. In the meantime, we'll report this lwp as
2992 dead to GDB, so GDB doesn't try to read registers and
2993 memory from it. This can only happen if this was the
2994 last thread of the process; otherwise, PID is removed
2995 from the thread tables before linux_wait_for_event
2996 returns. */
2997 mark_lwp_dead (lwp, wstat);
2998 }
2999 }
3000
3001 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3002 current_inferior = saved_inferior;
3003 else
3004 {
3005 if (debug_threads)
3006 fprintf (stderr, "Previously current thread died.\n");
3007
3008 if (non_stop)
3009 {
3010 /* We can't change the current inferior behind GDB's back,
3011 otherwise, a subsequent command may apply to the wrong
3012 process. */
3013 current_inferior = NULL;
3014 }
3015 else
3016 {
3017 /* Set a valid thread as current. */
3018 set_desired_inferior (0);
3019 }
3020 }
3021 }
3022
3023 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3024 move it out, because we need to report the stop event to GDB. For
3025 example, if the user puts a breakpoint in the jump pad, it's
3026 because she wants to debug it. */
3027
3028 static int
3029 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3030 {
3031 struct lwp_info *lwp = (struct lwp_info *) entry;
3032 struct thread_info *thread = get_lwp_thread (lwp);
3033
3034 gdb_assert (lwp->suspended == 0);
3035 gdb_assert (lwp->stopped);
3036
3037 /* Allow debugging the jump pad, gdb_collect, etc.. */
3038 return (supports_fast_tracepoints ()
3039 && agent_loaded_p ()
3040 && (gdb_breakpoint_here (lwp->stop_pc)
3041 || lwp->stopped_by_watchpoint
3042 || thread->last_resume_kind == resume_step)
3043 && linux_fast_tracepoint_collecting (lwp, NULL));
3044 }
3045
3046 static void
3047 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3048 {
3049 struct lwp_info *lwp = (struct lwp_info *) entry;
3050 struct thread_info *thread = get_lwp_thread (lwp);
3051 int *wstat;
3052
3053 gdb_assert (lwp->suspended == 0);
3054 gdb_assert (lwp->stopped);
3055
3056 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3057
3058 /* Allow debugging the jump pad, gdb_collect, etc. */
3059 if (!gdb_breakpoint_here (lwp->stop_pc)
3060 && !lwp->stopped_by_watchpoint
3061 && thread->last_resume_kind != resume_step
3062 && maybe_move_out_of_jump_pad (lwp, wstat))
3063 {
3064 if (debug_threads)
3065 fprintf (stderr,
3066 "LWP %ld needs stabilizing (in jump pad)\n",
3067 lwpid_of (lwp));
3068
3069 if (wstat)
3070 {
3071 lwp->status_pending_p = 0;
3072 enqueue_one_deferred_signal (lwp, wstat);
3073
3074 if (debug_threads)
3075 fprintf (stderr,
3076 "Signal %d for LWP %ld deferred "
3077 "(in jump pad)\n",
3078 WSTOPSIG (*wstat), lwpid_of (lwp));
3079 }
3080
3081 linux_resume_one_lwp (lwp, 0, 0, NULL);
3082 }
3083 else
3084 lwp->suspended++;
3085 }
3086
3087 static int
3088 lwp_running (struct inferior_list_entry *entry, void *data)
3089 {
3090 struct lwp_info *lwp = (struct lwp_info *) entry;
3091
3092 if (lwp->dead)
3093 return 0;
3094 if (lwp->stopped)
3095 return 0;
3096 return 1;
3097 }
3098
3099 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3100 If SUSPEND, then also increase the suspend count of every LWP,
3101 except EXCEPT. */
3102
3103 static void
3104 stop_all_lwps (int suspend, struct lwp_info *except)
3105 {
3106 /* Should not be called recursively. */
3107 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3108
3109 stopping_threads = (suspend
3110 ? STOPPING_AND_SUSPENDING_THREADS
3111 : STOPPING_THREADS);
3112
3113 if (suspend)
3114 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3115 else
3116 find_inferior (&all_lwps, send_sigstop_callback, except);
3117 for_each_inferior (&all_lwps, wait_for_sigstop);
3118 stopping_threads = NOT_STOPPING_THREADS;
3119 }
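
stop_all_lwps is essentially "send SIGSTOP to everything not already stopped, then wait until each stop has been reported". A standalone sketch of that two-phase pattern with ordinary child processes standing in for LWPs (no ptrace here, so WUNTRACED is used to observe the stops); it is a demo only, not gdbserver code.

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#define NCHILDREN 2

int
main (void)
{
  pid_t children[NCHILDREN];

  for (int i = 0; i < NCHILDREN; i++)
    {
      children[i] = fork ();
      if (children[i] == 0)
        for (;;)
          pause ();             /* A "running" LWP waiting to be stopped.  */
    }

  /* Phase one: send_sigstop for every child...  */
  for (int i = 0; i < NCHILDREN; i++)
    kill (children[i], SIGSTOP);

  /* Phase two: ... then wait_for_sigstop for each one.  */
  for (int i = 0; i < NCHILDREN; i++)
    {
      int status;
      waitpid (children[i], &status, WUNTRACED);
      if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
        printf ("child %d stopped\n", (int) children[i]);
    }

  /* Clean up the demo children.  */
  for (int i = 0; i < NCHILDREN; i++)
    {
      kill (children[i], SIGKILL);
      waitpid (children[i], NULL, 0);
    }
  return 0;
}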
3120
3121 /* Resume execution of the inferior process.
3122 If STEP is nonzero, single-step it.
3123 If SIGNAL is nonzero, give it that signal. */
3124
3125 static void
3126 linux_resume_one_lwp (struct lwp_info *lwp,
3127 int step, int signal, siginfo_t *info)
3128 {
3129 struct thread_info *saved_inferior;
3130 int fast_tp_collecting;
3131
3132 if (lwp->stopped == 0)
3133 return;
3134
3135 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3136
3137 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3138
3139 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3140 user used the "jump" command, or "set $pc = foo"). */
3141 if (lwp->stop_pc != get_pc (lwp))
3142 {
3143 /* Collecting 'while-stepping' actions doesn't make sense
3144 anymore. */
3145 release_while_stepping_state_list (get_lwp_thread (lwp));
3146 }
3147
3148 /* If we have pending signals or status, and a new signal, enqueue the
3149 signal. Also enqueue the signal if we are waiting to reinsert a
3150 breakpoint; it will be picked up again below. */
3151 if (signal != 0
3152 && (lwp->status_pending_p
3153 || lwp->pending_signals != NULL
3154 || lwp->bp_reinsert != 0
3155 || fast_tp_collecting))
3156 {
3157 struct pending_signals *p_sig;
3158 p_sig = xmalloc (sizeof (*p_sig));
3159 p_sig->prev = lwp->pending_signals;
3160 p_sig->signal = signal;
3161 if (info == NULL)
3162 memset (&p_sig->info, 0, sizeof (siginfo_t));
3163 else
3164 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3165 lwp->pending_signals = p_sig;
3166 }
3167
3168 if (lwp->status_pending_p)
3169 {
3170 if (debug_threads)
3171 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3172 " has pending status\n",
3173 lwpid_of (lwp), step ? "step" : "continue", signal,
3174 lwp->stop_expected ? "expected" : "not expected");
3175 return;
3176 }
3177
3178 saved_inferior = current_inferior;
3179 current_inferior = get_lwp_thread (lwp);
3180
3181 if (debug_threads)
3182 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3183 lwpid_of (lwp), step ? "step" : "continue", signal,
3184 lwp->stop_expected ? "expected" : "not expected");
3185
3186 /* This bit needs some thinking about. If we get a signal that
3187 we must report while a single-step reinsert is still pending,
3188 we often end up resuming the thread. It might be better to
3189 (ew) allow a stack of pending events; then we could be sure that
3190 the reinsert happened right away and not lose any signals.
3191
3192 Making this stack would also shrink the window in which breakpoints are
3193 uninserted (see comment in linux_wait_for_lwp) but not enough for
3194 complete correctness, so it won't solve that problem. It may be
3195 worthwhile just to solve this one, however. */
3196 if (lwp->bp_reinsert != 0)
3197 {
3198 if (debug_threads)
3199 fprintf (stderr, " pending reinsert at 0x%s\n",
3200 paddress (lwp->bp_reinsert));
3201
3202 if (can_hardware_single_step ())
3203 {
3204 if (fast_tp_collecting == 0)
3205 {
3206 if (step == 0)
3207 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3208 if (lwp->suspended)
3209 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3210 lwp->suspended);
3211 }
3212
3213 step = 1;
3214 }
3215
3216 /* Postpone any pending signal. It was enqueued above. */
3217 signal = 0;
3218 }
3219
3220 if (fast_tp_collecting == 1)
3221 {
3222 if (debug_threads)
3223 fprintf (stderr, "\
3224 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3225 lwpid_of (lwp));
3226
3227 /* Postpone any pending signal. It was enqueued above. */
3228 signal = 0;
3229 }
3230 else if (fast_tp_collecting == 2)
3231 {
3232 if (debug_threads)
3233 fprintf (stderr, "\
3234 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3235 lwpid_of (lwp));
3236
3237 if (can_hardware_single_step ())
3238 step = 1;
3239 else
3240 fatal ("moving out of jump pad single-stepping"
3241 " not implemented on this target");
3242
3243 /* Postpone any pending signal. It was enqueued above. */
3244 signal = 0;
3245 }
3246
3247 /* If we have while-stepping actions in this thread, set it stepping.
3248 If we have a signal to deliver, it may or may not be set to
3249 SIG_IGN; we don't know. Assume so, and allow collecting
3250 while-stepping into a signal handler. A possible smart thing to
3251 do would be to set an internal breakpoint at the signal return
3252 address, continue, and carry on catching this while-stepping
3253 action only when that breakpoint is hit. A future
3254 enhancement. */
3255 if (get_lwp_thread (lwp)->while_stepping != NULL
3256 && can_hardware_single_step ())
3257 {
3258 if (debug_threads)
3259 fprintf (stderr,
3260 "lwp %ld has a while-stepping action -> forcing step.\n",
3261 lwpid_of (lwp));
3262 step = 1;
3263 }
3264
3265 if (debug_threads && the_low_target.get_pc != NULL)
3266 {
3267 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3268 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3269 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3270 }
3271
3272 /* If we have pending signals, consume one unless we are trying to
3273 reinsert a breakpoint or we're trying to finish a fast tracepoint
3274 collect. */
3275 if (lwp->pending_signals != NULL
3276 && lwp->bp_reinsert == 0
3277 && fast_tp_collecting == 0)
3278 {
3279 struct pending_signals **p_sig;
3280
3281 p_sig = &lwp->pending_signals;
3282 while ((*p_sig)->prev != NULL)
3283 p_sig = &(*p_sig)->prev;
3284
3285 signal = (*p_sig)->signal;
3286 if ((*p_sig)->info.si_signo != 0)
3287 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3288 &(*p_sig)->info);
3289
3290 free (*p_sig);
3291 *p_sig = NULL;
3292 }
3293
3294 if (the_low_target.prepare_to_resume != NULL)
3295 the_low_target.prepare_to_resume (lwp);
3296
3297 regcache_invalidate_one ((struct inferior_list_entry *)
3298 get_lwp_thread (lwp));
3299 errno = 0;
3300 lwp->stopped = 0;
3301 lwp->stopped_by_watchpoint = 0;
3302 lwp->stepping = step;
3303 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3304 (PTRACE_ARG3_TYPE) 0,
3305 /* Coerce to a uintptr_t first to avoid potential gcc warning
3306 of coercing an 8 byte integer to a 4 byte pointer. */
3307 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3308
3309 current_inferior = saved_inferior;
3310 if (errno)
3311 {
3312 /* ESRCH from ptrace either means that the thread was already
3313 running (an error) or that it is gone (a race condition). If
3314 it's gone, we will get a notification the next time we wait,
3315 so we can ignore the error. We could differentiate these
3316 two, but it's tricky without waiting; the thread still exists
3317 as a zombie, so sending it signal 0 would succeed. So just
3318 ignore ESRCH. */
3319 if (errno == ESRCH)
3320 return;
3321
3322 perror_with_name ("ptrace");
3323 }
3324 }
3325
3326 struct thread_resume_array
3327 {
3328 struct thread_resume *resume;
3329 size_t n;
3330 };
3331
3332 /* This function is called once per thread. We look up the thread
3333 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3334 resume request.
3335
3336 This algorithm is O(threads * resume elements), but the number of
3337 resume elements is small (and will remain small at least until GDB
3338 supports thread suspension). */
3339 static int
3340 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3341 {
3342 struct lwp_info *lwp;
3343 struct thread_info *thread;
3344 int ndx;
3345 struct thread_resume_array *r;
3346
3347 thread = (struct thread_info *) entry;
3348 lwp = get_thread_lwp (thread);
3349 r = arg;
3350
3351 for (ndx = 0; ndx < r->n; ndx++)
3352 {
3353 ptid_t ptid = r->resume[ndx].thread;
3354 if (ptid_equal (ptid, minus_one_ptid)
3355 || ptid_equal (ptid, entry->id)
3356 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3357 of PID'. */
3358 || (ptid_get_pid (ptid) == pid_of (lwp)
3359 && (ptid_is_pid (ptid)
3360 || ptid_get_lwp (ptid) == -1)))
3361 {
3362 if (r->resume[ndx].kind == resume_stop
3363 && thread->last_resume_kind == resume_stop)
3364 {
3365 if (debug_threads)
3366 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3367 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3368 ? "stopped"
3369 : "stopping",
3370 lwpid_of (lwp));
3371
3372 continue;
3373 }
3374
3375 lwp->resume = &r->resume[ndx];
3376 thread->last_resume_kind = lwp->resume->kind;
3377
3378 /* If we had a deferred signal to report, dequeue one now.
3379 This can happen if LWP gets more than one signal while
3380 trying to get out of a jump pad. */
3381 if (lwp->stopped
3382 && !lwp->status_pending_p
3383 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3384 {
3385 lwp->status_pending_p = 1;
3386
3387 if (debug_threads)
3388 fprintf (stderr,
3389 "Dequeueing deferred signal %d for LWP %ld, "
3390 "leaving status pending.\n",
3391 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3392 }
3393
3394 return 0;
3395 }
3396 }
3397
3398 /* No resume action for this thread. */
3399 lwp->resume = NULL;
3400
3401 return 0;
3402 }
3403
3404
3405 /* Set *FLAG_P if this lwp has an interesting status pending. */
3406 static int
3407 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3408 {
3409 struct lwp_info *lwp = (struct lwp_info *) entry;
3410
3411 /* LWPs which will not be resumed are not interesting, because
3412 we might not wait for them next time through linux_wait. */
3413 if (lwp->resume == NULL)
3414 return 0;
3415
3416 if (lwp->status_pending_p)
3417 * (int *) flag_p = 1;
3418
3419 return 0;
3420 }
3421
3422 /* Return 1 if this lwp that GDB wants running is stopped at an
3423 internal breakpoint that we need to step over. It assumes that any
3424 required STOP_PC adjustment has already been propagated to the
3425 inferior's regcache. */
3426
3427 static int
3428 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3429 {
3430 struct lwp_info *lwp = (struct lwp_info *) entry;
3431 struct thread_info *thread;
3432 struct thread_info *saved_inferior;
3433 CORE_ADDR pc;
3434
3435 /* LWPs which will not be resumed are not interesting, because we
3436 might not wait for them next time through linux_wait. */
3437
3438 if (!lwp->stopped)
3439 {
3440 if (debug_threads)
3441 fprintf (stderr,
3442 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3443 lwpid_of (lwp));
3444 return 0;
3445 }
3446
3447 thread = get_lwp_thread (lwp);
3448
3449 if (thread->last_resume_kind == resume_stop)
3450 {
3451 if (debug_threads)
3452 fprintf (stderr,
3453 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3454 lwpid_of (lwp));
3455 return 0;
3456 }
3457
3458 gdb_assert (lwp->suspended >= 0);
3459
3460 if (lwp->suspended)
3461 {
3462 if (debug_threads)
3463 fprintf (stderr,
3464 "Need step over [LWP %ld]? Ignoring, suspended\n",
3465 lwpid_of (lwp));
3466 return 0;
3467 }
3468
3469 if (!lwp->need_step_over)
3470 {
3471 if (debug_threads)
3472 fprintf (stderr,
3473 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3474 }
3475
3476 if (lwp->status_pending_p)
3477 {
3478 if (debug_threads)
3479 fprintf (stderr,
3480 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3481 lwpid_of (lwp));
3482 return 0;
3483 }
3484
3485 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3486 or we have. */
3487 pc = get_pc (lwp);
3488
3489 /* If the PC has changed since we stopped, then don't do anything,
3490 and let the breakpoint/tracepoint be hit. This happens if, for
3491 instance, GDB handled the decr_pc_after_break subtraction itself,
3492 GDB is OOL stepping this thread, or the user has issued a "jump"
3493 command, or poked the thread's registers herself. */
3494 if (pc != lwp->stop_pc)
3495 {
3496 if (debug_threads)
3497 fprintf (stderr,
3498 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3499 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3500 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3501
3502 lwp->need_step_over = 0;
3503 return 0;
3504 }
3505
3506 saved_inferior = current_inferior;
3507 current_inferior = thread;
3508
3509 /* We can only step over breakpoints we know about. */
3510 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3511 {
3512 /* Don't step over a breakpoint that GDB expects to hit
3513 though. If the condition is being evaluated on the target's side
3514 and it evaluates to false, step over this breakpoint as well. */
3515 if (gdb_breakpoint_here (pc)
3516 && gdb_condition_true_at_breakpoint (pc)
3517 && gdb_no_commands_at_breakpoint (pc))
3518 {
3519 if (debug_threads)
3520 fprintf (stderr,
3521 "Need step over [LWP %ld]? yes, but found"
3522 " GDB breakpoint at 0x%s; skipping step over\n",
3523 lwpid_of (lwp), paddress (pc));
3524
3525 current_inferior = saved_inferior;
3526 return 0;
3527 }
3528 else
3529 {
3530 if (debug_threads)
3531 fprintf (stderr,
3532 "Need step over [LWP %ld]? yes, "
3533 "found breakpoint at 0x%s\n",
3534 lwpid_of (lwp), paddress (pc));
3535
3536 /* We've found an lwp that needs stepping over --- return 1 so
3537 that find_inferior stops looking. */
3538 current_inferior = saved_inferior;
3539
3540 /* If the step over is cancelled, this is set again. */
3541 lwp->need_step_over = 0;
3542 return 1;
3543 }
3544 }
3545
3546 current_inferior = saved_inferior;
3547
3548 if (debug_threads)
3549 fprintf (stderr,
3550 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3551 lwpid_of (lwp), paddress (pc));
3552
3553 return 0;
3554 }
3555
3556 /* Start a step-over operation on LWP. When LWP is stopped at a
3557 breakpoint, to make progress, we need to move the breakpoint out
3558 of the way. If we let other threads run while we do that, they may
3559 pass by the breakpoint location and miss hitting it. To avoid
3560 that, a step-over momentarily stops all threads while LWP is
3561 single-stepped with the breakpoint temporarily uninserted from
3562 the inferior. When the single-step finishes, we reinsert the
3563 breakpoint, and let all threads that are supposed to be running
3564 run again.
3565
3566 On targets that don't support hardware single-step, we don't
3567 currently support full software single-stepping. Instead, we only
3568 support stepping over the thread event breakpoint, by asking the
3569 low target where to place a reinsert breakpoint. Since this
3570 routine assumes the breakpoint being stepped over is a thread event
3571 breakpoint, it usually assumes the return address of the current
3572 function is a good enough place to set the reinsert breakpoint. */
3573
3574 static int
3575 start_step_over (struct lwp_info *lwp)
3576 {
3577 struct thread_info *saved_inferior;
3578 CORE_ADDR pc;
3579 int step;
3580
3581 if (debug_threads)
3582 fprintf (stderr,
3583 "Starting step-over on LWP %ld. Stopping all threads\n",
3584 lwpid_of (lwp));
3585
3586 stop_all_lwps (1, lwp);
3587 gdb_assert (lwp->suspended == 0);
3588
3589 if (debug_threads)
3590 fprintf (stderr, "Done stopping all threads for step-over.\n");
3591
3592 /* Note, we should always reach here with an already adjusted PC,
3593 either by GDB (if we're resuming due to GDB's request), or by our
3594 caller, if we just finished handling an internal breakpoint GDB
3595 shouldn't care about. */
3596 pc = get_pc (lwp);
3597
3598 saved_inferior = current_inferior;
3599 current_inferior = get_lwp_thread (lwp);
3600
3601 lwp->bp_reinsert = pc;
3602 uninsert_breakpoints_at (pc);
3603 uninsert_fast_tracepoint_jumps_at (pc);
3604
3605 if (can_hardware_single_step ())
3606 {
3607 step = 1;
3608 }
3609 else
3610 {
3611 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3612 set_reinsert_breakpoint (raddr);
3613 step = 0;
3614 }
3615
3616 current_inferior = saved_inferior;
3617
3618 linux_resume_one_lwp (lwp, step, 0, NULL);
3619
3620 /* Require next event from this LWP. */
3621 step_over_bkpt = lwp->head.id;
3622 return 1;
3623 }
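
The step-over above relies on the hardware single-step primitive: PTRACE_SINGLESTEP runs exactly one instruction and reports a SIGTRAP stop, which is the point where gdbserver reinserts the breakpoint. A minimal standalone sketch of that primitive on targets that support it (e.g. x86 Linux); it merely steps a few instructions of a stopped child and is not gdbserver code.

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      raise (SIGSTOP);          /* Let the tracer take control.  */
      _exit (0);
    }

  int status;
  waitpid (child, &status, 0);  /* Initial SIGSTOP.  */

  /* Step a handful of instructions; each step ends in a SIGTRAP stop,
     which is where a step-over would reinsert the breakpoint.  */
  for (int i = 0; i < 5 && WIFSTOPPED (status); i++)
    {
      ptrace (PTRACE_SINGLESTEP, child, NULL, NULL);
      waitpid (child, &status, 0);
      if (WIFSTOPPED (status))
        printf ("step %d: stopped by signal %d\n", i + 1, WSTOPSIG (status));
    }

  kill (child, SIGKILL);        /* Clean up the demo child.  */
  waitpid (child, &status, 0);
  return 0;
}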
3624
3625 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3626 start_step_over, if still there, and delete any reinsert
3627 breakpoints we've set, on non hardware single-step targets. */
3628
3629 static int
3630 finish_step_over (struct lwp_info *lwp)
3631 {
3632 if (lwp->bp_reinsert != 0)
3633 {
3634 if (debug_threads)
3635 fprintf (stderr, "Finished step over.\n");
3636
3637 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3638 may be no breakpoint to reinsert there by now. */
3639 reinsert_breakpoints_at (lwp->bp_reinsert);
3640 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3641
3642 lwp->bp_reinsert = 0;
3643
3644 /* Delete any software-single-step reinsert breakpoints. No
3645 longer needed. We don't have to worry about other threads
3646 hitting this trap, and later not being able to explain it,
3647 because we were stepping over a breakpoint, and we hold all
3648 threads but LWP stopped while doing that. */
3649 if (!can_hardware_single_step ())
3650 delete_reinsert_breakpoints ();
3651
3652 step_over_bkpt = null_ptid;
3653 return 1;
3654 }
3655 else
3656 return 0;
3657 }
3658
3659 /* This function is called once per thread. We check the thread's resume
3660 request, which will tell us whether to resume, step, or leave the thread
3661 stopped; and what signal, if any, it should be sent.
3662
3663 For threads which we aren't explicitly told otherwise, we preserve
3664 the stepping flag; this is used for stepping over gdbserver-placed
3665 breakpoints.
3666
3667 If pending_flags was set in any thread, we queue any needed
3668 signals, since we won't actually resume. We already have a pending
3669 event to report, so we don't need to preserve any step requests;
3670 they should be re-issued if necessary. */
3671
3672 static int
3673 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3674 {
3675 struct lwp_info *lwp;
3676 struct thread_info *thread;
3677 int step;
3678 int leave_all_stopped = * (int *) arg;
3679 int leave_pending;
3680
3681 thread = (struct thread_info *) entry;
3682 lwp = get_thread_lwp (thread);
3683
3684 if (lwp->resume == NULL)
3685 return 0;
3686
3687 if (lwp->resume->kind == resume_stop)
3688 {
3689 if (debug_threads)
3690 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3691
3692 if (!lwp->stopped)
3693 {
3694 if (debug_threads)
3695 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3696
3697 /* Stop the thread, and wait for the event asynchronously,
3698 through the event loop. */
3699 send_sigstop (lwp);
3700 }
3701 else
3702 {
3703 if (debug_threads)
3704 fprintf (stderr, "already stopped LWP %ld\n",
3705 lwpid_of (lwp));
3706
3707 /* The LWP may have been stopped in an internal event that
3708 was not meant to be notified back to GDB (e.g., gdbserver
3709 breakpoint), so we should be reporting a stop event in
3710 this case too. */
3711
3712 /* If the thread already has a pending SIGSTOP, this is a
3713 no-op. Otherwise, something later will presumably resume
3714 the thread and this will cause it to cancel any pending
3715 operation, due to last_resume_kind == resume_stop. If
3716 the thread already has a pending status to report, we
3717 will still report it the next time we wait - see
3718 status_pending_p_callback. */
3719
3720 /* If we already have a pending signal to report, then
3721 there's no need to queue a SIGSTOP, as this means we're
3722 midway through moving the LWP out of the jumppad, and we
3723 will report the pending signal as soon as that is
3724 finished. */
3725 if (lwp->pending_signals_to_report == NULL)
3726 send_sigstop (lwp);
3727 }
3728
3729 /* For stop requests, we're done. */
3730 lwp->resume = NULL;
3731 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3732 return 0;
3733 }
3734
3735 /* If this thread which is about to be resumed has a pending status,
3736 then don't resume any threads - we can just report the pending
3737 status. Make sure to queue any signals that would otherwise be
3738 sent. In all-stop mode, we make this decision based on whether *any*
3739 thread has a pending status. If there's a thread that needs the
3740 step-over-breakpoint dance, then don't resume any other thread
3741 but that particular one. */
3742 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3743
3744 if (!leave_pending)
3745 {
3746 if (debug_threads)
3747 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3748
3749 step = (lwp->resume->kind == resume_step);
3750 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3751 }
3752 else
3753 {
3754 if (debug_threads)
3755 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3756
3757 /* If we have a new signal, enqueue the signal. */
3758 if (lwp->resume->sig != 0)
3759 {
3760 struct pending_signals *p_sig;
3761 p_sig = xmalloc (sizeof (*p_sig));
3762 p_sig->prev = lwp->pending_signals;
3763 p_sig->signal = lwp->resume->sig;
3764 memset (&p_sig->info, 0, sizeof (siginfo_t));
3765
3766 /* If this is the same signal we were previously stopped by,
3767 make sure to queue its siginfo. We can ignore the return
3768 value of ptrace; if it fails, we'll skip
3769 PTRACE_SETSIGINFO. */
3770 if (WIFSTOPPED (lwp->last_status)
3771 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3772 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3773 &p_sig->info);
3774
3775 lwp->pending_signals = p_sig;
3776 }
3777 }
3778
3779 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3780 lwp->resume = NULL;
3781 return 0;
3782 }
3783
3784 static void
3785 linux_resume (struct thread_resume *resume_info, size_t n)
3786 {
3787 struct thread_resume_array array = { resume_info, n };
3788 struct lwp_info *need_step_over = NULL;
3789 int any_pending;
3790 int leave_all_stopped;
3791
3792 find_inferior (&all_threads, linux_set_resume_request, &array);
3793
3794 /* If there is a thread which would otherwise be resumed, which has
3795 a pending status, then don't resume any threads - we can just
3796 report the pending status. Make sure to queue any signals that
3797 would otherwise be sent. In non-stop mode, we'll apply this
3798 logic to each thread individually. We consume all pending events
3799 before considering starting a step-over (in all-stop). */
3800 any_pending = 0;
3801 if (!non_stop)
3802 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3803
3804 /* If there is a thread which would otherwise be resumed, which is
3805 stopped at a breakpoint that needs stepping over, then don't
3806 resume any threads - have it step over the breakpoint with all
3807 other threads stopped, then resume all threads again. Make sure
3808 to queue any signals that would otherwise be delivered or
3809 queued. */
3810 if (!any_pending && supports_breakpoints ())
3811 need_step_over
3812 = (struct lwp_info *) find_inferior (&all_lwps,
3813 need_step_over_p, NULL);
3814
3815 leave_all_stopped = (need_step_over != NULL || any_pending);
3816
3817 if (debug_threads)
3818 {
3819 if (need_step_over != NULL)
3820 fprintf (stderr, "Not resuming all, need step over\n");
3821 else if (any_pending)
3822 fprintf (stderr,
3823 "Not resuming, all-stop and found "
3824 "an LWP with pending status\n");
3825 else
3826 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3827 }
3828
3829 /* Even if we're leaving threads stopped, queue all signals we'd
3830 otherwise deliver. */
3831 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3832
3833 if (need_step_over)
3834 start_step_over (need_step_over);
3835 }
3836
3837 /* This function is called once per thread. We check the thread's
3838 last resume request, which will tell us whether to resume, step, or
3839 leave the thread stopped. Any signal the client requested to be
3840 delivered has already been enqueued at this point.
3841
3842 If any thread that GDB wants running is stopped at an internal
3843 breakpoint that needs stepping over, we start a step-over operation
3844 on that particular thread, and leave all others stopped. */
3845
3846 static int
3847 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3848 {
3849 struct lwp_info *lwp = (struct lwp_info *) entry;
3850 struct thread_info *thread;
3851 int step;
3852
3853 if (lwp == except)
3854 return 0;
3855
3856 if (debug_threads)
3857 fprintf (stderr,
3858 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3859
3860 if (!lwp->stopped)
3861 {
3862 if (debug_threads)
3863 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3864 return 0;
3865 }
3866
3867 thread = get_lwp_thread (lwp);
3868
3869 if (thread->last_resume_kind == resume_stop
3870 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3871 {
3872 if (debug_threads)
3873 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3874 lwpid_of (lwp));
3875 return 0;
3876 }
3877
3878 if (lwp->status_pending_p)
3879 {
3880 if (debug_threads)
3881 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3882 lwpid_of (lwp));
3883 return 0;
3884 }
3885
3886 gdb_assert (lwp->suspended >= 0);
3887
3888 if (lwp->suspended)
3889 {
3890 if (debug_threads)
3891 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3892 return 0;
3893 }
3894
3895 if (thread->last_resume_kind == resume_stop
3896 && lwp->pending_signals_to_report == NULL
3897 && lwp->collecting_fast_tracepoint == 0)
3898 {
3899 /* We haven't reported this LWP as stopped yet (otherwise, the
3900 last_status.kind check above would catch it, and we wouldn't
3901 reach here). This LWP may have been momentarily paused by a
3902 stop_all_lwps call while handling, for example, another LWP's
3903 step-over. In that case, the pending expected SIGSTOP signal
3904 that was queued at vCont;t handling time will have already
3905 been consumed by wait_for_sigstop, and so we need to requeue
3906 another one here. Note that if the LWP already has a SIGSTOP
3907 pending, this is a no-op. */
3908
3909 if (debug_threads)
3910 fprintf (stderr,
3911 "Client wants LWP %ld to stop. "
3912 "Making sure it has a SIGSTOP pending\n",
3913 lwpid_of (lwp));
3914
3915 send_sigstop (lwp);
3916 }
3917
3918 step = thread->last_resume_kind == resume_step;
3919 linux_resume_one_lwp (lwp, step, 0, NULL);
3920 return 0;
3921 }
3922
3923 static int
3924 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3925 {
3926 struct lwp_info *lwp = (struct lwp_info *) entry;
3927
3928 if (lwp == except)
3929 return 0;
3930
3931 lwp->suspended--;
3932 gdb_assert (lwp->suspended >= 0);
3933
3934 return proceed_one_lwp (entry, except);
3935 }
3936
3937 /* When we finish a step-over, set threads running again. If there's
3938 another thread that may need a step-over, now's the time to start
3939 it. Eventually, we'll move all threads past their breakpoints. */
3940
3941 static void
3942 proceed_all_lwps (void)
3943 {
3944 struct lwp_info *need_step_over;
3945
3946 /* If there is a thread which would otherwise be resumed, which is
3947 stopped at a breakpoint that needs stepping over, then don't
3948 resume any threads - have it step over the breakpoint with all
3949 other threads stopped, then resume all threads again. */
3950
3951 if (supports_breakpoints ())
3952 {
3953 need_step_over
3954 = (struct lwp_info *) find_inferior (&all_lwps,
3955 need_step_over_p, NULL);
3956
3957 if (need_step_over != NULL)
3958 {
3959 if (debug_threads)
3960 fprintf (stderr, "proceed_all_lwps: found "
3961 "thread %ld needing a step-over\n",
3962 lwpid_of (need_step_over));
3963
3964 start_step_over (need_step_over);
3965 return;
3966 }
3967 }
3968
3969 if (debug_threads)
3970 fprintf (stderr, "Proceeding, no step-over needed\n");
3971
3972 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3973 }
3974
3975 /* Stopped LWPs that the client wanted to be running and that have no
3976 pending statuses are set to run again, except for EXCEPT (if not
3977 NULL). This undoes a stop_all_lwps call. */
3978
3979 static void
3980 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3981 {
3982 if (debug_threads)
3983 {
3984 if (except)
3985 fprintf (stderr,
3986 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3987 else
3988 fprintf (stderr,
3989 "unstopping all lwps\n");
3990 }
3991
3992 if (unsuspend)
3993 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3994 else
3995 find_inferior (&all_lwps, proceed_one_lwp, except);
3996 }
3997
3998
3999 #ifdef HAVE_LINUX_REGSETS
4000
4001 #define use_linux_regsets 1
4002
4003 static int
4004 regsets_fetch_inferior_registers (struct regcache *regcache)
4005 {
4006 struct regset_info *regset;
4007 int saw_general_regs = 0;
4008 int pid;
4009 struct iovec iov;
4010
4011 regset = target_regsets;
4012
4013 pid = lwpid_of (get_thread_lwp (current_inferior));
4014 while (regset->size >= 0)
4015 {
4016 void *buf, *data;
4017 int nt_type, res;
4018
4019 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4020 {
4021 regset ++;
4022 continue;
4023 }
4024
4025 buf = xmalloc (regset->size);
4026
4027 nt_type = regset->nt_type;
4028 if (nt_type)
4029 {
4030 iov.iov_base = buf;
4031 iov.iov_len = regset->size;
4032 data = (void *) &iov;
4033 }
4034 else
4035 data = buf;
4036
4037 #ifndef __sparc__
4038 res = ptrace (regset->get_request, pid,
4039 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4040 #else
4041 res = ptrace (regset->get_request, pid, data, nt_type);
4042 #endif
4043 if (res < 0)
4044 {
4045 if (errno == EIO)
4046 {
4047 /* If we get EIO on a regset, do not try it again for
4048 this process. */
4049 disabled_regsets[regset - target_regsets] = 1;
4050 free (buf);
4051 continue;
4052 }
4053 else
4054 {
4055 char s[256];
4056 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4057 pid);
4058 perror (s);
4059 }
4060 }
4061 else if (regset->type == GENERAL_REGS)
4062 saw_general_regs = 1;
4063 regset->store_function (regcache, buf);
4064 regset ++;
4065 free (buf);
4066 }
4067 if (saw_general_regs)
4068 return 0;
4069 else
4070 return 1;
4071 }
4072
4073 static int
4074 regsets_store_inferior_registers (struct regcache *regcache)
4075 {
4076 struct regset_info *regset;
4077 int saw_general_regs = 0;
4078 int pid;
4079 struct iovec iov;
4080
4081 regset = target_regsets;
4082
4083 pid = lwpid_of (get_thread_lwp (current_inferior));
4084 while (regset->size >= 0)
4085 {
4086 void *buf, *data;
4087 int nt_type, res;
4088
4089 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4090 {
4091 regset ++;
4092 continue;
4093 }
4094
4095 buf = xmalloc (regset->size);
4096
4097 /* First fill the buffer with the current register set contents,
4098 in case there are any items in the kernel's regset that are
4099 not in gdbserver's regcache. */
4100
4101 nt_type = regset->nt_type;
4102 if (nt_type)
4103 {
4104 iov.iov_base = buf;
4105 iov.iov_len = regset->size;
4106 data = (void *) &iov;
4107 }
4108 else
4109 data = buf;
4110
4111 #ifndef __sparc__
4112 res = ptrace (regset->get_request, pid,
4113 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4114 #else
4115 res = ptrace (regset->get_request, pid, data, nt_type);
4116 #endif
4117
4118 if (res == 0)
4119 {
4120 /* Then overlay our cached registers on that. */
4121 regset->fill_function (regcache, buf);
4122
4123 /* Only now do we write the register set. */
4124 #ifndef __sparc__
4125 res = ptrace (regset->set_request, pid,
4126 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4127 #else
4128 res = ptrace (regset->set_request, pid, data, nt_type);
4129 #endif
4130 }
4131
4132 if (res < 0)
4133 {
4134 if (errno == EIO)
4135 {
4136 /* If we get EIO on a regset, do not try it again for
4137 this process. */
4138 disabled_regsets[regset - target_regsets] = 1;
4139 free (buf);
4140 continue;
4141 }
4142 else if (errno == ESRCH)
4143 {
4144 /* At this point, ESRCH should mean the process is
4145 already gone, in which case we simply ignore attempts
4146 to change its registers. See also the related
4147 comment in linux_resume_one_lwp. */
4148 free (buf);
4149 return 0;
4150 }
4151 else
4152 {
4153 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4154 }
4155 }
4156 else if (regset->type == GENERAL_REGS)
4157 saw_general_regs = 1;
4158 regset ++;
4159 free (buf);
4160 }
4161 if (saw_general_regs)
4162 return 0;
4163 else
4164 return 1;
4165 }
4166
4167 #else /* !HAVE_LINUX_REGSETS */
4168
4169 #define use_linux_regsets 0
4170 #define regsets_fetch_inferior_registers(regcache) 1
4171 #define regsets_store_inferior_registers(regcache) 1
4172
4173 #endif
4174
4175 /* Return 1 if register REGNO is supported by one of the regset ptrace
4176 calls or 0 if it has to be transferred individually. */
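/* Illustrative example: with REGNO == 10, INDEX is 10 / 8 == 1 and MASK
   is 1 << (10 % 8) == 0x04, so bit 2 of regset_bitmap[1] decides the
   answer; a NULL regset_bitmap means every register is covered by some
   regset.  */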
4177
4178 static int
4179 linux_register_in_regsets (int regno)
4180 {
4181 unsigned char mask = 1 << (regno % 8);
4182 size_t index = regno / 8;
4183
4184 return (use_linux_regsets
4185 && (the_low_target.regset_bitmap == NULL
4186 || (the_low_target.regset_bitmap[index] & mask) != 0));
4187 }
4188
4189 #ifdef HAVE_LINUX_USRREGS
4190
4191 int
4192 register_addr (int regnum)
4193 {
4194 int addr;
4195
4196 if (regnum < 0 || regnum >= the_low_target.num_regs)
4197 error ("Invalid register number %d.", regnum);
4198
4199 addr = the_low_target.regmap[regnum];
4200
4201 return addr;
4202 }
4203
4204 /* Fetch one register. */
4205 static void
4206 fetch_register (struct regcache *regcache, int regno)
4207 {
4208 CORE_ADDR regaddr;
4209 int i, size;
4210 char *buf;
4211 int pid;
4212
4213 if (regno >= the_low_target.num_regs)
4214 return;
4215 if ((*the_low_target.cannot_fetch_register) (regno))
4216 return;
4217
4218 regaddr = register_addr (regno);
4219 if (regaddr == -1)
4220 return;
4221
4222 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4223 & -sizeof (PTRACE_XFER_TYPE));
4224 buf = alloca (size);
4225
4226 pid = lwpid_of (get_thread_lwp (current_inferior));
4227 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4228 {
4229 errno = 0;
4230 *(PTRACE_XFER_TYPE *) (buf + i) =
4231 ptrace (PTRACE_PEEKUSER, pid,
4232 /* Coerce to a uintptr_t first to avoid potential gcc warning
4233 of coercing an 8 byte integer to a 4 byte pointer. */
4234 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, (PTRACE_ARG4_TYPE) 0);
4235 regaddr += sizeof (PTRACE_XFER_TYPE);
4236 if (errno != 0)
4237 error ("reading register %d: %s", regno, strerror (errno));
4238 }
4239
4240 if (the_low_target.supply_ptrace_register)
4241 the_low_target.supply_ptrace_register (regcache, regno, buf);
4242 else
4243 supply_register (regcache, regno, buf);
4244 }
4245
4246 /* Store one register. */
4247 static void
4248 store_register (struct regcache *regcache, int regno)
4249 {
4250 CORE_ADDR regaddr;
4251 int i, size;
4252 char *buf;
4253 int pid;
4254
4255 if (regno >= the_low_target.num_regs)
4256 return;
4257 if ((*the_low_target.cannot_store_register) (regno))
4258 return;
4259
4260 regaddr = register_addr (regno);
4261 if (regaddr == -1)
4262 return;
4263
4264 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4265 & -sizeof (PTRACE_XFER_TYPE));
4266 buf = alloca (size);
4267 memset (buf, 0, size);
4268
4269 if (the_low_target.collect_ptrace_register)
4270 the_low_target.collect_ptrace_register (regcache, regno, buf);
4271 else
4272 collect_register (regcache, regno, buf);
4273
4274 pid = lwpid_of (get_thread_lwp (current_inferior));
4275 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4276 {
4277 errno = 0;
4278 ptrace (PTRACE_POKEUSER, pid,
4279 /* Coerce to a uintptr_t first to avoid potential gcc warning
4280 about coercing an 8 byte integer to a 4 byte pointer. */
4281 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4282 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4283 if (errno != 0)
4284 {
4285 /* At this point, ESRCH should mean the process is
4286 already gone, in which case we simply ignore attempts
4287 to change its registers. See also the related
4288 comment in linux_resume_one_lwp. */
4289 if (errno == ESRCH)
4290 return;
4291
4292 if ((*the_low_target.cannot_store_register) (regno) == 0)
4293 error ("writing register %d: %s", regno, strerror (errno));
4294 }
4295 regaddr += sizeof (PTRACE_XFER_TYPE);
4296 }
4297 }
4298
4299 /* Fetch all registers, or just one, from the child process.
4300 If REGNO is -1, do this for all registers, skipping any that are
4301 assumed to have been retrieved by regsets_fetch_inferior_registers,
4302 unless ALL is non-zero.
4303 Otherwise, REGNO specifies which register (so we can save time). */
4304 static void
4305 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4306 {
4307 if (regno == -1)
4308 {
4309 for (regno = 0; regno < the_low_target.num_regs; regno++)
4310 if (all || !linux_register_in_regsets (regno))
4311 fetch_register (regcache, regno);
4312 }
4313 else
4314 fetch_register (regcache, regno);
4315 }
4316
4317 /* Store our register values back into the inferior.
4318 If REGNO is -1, do this for all registers, skipping any that are
4319 assumed to have been saved by regsets_store_inferior_registers,
4320 unless ALL is non-zero.
4321 Otherwise, REGNO specifies which register (so we can save time). */
4322 static void
4323 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4324 {
4325 if (regno == -1)
4326 {
4327 for (regno = 0; regno < the_low_target.num_regs; regno++)
4328 if (all || !linux_register_in_regsets (regno))
4329 store_register (regcache, regno);
4330 }
4331 else
4332 store_register (regcache, regno);
4333 }
4334
4335 #else /* !HAVE_LINUX_USRREGS */
4336
4337 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4338 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4339
4340 #endif
4341
4342
4343 void
4344 linux_fetch_registers (struct regcache *regcache, int regno)
4345 {
4346 int use_regsets;
4347 int all = 0;
4348
4349 if (regno == -1)
4350 {
4351 if (the_low_target.fetch_register != NULL)
4352 for (regno = 0; regno < the_low_target.num_regs; regno++)
4353 (*the_low_target.fetch_register) (regcache, regno);
4354
4355 all = regsets_fetch_inferior_registers (regcache);
4356 usr_fetch_inferior_registers (regcache, -1, all);
4357 }
4358 else
4359 {
4360 if (the_low_target.fetch_register != NULL
4361 && (*the_low_target.fetch_register) (regcache, regno))
4362 return;
4363
4364 use_regsets = linux_register_in_regsets (regno);
4365 if (use_regsets)
4366 all = regsets_fetch_inferior_registers (regcache);
4367 if (!use_regsets || all)
4368 usr_fetch_inferior_registers (regcache, regno, 1);
4369 }
4370 }
4371
4372 void
4373 linux_store_registers (struct regcache *regcache, int regno)
4374 {
4375 int use_regsets;
4376 int all = 0;
4377
4378 if (regno == -1)
4379 {
4380 all = regsets_store_inferior_registers (regcache);
4381 usr_store_inferior_registers (regcache, regno, all);
4382 }
4383 else
4384 {
4385 use_regsets = linux_register_in_regsets (regno);
4386 if (use_regsets)
4387 all = regsets_store_inferior_registers (regcache);
4388 if (!use_regsets || all)
4389 usr_store_inferior_registers (regcache, regno, 1);
4390 }
4391 }
4392
4393
4394 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4395 to debugger memory starting at MYADDR. */
4396
4397 static int
4398 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4399 {
4400 int pid = lwpid_of (get_thread_lwp (current_inferior));
4401 register PTRACE_XFER_TYPE *buffer;
4402 register CORE_ADDR addr;
4403 register int count;
4404 char filename[64];
4405 register int i;
4406 int ret;
4407 int fd;
4408
4409 /* Try using /proc. Don't bother for one word. */
4410 if (len >= 3 * sizeof (long))
4411 {
4412 int bytes;
4413
4414 /* We could keep this file open and cache it - possibly one per
4415 thread. That requires some juggling, but is even faster. */
4416 sprintf (filename, "/proc/%d/mem", pid);
4417 fd = open (filename, O_RDONLY | O_LARGEFILE);
4418 if (fd == -1)
4419 goto no_proc;
4420
4421 /* If pread64 is available, use it. It's faster if the kernel
4422 supports it (only one syscall), and it's 64-bit safe even on
4423 32-bit platforms (for instance, SPARC debugging a SPARC64
4424 application). */
4425 #ifdef HAVE_PREAD64
4426 bytes = pread64 (fd, myaddr, len, memaddr);
4427 #else
4428 bytes = -1;
4429 if (lseek (fd, memaddr, SEEK_SET) != -1)
4430 bytes = read (fd, myaddr, len);
4431 #endif
4432
4433 close (fd);
4434 if (bytes == len)
4435 return 0;
4436
4437 /* Some data was read, we'll try to get the rest with ptrace. */
4438 if (bytes > 0)
4439 {
4440 memaddr += bytes;
4441 myaddr += bytes;
4442 len -= bytes;
4443 }
4444 }
4445
4446 no_proc:
4447 /* Round starting address down to longword boundary. */
4448 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4449 /* Round ending address up; get number of longwords that makes. */
4450 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4451 / sizeof (PTRACE_XFER_TYPE));
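/* Illustrative example: with an 8-byte PTRACE_XFER_TYPE, MEMADDR == 0x1003
   and LEN == 6 give ADDR == 0x1000 and COUNT == ((0x1009 - 0x1000) + 7) / 8
   == 2; two whole words are peeked, and the six requested bytes are copied
   out below starting at offset 0x1003 & 7 == 3.  */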
4452 /* Allocate buffer of that many longwords. */
4453 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4454
4455 /* Read all the longwords */
4456 errno = 0;
4457 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4458 {
4459 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4460 about coercing an 8 byte integer to a 4 byte pointer. */
4461 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4462 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4463 (PTRACE_ARG4_TYPE) 0);
4464 if (errno)
4465 break;
4466 }
4467 ret = errno;
4468
4469 /* Copy appropriate bytes out of the buffer. */
4470 if (i > 0)
4471 {
4472 i *= sizeof (PTRACE_XFER_TYPE);
4473 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4474 memcpy (myaddr,
4475 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4476 i < len ? i : len);
4477 }
4478
4479 return ret;
4480 }
4481
4482 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4483 memory at MEMADDR. On failure (cannot write to the inferior)
4484 returns the value of errno. Always succeeds if LEN is zero. */
4485
4486 static int
4487 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4488 {
4489 register int i;
4490 /* Round starting address down to longword boundary. */
4491 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4492 /* Round ending address up; get number of longwords that makes. */
4493 register int count
4494 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4495 / sizeof (PTRACE_XFER_TYPE);
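/* As in linux_read_memory, ADDR/COUNT describe the word-aligned window
   covering [MEMADDR, MEMADDR + LEN).  Only the first and last words of
   that window can contain bytes outside the requested range, which is
   why only buffer[0] and buffer[count - 1] are pre-read below before
   the whole buffer is poked back.  */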
4496
4497 /* Allocate buffer of that many longwords. */
4498 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4499 alloca (count * sizeof (PTRACE_XFER_TYPE));
4500
4501 int pid = lwpid_of (get_thread_lwp (current_inferior));
4502
4503 if (len == 0)
4504 {
4505 /* Zero length write always succeeds. */
4506 return 0;
4507 }
4508
4509 if (debug_threads)
4510 {
4511 /* Dump up to four bytes. */
4512 unsigned int val = * (unsigned int *) myaddr;
4513 if (len == 1)
4514 val = val & 0xff;
4515 else if (len == 2)
4516 val = val & 0xffff;
4517 else if (len == 3)
4518 val = val & 0xffffff;
4519 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4520 val, (long)memaddr);
4521 }
4522
4523 /* Fill start and end extra bytes of buffer with existing memory data. */
4524
4525 errno = 0;
4526 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4527 about coercing an 8 byte integer to a 4 byte pointer. */
4528 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4529 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4530 (PTRACE_ARG4_TYPE) 0);
4531 if (errno)
4532 return errno;
4533
4534 if (count > 1)
4535 {
4536 errno = 0;
4537 buffer[count - 1]
4538 = ptrace (PTRACE_PEEKTEXT, pid,
4539 /* Coerce to a uintptr_t first to avoid potential gcc warning
4540 about coercing an 8 byte integer to a 4 byte pointer. */
4541 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4542 * sizeof (PTRACE_XFER_TYPE)),
4543 (PTRACE_ARG4_TYPE) 0);
4544 if (errno)
4545 return errno;
4546 }
4547
4548 /* Copy data to be written over corresponding part of buffer. */
4549
4550 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4551 myaddr, len);
4552
4553 /* Write the entire buffer. */
4554
4555 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4556 {
4557 errno = 0;
4558 ptrace (PTRACE_POKETEXT, pid,
4559 /* Coerce to a uintptr_t first to avoid potential gcc warning
4560 about coercing an 8 byte integer to a 4 byte pointer. */
4561 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4562 (PTRACE_ARG4_TYPE) buffer[i]);
4563 if (errno)
4564 return errno;
4565 }
4566
4567 return 0;
4568 }
4569
4570 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4571 static int linux_supports_tracefork_flag;
4572
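/* Background (standard ptrace semantics, noted here for context): with
   PTRACE_O_TRACECLONE set, the parent reports a PTRACE_EVENT_CLONE stop
   whose new LWP id is available via PTRACE_GETEVENTMSG, and the new LWP
   starts out stopped under ptrace, so gdbserver can pick up new threads
   without the thread event breakpoint.  */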
4573 static void
4574 linux_enable_event_reporting (int pid)
4575 {
4576 if (!linux_supports_tracefork_flag)
4577 return;
4578
4579 ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_ARG3_TYPE) 0,
4580 (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4581 }
4582
4583 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4584
4585 static int
4586 linux_tracefork_grandchild (void *arg)
4587 {
4588 _exit (0);
4589 }
4590
4591 #define STACK_SIZE 4096
4592
4593 static int
4594 linux_tracefork_child (void *arg)
4595 {
4596 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
4597 kill (getpid (), SIGSTOP);
4598
4599 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4600
4601 if (fork () == 0)
4602 linux_tracefork_grandchild (NULL);
4603
4604 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4605
4606 #ifdef __ia64__
4607 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4608 CLONE_VM | SIGCHLD, NULL);
4609 #else
4610 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4611 CLONE_VM | SIGCHLD, NULL);
4612 #endif
4613
4614 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4615
4616 _exit (0);
4617 }
4618
4619 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4620 sure that we can enable the option, and that it had the desired
4621 effect. */
4622
4623 static void
4624 linux_test_for_tracefork (void)
4625 {
4626 int child_pid, ret, status;
4627 long second_pid;
4628 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4629 char *stack = xmalloc (STACK_SIZE * 4);
4630 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4631
4632 linux_supports_tracefork_flag = 0;
4633
4634 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4635
4636 child_pid = fork ();
4637 if (child_pid == 0)
4638 linux_tracefork_child (NULL);
4639
4640 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4641
4642 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4643 #ifdef __ia64__
4644 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4645 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4646 #else /* !__ia64__ */
4647 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4648 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4649 #endif /* !__ia64__ */
4650
4651 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4652
4653 if (child_pid == -1)
4654 perror_with_name ("clone");
4655
4656 ret = my_waitpid (child_pid, &status, 0);
4657 if (ret == -1)
4658 perror_with_name ("waitpid");
4659 else if (ret != child_pid)
4660 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4661 if (! WIFSTOPPED (status))
4662 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4663
4664 ret = ptrace (PTRACE_SETOPTIONS, child_pid, (PTRACE_ARG3_TYPE) 0,
4665 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4666 if (ret != 0)
4667 {
4668 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4669 (PTRACE_ARG4_TYPE) 0);
4670 if (ret != 0)
4671 {
4672 warning ("linux_test_for_tracefork: failed to kill child");
4673 return;
4674 }
4675
4676 ret = my_waitpid (child_pid, &status, 0);
4677 if (ret != child_pid)
4678 warning ("linux_test_for_tracefork: failed to wait for killed child");
4679 else if (!WIFSIGNALED (status))
4680 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4681 "killed child", status);
4682
4683 return;
4684 }
4685
4686 ret = ptrace (PTRACE_CONT, child_pid, (PTRACE_ARG3_TYPE) 0,
4687 (PTRACE_ARG4_TYPE) 0);
4688 if (ret != 0)
4689 warning ("linux_test_for_tracefork: failed to resume child");
4690
4691 ret = my_waitpid (child_pid, &status, 0);
4692
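/* Standard ptrace encoding: for an extended event stop, WSTOPSIG is
   SIGTRAP and the event number is stored in the high bits of the wait
   status, so status >> 16 == PTRACE_EVENT_FORK identifies the fork
   event checked below.  */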
4693 if (ret == child_pid && WIFSTOPPED (status)
4694 && status >> 16 == PTRACE_EVENT_FORK)
4695 {
4696 second_pid = 0;
4697 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, (PTRACE_ARG3_TYPE) 0,
4698 &second_pid);
4699 if (ret == 0 && second_pid != 0)
4700 {
4701 int second_status;
4702
4703 linux_supports_tracefork_flag = 1;
4704 my_waitpid (second_pid, &second_status, 0);
4705 ret = ptrace (PTRACE_KILL, second_pid, (PTRACE_ARG3_TYPE) 0,
4706 (PTRACE_ARG4_TYPE) 0);
4707 if (ret != 0)
4708 warning ("linux_test_for_tracefork: failed to kill second child");
4709 my_waitpid (second_pid, &status, 0);
4710 }
4711 }
4712 else
4713 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4714 "(%d, status 0x%x)", ret, status);
4715
4716 do
4717 {
4718 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4719 (PTRACE_ARG4_TYPE) 0);
4720 if (ret != 0)
4721 warning ("linux_test_for_tracefork: failed to kill child");
4722 my_waitpid (child_pid, &status, 0);
4723 }
4724 while (WIFSTOPPED (status));
4725
4726 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4727 free (stack);
4728 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4729 }
4730
4731
4732 static void
4733 linux_look_up_symbols (void)
4734 {
4735 #ifdef USE_THREAD_DB
4736 struct process_info *proc = current_process ();
4737
4738 if (proc->private->thread_db != NULL)
4739 return;
4740
4741 /* If the kernel supports tracing forks then it also supports tracing
4742 clones, and in that case we don't need to use the magic thread event breakpoint
4743 to learn about threads. */
4744 thread_db_init (!linux_supports_tracefork_flag);
4745 #endif
4746 }
4747
4748 static void
4749 linux_request_interrupt (void)
4750 {
4751 extern unsigned long signal_pid;
4752
4753 if (!ptid_equal (cont_thread, null_ptid)
4754 && !ptid_equal (cont_thread, minus_one_ptid))
4755 {
4756 struct lwp_info *lwp;
4757 int lwpid;
4758
4759 lwp = get_thread_lwp (current_inferior);
4760 lwpid = lwpid_of (lwp);
4761 kill_lwp (lwpid, SIGINT);
4762 }
4763 else
4764 kill_lwp (signal_pid, SIGINT);
4765 }
4766
4767 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4768 to debugger memory starting at MYADDR. */
4769
4770 static int
4771 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4772 {
4773 char filename[PATH_MAX];
4774 int fd, n;
4775 int pid = lwpid_of (get_thread_lwp (current_inferior));
4776
4777 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4778
4779 fd = open (filename, O_RDONLY);
4780 if (fd < 0)
4781 return -1;
4782
4783 if (offset != (CORE_ADDR) 0
4784 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4785 n = -1;
4786 else
4787 n = read (fd, myaddr, len);
4788
4789 close (fd);
4790
4791 return n;
4792 }
4793
4794 /* These breakpoint and watchpoint related wrapper functions simply
4795 pass on the function call if the target has registered a
4796 corresponding function. */
4797
4798 static int
4799 linux_insert_point (char type, CORE_ADDR addr, int len)
4800 {
4801 if (the_low_target.insert_point != NULL)
4802 return the_low_target.insert_point (type, addr, len);
4803 else
4804 /* Unsupported (see target.h). */
4805 return 1;
4806 }
4807
4808 static int
4809 linux_remove_point (char type, CORE_ADDR addr, int len)
4810 {
4811 if (the_low_target.remove_point != NULL)
4812 return the_low_target.remove_point (type, addr, len);
4813 else
4814 /* Unsupported (see target.h). */
4815 return 1;
4816 }
4817
4818 static int
4819 linux_stopped_by_watchpoint (void)
4820 {
4821 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4822
4823 return lwp->stopped_by_watchpoint;
4824 }
4825
4826 static CORE_ADDR
4827 linux_stopped_data_address (void)
4828 {
4829 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4830
4831 return lwp->stopped_data_address;
4832 }
4833
4834 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4835 #if ! (defined(PT_TEXT_ADDR) \
4836 || defined(PT_DATA_ADDR) \
4837 || defined(PT_TEXT_END_ADDR))
4838 #if defined(__mcoldfire__)
4839 /* These should really be defined in the kernel's ptrace.h header. */
4840 #define PT_TEXT_ADDR 49*4
4841 #define PT_DATA_ADDR 50*4
4842 #define PT_TEXT_END_ADDR 51*4
4843 #elif defined(BFIN)
4844 #define PT_TEXT_ADDR 220
4845 #define PT_TEXT_END_ADDR 224
4846 #define PT_DATA_ADDR 228
4847 #elif defined(__TMS320C6X__)
4848 #define PT_TEXT_ADDR (0x10000*4)
4849 #define PT_DATA_ADDR (0x10004*4)
4850 #define PT_TEXT_END_ADDR (0x10008*4)
4851 #endif
4852 #endif
4853
4854 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4855 to tell gdb about. */
4856
4857 static int
4858 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4859 {
4860 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4861 unsigned long text, text_end, data;
4862 int pid = lwpid_of (get_thread_lwp (current_inferior));
4863
4864 errno = 0;
4865
4866 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_ADDR,
4867 (PTRACE_ARG4_TYPE) 0);
4868 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_END_ADDR,
4869 (PTRACE_ARG4_TYPE) 0);
4870 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_DATA_ADDR,
4871 (PTRACE_ARG4_TYPE) 0);
4872
4873 if (errno == 0)
4874 {
4875 /* Both text and data offsets produced at compile-time (and so
4876 used by gdb) are relative to the beginning of the program,
4877 with the data segment immediately following the text segment.
4878 However, the actual runtime layout in memory may put the data
4879 somewhere else, so when we send gdb a data base-address, we
4880 use the real data base address and subtract the compile-time
4881 data base-address from it (which is just the length of the
4882 text segment). BSS immediately follows data in both
4883 cases. */
4884 *text_p = text;
4885 *data_p = data - (text_end - text);
4886
4887 return 1;
4888 }
4889 #endif
4890 return 0;
4891 }
4892 #endif
4893
4894 static int
4895 linux_qxfer_osdata (const char *annex,
4896 unsigned char *readbuf, unsigned const char *writebuf,
4897 CORE_ADDR offset, int len)
4898 {
4899 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4900 }
4901
4902 /* Convert a native/host siginfo object into/from the siginfo in the
4903 layout of the inferior's architecture. */
4904
4905 static void
4906 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4907 {
4908 int done = 0;
4909
4910 if (the_low_target.siginfo_fixup != NULL)
4911 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4912
4913 /* If there was no callback, or the callback didn't do anything,
4914 then just do a straight memcpy. */
4915 if (!done)
4916 {
4917 if (direction == 1)
4918 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4919 else
4920 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4921 }
4922 }
4923
4924 static int
4925 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4926 unsigned const char *writebuf, CORE_ADDR offset, int len)
4927 {
4928 int pid;
4929 siginfo_t siginfo;
4930 char inf_siginfo[sizeof (siginfo_t)];
4931
4932 if (current_inferior == NULL)
4933 return -1;
4934
4935 pid = lwpid_of (get_thread_lwp (current_inferior));
4936
4937 if (debug_threads)
4938 fprintf (stderr, "%s siginfo for lwp %d.\n",
4939 readbuf != NULL ? "Reading" : "Writing",
4940 pid);
4941
4942 if (offset >= sizeof (siginfo))
4943 return -1;
4944
4945 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4946 return -1;
4947
4948 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4949 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4950 inferior with a 64-bit GDBSERVER should look the same as debugging it
4951 with a 32-bit GDBSERVER, we need to convert it. */
4952 siginfo_fixup (&siginfo, inf_siginfo, 0);
4953
4954 if (offset + len > sizeof (siginfo))
4955 len = sizeof (siginfo) - offset;
4956
4957 if (readbuf != NULL)
4958 memcpy (readbuf, inf_siginfo + offset, len);
4959 else
4960 {
4961 memcpy (inf_siginfo + offset, writebuf, len);
4962
4963 /* Convert back to ptrace layout before flushing it out. */
4964 siginfo_fixup (&siginfo, inf_siginfo, 1);
4965
4966 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4967 return -1;
4968 }
4969
4970 return len;
4971 }
4972
4973 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4974 it lets us notice when children change state; and it acts as the
4975 handler for the sigsuspend in my_waitpid. */
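/* In async mode, async_file_mark below writes to the event pipe that
   linux_async sets up; this is the usual self-pipe technique, where the
   write wakes the select/poll-based event loop so that it eventually
   triggers a linux_wait.  */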
4976
4977 static void
4978 sigchld_handler (int signo)
4979 {
4980 int old_errno = errno;
4981
4982 if (debug_threads)
4983 {
4984 do
4985 {
4986 /* fprintf is not async-signal-safe, so call write
4987 directly. */
4988 if (write (2, "sigchld_handler\n",
4989 sizeof ("sigchld_handler\n") - 1) < 0)
4990 break; /* just ignore */
4991 } while (0);
4992 }
4993
4994 if (target_is_async_p ())
4995 async_file_mark (); /* trigger a linux_wait */
4996
4997 errno = old_errno;
4998 }
4999
5000 static int
5001 linux_supports_non_stop (void)
5002 {
5003 return 1;
5004 }
5005
5006 static int
5007 linux_async (int enable)
5008 {
5009 int previous = (linux_event_pipe[0] != -1);
5010
5011 if (debug_threads)
5012 fprintf (stderr, "linux_async (%d), previous=%d\n",
5013 enable, previous);
5014
5015 if (previous != enable)
5016 {
5017 sigset_t mask;
5018 sigemptyset (&mask);
5019 sigaddset (&mask, SIGCHLD);
5020
5021 sigprocmask (SIG_BLOCK, &mask, NULL);
5022
5023 if (enable)
5024 {
5025 if (pipe (linux_event_pipe) == -1)
5026 fatal ("creating event pipe failed.");
5027
5028 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5029 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5030
5031 /* Register the event loop handler. */
5032 add_file_handler (linux_event_pipe[0],
5033 handle_target_event, NULL);
5034
5035 /* Always trigger a linux_wait. */
5036 async_file_mark ();
5037 }
5038 else
5039 {
5040 delete_file_handler (linux_event_pipe[0]);
5041
5042 close (linux_event_pipe[0]);
5043 close (linux_event_pipe[1]);
5044 linux_event_pipe[0] = -1;
5045 linux_event_pipe[1] = -1;
5046 }
5047
5048 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5049 }
5050
5051 return previous;
5052 }
5053
5054 static int
5055 linux_start_non_stop (int nonstop)
5056 {
5057 /* Register or unregister from event-loop accordingly. */
5058 linux_async (nonstop);
5059 return 0;
5060 }
5061
5062 static int
5063 linux_supports_multi_process (void)
5064 {
5065 return 1;
5066 }
5067
5068 static int
5069 linux_supports_disable_randomization (void)
5070 {
5071 #ifdef HAVE_PERSONALITY
5072 return 1;
5073 #else
5074 return 0;
5075 #endif
5076 }
5077
5078 static int
5079 linux_supports_agent (void)
5080 {
5081 return 1;
5082 }
5083
5084 /* Enumerate spufs IDs for process PID. */
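/* Each SPU context appears as an open spufs directory among
   /proc/PID/fd; the routine packs the matching descriptor numbers into
   BUF as 4-byte values, honouring the OFFSET/LEN window, and returns
   the number of bytes written.  */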
5085 static int
5086 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5087 {
5088 int pos = 0;
5089 int written = 0;
5090 char path[128];
5091 DIR *dir;
5092 struct dirent *entry;
5093
5094 sprintf (path, "/proc/%ld/fd", pid);
5095 dir = opendir (path);
5096 if (!dir)
5097 return -1;
5098
5099 rewinddir (dir);
5100 while ((entry = readdir (dir)) != NULL)
5101 {
5102 struct stat st;
5103 struct statfs stfs;
5104 int fd;
5105
5106 fd = atoi (entry->d_name);
5107 if (!fd)
5108 continue;
5109
5110 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5111 if (stat (path, &st) != 0)
5112 continue;
5113 if (!S_ISDIR (st.st_mode))
5114 continue;
5115
5116 if (statfs (path, &stfs) != 0)
5117 continue;
5118 if (stfs.f_type != SPUFS_MAGIC)
5119 continue;
5120
5121 if (pos >= offset && pos + 4 <= offset + len)
5122 {
5123 *(unsigned int *)(buf + pos - offset) = fd;
5124 written += 4;
5125 }
5126 pos += 4;
5127 }
5128
5129 closedir (dir);
5130 return written;
5131 }
5132
5133 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5134 object type, using the /proc file system. */
5135 static int
5136 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5137 unsigned const char *writebuf,
5138 CORE_ADDR offset, int len)
5139 {
5140 long pid = lwpid_of (get_thread_lwp (current_inferior));
5141 char buf[128];
5142 int fd = 0;
5143 int ret = 0;
5144
5145 if (!writebuf && !readbuf)
5146 return -1;
5147
5148 if (!*annex)
5149 {
5150 if (!readbuf)
5151 return -1;
5152 else
5153 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5154 }
5155
5156 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5157 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5158 if (fd <= 0)
5159 return -1;
5160
5161 if (offset != 0
5162 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5163 {
5164 close (fd);
5165 return 0;
5166 }
5167
5168 if (writebuf)
5169 ret = write (fd, writebuf, (size_t) len);
5170 else
5171 ret = read (fd, readbuf, (size_t) len);
5172
5173 close (fd);
5174 return ret;
5175 }
5176
5177 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5178 struct target_loadseg
5179 {
5180 /* Core address to which the segment is mapped. */
5181 Elf32_Addr addr;
5182 /* VMA recorded in the program header. */
5183 Elf32_Addr p_vaddr;
5184 /* Size of this segment in memory. */
5185 Elf32_Word p_memsz;
5186 };
5187
5188 # if defined PT_GETDSBT
5189 struct target_loadmap
5190 {
5191 /* Protocol version number, must be zero. */
5192 Elf32_Word version;
5193 /* Pointer to the DSBT table, its size, and the DSBT index. */
5194 unsigned *dsbt_table;
5195 unsigned dsbt_size, dsbt_index;
5196 /* Number of segments in this map. */
5197 Elf32_Word nsegs;
5198 /* The actual memory map. */
5199 struct target_loadseg segs[/*nsegs*/];
5200 };
5201 # define LINUX_LOADMAP PT_GETDSBT
5202 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5203 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5204 # else
5205 struct target_loadmap
5206 {
5207 /* Protocol version number, must be zero. */
5208 Elf32_Half version;
5209 /* Number of segments in this map. */
5210 Elf32_Half nsegs;
5211 /* The actual memory map. */
5212 struct target_loadseg segs[/*nsegs*/];
5213 };
5214 # define LINUX_LOADMAP PTRACE_GETFDPIC
5215 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5216 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5217 # endif
5218
5219 static int
5220 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5221 unsigned char *myaddr, unsigned int len)
5222 {
5223 int pid = lwpid_of (get_thread_lwp (current_inferior));
5224 int addr = -1;
5225 struct target_loadmap *data = NULL;
5226 unsigned int actual_length, copy_length;
5227
5228 if (strcmp (annex, "exec") == 0)
5229 addr = (int) LINUX_LOADMAP_EXEC;
5230 else if (strcmp (annex, "interp") == 0)
5231 addr = (int) LINUX_LOADMAP_INTERP;
5232 else
5233 return -1;
5234
5235 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5236 return -1;
5237
5238 if (data == NULL)
5239 return -1;
5240
5241 actual_length = sizeof (struct target_loadmap)
5242 + sizeof (struct target_loadseg) * data->nsegs;
5243
5244 if (offset < 0 || offset > actual_length)
5245 return -1;
5246
5247 copy_length = actual_length - offset < len ? actual_length - offset : len;
5248 memcpy (myaddr, (char *) data + offset, copy_length);
5249 return copy_length;
5250 }
5251 #else
5252 # define linux_read_loadmap NULL
5253 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5254
5255 static void
5256 linux_process_qsupported (const char *query)
5257 {
5258 if (the_low_target.process_qsupported != NULL)
5259 the_low_target.process_qsupported (query);
5260 }
5261
5262 static int
5263 linux_supports_tracepoints (void)
5264 {
5265 if (the_low_target.supports_tracepoints == NULL)
5266 return 0;
5267
5268 return (*the_low_target.supports_tracepoints) ();
5269 }
5270
5271 static CORE_ADDR
5272 linux_read_pc (struct regcache *regcache)
5273 {
5274 if (the_low_target.get_pc == NULL)
5275 return 0;
5276
5277 return (*the_low_target.get_pc) (regcache);
5278 }
5279
5280 static void
5281 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5282 {
5283 gdb_assert (the_low_target.set_pc != NULL);
5284
5285 (*the_low_target.set_pc) (regcache, pc);
5286 }
5287
5288 static int
5289 linux_thread_stopped (struct thread_info *thread)
5290 {
5291 return get_thread_lwp (thread)->stopped;
5292 }
5293
5294 /* This exposes stop-all-threads functionality to other modules. */
5295
5296 static void
5297 linux_pause_all (int freeze)
5298 {
5299 stop_all_lwps (freeze, NULL);
5300 }
5301
5302 /* This exposes unstop-all-threads functionality to other gdbserver
5303 modules. */
5304
5305 static void
5306 linux_unpause_all (int unfreeze)
5307 {
5308 unstop_all_lwps (unfreeze, NULL);
5309 }
5310
5311 static int
5312 linux_prepare_to_access_memory (void)
5313 {
5314 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5315 running LWP. */
5316 if (non_stop)
5317 linux_pause_all (1);
5318 return 0;
5319 }
5320
5321 static void
5322 linux_done_accessing_memory (void)
5323 {
5324 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5325 running LWP. */
5326 if (non_stop)
5327 linux_unpause_all (1);
5328 }
5329
5330 static int
5331 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5332 CORE_ADDR collector,
5333 CORE_ADDR lockaddr,
5334 ULONGEST orig_size,
5335 CORE_ADDR *jump_entry,
5336 CORE_ADDR *trampoline,
5337 ULONGEST *trampoline_size,
5338 unsigned char *jjump_pad_insn,
5339 ULONGEST *jjump_pad_insn_size,
5340 CORE_ADDR *adjusted_insn_addr,
5341 CORE_ADDR *adjusted_insn_addr_end,
5342 char *err)
5343 {
5344 return (*the_low_target.install_fast_tracepoint_jump_pad)
5345 (tpoint, tpaddr, collector, lockaddr, orig_size,
5346 jump_entry, trampoline, trampoline_size,
5347 jjump_pad_insn, jjump_pad_insn_size,
5348 adjusted_insn_addr, adjusted_insn_addr_end,
5349 err);
5350 }
5351
5352 static struct emit_ops *
5353 linux_emit_ops (void)
5354 {
5355 if (the_low_target.emit_ops != NULL)
5356 return (*the_low_target.emit_ops) ();
5357 else
5358 return NULL;
5359 }
5360
5361 static int
5362 linux_get_min_fast_tracepoint_insn_len (void)
5363 {
5364 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5365 }
5366
5367 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5368
5369 static int
5370 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5371 CORE_ADDR *phdr_memaddr, int *num_phdr)
5372 {
5373 char filename[PATH_MAX];
5374 int fd;
5375 const int auxv_size = is_elf64
5376 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5377 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5378
5379 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5380
5381 fd = open (filename, O_RDONLY);
5382 if (fd < 0)
5383 return 1;
5384
5385 *phdr_memaddr = 0;
5386 *num_phdr = 0;
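/* /proc/PID/auxv is a stream of (a_type, a_un.a_val) pairs terminated by
   an AT_NULL entry; scan it until both AT_PHDR and AT_PHNUM have been
   seen or the data runs out.  */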
5387 while (read (fd, buf, auxv_size) == auxv_size
5388 && (*phdr_memaddr == 0 || *num_phdr == 0))
5389 {
5390 if (is_elf64)
5391 {
5392 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5393
5394 switch (aux->a_type)
5395 {
5396 case AT_PHDR:
5397 *phdr_memaddr = aux->a_un.a_val;
5398 break;
5399 case AT_PHNUM:
5400 *num_phdr = aux->a_un.a_val;
5401 break;
5402 }
5403 }
5404 else
5405 {
5406 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5407
5408 switch (aux->a_type)
5409 {
5410 case AT_PHDR:
5411 *phdr_memaddr = aux->a_un.a_val;
5412 break;
5413 case AT_PHNUM:
5414 *num_phdr = aux->a_un.a_val;
5415 break;
5416 }
5417 }
5418 }
5419
5420 close (fd);
5421
5422 if (*phdr_memaddr == 0 || *num_phdr == 0)
5423 {
5424 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5425 "phdr_memaddr = %ld, phdr_num = %d",
5426 (long) *phdr_memaddr, *num_phdr);
5427 return 2;
5428 }
5429
5430 return 0;
5431 }
5432
5433 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5434
5435 static CORE_ADDR
5436 get_dynamic (const int pid, const int is_elf64)
5437 {
5438 CORE_ADDR phdr_memaddr, relocation;
5439 int num_phdr, i;
5440 unsigned char *phdr_buf;
5441 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5442
5443 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5444 return 0;
5445
5446 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5447 phdr_buf = alloca (num_phdr * phdr_size);
5448
5449 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5450 return 0;
5451
5452 /* Compute relocation: it is expected to be 0 for "regular" executables,
5453 non-zero for PIE ones. */
5454 relocation = -1;
5455 for (i = 0; relocation == -1 && i < num_phdr; i++)
5456 if (is_elf64)
5457 {
5458 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5459
5460 if (p->p_type == PT_PHDR)
5461 relocation = phdr_memaddr - p->p_vaddr;
5462 }
5463 else
5464 {
5465 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5466
5467 if (p->p_type == PT_PHDR)
5468 relocation = phdr_memaddr - p->p_vaddr;
5469 }
5470
5471 if (relocation == -1)
5472 {
5473 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5474 real-world executables, including PIE executables, always have
5475 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5476 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5477 provides DT_DEBUG anyway (fpc binaries are statically linked).
5478
5479 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
5480
5481 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5482
5483 return 0;
5484 }
5485
5486 for (i = 0; i < num_phdr; i++)
5487 {
5488 if (is_elf64)
5489 {
5490 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5491
5492 if (p->p_type == PT_DYNAMIC)
5493 return p->p_vaddr + relocation;
5494 }
5495 else
5496 {
5497 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5498
5499 if (p->p_type == PT_DYNAMIC)
5500 return p->p_vaddr + relocation;
5501 }
5502 }
5503
5504 return 0;
5505 }
5506
5507 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5508 can be 0 if the inferior does not yet have the library list initialized.
5509 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5510 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5511
5512 static CORE_ADDR
5513 get_r_debug (const int pid, const int is_elf64)
5514 {
5515 CORE_ADDR dynamic_memaddr;
5516 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5517 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5518 CORE_ADDR map = -1;
5519
5520 dynamic_memaddr = get_dynamic (pid, is_elf64);
5521 if (dynamic_memaddr == 0)
5522 return map;
5523
5524 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5525 {
5526 if (is_elf64)
5527 {
5528 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5529 #ifdef DT_MIPS_RLD_MAP
5530 union
5531 {
5532 Elf64_Xword map;
5533 unsigned char buf[sizeof (Elf64_Xword)];
5534 }
5535 rld_map;
5536
5537 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5538 {
5539 if (linux_read_memory (dyn->d_un.d_val,
5540 rld_map.buf, sizeof (rld_map.buf)) == 0)
5541 return rld_map.map;
5542 else
5543 break;
5544 }
5545 #endif /* DT_MIPS_RLD_MAP */
5546
5547 if (dyn->d_tag == DT_DEBUG && map == -1)
5548 map = dyn->d_un.d_val;
5549
5550 if (dyn->d_tag == DT_NULL)
5551 break;
5552 }
5553 else
5554 {
5555 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5556 #ifdef DT_MIPS_RLD_MAP
5557 union
5558 {
5559 Elf32_Word map;
5560 unsigned char buf[sizeof (Elf32_Word)];
5561 }
5562 rld_map;
5563
5564 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5565 {
5566 if (linux_read_memory (dyn->d_un.d_val,
5567 rld_map.buf, sizeof (rld_map.buf)) == 0)
5568 return rld_map.map;
5569 else
5570 break;
5571 }
5572 #endif /* DT_MIPS_RLD_MAP */
5573
5574 if (dyn->d_tag == DT_DEBUG && map == -1)
5575 map = dyn->d_un.d_val;
5576
5577 if (dyn->d_tag == DT_NULL)
5578 break;
5579 }
5580
5581 dynamic_memaddr += dyn_size;
5582 }
5583
5584 return map;
5585 }
5586
5587 /* Read one pointer from MEMADDR in the inferior. */
5588
5589 static int
5590 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5591 {
5592 int ret;
5593
5594 /* Go through a union so this works on either big or little endian
5595 hosts, when the inferior's pointer size is smaller than the size
5596 of CORE_ADDR. It is assumed the inferior's endianness is the
5597 same as the superior's. */
5598 union
5599 {
5600 CORE_ADDR core_addr;
5601 unsigned int ui;
5602 unsigned char uc;
5603 } addr;
5604
5605 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5606 if (ret == 0)
5607 {
5608 if (ptr_size == sizeof (CORE_ADDR))
5609 *ptr = addr.core_addr;
5610 else if (ptr_size == sizeof (unsigned int))
5611 *ptr = addr.ui;
5612 else
5613 gdb_assert_not_reached ("unhandled pointer size");
5614 }
5615 return ret;
5616 }
5617
5618 struct link_map_offsets
5619 {
5620 /* Offset and size of r_debug.r_version. */
5621 int r_version_offset;
5622
5623 /* Offset and size of r_debug.r_map. */
5624 int r_map_offset;
5625
5626 /* Offset to l_addr field in struct link_map. */
5627 int l_addr_offset;
5628
5629 /* Offset to l_name field in struct link_map. */
5630 int l_name_offset;
5631
5632 /* Offset to l_ld field in struct link_map. */
5633 int l_ld_offset;
5634
5635 /* Offset to l_next field in struct link_map. */
5636 int l_next_offset;
5637
5638 /* Offset to l_prev field in struct link_map. */
5639 int l_prev_offset;
5640 };
5641
5642 /* Construct qXfer:libraries-svr4:read reply. */
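/* The reply is an XML document roughly of this shape (addresses are
   purely illustrative):

     <library-list-svr4 version="1.0" main-lm="0xdeadbeef">
       <library name="/lib/libfoo.so.1" lm="0xcafe0000"
                l_addr="0x10000" l_ld="0x20000"/>
     </library-list-svr4>

   built by walking the inferior's r_debug/link_map chain.  */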
5643
5644 static int
5645 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5646 unsigned const char *writebuf,
5647 CORE_ADDR offset, int len)
5648 {
5649 char *document;
5650 unsigned document_len;
5651 struct process_info_private *const priv = current_process ()->private;
5652 char filename[PATH_MAX];
5653 int pid, is_elf64;
5654
5655 static const struct link_map_offsets lmo_32bit_offsets =
5656 {
5657 0, /* r_version offset. */
5658 4, /* r_debug.r_map offset. */
5659 0, /* l_addr offset in link_map. */
5660 4, /* l_name offset in link_map. */
5661 8, /* l_ld offset in link_map. */
5662 12, /* l_next offset in link_map. */
5663 16 /* l_prev offset in link_map. */
5664 };
5665
5666 static const struct link_map_offsets lmo_64bit_offsets =
5667 {
5668 0, /* r_version offset. */
5669 8, /* r_debug.r_map offset. */
5670 0, /* l_addr offset in link_map. */
5671 8, /* l_name offset in link_map. */
5672 16, /* l_ld offset in link_map. */
5673 24, /* l_next offset in link_map. */
5674 32 /* l_prev offset in link_map. */
5675 };
5676 const struct link_map_offsets *lmo;
5677 unsigned int machine;
5678
5679 if (writebuf != NULL)
5680 return -2;
5681 if (readbuf == NULL)
5682 return -1;
5683
5684 pid = lwpid_of (get_thread_lwp (current_inferior));
5685 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5686 is_elf64 = elf_64_file_p (filename, &machine);
5687 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5688
5689 if (priv->r_debug == 0)
5690 priv->r_debug = get_r_debug (pid, is_elf64);
5691
5692 /* We failed to find DT_DEBUG. This situation will not change for this
5693 inferior - do not retry it. Report it to GDB as E01; see GDB's
5694 solib-svr4.c for the reasons. */
5695 if (priv->r_debug == (CORE_ADDR) -1)
5696 return -1;
5697
5698 if (priv->r_debug == 0)
5699 {
5700 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5701 }
5702 else
5703 {
5704 int allocated = 1024;
5705 char *p;
5706 const int ptr_size = is_elf64 ? 8 : 4;
5707 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5708 int r_version, header_done = 0;
5709
5710 document = xmalloc (allocated);
5711 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5712 p = document + strlen (document);
5713
5714 r_version = 0;
5715 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5716 (unsigned char *) &r_version,
5717 sizeof (r_version)) != 0
5718 || r_version != 1)
5719 {
5720 warning ("unexpected r_debug version %d", r_version);
5721 goto done;
5722 }
5723
5724 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5725 &lm_addr, ptr_size) != 0)
5726 {
5727 warning ("unable to read r_map from 0x%lx",
5728 (long) priv->r_debug + lmo->r_map_offset);
5729 goto done;
5730 }
5731
5732 lm_prev = 0;
5733 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5734 &l_name, ptr_size) == 0
5735 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5736 &l_addr, ptr_size) == 0
5737 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5738 &l_ld, ptr_size) == 0
5739 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5740 &l_prev, ptr_size) == 0
5741 && read_one_ptr (lm_addr + lmo->l_next_offset,
5742 &l_next, ptr_size) == 0)
5743 {
5744 unsigned char libname[PATH_MAX];
5745
5746 if (lm_prev != l_prev)
5747 {
5748 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5749 (long) lm_prev, (long) l_prev);
5750 break;
5751 }
5752
5753 /* Not checking for error because reading may stop before
5754 we've got PATH_MAX worth of characters. */
5755 libname[0] = '\0';
5756 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5757 libname[sizeof (libname) - 1] = '\0';
5758 if (libname[0] != '\0')
5759 {
5760 /* Allow 6x the size: xml_escape_text below may expand each
5760 character into an entity of up to 6 characters (e.g. "&quot;").  */
5761 size_t len = 6 * strlen ((char *) libname);
5762 char *name;
5763
5764 if (!header_done)
5765 {
5766 /* Terminate `<library-list-svr4'. */
5767 *p++ = '>';
5768 header_done = 1;
5769 }
5770
5771 while (allocated < p - document + len + 200)
5772 {
5773 /* Expand to guarantee sufficient storage. */
5774 uintptr_t document_len = p - document;
5775
5776 document = xrealloc (document, 2 * allocated);
5777 allocated *= 2;
5778 p = document + document_len;
5779 }
5780
5781 name = xml_escape_text ((char *) libname);
5782 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5783 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5784 name, (unsigned long) lm_addr,
5785 (unsigned long) l_addr, (unsigned long) l_ld);
5786 free (name);
5787 }
5788 else if (lm_prev == 0)
5789 {
5790 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5791 p = p + strlen (p);
5792 }
5793
5794 if (l_next == 0)
5795 break;
5796
5797 lm_prev = lm_addr;
5798 lm_addr = l_next;
5799 }
5800 done:
5801 if (!header_done)
5802 {
5803 /* Empty list; terminate `<library-list-svr4'. */
5804 strcpy (p, "/>");
5805 }
5806 else
5807 strcpy (p, "</library-list-svr4>");
5808 }
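/* For illustration only, the finished document typically looks like:

       <library-list-svr4 version="1.0" main-lm="0x...">
         <library name="/lib/libc.so.6" lm="0x..." l_addr="0x..." l_ld="0x..."/>
         ...
       </library-list-svr4>

   or just <library-list-svr4 version="1.0"/> when no libraries are
   loaded.  */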
5809
5810 document_len = strlen (document);
5811 if (offset < document_len)
5812 document_len -= offset;
5813 else
5814 document_len = 0;
5815 if (len > document_len)
5816 len = document_len;
5817
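/* Hand back the slice [OFFSET, OFFSET + LEN) of the document.  GDB is
   expected to fetch the whole list through a sequence of
   qXfer:libraries-svr4:read requests with increasing offsets, stopping
   once a request returns fewer bytes than asked for.  */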
5818 memcpy (readbuf, document + offset, len);
5819 xfree (document);
5820
5821 return len;
5822 }
5823
5824 static struct target_ops linux_target_ops = {
5825 linux_create_inferior,
5826 linux_attach,
5827 linux_kill,
5828 linux_detach,
5829 linux_mourn,
5830 linux_join,
5831 linux_thread_alive,
5832 linux_resume,
5833 linux_wait,
5834 linux_fetch_registers,
5835 linux_store_registers,
5836 linux_prepare_to_access_memory,
5837 linux_done_accessing_memory,
5838 linux_read_memory,
5839 linux_write_memory,
5840 linux_look_up_symbols,
5841 linux_request_interrupt,
5842 linux_read_auxv,
5843 linux_insert_point,
5844 linux_remove_point,
5845 linux_stopped_by_watchpoint,
5846 linux_stopped_data_address,
5847 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5848 linux_read_offsets,
5849 #else
5850 NULL,
5851 #endif
5852 #ifdef USE_THREAD_DB
5853 thread_db_get_tls_address,
5854 #else
5855 NULL,
5856 #endif
5857 linux_qxfer_spu,
5858 hostio_last_error_from_errno,
5859 linux_qxfer_osdata,
5860 linux_xfer_siginfo,
5861 linux_supports_non_stop,
5862 linux_async,
5863 linux_start_non_stop,
5864 linux_supports_multi_process,
5865 #ifdef USE_THREAD_DB
5866 thread_db_handle_monitor_command,
5867 #else
5868 NULL,
5869 #endif
5870 linux_common_core_of_thread,
5871 linux_read_loadmap,
5872 linux_process_qsupported,
5873 linux_supports_tracepoints,
5874 linux_read_pc,
5875 linux_write_pc,
5876 linux_thread_stopped,
5877 NULL,
5878 linux_pause_all,
5879 linux_unpause_all,
5880 linux_cancel_breakpoints,
5881 linux_stabilize_threads,
5882 linux_install_fast_tracepoint_jump_pad,
5883 linux_emit_ops,
5884 linux_supports_disable_randomization,
5885 linux_get_min_fast_tracepoint_insn_len,
5886 linux_qxfer_libraries_svr4,
5887 linux_supports_agent,
5888 };
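/* The initializers above must match, one for one, the order of the
   callbacks declared in struct target_ops (gdbserver's target.h); a
   NULL entry means the operation is not supported in this
   configuration.  */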
5889
5890 static void
5891 linux_init_signals (void)
5892 {
5893 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5894 to find what the cancel signal actually is. */
5895 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5896 signal (__SIGRTMIN + 1, SIG_IGN);
5897 #endif
5898 }
5899
5900 void
5901 initialize_low (void)
5902 {
5903 struct sigaction sigchld_action;
5904 memset (&sigchld_action, 0, sizeof (sigchld_action));
5905 set_target_ops (&linux_target_ops);
5906 set_breakpoint_data (the_low_target.breakpoint,
5907 the_low_target.breakpoint_len);
5908 linux_init_signals ();
5909 linux_test_for_tracefork ();
5910 linux_ptrace_init_warnings ();
5911 #ifdef HAVE_LINUX_REGSETS
5912 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5913 ;
5914 disabled_regsets = xmalloc (num_regsets);
5915 #endif
5916
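/* Install the SIGCHLD handler.  SA_RESTART asks the kernel to restart
   interrupted system calls rather than failing them with EINTR when a
   child of gdbserver changes state.  */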
5917 sigchld_action.sa_handler = sigchld_handler;
5918 sigemptyset (&sigchld_action.sa_mask);
5919 sigchld_action.sa_flags = SA_RESTART;
5920 sigaction (SIGCHLD, &sigchld_action, NULL);
5921 }