1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "gdb_wait.h"
25 #include <stdio.h>
26 #include <sys/param.h>
27 #include <sys/ptrace.h>
28 #include "linux-ptrace.h"
29 #include "linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include "gdb_stat.h"
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
54 #ifndef SPUFS_MAGIC
55 #define SPUFS_MAGIC 0x23c9b64e
56 #endif
57
58 #ifdef HAVE_PERSONALITY
59 # include <sys/personality.h>
60 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
61 # define ADDR_NO_RANDOMIZE 0x0040000
62 # endif
63 #endif
64
65 #ifndef O_LARGEFILE
66 #define O_LARGEFILE 0
67 #endif
68
69 #ifndef W_STOPCODE
70 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71 #endif
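
/* For example, with this fallback definition W_STOPCODE (SIGTRAP)
   is (5 << 8) | 0x7f == 0x57f, a status for which WIFSTOPPED is
   true and WSTOPSIG returns SIGTRAP.  */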
72
73 /* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75 #ifndef __SIGRTMIN
76 #define __SIGRTMIN 32
77 #endif
78
79 #ifdef __UCLIBC__
80 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81 /* PTRACE_TEXT_ADDR and friends. */
82 #include <asm/ptrace.h>
83 #define HAS_NOMMU
84 #endif
85 #endif
86
87 /* Some targets did not define these ptrace constants from the start,
88 so gdbserver defines them locally here. In the future, these may
89 be removed after they are added to asm/ptrace.h. */
90 #if !(defined(PT_TEXT_ADDR) \
91 || defined(PT_DATA_ADDR) \
92 || defined(PT_TEXT_END_ADDR))
93 #if defined(__mcoldfire__)
94 /* These are still undefined in 3.10 kernels. */
95 #define PT_TEXT_ADDR 49*4
96 #define PT_DATA_ADDR 50*4
97 #define PT_TEXT_END_ADDR 51*4
98 /* BFIN already defines these since at least 2.6.32 kernels. */
99 #elif defined(BFIN)
100 #define PT_TEXT_ADDR 220
101 #define PT_TEXT_END_ADDR 224
102 #define PT_DATA_ADDR 228
103 /* These are still undefined in 3.10 kernels. */
104 #elif defined(__TMS320C6X__)
105 #define PT_TEXT_ADDR (0x10000*4)
106 #define PT_DATA_ADDR (0x10004*4)
107 #define PT_TEXT_END_ADDR (0x10008*4)
108 #endif
109 #endif
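
/* Where defined, these offsets are used with PTRACE_PEEKUSER to read
   the inferior's text/data segment addresses on no-MMU targets (see
   the read-offsets support further down in this file).  */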
110
111 #ifdef HAVE_LINUX_BTRACE
112 # include "linux-btrace.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
144
145 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
146 representation of the thread ID.
147
148 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
149 the same as the LWP ID.
150
151 ``all_processes'' is keyed by the "overall process ID", which
152 GNU/Linux calls tgid, "thread group ID". */
153
154 struct inferior_list all_lwps;
155
156 /* A list of all unknown processes which receive stop signals. Some
157 other process will presumably claim each of these as forked
158 children momentarily. */
159
160 struct simple_pid_list
161 {
162 /* The process ID. */
163 int pid;
164
165 /* The status as reported by waitpid. */
166 int status;
167
168 /* Next in chain. */
169 struct simple_pid_list *next;
170 };
171 struct simple_pid_list *stopped_pids;
172
173 /* Trivial list manipulation functions to keep track of a list of new
174 stopped processes. */
175
176 static void
177 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
178 {
179 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
180
181 new_pid->pid = pid;
182 new_pid->status = status;
183 new_pid->next = *listp;
184 *listp = new_pid;
185 }
186
187 static int
188 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
189 {
190 struct simple_pid_list **p;
191
192 for (p = listp; *p != NULL; p = &(*p)->next)
193 if ((*p)->pid == pid)
194 {
195 struct simple_pid_list *next = (*p)->next;
196
197 *statusp = (*p)->status;
198 xfree (*p);
199 *p = next;
200 return 1;
201 }
202 return 0;
203 }
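
/* Together, these implement the rendezvous for early-arriving clone
   children: linux_wait_for_lwp parks a stop from an unknown LWP on
   stopped_pids via add_to_pid_list, and handle_extended_wait claims
   it with pull_pid_from_list once the parent reports the
   PTRACE_EVENT_CLONE event.  */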
204
205 enum stopping_threads_kind
206 {
207 /* Not stopping threads presently. */
208 NOT_STOPPING_THREADS,
209
210 /* Stopping threads. */
211 STOPPING_THREADS,
212
213 /* Stopping and suspending threads. */
214 STOPPING_AND_SUSPENDING_THREADS
215 };
216
217 /* This is set while stop_all_lwps is in effect. */
218 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
219
220 /* FIXME make into a target method? */
221 int using_threads = 1;
222
223 /* True if we're presently stabilizing threads (moving them out of
224 jump pads). */
225 static int stabilizing_threads;
226
227 /* This flag is true iff we've just created or attached to our first
228 inferior but it has not stopped yet. As soon as it does, we need
229 to call the low target's arch_setup callback. Doing this only on
230 the first inferior avoids reinitializing the architecture on every
231 inferior, and avoids messing with the register caches of the
232 already running inferiors. NOTE: this assumes all inferiors under
233 control of gdbserver have the same architecture. */
234 static int new_inferior;
235
236 static void linux_resume_one_lwp (struct lwp_info *lwp,
237 int step, int signal, siginfo_t *info);
238 static void linux_resume (struct thread_resume *resume_info, size_t n);
239 static void stop_all_lwps (int suspend, struct lwp_info *except);
240 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
241 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
242 static void *add_lwp (ptid_t ptid);
243 static int linux_stopped_by_watchpoint (void);
244 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
245 static void proceed_all_lwps (void);
246 static int finish_step_over (struct lwp_info *lwp);
247 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
248 static int kill_lwp (unsigned long lwpid, int signo);
249 static void linux_enable_event_reporting (int pid);
250
251 /* True if the low target can hardware single-step. Such targets
252 don't need a BREAKPOINT_REINSERT_ADDR callback. */
253
254 static int
255 can_hardware_single_step (void)
256 {
257 return (the_low_target.breakpoint_reinsert_addr == NULL);
258 }
259
260 /* True if the low target supports memory breakpoints. If so, we'll
261 have a GET_PC implementation. */
262
263 static int
264 supports_breakpoints (void)
265 {
266 return (the_low_target.get_pc != NULL);
267 }
268
269 /* Returns true if this target can support fast tracepoints. This
270 does not mean that the in-process agent has been loaded in the
271 inferior. */
272
273 static int
274 supports_fast_tracepoints (void)
275 {
276 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
277 }
278
279 struct pending_signals
280 {
281 int signal;
282 siginfo_t info;
283 struct pending_signals *prev;
284 };
285
286 #ifdef HAVE_LINUX_REGSETS
287 static char *disabled_regsets;
288 static int num_regsets;
289 #endif
290
291 /* The read/write ends of the pipe registered as a waitable file in the
292 event loop. */
293 static int linux_event_pipe[2] = { -1, -1 };
294
295 /* True if we're currently in async mode. */
296 #define target_is_async_p() (linux_event_pipe[0] != -1)
297
298 static void send_sigstop (struct lwp_info *lwp);
299 static void wait_for_sigstop (struct inferior_list_entry *entry);
300
301 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not an ELF file at all. Set *MACHINE from e_machine, or to EM_NONE. */
302
303 static int
304 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
305 {
306 if (header->e_ident[EI_MAG0] == ELFMAG0
307 && header->e_ident[EI_MAG1] == ELFMAG1
308 && header->e_ident[EI_MAG2] == ELFMAG2
309 && header->e_ident[EI_MAG3] == ELFMAG3)
310 {
311 *machine = header->e_machine;
312 return header->e_ident[EI_CLASS] == ELFCLASS64;
314 }
315 *machine = EM_NONE;
316 return -1;
317 }
318
319 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
320 file (or too short to tell), and -1 if the file cannot be opened
321 or is not an ELF file at all. */
322
323 static int
324 elf_64_file_p (const char *file, unsigned int *machine)
325 {
326 Elf64_Ehdr header;
327 int fd;
328
329 fd = open (file, O_RDONLY);
330 if (fd < 0)
331 return -1;
332
333 if (read (fd, &header, sizeof (header)) != sizeof (header))
334 {
335 close (fd);
336 return 0;
337 }
338 close (fd);
339
340 return elf_64_header_p (&header, machine);
341 }
342
343 /* Accepts an integer PID; returns true if the executable that PID
344 is running is a 64-bit ELF file. */
345
346 int
347 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
348 {
349 char file[MAXPATHLEN];
350
351 sprintf (file, "/proc/%d/exe", pid);
352 return elf_64_file_p (file, machine);
353 }
354
355 static void
356 delete_lwp (struct lwp_info *lwp)
357 {
358 remove_thread (get_lwp_thread (lwp));
359 remove_inferior (&all_lwps, &lwp->head);
360 free (lwp->arch_private);
361 free (lwp);
362 }
363
364 /* Add a process to the common process list, and set its private
365 data. */
366
367 static struct process_info *
368 linux_add_process (int pid, int attached)
369 {
370 struct process_info *proc;
371
372 /* Is this the first process? If so, then set the arch. */
373 if (all_processes.head == NULL)
374 new_inferior = 1;
375
376 proc = add_process (pid, attached);
377 proc->private = xcalloc (1, sizeof (*proc->private));
378
379 if (the_low_target.new_process != NULL)
380 proc->private->arch_private = the_low_target.new_process ();
381
382 return proc;
383 }
384
385 /* Wrapper function for waitpid which handles EINTR, and emulates
386 __WALL for systems where that is not available. */
387
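/* Strategy: emulate __WALL by polling with WNOHANG, alternating the
   __WCLONE flag between calls so both cloned and non-cloned children
   are checked, and sigsuspend'ing after each full round so we sleep
   until some child changes state instead of busy-waiting.  */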
388 static int
389 my_waitpid (int pid, int *status, int flags)
390 {
391 int ret, out_errno;
392
393 if (debug_threads)
394 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
395
396 if (flags & __WALL)
397 {
398 sigset_t block_mask, org_mask, wake_mask;
399 int wnohang;
400
401 wnohang = (flags & WNOHANG) != 0;
402 flags &= ~(__WALL | __WCLONE);
403 flags |= WNOHANG;
404
405 /* Block all signals while here. This avoids knowing about
406 LinuxThreads' signals. */
407 sigfillset (&block_mask);
408 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
409
410 /* ... except during the sigsuspend below. */
411 sigemptyset (&wake_mask);
412
413 while (1)
414 {
415 /* Since all signals are blocked, there's no need to check
416 for EINTR here. */
417 ret = waitpid (pid, status, flags);
418 out_errno = errno;
419
420 if (ret == -1 && out_errno != ECHILD)
421 break;
422 else if (ret > 0)
423 break;
424
425 if (flags & __WCLONE)
426 {
427 /* We've tried both flavors now. If WNOHANG is set,
428 there's nothing else to do, just bail out. */
429 if (wnohang)
430 break;
431
432 if (debug_threads)
433 fprintf (stderr, "blocking\n");
434
435 /* Block waiting for signals. */
436 sigsuspend (&wake_mask);
437 }
438
439 flags ^= __WCLONE;
440 }
441
442 sigprocmask (SIG_SETMASK, &org_mask, NULL);
443 }
444 else
445 {
446 do
447 ret = waitpid (pid, status, flags);
448 while (ret == -1 && errno == EINTR);
449 out_errno = errno;
450 }
451
452 if (debug_threads)
453 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
454 pid, flags, status ? *status : -1, ret);
455
456 errno = out_errno;
457 return ret;
458 }
459
460 /* Handle a GNU/Linux extended wait response. If we see a clone
461 event, we need to add the new LWP to our list (and not report the
462 trap to higher layers). */
463
464 static void
465 handle_extended_wait (struct lwp_info *event_child, int wstat)
466 {
467 int event = wstat >> 16;
468 struct lwp_info *new_lwp;
469
470 if (event == PTRACE_EVENT_CLONE)
471 {
472 ptid_t ptid;
473 unsigned long new_pid;
474 int ret, status;
475
476 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_ARG3_TYPE) 0,
477 &new_pid);
478
479 /* If we haven't already seen the new PID stop, wait for it now. */
480 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
481 {
482 /* The new child has a pending SIGSTOP. We can't affect it until it
483 hits the SIGSTOP, but we're already attached. */
484
485 ret = my_waitpid (new_pid, &status, __WALL);
486
487 if (ret == -1)
488 perror_with_name ("waiting for new child");
489 else if (ret != new_pid)
490 warning ("wait returned unexpected PID %d", ret);
491 else if (!WIFSTOPPED (status))
492 warning ("wait returned unexpected status 0x%x", status);
493 }
494
495 ptid = ptid_build (pid_of (event_child), new_pid, 0);
496 new_lwp = (struct lwp_info *) add_lwp (ptid);
497 add_thread (ptid, new_lwp);
498
499 /* Either we're going to immediately resume the new thread
500 or leave it stopped. linux_resume_one_lwp is a nop if it
501 thinks the thread is currently running, so set this first
502 before calling linux_resume_one_lwp. */
503 new_lwp->stopped = 1;
504
505 /* If we're suspending all threads, leave this one suspended
506 too. */
507 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
508 new_lwp->suspended = 1;
509
510 /* Normally we will get the pending SIGSTOP. But in some cases
511 we might get another signal delivered to the group first.
512 If we do get another signal, be sure not to lose it. */
513 if (WSTOPSIG (status) == SIGSTOP)
514 {
515 if (stopping_threads != NOT_STOPPING_THREADS)
516 new_lwp->stop_pc = get_stop_pc (new_lwp);
517 else
518 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
519 }
520 else
521 {
522 new_lwp->stop_expected = 1;
523
524 if (stopping_threads != NOT_STOPPING_THREADS)
525 {
526 new_lwp->stop_pc = get_stop_pc (new_lwp);
527 new_lwp->status_pending_p = 1;
528 new_lwp->status_pending = status;
529 }
530 else
531 /* Pass the signal on. This is what GDB does - except
532 shouldn't we really report it instead? */
533 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
534 }
535
536 /* Always resume the current thread. If we are stopping
537 threads, it will have a pending SIGSTOP; we may as well
538 collect it now. */
539 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
540 }
541 }
542
543 /* Return the PC as read from the regcache of LWP, without any
544 adjustment. */
545
546 static CORE_ADDR
547 get_pc (struct lwp_info *lwp)
548 {
549 struct thread_info *saved_inferior;
550 struct regcache *regcache;
551 CORE_ADDR pc;
552
553 if (the_low_target.get_pc == NULL)
554 return 0;
555
556 saved_inferior = current_inferior;
557 current_inferior = get_lwp_thread (lwp);
558
559 regcache = get_thread_regcache (current_inferior, 1);
560 pc = (*the_low_target.get_pc) (regcache);
561
562 if (debug_threads)
563 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
564
565 current_inferior = saved_inferior;
566 return pc;
567 }
568
569 /* This function should only be called if LWP got a SIGTRAP.
570 The SIGTRAP could mean several things.
571
572 On i386, where decr_pc_after_break is non-zero:
573 If we were single-stepping this process using PTRACE_SINGLESTEP,
574 we will get only the one SIGTRAP (even if the instruction we
575 stepped over was a breakpoint). The value of $eip will be the
576 next instruction.
577 If we continue the process using PTRACE_CONT, we will get a
578 SIGTRAP when we hit a breakpoint. The value of $eip will be
579 the instruction after the breakpoint (i.e. needs to be
580 decremented). If we report the SIGTRAP to GDB, we must also
581 report the undecremented PC. If we cancel the SIGTRAP, we
582 must resume at the decremented PC.
583
584 (Presumably, not yet tested) On a non-decr_pc_after_break machine
585 with hardware or kernel single-step:
586 If we single-step over a breakpoint instruction, our PC will
587 point at the following instruction. If we continue and hit a
588 breakpoint instruction, our PC will point at the breakpoint
589 instruction. */
590
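/* For instance, on i386 decr_pc_after_break is 1, the size of the
   int3 breakpoint instruction: a breakpoint planted at 0x1000 traps
   with the PC reported as 0x1001, and get_stop_pc rewinds it to
   0x1000 so callers see the breakpoint's own address.  */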
591 static CORE_ADDR
592 get_stop_pc (struct lwp_info *lwp)
593 {
594 CORE_ADDR stop_pc;
595
596 if (the_low_target.get_pc == NULL)
597 return 0;
598
599 stop_pc = get_pc (lwp);
600
601 if (WSTOPSIG (lwp->last_status) == SIGTRAP
602 && !lwp->stepping
603 && !lwp->stopped_by_watchpoint
604 && lwp->last_status >> 16 == 0)
605 stop_pc -= the_low_target.decr_pc_after_break;
606
607 if (debug_threads)
608 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
609
610 return stop_pc;
611 }
612
613 static void *
614 add_lwp (ptid_t ptid)
615 {
616 struct lwp_info *lwp;
617
618 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
619 memset (lwp, 0, sizeof (*lwp));
620
621 lwp->head.id = ptid;
622
623 if (the_low_target.new_thread != NULL)
624 lwp->arch_private = the_low_target.new_thread ();
625
626 add_inferior_to_list (&all_lwps, &lwp->head);
627
628 return lwp;
629 }
630
631 /* Start an inferior process and return its pid.
632 ALLARGS is a vector of program-name and args. */
633
634 static int
635 linux_create_inferior (char *program, char **allargs)
636 {
637 #ifdef HAVE_PERSONALITY
638 int personality_orig = 0, personality_set = 0;
639 #endif
640 struct lwp_info *new_lwp;
641 int pid;
642 ptid_t ptid;
643
644 #ifdef HAVE_PERSONALITY
645 if (disable_randomization)
646 {
647 errno = 0;
648 personality_orig = personality (0xffffffff);
649 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
650 {
651 personality_set = 1;
652 personality (personality_orig | ADDR_NO_RANDOMIZE);
653 }
654 if (errno != 0 || (personality_set
655 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
656 warning ("Error disabling address space randomization: %s",
657 strerror (errno));
658 }
659 #endif
660
661 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
662 pid = vfork ();
663 #else
664 pid = fork ();
665 #endif
666 if (pid < 0)
667 perror_with_name ("fork");
668
669 if (pid == 0)
670 {
671 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
672
673 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
674 signal (__SIGRTMIN + 1, SIG_DFL);
675 #endif
676
677 setpgid (0, 0);
678
679 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
680 stdout to stderr so that inferior i/o doesn't corrupt the connection.
681 Also, redirect stdin to /dev/null. */
682 if (remote_connection_is_stdio ())
683 {
684 close (0);
685 open ("/dev/null", O_RDONLY);
686 dup2 (2, 1);
687 if (write (2, "stdin/stdout redirected\n",
688 sizeof ("stdin/stdout redirected\n") - 1) < 0)
689 {
690 /* Errors ignored. */;
691 }
692 }
693
694 execv (program, allargs);
695 if (errno == ENOENT)
696 execvp (program, allargs);
697
698 fprintf (stderr, "Cannot exec %s: %s.\n", program,
699 strerror (errno));
700 fflush (stderr);
701 _exit (0177);
702 }
703
704 #ifdef HAVE_PERSONALITY
705 if (personality_set)
706 {
707 errno = 0;
708 personality (personality_orig);
709 if (errno != 0)
710 warning ("Error restoring address space randomization: %s",
711 strerror (errno));
712 }
713 #endif
714
715 linux_add_process (pid, 0);
716
717 ptid = ptid_build (pid, pid, 0);
718 new_lwp = add_lwp (ptid);
719 add_thread (ptid, new_lwp);
720 new_lwp->must_set_ptrace_flags = 1;
721
722 return pid;
723 }
724
725 /* Attach to an inferior process. */
726
727 static void
728 linux_attach_lwp_1 (unsigned long lwpid, int initial)
729 {
730 ptid_t ptid;
731 struct lwp_info *new_lwp;
732
733 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0)
734 != 0)
735 {
736 struct buffer buffer;
737
738 if (!initial)
739 {
740 /* If we fail to attach to an LWP, just warn. */
741 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
742 strerror (errno), errno);
743 fflush (stderr);
744 return;
745 }
746
747 /* If we fail to attach to a process, report an error. */
748 buffer_init (&buffer);
749 linux_ptrace_attach_warnings (lwpid, &buffer);
750 buffer_grow_str0 (&buffer, "");
751 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
752 lwpid, strerror (errno), errno);
753 }
754
755 if (initial)
756 /* If lwp is the tgid, we handle adding existing threads later.
757 Otherwise we just add lwp without bothering about any other
758 threads. */
759 ptid = ptid_build (lwpid, lwpid, 0);
760 else
761 {
762 /* Note that extracting the pid from the current inferior is
763 safe, since we're always called in the context of the same
764 process as this new thread. */
765 int pid = pid_of (get_thread_lwp (current_inferior));
766 ptid = ptid_build (pid, lwpid, 0);
767 }
768
769 new_lwp = (struct lwp_info *) add_lwp (ptid);
770 add_thread (ptid, new_lwp);
771
772 /* We need to wait for SIGSTOP before being able to make the next
773 ptrace call on this LWP. */
774 new_lwp->must_set_ptrace_flags = 1;
775
776 if (linux_proc_pid_is_stopped (lwpid))
777 {
778 if (debug_threads)
779 fprintf (stderr,
780 "Attached to a stopped process\n");
781
782 /* The process is definitely stopped. It is in a job control
783 stop, unless the kernel predates the TASK_STOPPED /
784 TASK_TRACED distinction, in which case it might be in a
785 ptrace stop. Make sure it is in a ptrace stop; from there we
786 can kill it, signal it, et cetera.
787
788 First make sure there is a pending SIGSTOP. Since we are
789 already attached, the process can not transition from stopped
790 to running without a PTRACE_CONT; so we know this signal will
791 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
792 probably already in the queue (unless this kernel is old
793 enough to use TASK_STOPPED for ptrace stops); but since
794 SIGSTOP is not an RT signal, it can only be queued once. */
795 kill_lwp (lwpid, SIGSTOP);
796
797 /* Finally, resume the stopped process. This will deliver the
798 SIGSTOP (or a higher priority signal, just like normal
799 PTRACE_ATTACH), which we'll catch later on. */
800 ptrace (PTRACE_CONT, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
801 }
802
803 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
804 brings it to a halt.
805
806 There are several cases to consider here:
807
808 1) gdbserver has already attached to the process and is being notified
809 of a new thread that is being created.
810 In this case we should ignore that SIGSTOP and resume the
811 process. This is handled below by setting stop_expected = 1,
812 and the fact that add_thread sets last_resume_kind ==
813 resume_continue.
814
815 2) This is the first thread (the process thread), and we're attaching
816 to it via attach_inferior.
817 In this case we want the process thread to stop.
818 This is handled by having linux_attach set last_resume_kind ==
819 resume_stop after we return.
820
821 If the pid we are attaching to is also the tgid, we attach to and
822 stop all the existing threads. Otherwise, we attach to pid and
823 ignore any other threads in the same group as this pid.
824
825 3) GDB is connecting to gdbserver and is requesting an enumeration of all
826 existing threads.
827 In this case we want the thread to stop.
828 FIXME: This case is currently not properly handled.
829 We should wait for the SIGSTOP but don't. Things apparently work
830 because enough time passes between when we ptrace (ATTACH) and when
831 gdb makes the next ptrace call on the thread.
832
833 On the other hand, if we are currently trying to stop all threads, we
834 should treat the new thread as if we had sent it a SIGSTOP. This works
835 because we are guaranteed that the add_lwp call above added us to the
836 end of the list, and so the new thread has not yet reached
837 wait_for_sigstop (but will). */
838 new_lwp->stop_expected = 1;
839 }
840
841 void
842 linux_attach_lwp (unsigned long lwpid)
843 {
844 linux_attach_lwp_1 (lwpid, 0);
845 }
846
847 /* Attach to PID. If PID is the tgid, attach to it and all
848 of its threads. */
849
850 static int
851 linux_attach (unsigned long pid)
852 {
853 /* Attach to PID. We will check for other threads
854 soon. */
855 linux_attach_lwp_1 (pid, 1);
856 linux_add_process (pid, 1);
857
858 if (!non_stop)
859 {
860 struct thread_info *thread;
861
862 /* Don't ignore the initial SIGSTOP if we just attached to this
863 process. It will be collected by wait shortly. */
864 thread = find_thread_ptid (ptid_build (pid, pid, 0));
865 thread->last_resume_kind = resume_stop;
866 }
867
868 if (linux_proc_get_tgid (pid) == pid)
869 {
870 DIR *dir;
871 char pathname[128];
872
873 sprintf (pathname, "/proc/%ld/task", pid);
874
875 dir = opendir (pathname);
876
877 if (!dir)
878 {
879 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
880 fflush (stderr);
881 }
882 else
883 {
884 /* At this point we attached to the tgid. Scan the task for
885 existing threads. */
886 unsigned long lwp;
887 int new_threads_found;
888 int iterations = 0;
889 struct dirent *dp;
890
891 while (iterations < 2)
892 {
893 new_threads_found = 0;
894 /* Add all the other threads. While we go through the
895 threads, new threads may be spawned. Cycle through
896 the list of threads until we have done two iterations without
897 finding new threads. */
898 while ((dp = readdir (dir)) != NULL)
899 {
900 /* Fetch one lwp. */
901 lwp = strtoul (dp->d_name, NULL, 10);
902
903 /* Is this a new thread? */
904 if (lwp
905 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
906 {
907 linux_attach_lwp_1 (lwp, 0);
908 new_threads_found++;
909
910 if (debug_threads)
911 fprintf (stderr, "\
912 Found and attached to new lwp %ld\n", lwp);
913 }
914 }
915
916 if (!new_threads_found)
917 iterations++;
918 else
919 iterations = 0;
920
921 rewinddir (dir);
922 }
923 closedir (dir);
924 }
925 }
926
927 return 0;
928 }
929
930 struct counter
931 {
932 int pid;
933 int count;
934 };
935
936 static int
937 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
938 {
939 struct counter *counter = args;
940
941 if (ptid_get_pid (entry->id) == counter->pid)
942 {
943 if (++counter->count > 1)
944 return 1;
945 }
946
947 return 0;
948 }
949
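/* Return non-zero if THREAD is the only thread of its process, by
   walking all_threads and checking whether a second thread with the
   same pid exists.  */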
950 static int
951 last_thread_of_process_p (struct thread_info *thread)
952 {
953 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
954 int pid = ptid_get_pid (ptid);
955 struct counter counter = { pid , 0 };
956
957 return (find_inferior (&all_threads,
958 second_thread_of_pid_p, &counter) == NULL);
959 }
960
961 /* Kill LWP. */
962
963 static void
964 linux_kill_one_lwp (struct lwp_info *lwp)
965 {
966 int pid = lwpid_of (lwp);
967
968 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
969 there is no signal context, and ptrace(PTRACE_KILL) (or
970 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
971 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
972 alternative is to kill with SIGKILL. We only need one SIGKILL
973 per process, not one for each thread. But since we still support
974 linuxthreads, and we also support debugging programs using raw
975 clone without CLONE_THREAD, we send one for each thread. For
976 years, we used PTRACE_KILL only, so we're being a bit paranoid
977 about some old kernels where PTRACE_KILL might work better
978 (dubious if there are any such, but that's why it's paranoia), so
979 we try SIGKILL first, PTRACE_KILL second, and so we're fine
980 everywhere. */
981
982 errno = 0;
983 kill (pid, SIGKILL);
984 if (debug_threads)
985 fprintf (stderr,
986 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
987 target_pid_to_str (ptid_of (lwp)),
988 errno ? strerror (errno) : "OK");
989
990 errno = 0;
991 ptrace (PTRACE_KILL, pid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
992 if (debug_threads)
993 fprintf (stderr,
994 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
995 target_pid_to_str (ptid_of (lwp)),
996 errno ? strerror (errno) : "OK");
997 }
998
999 /* Callback for `find_inferior'. Kills an lwp of a given process,
1000 except the leader. */
1001
1002 static int
1003 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1004 {
1005 struct thread_info *thread = (struct thread_info *) entry;
1006 struct lwp_info *lwp = get_thread_lwp (thread);
1007 int wstat;
1008 int pid = * (int *) args;
1009
1010 if (ptid_get_pid (entry->id) != pid)
1011 return 0;
1012
1013 /* We avoid killing the first thread here, because of a Linux kernel (at
1014 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1015 the children get a chance to be reaped, it will remain a zombie
1016 forever. */
1017
1018 if (lwpid_of (lwp) == pid)
1019 {
1020 if (debug_threads)
1021 fprintf (stderr, "lkop: is last of process %s\n",
1022 target_pid_to_str (entry->id));
1023 return 0;
1024 }
1025
1026 do
1027 {
1028 linux_kill_one_lwp (lwp);
1029
1030 /* Make sure it died. The loop is most likely unnecessary. */
1031 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1032 } while (pid > 0 && WIFSTOPPED (wstat));
1033
1034 return 0;
1035 }
1036
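/* Kill process PID and reap all of its LWPs.  Return 0 on success,
   or -1 if PID is not a process we know about.  */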
1037 static int
1038 linux_kill (int pid)
1039 {
1040 struct process_info *process;
1041 struct lwp_info *lwp;
1042 int wstat;
1043 int lwpid;
1044
1045 process = find_process_pid (pid);
1046 if (process == NULL)
1047 return -1;
1048
1049 /* If we're killing a running inferior, make sure it is stopped
1050 first, as PTRACE_KILL will not work otherwise. */
1051 stop_all_lwps (0, NULL);
1052
1053 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1054
1055 /* See the comment in linux_kill_one_lwp. We did not kill the first
1056 thread in the list, so do so now. */
1057 lwp = find_lwp_pid (pid_to_ptid (pid));
1058
1059 if (lwp == NULL)
1060 {
1061 if (debug_threads)
1062 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
1063 lwpid_of (lwp), pid);
1064 }
1065 else
1066 {
1067 if (debug_threads)
1068 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
1069 lwpid_of (lwp), pid);
1070
1071 do
1072 {
1073 linux_kill_one_lwp (lwp);
1074
1075 /* Make sure it died. The loop is most likely unnecessary. */
1076 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1077 } while (lwpid > 0 && WIFSTOPPED (wstat));
1078 }
1079
1080 the_target->mourn (process);
1081
1082 /* Since we presently can only stop all lwps of all processes, we
1083 need to unstop lwps of other processes. */
1084 unstop_all_lwps (0, NULL);
1085 return 0;
1086 }
1087
1088 /* Get pending signal of THREAD, for detaching purposes. This is the
1089 signal the thread last stopped for, which we need to deliver to the
1090 thread when detaching; otherwise, it'd be suppressed/lost. */
1091
1092 static int
1093 get_detach_signal (struct thread_info *thread)
1094 {
1095 enum gdb_signal signo = GDB_SIGNAL_0;
1096 int status;
1097 struct lwp_info *lp = get_thread_lwp (thread);
1098
1099 if (lp->status_pending_p)
1100 status = lp->status_pending;
1101 else
1102 {
1103 /* If the thread had been suspended by gdbserver, and it stopped
1104 cleanly, then it'll have stopped with SIGSTOP. But we don't
1105 want to deliver that SIGSTOP. */
1106 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1107 || thread->last_status.value.sig == GDB_SIGNAL_0)
1108 return 0;
1109
1110 /* Otherwise, we may need to deliver the signal we
1111 intercepted. */
1112 status = lp->last_status;
1113 }
1114
1115 if (!WIFSTOPPED (status))
1116 {
1117 if (debug_threads)
1118 fprintf (stderr,
1119 "GPS: lwp %s hasn't stopped: no pending signal\n",
1120 target_pid_to_str (ptid_of (lp)));
1121 return 0;
1122 }
1123
1124 /* Extended wait statuses aren't real SIGTRAPs. */
1125 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1126 {
1127 if (debug_threads)
1128 fprintf (stderr,
1129 "GPS: lwp %s had stopped with extended "
1130 "status: no pending signal\n",
1131 target_pid_to_str (ptid_of (lp)));
1132 return 0;
1133 }
1134
1135 signo = gdb_signal_from_host (WSTOPSIG (status));
1136
1137 if (program_signals_p && !program_signals[signo])
1138 {
1139 if (debug_threads)
1140 fprintf (stderr,
1141 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1142 target_pid_to_str (ptid_of (lp)),
1143 gdb_signal_to_string (signo));
1144 return 0;
1145 }
1146 else if (!program_signals_p
1147 /* If we have no way to know which signals GDB does not
1148 want to have passed to the program, assume
1149 SIGTRAP/SIGINT, which is GDB's default. */
1150 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1151 {
1152 if (debug_threads)
1153 fprintf (stderr,
1154 "GPS: lwp %s had signal %s, "
1155 "but we don't know if we should pass it. Default to not.\n",
1156 target_pid_to_str (ptid_of (lp)),
1157 gdb_signal_to_string (signo));
1158 return 0;
1159 }
1160 else
1161 {
1162 if (debug_threads)
1163 fprintf (stderr,
1164 "GPS: lwp %s has pending signal %s: delivering it.\n",
1165 target_pid_to_str (ptid_of (lp)),
1166 gdb_signal_to_string (signo));
1167
1168 return WSTOPSIG (status);
1169 }
1170 }
1171
1172 static int
1173 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1174 {
1175 struct thread_info *thread = (struct thread_info *) entry;
1176 struct lwp_info *lwp = get_thread_lwp (thread);
1177 int pid = * (int *) args;
1178 int sig;
1179
1180 if (ptid_get_pid (entry->id) != pid)
1181 return 0;
1182
1183 /* If there is a pending SIGSTOP, get rid of it. */
1184 if (lwp->stop_expected)
1185 {
1186 if (debug_threads)
1187 fprintf (stderr,
1188 "Sending SIGCONT to %s\n",
1189 target_pid_to_str (ptid_of (lwp)));
1190
1191 kill_lwp (lwpid_of (lwp), SIGCONT);
1192 lwp->stop_expected = 0;
1193 }
1194
1195 /* Flush any pending changes to the process's registers. */
1196 regcache_invalidate_one ((struct inferior_list_entry *)
1197 get_lwp_thread (lwp));
1198
1199 /* Pass on any pending signal for this thread. */
1200 sig = get_detach_signal (thread);
1201
1202 /* Finally, let it resume. */
1203 if (the_low_target.prepare_to_resume != NULL)
1204 the_low_target.prepare_to_resume (lwp);
1205 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1206 (PTRACE_ARG4_TYPE) (long) sig) < 0)
1207 error (_("Can't detach %s: %s"),
1208 target_pid_to_str (ptid_of (lwp)),
1209 strerror (errno));
1210
1211 delete_lwp (lwp);
1212 return 0;
1213 }
1214
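/* Detach from process PID.  All LWPs are stopped first so that
   ptrace can detach them and thread_db can remove its breakpoints.
   Return 0 on success, or -1 if PID is not a process we know
   about.  */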
1215 static int
1216 linux_detach (int pid)
1217 {
1218 struct process_info *process;
1219
1220 process = find_process_pid (pid);
1221 if (process == NULL)
1222 return -1;
1223
1224 /* Stop all threads before detaching. First, ptrace requires that
1225 the thread is stopped to successfully detach. Second, thread_db
1226 may need to uninstall thread event breakpoints from memory, which
1227 only works with a stopped process anyway. */
1228 stop_all_lwps (0, NULL);
1229
1230 #ifdef USE_THREAD_DB
1231 thread_db_detach (process);
1232 #endif
1233
1234 /* Stabilize threads (move out of jump pads). */
1235 stabilize_threads ();
1236
1237 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1238
1239 the_target->mourn (process);
1240
1241 /* Since we presently can only stop all lwps of all processes, we
1242 need to unstop lwps of other processes. */
1243 unstop_all_lwps (0, NULL);
1244 return 0;
1245 }
1246
1247 /* Remove all LWPs that belong to process PROC from the lwp list. */
1248
1249 static int
1250 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1251 {
1252 struct lwp_info *lwp = (struct lwp_info *) entry;
1253 struct process_info *process = proc;
1254
1255 if (pid_of (lwp) == pid_of (process))
1256 delete_lwp (lwp);
1257
1258 return 0;
1259 }
1260
1261 static void
1262 linux_mourn (struct process_info *process)
1263 {
1264 struct process_info_private *priv;
1265
1266 #ifdef USE_THREAD_DB
1267 thread_db_mourn (process);
1268 #endif
1269
1270 find_inferior (&all_lwps, delete_lwp_callback, process);
1271
1272 /* Free all private data. */
1273 priv = process->private;
1274 free (priv->arch_private);
1275 free (priv);
1276 process->private = NULL;
1277
1278 remove_process (process);
1279 }
1280
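/* Wait until the process PID exits or is killed, reaping its exit
   status.  */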
1281 static void
1282 linux_join (int pid)
1283 {
1284 int status, ret;
1285
1286 do {
1287 ret = my_waitpid (pid, &status, 0);
1288 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
1289 break;
1290 } while (ret != -1 || errno != ECHILD);
1291 }
1292
1293 /* Return nonzero if the given thread is still alive. */
1294 static int
1295 linux_thread_alive (ptid_t ptid)
1296 {
1297 struct lwp_info *lwp = find_lwp_pid (ptid);
1298
1299 /* We assume we always know if a thread exits. If a whole process
1300 exited but we still haven't been able to report it to GDB, we'll
1301 hold on to the last lwp of the dead process. */
1302 if (lwp != NULL)
1303 return !lwp->dead;
1304 else
1305 return 0;
1306 }
1307
1308 /* Return 1 if this lwp has an interesting status pending. */
1309 static int
1310 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1311 {
1312 struct lwp_info *lwp = (struct lwp_info *) entry;
1313 ptid_t ptid = * (ptid_t *) arg;
1314 struct thread_info *thread;
1315
1316 /* Check if we're only interested in events from a specific process
1317 or its lwps. */
1318 if (!ptid_equal (minus_one_ptid, ptid)
1319 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1320 return 0;
1321
1322 thread = get_lwp_thread (lwp);
1323
1324 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1325 report any status pending the LWP may have. */
1326 if (thread->last_resume_kind == resume_stop
1327 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1328 return 0;
1329
1330 return lwp->status_pending_p;
1331 }
1332
1333 static int
1334 same_lwp (struct inferior_list_entry *entry, void *data)
1335 {
1336 ptid_t ptid = *(ptid_t *) data;
1337 int lwp;
1338
1339 if (ptid_get_lwp (ptid) != 0)
1340 lwp = ptid_get_lwp (ptid);
1341 else
1342 lwp = ptid_get_pid (ptid);
1343
1344 if (ptid_get_lwp (entry->id) == lwp)
1345 return 1;
1346
1347 return 0;
1348 }
1349
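/* Find the lwp_info matching PTID's lwp field, or its pid field if
   the lwp field is zero.  Return NULL if no match is found.  */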
1350 struct lwp_info *
1351 find_lwp_pid (ptid_t ptid)
1352 {
1353 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1354 }
1355
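/* Wait (with __WALL implied) for an event from the LWP specified by
   PTID, or from any LWP if PTID is minus_one_ptid.  Store the raw
   wait status in *WSTATP.  Return the lwp_info of the LWP that
   reported an event, or NULL if OPTIONS contains WNOHANG and no
   event was pending.  */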
1356 static struct lwp_info *
1357 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1358 {
1359 int ret;
1360 int to_wait_for = -1;
1361 struct lwp_info *child = NULL;
1362
1363 if (debug_threads)
1364 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1365
1366 if (ptid_equal (ptid, minus_one_ptid))
1367 to_wait_for = -1; /* any child */
1368 else
1369 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1370
1371 options |= __WALL;
1372
1373 retry:
1374
1375 ret = my_waitpid (to_wait_for, wstatp, options);
1376 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1377 return NULL;
1378 else if (ret == -1)
1379 perror_with_name ("waitpid");
1380
1381 if (debug_threads
1382 && (!WIFSTOPPED (*wstatp)
1383 || (WSTOPSIG (*wstatp) != 32
1384 && WSTOPSIG (*wstatp) != 33)))
1385 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1386
1387 child = find_lwp_pid (pid_to_ptid (ret));
1388
1389 /* If we didn't find a process, one of two things presumably happened:
1390 - A process we started and then detached from has exited. Ignore it.
1391 - A process we are controlling has forked and the new child's stop
1392 was reported to us by the kernel. Save its PID. */
1393 if (child == NULL && WIFSTOPPED (*wstatp))
1394 {
1395 add_to_pid_list (&stopped_pids, ret, *wstatp);
1396 goto retry;
1397 }
1398 else if (child == NULL)
1399 goto retry;
1400
1401 child->stopped = 1;
1402
1403 child->last_status = *wstatp;
1404
1405 /* Architecture-specific setup after the inferior is running.
1406 This needs to happen after we have attached to the inferior
1407 and it is stopped for the first time, but before we access
1408 any inferior registers. */
1409 if (new_inferior)
1410 {
1411 the_low_target.arch_setup ();
1412 #ifdef HAVE_LINUX_REGSETS
1413 memset (disabled_regsets, 0, num_regsets);
1414 #endif
1415 new_inferior = 0;
1416 }
1417
1418 /* Fetch the possibly triggered data watchpoint info and store it in
1419 CHILD.
1420
1421 On some archs, like x86, that use debug registers to set
1422 watchpoints, it's possible that the way to know which watched
1423 address trapped, is to check the register that is used to select
1424 which address to watch. Problem is, between setting the
1425 watchpoint and reading back which data address trapped, the user
1426 may change the set of watchpoints, and, as a consequence, GDB
1427 changes the debug registers in the inferior. To avoid reading
1428 back a stale stopped-data-address when that happens, we cache in
1429 LP the fact that a watchpoint trapped, and the corresponding data
1430 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1431 changes the debug registers meanwhile, we have the cached data we
1432 can rely on. */
1433
1434 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1435 {
1436 if (the_low_target.stopped_by_watchpoint == NULL)
1437 {
1438 child->stopped_by_watchpoint = 0;
1439 }
1440 else
1441 {
1442 struct thread_info *saved_inferior;
1443
1444 saved_inferior = current_inferior;
1445 current_inferior = get_lwp_thread (child);
1446
1447 child->stopped_by_watchpoint
1448 = the_low_target.stopped_by_watchpoint ();
1449
1450 if (child->stopped_by_watchpoint)
1451 {
1452 if (the_low_target.stopped_data_address != NULL)
1453 child->stopped_data_address
1454 = the_low_target.stopped_data_address ();
1455 else
1456 child->stopped_data_address = 0;
1457 }
1458
1459 current_inferior = saved_inferior;
1460 }
1461 }
1462
1463 /* Store the STOP_PC, with adjustment applied. This depends on the
1464 architecture being defined already (so that CHILD has a valid
1465 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1466 not). */
1467 if (WIFSTOPPED (*wstatp))
1468 child->stop_pc = get_stop_pc (child);
1469
1470 if (debug_threads
1471 && WIFSTOPPED (*wstatp)
1472 && the_low_target.get_pc != NULL)
1473 {
1474 struct thread_info *saved_inferior = current_inferior;
1475 struct regcache *regcache;
1476 CORE_ADDR pc;
1477
1478 current_inferior = get_lwp_thread (child);
1479 regcache = get_thread_regcache (current_inferior, 1);
1480 pc = (*the_low_target.get_pc) (regcache);
1481 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1482 current_inferior = saved_inferior;
1483 }
1484
1485 return child;
1486 }
1487
1488 /* This function should only be called if the LWP got a SIGTRAP.
1489
1490 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1491 event was handled, 0 otherwise. */
1492
1493 static int
1494 handle_tracepoints (struct lwp_info *lwp)
1495 {
1496 struct thread_info *tinfo = get_lwp_thread (lwp);
1497 int tpoint_related_event = 0;
1498
1499 /* If this tracepoint hit causes a tracing stop, we'll immediately
1500 uninsert tracepoints. To do this, we temporarily pause all
1501 threads, unpatch away, and then unpause threads. We need to make
1502 sure the unpausing doesn't resume LWP too. */
1503 lwp->suspended++;
1504
1505 /* And we need to be sure that any all-threads-stopping doesn't try
1506 to move threads out of the jump pads, as it could deadlock the
1507 inferior (LWP could be in the jump pad, maybe even holding the
1508 lock.) */
1509
1510 /* Do any necessary step collect actions. */
1511 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1512
1513 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1514
1515 /* See if we just hit a tracepoint and do its main collect
1516 actions. */
1517 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1518
1519 lwp->suspended--;
1520
1521 gdb_assert (lwp->suspended == 0);
1522 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1523
1524 if (tpoint_related_event)
1525 {
1526 if (debug_threads)
1527 fprintf (stderr, "got a tracepoint event\n");
1528 return 1;
1529 }
1530
1531 return 0;
1532 }
1533
1534 /* Convenience wrapper. Returns true if LWP is presently collecting a
1535 fast tracepoint. */
1536
1537 static int
1538 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1539 struct fast_tpoint_collect_status *status)
1540 {
1541 CORE_ADDR thread_area;
1542
1543 if (the_low_target.get_thread_area == NULL)
1544 return 0;
1545
1546 /* Get the thread area address. This is used to recognize which
1547 thread is which when tracing with the in-process agent library.
1548 We don't read anything from the address, and treat it as opaque;
1549 it's the address itself that we assume is unique per-thread. */
1550 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1551 return 0;
1552
1553 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1554 }
1555
1556 /* The reason we resume in the caller is that we want to be able
1557 to pass lwp->status_pending as WSTAT, and we need to clear
1558 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1559 refuses to resume. */
1560
1561 static int
1562 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1563 {
1564 struct thread_info *saved_inferior;
1565
1566 saved_inferior = current_inferior;
1567 current_inferior = get_lwp_thread (lwp);
1568
1569 if ((wstat == NULL
1570 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1571 && supports_fast_tracepoints ()
1572 && agent_loaded_p ())
1573 {
1574 struct fast_tpoint_collect_status status;
1575 int r;
1576
1577 if (debug_threads)
1578 fprintf (stderr, "\
1579 Checking whether LWP %ld needs to move out of the jump pad.\n",
1580 lwpid_of (lwp));
1581
1582 r = linux_fast_tracepoint_collecting (lwp, &status);
1583
1584 if (wstat == NULL
1585 || (WSTOPSIG (*wstat) != SIGILL
1586 && WSTOPSIG (*wstat) != SIGFPE
1587 && WSTOPSIG (*wstat) != SIGSEGV
1588 && WSTOPSIG (*wstat) != SIGBUS))
1589 {
1590 lwp->collecting_fast_tracepoint = r;
1591
1592 if (r != 0)
1593 {
1594 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1595 {
1596 /* Haven't executed the original instruction yet.
1597 Set breakpoint there, and wait till it's hit,
1598 then single-step until exiting the jump pad. */
1599 lwp->exit_jump_pad_bkpt
1600 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1601 }
1602
1603 if (debug_threads)
1604 fprintf (stderr, "\
1605 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1606 lwpid_of (lwp));
1607 current_inferior = saved_inferior;
1608
1609 return 1;
1610 }
1611 }
1612 else
1613 {
1614 /* If we get a synchronous signal while collecting, *and*
1615 while executing the (relocated) original instruction,
1616 reset the PC to point at the tpoint address, before
1617 reporting to GDB. Otherwise, it's an IPA lib bug: just
1618 report the signal to GDB, and pray for the best. */
1619
1620 lwp->collecting_fast_tracepoint = 0;
1621
1622 if (r != 0
1623 && (status.adjusted_insn_addr <= lwp->stop_pc
1624 && lwp->stop_pc < status.adjusted_insn_addr_end))
1625 {
1626 siginfo_t info;
1627 struct regcache *regcache;
1628
1629 /* The si_addr on a few signals references the address
1630 of the faulting instruction. Adjust that as
1631 well. */
1632 if ((WSTOPSIG (*wstat) == SIGILL
1633 || WSTOPSIG (*wstat) == SIGFPE
1634 || WSTOPSIG (*wstat) == SIGBUS
1635 || WSTOPSIG (*wstat) == SIGSEGV)
1636 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1637 (PTRACE_ARG3_TYPE) 0, &info) == 0
1638 /* Final check just to make sure we don't clobber
1639 the siginfo of non-kernel-sent signals. */
1640 && (uintptr_t) info.si_addr == lwp->stop_pc)
1641 {
1642 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1643 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1644 (PTRACE_ARG3_TYPE) 0, &info);
1645 }
1646
1647 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1648 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1649 lwp->stop_pc = status.tpoint_addr;
1650
1651 /* Cancel any fast tracepoint lock this thread was
1652 holding. */
1653 force_unlock_trace_buffer ();
1654 }
1655
1656 if (lwp->exit_jump_pad_bkpt != NULL)
1657 {
1658 if (debug_threads)
1659 fprintf (stderr,
1660 "Cancelling fast exit-jump-pad: removing bkpt. "
1661 "stopping all threads momentarily.\n");
1662
1663 stop_all_lwps (1, lwp);
1664 cancel_breakpoints ();
1665
1666 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1667 lwp->exit_jump_pad_bkpt = NULL;
1668
1669 unstop_all_lwps (1, lwp);
1670
1671 gdb_assert (lwp->suspended >= 0);
1672 }
1673 }
1674 }
1675
1676 if (debug_threads)
1677 fprintf (stderr, "\
1678 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1679 lwpid_of (lwp));
1680
1681 current_inferior = saved_inferior;
1682 return 0;
1683 }
1684
1685 /* Enqueue one signal in the "signals to report later when out of the
1686 jump pad" list. */
1687
1688 static void
1689 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1690 {
1691 struct pending_signals *p_sig;
1692
1693 if (debug_threads)
1694 fprintf (stderr, "\
1695 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1696
1697 if (debug_threads)
1698 {
1699 struct pending_signals *sig;
1700
1701 for (sig = lwp->pending_signals_to_report;
1702 sig != NULL;
1703 sig = sig->prev)
1704 fprintf (stderr,
1705 " Already queued %d\n",
1706 sig->signal);
1707
1708 fprintf (stderr, " (no more currently queued signals)\n");
1709 }
1710
1711 /* Don't enqueue non-RT signals if they are already in the deferred
1712 queue. (SIGSTOP being the easiest signal to see ending up here
1713 twice.) */
1714 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1715 {
1716 struct pending_signals *sig;
1717
1718 for (sig = lwp->pending_signals_to_report;
1719 sig != NULL;
1720 sig = sig->prev)
1721 {
1722 if (sig->signal == WSTOPSIG (*wstat))
1723 {
1724 if (debug_threads)
1725 fprintf (stderr,
1726 "Not requeuing already queued non-RT signal %d"
1727 " for LWP %ld\n",
1728 sig->signal,
1729 lwpid_of (lwp));
1730 return;
1731 }
1732 }
1733 }
1734
1735 p_sig = xmalloc (sizeof (*p_sig));
1736 p_sig->prev = lwp->pending_signals_to_report;
1737 p_sig->signal = WSTOPSIG (*wstat);
1738 memset (&p_sig->info, 0, sizeof (siginfo_t));
1739 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1740 &p_sig->info);
1741
1742 lwp->pending_signals_to_report = p_sig;
1743 }
1744
1745 /* Dequeue one signal from the "signals to report later when out of
1746 the jump pad" list. */
1747
1748 static int
1749 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1750 {
1751 if (lwp->pending_signals_to_report != NULL)
1752 {
1753 struct pending_signals **p_sig;
1754
1755 p_sig = &lwp->pending_signals_to_report;
1756 while ((*p_sig)->prev != NULL)
1757 p_sig = &(*p_sig)->prev;
1758
1759 *wstat = W_STOPCODE ((*p_sig)->signal);
1760 if ((*p_sig)->info.si_signo != 0)
1761 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
1762 &(*p_sig)->info);
1763 free (*p_sig);
1764 *p_sig = NULL;
1765
1766 if (debug_threads)
1767 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1768 WSTOPSIG (*wstat), lwpid_of (lwp));
1769
1770 if (debug_threads)
1771 {
1772 struct pending_signals *sig;
1773
1774 for (sig = lwp->pending_signals_to_report;
1775 sig != NULL;
1776 sig = sig->prev)
1777 fprintf (stderr,
1778 " Still queued %d\n",
1779 sig->signal);
1780
1781 fprintf (stderr, " (no more queued signals)\n");
1782 }
1783
1784 return 1;
1785 }
1786
1787 return 0;
1788 }
1789
1790 /* Arrange for a breakpoint to be hit again later. We don't keep the
1791 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1792 will handle the current event, eventually we will resume this LWP,
1793 and this breakpoint will trap again. */
1794
1795 static int
1796 cancel_breakpoint (struct lwp_info *lwp)
1797 {
1798 struct thread_info *saved_inferior;
1799
1800 /* There's nothing to do if we don't support breakpoints. */
1801 if (!supports_breakpoints ())
1802 return 0;
1803
1804 /* breakpoint_at reads from current inferior. */
1805 saved_inferior = current_inferior;
1806 current_inferior = get_lwp_thread (lwp);
1807
1808 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1809 {
1810 if (debug_threads)
1811 fprintf (stderr,
1812 "CB: Push back breakpoint for %s\n",
1813 target_pid_to_str (ptid_of (lwp)));
1814
1815 /* Back up the PC if necessary. */
1816 if (the_low_target.decr_pc_after_break)
1817 {
1818 struct regcache *regcache
1819 = get_thread_regcache (current_inferior, 1);
1820 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1821 }
1822
1823 current_inferior = saved_inferior;
1824 return 1;
1825 }
1826 else
1827 {
1828 if (debug_threads)
1829 fprintf (stderr,
1830 "CB: No breakpoint found at %s for [%s]\n",
1831 paddress (lwp->stop_pc),
1832 target_pid_to_str (ptid_of (lwp)));
1833 }
1834
1835 current_inferior = saved_inferior;
1836 return 0;
1837 }
1838
1839 /* When the event-loop is doing a step-over, this points at the thread
1840 being stepped. */
1841 ptid_t step_over_bkpt;
1842
1843 /* Wait for an event from child PID. If PID is -1, wait for any
1844 child. Store the stop status through the status pointer WSTAT.
1845 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1846 event was found and OPTIONS contains WNOHANG. Return the PID of
1847 the stopped child otherwise. */
1848
1849 static int
1850 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1851 {
1852 struct lwp_info *event_child, *requested_child;
1853 ptid_t wait_ptid;
1854
1855 event_child = NULL;
1856 requested_child = NULL;
1857
1858 /* Check for a lwp with a pending status. */
1859
1860 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1861 {
1862 event_child = (struct lwp_info *)
1863 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1864 if (debug_threads && event_child)
1865 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1866 }
1867 else
1868 {
1869 requested_child = find_lwp_pid (ptid);
1870
1871 if (stopping_threads == NOT_STOPPING_THREADS
1872 && requested_child->status_pending_p
1873 && requested_child->collecting_fast_tracepoint)
1874 {
1875 enqueue_one_deferred_signal (requested_child,
1876 &requested_child->status_pending);
1877 requested_child->status_pending_p = 0;
1878 requested_child->status_pending = 0;
1879 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1880 }
1881
1882 if (requested_child->suspended
1883 && requested_child->status_pending_p)
1884 fatal ("requesting an event out of a suspended child?");
1885
1886 if (requested_child->status_pending_p)
1887 event_child = requested_child;
1888 }
1889
1890 if (event_child != NULL)
1891 {
1892 if (debug_threads)
1893 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1894 lwpid_of (event_child), event_child->status_pending);
1895 *wstat = event_child->status_pending;
1896 event_child->status_pending_p = 0;
1897 event_child->status_pending = 0;
1898 current_inferior = get_lwp_thread (event_child);
1899 return lwpid_of (event_child);
1900 }
1901
1902 if (ptid_is_pid (ptid))
1903 {
1904 /* A request to wait for a specific tgid. This is not possible
1905 with waitpid, so instead, we wait for any child, and leave
1906 children we're not interested in right now with a pending
1907 status to report later. */
1908 wait_ptid = minus_one_ptid;
1909 }
1910 else
1911 wait_ptid = ptid;
1912
1913 /* We only enter this loop if no process has a pending wait status. Thus
1914 any action taken in response to a wait status inside this loop is
1915 responding as soon as we detect the status, not after any pending
1916 events. */
1917 while (1)
1918 {
1919 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1920
1921 if ((options & WNOHANG) && event_child == NULL)
1922 {
1923 if (debug_threads)
1924 fprintf (stderr, "WNOHANG set, no event found\n");
1925 return 0;
1926 }
1927
1928 if (event_child == NULL)
1929 error ("event from unknown child");
1930
1931 if (ptid_is_pid (ptid)
1932 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1933 {
1934 if (! WIFSTOPPED (*wstat))
1935 mark_lwp_dead (event_child, *wstat);
1936 else
1937 {
1938 event_child->status_pending_p = 1;
1939 event_child->status_pending = *wstat;
1940 }
1941 continue;
1942 }
1943
1944 current_inferior = get_lwp_thread (event_child);
1945
1946 /* Check for thread exit. */
1947 if (! WIFSTOPPED (*wstat))
1948 {
1949 if (debug_threads)
1950 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1951
1952 /* If the last thread is exiting, just return. */
1953 if (last_thread_of_process_p (current_inferior))
1954 {
1955 if (debug_threads)
1956 fprintf (stderr, "LWP %ld is last lwp of process\n",
1957 lwpid_of (event_child));
1958 return lwpid_of (event_child);
1959 }
1960
1961 if (!non_stop)
1962 {
1963 current_inferior = (struct thread_info *) all_threads.head;
1964 if (debug_threads)
1965 fprintf (stderr, "Current inferior is now %ld\n",
1966 lwpid_of (get_thread_lwp (current_inferior)));
1967 }
1968 else
1969 {
1970 current_inferior = NULL;
1971 if (debug_threads)
1972 fprintf (stderr, "Current inferior is now <NULL>\n");
1973 }
1974
1975 /* If we were waiting for this particular child to do something...
1976 well, it did something. */
1977 if (requested_child != NULL)
1978 {
1979 int lwpid = lwpid_of (event_child);
1980
1981 /* Cancel the step-over operation --- the thread that
1982 started it is gone. */
1983 if (finish_step_over (event_child))
1984 unstop_all_lwps (1, event_child);
1985 delete_lwp (event_child);
1986 return lwpid;
1987 }
1988
1989 delete_lwp (event_child);
1990
1991 /* Wait for a more interesting event. */
1992 continue;
1993 }
1994
1995 if (event_child->must_set_ptrace_flags)
1996 {
1997 linux_enable_event_reporting (lwpid_of (event_child));
1998 event_child->must_set_ptrace_flags = 0;
1999 }
2000
2001 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
2002 && *wstat >> 16 != 0)
2003 {
2004 handle_extended_wait (event_child, *wstat);
2005 continue;
2006 }
2007
2008 if (WIFSTOPPED (*wstat)
2009 && WSTOPSIG (*wstat) == SIGSTOP
2010 && event_child->stop_expected)
2011 {
2012 int should_stop;
2013
2014 if (debug_threads)
2015 fprintf (stderr, "Expected stop.\n");
2016 event_child->stop_expected = 0;
2017
2018 should_stop = (current_inferior->last_resume_kind == resume_stop
2019 || stopping_threads != NOT_STOPPING_THREADS);
2020
2021 if (!should_stop)
2022 {
2023 linux_resume_one_lwp (event_child,
2024 event_child->stepping, 0, NULL);
2025 continue;
2026 }
2027 }
2028
2029 return lwpid_of (event_child);
2030 }
2031
2032 /* NOTREACHED */
2033 return 0;
2034 }
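/* Standalone sketch (not part of this file): decoding the raw wait
   status word that linux_wait_for_event stores through WSTAT.  The
   low 16 bits carry the stop/exit state; when ptrace extended events
   are enabled, the kernel additionally stores an event code above bit
   15, which is what the "*wstat >> 16 != 0" test above keys on.  */

#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void
describe_wait_status (int w)
{
  if (WIFEXITED (w))
    printf ("exited, code %d\n", WEXITSTATUS (w));
  else if (WIFSIGNALED (w))
    printf ("killed by signal %d\n", WTERMSIG (w));
  else if (WIFSTOPPED (w))
    {
      printf ("stopped by signal %d\n", WSTOPSIG (w));

      /* E.g., PTRACE_EVENT_CLONE for a new LWP.  */
      if (WSTOPSIG (w) == SIGTRAP && (w >> 16) != 0)
	printf ("extended ptrace event %d\n", w >> 16);
    }
}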
2035
2036 /* Count the LWPs that have had events. */
2037
2038 static int
2039 count_events_callback (struct inferior_list_entry *entry, void *data)
2040 {
2041 struct lwp_info *lp = (struct lwp_info *) entry;
2042 struct thread_info *thread = get_lwp_thread (lp);
2043 int *count = data;
2044
2045 gdb_assert (count != NULL);
2046
2047 /* Count only resumed LWPs that have a SIGTRAP event pending that
2048 should be reported to GDB. */
2049 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2050 && thread->last_resume_kind != resume_stop
2051 && lp->status_pending_p
2052 && WIFSTOPPED (lp->status_pending)
2053 && WSTOPSIG (lp->status_pending) == SIGTRAP
2054 && !breakpoint_inserted_here (lp->stop_pc))
2055 (*count)++;
2056
2057 return 0;
2058 }
2059
2060 /* Select the LWP (if any) that is currently being single-stepped. */
2061
2062 static int
2063 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2064 {
2065 struct lwp_info *lp = (struct lwp_info *) entry;
2066 struct thread_info *thread = get_lwp_thread (lp);
2067
2068 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2069 && thread->last_resume_kind == resume_step
2070 && lp->status_pending_p)
2071 return 1;
2072 else
2073 return 0;
2074 }
2075
2076 /* Select the Nth LWP that has had a SIGTRAP event that should be
2077 reported to GDB. */
2078
2079 static int
2080 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2081 {
2082 struct lwp_info *lp = (struct lwp_info *) entry;
2083 struct thread_info *thread = get_lwp_thread (lp);
2084 int *selector = data;
2085
2086 gdb_assert (selector != NULL);
2087
2088 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2089 if (thread->last_resume_kind != resume_stop
2090 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2091 && lp->status_pending_p
2092 && WIFSTOPPED (lp->status_pending)
2093 && WSTOPSIG (lp->status_pending) == SIGTRAP
2094 && !breakpoint_inserted_here (lp->stop_pc))
2095 if ((*selector)-- == 0)
2096 return 1;
2097
2098 return 0;
2099 }
2100
2101 static int
2102 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2103 {
2104 struct lwp_info *lp = (struct lwp_info *) entry;
2105 struct thread_info *thread = get_lwp_thread (lp);
2106 struct lwp_info *event_lp = data;
2107
2108 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2109 if (lp == event_lp)
2110 return 0;
2111
2112 /* If a LWP other than the LWP that we're reporting an event for has
2113 hit a GDB breakpoint (as opposed to some random trap signal),
2114 then just arrange for it to hit it again later. We don't keep
2115 the SIGTRAP status and don't forward the SIGTRAP signal to the
2116 LWP. We will handle the current event, eventually we will resume
2117 all LWPs, and this one will get its breakpoint trap again.
2118
2119 If we do not do this, then we run the risk that the user will
2120 delete or disable the breakpoint, but the LWP will have already
2121 tripped on it. */
2122
2123 if (thread->last_resume_kind != resume_stop
2124 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2125 && lp->status_pending_p
2126 && WIFSTOPPED (lp->status_pending)
2127 && WSTOPSIG (lp->status_pending) == SIGTRAP
2128 && !lp->stepping
2129 && !lp->stopped_by_watchpoint
2130 && cancel_breakpoint (lp))
2131 /* Throw away the SIGTRAP. */
2132 lp->status_pending_p = 0;
2133
2134 return 0;
2135 }
2136
2137 static void
2138 linux_cancel_breakpoints (void)
2139 {
2140 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2141 }
2142
2143 /* Select one LWP out of those that have events pending. */
2144
2145 static void
2146 select_event_lwp (struct lwp_info **orig_lp)
2147 {
2148 int num_events = 0;
2149 int random_selector;
2150 struct lwp_info *event_lp;
2151
2152 /* Give preference to any LWP that is being single-stepped. */
2153 event_lp
2154 = (struct lwp_info *) find_inferior (&all_lwps,
2155 select_singlestep_lwp_callback, NULL);
2156 if (event_lp != NULL)
2157 {
2158 if (debug_threads)
2159 fprintf (stderr,
2160 "SEL: Select single-step %s\n",
2161 target_pid_to_str (ptid_of (event_lp)));
2162 }
2163 else
2164 {
2165 /* No single-stepping LWP. Select one at random, out of those
2166 which have had SIGTRAP events. */
2167
2168 /* First see how many SIGTRAP events we have. */
2169 find_inferior (&all_lwps, count_events_callback, &num_events);
2170
2171 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2172 random_selector = (int)
2173 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2174
2175 if (debug_threads && num_events > 1)
2176 fprintf (stderr,
2177 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2178 num_events, random_selector);
2179
2180 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2181 select_event_lwp_callback,
2182 &random_selector);
2183 }
2184
2185 if (event_lp != NULL)
2186 {
2187 /* Switch the event LWP. */
2188 *orig_lp = event_lp;
2189 }
2190 }
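/* Aside (illustration only): the expression above maps rand () onto
   0..num_events-1 with (approximately) uniform probability.  The more
   obvious "rand () % n" is slightly biased towards small values
   whenever RAND_MAX + 1 is not a multiple of N; scaling by a
   double-precision ratio avoids that bias:  */

#include <stdlib.h>

static int
uniform_pick (int n)
{
  /* rand () / (RAND_MAX + 1.0) is in [0, 1), so the result is in
     [0, n).  */
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}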
2191
2192 /* Decrement the suspend count of an LWP. */
2193
2194 static int
2195 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2196 {
2197 struct lwp_info *lwp = (struct lwp_info *) entry;
2198
2199 /* Ignore EXCEPT. */
2200 if (lwp == except)
2201 return 0;
2202
2203 lwp->suspended--;
2204
2205 gdb_assert (lwp->suspended >= 0);
2206 return 0;
2207 }
2208
2209 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2210 non-NULL. */
2211
2212 static void
2213 unsuspend_all_lwps (struct lwp_info *except)
2214 {
2215 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2216 }
2217
2218 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2219 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2220 void *data);
2221 static int lwp_running (struct inferior_list_entry *entry, void *data);
2222 static ptid_t linux_wait_1 (ptid_t ptid,
2223 struct target_waitstatus *ourstatus,
2224 int target_options);
2225
2226 /* Stabilize threads (move out of jump pads).
2227
2228 If a thread is midway collecting a fast tracepoint, we need to
2229 finish the collection and move it out of the jump pad before
2230 reporting the signal.
2231
2232 This avoids recursion while collecting (when a signal arrives
2233 midway, and the signal handler itself collects), which would trash
2234 the trace buffer. In case the user set a breakpoint in a signal
2235 handler, this avoids the backtrace showing the jump pad, etc..
2236 Most importantly, there are certain things we can't do safely if
2237 threads are stopped in a jump pad (or in its callees). For
2238 example:
2239
2240 - starting a new trace run. A thread still collecting from the
2241 previous run could trash the trace buffer when resumed. The trace
2242 buffer control structures would have been reset, but the thread had
2243 no way to tell. The thread could even be midway through memcpy'ing
2244 to the buffer, which would mean that when resumed, it would clobber
2245 the trace buffer that had been set up for the new run.
2246
2247 - we can't rewrite/reuse the jump pads for new tracepoints
2248 safely. Say you do tstart while a thread is stopped midway
2249 through collecting. When the thread is later resumed, it finishes
2250 the collection, and returns to the jump pad, to execute the
2251 original instruction that was under the tracepoint jump at the
2252 time the older run had been started. If the jump pad had since
2253 been rewritten for something else in the new run, the thread would
2254 now execute wrong or random instructions. */
2255
2256 static void
2257 linux_stabilize_threads (void)
2258 {
2259 struct thread_info *save_inferior;
2260 struct lwp_info *lwp_stuck;
2261
2262 lwp_stuck
2263 = (struct lwp_info *) find_inferior (&all_lwps,
2264 stuck_in_jump_pad_callback, NULL);
2265 if (lwp_stuck != NULL)
2266 {
2267 if (debug_threads)
2268 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2269 lwpid_of (lwp_stuck));
2270 return;
2271 }
2272
2273 save_inferior = current_inferior;
2274
2275 stabilizing_threads = 1;
2276
2277 /* Kick 'em all. */
2278 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2279
2280 /* Loop until all are stopped out of the jump pads. */
2281 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2282 {
2283 struct target_waitstatus ourstatus;
2284 struct lwp_info *lwp;
2285 int wstat;
2286
2287 /* Note that we go through the full wait event loop. While
2288 moving threads out of the jump pad, we need to be able to step
2289 over internal breakpoints and such. */
2290 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2291
2292 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2293 {
2294 lwp = get_thread_lwp (current_inferior);
2295
2296 /* Lock it. */
2297 lwp->suspended++;
2298
2299 if (ourstatus.value.sig != GDB_SIGNAL_0
2300 || current_inferior->last_resume_kind == resume_stop)
2301 {
2302 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2303 enqueue_one_deferred_signal (lwp, &wstat);
2304 }
2305 }
2306 }
2307
2308 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2309
2310 stabilizing_threads = 0;
2311
2312 current_inferior = save_inferior;
2313
2314 if (debug_threads)
2315 {
2316 lwp_stuck
2317 = (struct lwp_info *) find_inferior (&all_lwps,
2318 stuck_in_jump_pad_callback, NULL);
2319 if (lwp_stuck != NULL)
2320 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2321 lwpid_of (lwp_stuck));
2322 }
2323 }
2324
2325 /* Wait for the process, and return its status. */
2326
2327 static ptid_t
2328 linux_wait_1 (ptid_t ptid,
2329 struct target_waitstatus *ourstatus, int target_options)
2330 {
2331 int w;
2332 struct lwp_info *event_child;
2333 int options;
2334 int pid;
2335 int step_over_finished;
2336 int bp_explains_trap;
2337 int maybe_internal_trap;
2338 int report_to_gdb;
2339 int trace_event;
2340
2341 /* Translate generic target options into linux options. */
2342 options = __WALL;
2343 if (target_options & TARGET_WNOHANG)
2344 options |= WNOHANG;
2345
2346 retry:
2347 bp_explains_trap = 0;
2348 trace_event = 0;
2349 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2350
2351 /* If we were only supposed to resume one thread, only wait for
2352 that thread - if it's still alive. If it died, however - which
2353 can happen if we're coming from the thread death case below -
2354 then we need to make sure we restart the other threads. We could
2355 pick a thread at random or restart all; restarting all is less
2356 arbitrary. */
2357 if (!non_stop
2358 && !ptid_equal (cont_thread, null_ptid)
2359 && !ptid_equal (cont_thread, minus_one_ptid))
2360 {
2361 struct thread_info *thread;
2362
2363 thread = (struct thread_info *) find_inferior_id (&all_threads,
2364 cont_thread);
2365
2366 /* No stepping, no signal - unless one is pending already, of course. */
2367 if (thread == NULL)
2368 {
2369 struct thread_resume resume_info;
2370 resume_info.thread = minus_one_ptid;
2371 resume_info.kind = resume_continue;
2372 resume_info.sig = 0;
2373 linux_resume (&resume_info, 1);
2374 }
2375 else
2376 ptid = cont_thread;
2377 }
2378
2379 if (ptid_equal (step_over_bkpt, null_ptid))
2380 pid = linux_wait_for_event (ptid, &w, options);
2381 else
2382 {
2383 if (debug_threads)
2384 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2385 target_pid_to_str (step_over_bkpt));
2386 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2387 }
2388
2389 if (pid == 0) /* only if TARGET_WNOHANG */
2390 return null_ptid;
2391
2392 event_child = get_thread_lwp (current_inferior);
2393
2394 /* If we are waiting for a particular child, and it exited,
2395 linux_wait_for_event will return its exit status. Similarly if
2396 the last child exited. If this is not the last child, however,
2397 do not report it as exited until there is a 'thread exited' response
2398 available in the remote protocol. Instead, just wait for another event.
2399 This should be safe, because if the thread crashed we will already
2400 have reported the termination signal to GDB; that should stop any
2401 in-progress stepping operations, etc.
2402
2403 Report the exit status of the last thread to exit. This matches
2404 LinuxThreads' behavior. */
2405
2406 if (last_thread_of_process_p (current_inferior))
2407 {
2408 if (WIFEXITED (w) || WIFSIGNALED (w))
2409 {
2410 if (WIFEXITED (w))
2411 {
2412 ourstatus->kind = TARGET_WAITKIND_EXITED;
2413 ourstatus->value.integer = WEXITSTATUS (w);
2414
2415 if (debug_threads)
2416 fprintf (stderr,
2417 "\nChild exited with retcode = %x \n",
2418 WEXITSTATUS (w));
2419 }
2420 else
2421 {
2422 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2423 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2424
2425 if (debug_threads)
2426 fprintf (stderr,
2427 "\nChild terminated with signal = %x \n",
2428 WTERMSIG (w));
2429
2430 }
2431
2432 return ptid_of (event_child);
2433 }
2434 }
2435 else
2436 {
2437 if (!WIFSTOPPED (w))
2438 goto retry;
2439 }
2440
2441 /* If this event was not handled before, and is not a SIGTRAP, we
2442 report it. SIGILL and SIGSEGV are also treated as traps in case
2443 a breakpoint is inserted at the current PC. If this target does
2444 not support internal breakpoints at all, we also report the
2445 SIGTRAP without further processing; it's of no concern to us. */
2446 maybe_internal_trap
2447 = (supports_breakpoints ()
2448 && (WSTOPSIG (w) == SIGTRAP
2449 || ((WSTOPSIG (w) == SIGILL
2450 || WSTOPSIG (w) == SIGSEGV)
2451 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2452
2453 if (maybe_internal_trap)
2454 {
2455 /* Handle anything that requires bookkeeping before deciding to
2456 report the event or continue waiting. */
2457
2458 /* First check if we can explain the SIGTRAP with an internal
2459 breakpoint, or if we should possibly report the event to GDB.
2460 Do this before anything that may remove or insert a
2461 breakpoint. */
2462 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2463
2464 /* We have a SIGTRAP, possibly a step-over dance has just
2465 finished. If so, tweak the state machine accordingly,
2466 reinsert breakpoints and delete any reinsert (software
2467 single-step) breakpoints. */
2468 step_over_finished = finish_step_over (event_child);
2469
2470 /* Now invoke the callbacks of any internal breakpoints there. */
2471 check_breakpoints (event_child->stop_pc);
2472
2473 /* Handle tracepoint data collecting. This may overflow the
2474 trace buffer, and cause a tracing stop, removing
2475 breakpoints. */
2476 trace_event = handle_tracepoints (event_child);
2477
2478 if (bp_explains_trap)
2479 {
2480 /* If we stepped or ran into an internal breakpoint, we've
2481 already handled it. So next time we resume (from this
2482 PC), we should step over it. */
2483 if (debug_threads)
2484 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2485
2486 if (breakpoint_here (event_child->stop_pc))
2487 event_child->need_step_over = 1;
2488 }
2489 }
2490 else
2491 {
2492 /* We have some other signal, possibly a step-over dance was in
2493 progress, and it should be cancelled too. */
2494 step_over_finished = finish_step_over (event_child);
2495 }
2496
2497 /* We have all the data we need. Either report the event to GDB, or
2498 resume threads and keep waiting for more. */
2499
2500 /* If we're collecting a fast tracepoint, finish the collection and
2501 move out of the jump pad before delivering a signal. See
2502 linux_stabilize_threads. */
2503
2504 if (WIFSTOPPED (w)
2505 && WSTOPSIG (w) != SIGTRAP
2506 && supports_fast_tracepoints ()
2507 && agent_loaded_p ())
2508 {
2509 if (debug_threads)
2510 fprintf (stderr,
2511 "Got signal %d for LWP %ld. Check if we need "
2512 "to defer or adjust it.\n",
2513 WSTOPSIG (w), lwpid_of (event_child));
2514
2515 /* Allow debugging the jump pad itself. */
2516 if (current_inferior->last_resume_kind != resume_step
2517 && maybe_move_out_of_jump_pad (event_child, &w))
2518 {
2519 enqueue_one_deferred_signal (event_child, &w);
2520
2521 if (debug_threads)
2522 fprintf (stderr,
2523 "Signal %d for LWP %ld deferred (in jump pad)\n",
2524 WSTOPSIG (w), lwpid_of (event_child));
2525
2526 linux_resume_one_lwp (event_child, 0, 0, NULL);
2527 goto retry;
2528 }
2529 }
2530
2531 if (event_child->collecting_fast_tracepoint)
2532 {
2533 if (debug_threads)
2534 fprintf (stderr, "\
2535 LWP %ld was trying to move out of the jump pad (%d). \
2536 Check if we're already there.\n",
2537 lwpid_of (event_child),
2538 event_child->collecting_fast_tracepoint);
2539
2540 trace_event = 1;
2541
2542 event_child->collecting_fast_tracepoint
2543 = linux_fast_tracepoint_collecting (event_child, NULL);
2544
2545 if (event_child->collecting_fast_tracepoint != 1)
2546 {
2547 /* No longer need this breakpoint. */
2548 if (event_child->exit_jump_pad_bkpt != NULL)
2549 {
2550 if (debug_threads)
2551 fprintf (stderr,
2552 "No longer need exit-jump-pad bkpt; removing it."
2553 "stopping all threads momentarily.\n");
2554
2555 /* Other running threads could hit this breakpoint.
2556 We don't handle moribund locations like GDB does,
2557 instead we always pause all threads when removing
2558 breakpoints, so that any step-over or
2559 decr_pc_after_break adjustment is always taken
2560 care of while the breakpoint is still
2561 inserted. */
2562 stop_all_lwps (1, event_child);
2563 cancel_breakpoints ();
2564
2565 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2566 event_child->exit_jump_pad_bkpt = NULL;
2567
2568 unstop_all_lwps (1, event_child);
2569
2570 gdb_assert (event_child->suspended >= 0);
2571 }
2572 }
2573
2574 if (event_child->collecting_fast_tracepoint == 0)
2575 {
2576 if (debug_threads)
2577 fprintf (stderr,
2578 "fast tracepoint finished "
2579 "collecting successfully.\n");
2580
2581 /* We may have a deferred signal to report. */
2582 if (dequeue_one_deferred_signal (event_child, &w))
2583 {
2584 if (debug_threads)
2585 fprintf (stderr, "dequeued one signal.\n");
2586 }
2587 else
2588 {
2589 if (debug_threads)
2590 fprintf (stderr, "no deferred signals.\n");
2591
2592 if (stabilizing_threads)
2593 {
2594 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2595 ourstatus->value.sig = GDB_SIGNAL_0;
2596 return ptid_of (event_child);
2597 }
2598 }
2599 }
2600 }
2601
2602 /* Check whether GDB would be interested in this event. */
2603
2604 /* If GDB is not interested in this signal, don't stop other
2605 threads, and don't report it to GDB. Just resume the inferior
2606 right away. We do this for threading-related signals as well as
2607 any that GDB specifically requested we ignore. But never ignore
2608 SIGSTOP if we sent it ourselves, and do not ignore signals when
2609 stepping - they may require special handling to skip the signal
2610 handler. */
2611 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2612 thread library? */
2613 if (WIFSTOPPED (w)
2614 && current_inferior->last_resume_kind != resume_step
2615 && (
2616 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2617 (current_process ()->private->thread_db != NULL
2618 && (WSTOPSIG (w) == __SIGRTMIN
2619 || WSTOPSIG (w) == __SIGRTMIN + 1))
2620 ||
2621 #endif
2622 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2623 && !(WSTOPSIG (w) == SIGSTOP
2624 && current_inferior->last_resume_kind == resume_stop))))
2625 {
2626 siginfo_t info, *info_p;
2627
2628 if (debug_threads)
2629 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2630 WSTOPSIG (w), lwpid_of (event_child));
2631
2632 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2633 (PTRACE_ARG3_TYPE) 0, &info) == 0)
2634 info_p = &info;
2635 else
2636 info_p = NULL;
2637 linux_resume_one_lwp (event_child, event_child->stepping,
2638 WSTOPSIG (w), info_p);
2639 goto retry;
2640 }
2641
2642 /* If GDB wanted this thread to single step, we always want to
2643 report the SIGTRAP, and let GDB handle it. Watchpoints should
2644 always be reported. So should signals we can't explain. A
2645 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2646 may not support Z0 breakpoints. If we do, we'll be able to handle
2647 GDB breakpoints on top of internal breakpoints, by handling the
2648 internal breakpoint and still reporting the event to GDB. If we
2649 don't, we're out of luck, GDB won't see the breakpoint hit. */
2650 report_to_gdb = (!maybe_internal_trap
2651 || current_inferior->last_resume_kind == resume_step
2652 || event_child->stopped_by_watchpoint
2653 || (!step_over_finished
2654 && !bp_explains_trap && !trace_event)
2655 || (gdb_breakpoint_here (event_child->stop_pc)
2656 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2657 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2658
2659 run_breakpoint_commands (event_child->stop_pc);
2660
2661 /* We found no reason GDB would want us to stop. We either hit one
2662 of our own breakpoints, or finished an internal step GDB
2663 shouldn't know about. */
2664 if (!report_to_gdb)
2665 {
2666 if (debug_threads)
2667 {
2668 if (bp_explains_trap)
2669 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2670 if (step_over_finished)
2671 fprintf (stderr, "Step-over finished.\n");
2672 if (trace_event)
2673 fprintf (stderr, "Tracepoint event.\n");
2674 }
2675
2676 /* We're not reporting this breakpoint to GDB, so apply the
2677 decr_pc_after_break adjustment to the inferior's regcache
2678 ourselves. */
2679
2680 if (the_low_target.set_pc != NULL)
2681 {
2682 struct regcache *regcache
2683 = get_thread_regcache (get_lwp_thread (event_child), 1);
2684 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2685 }
2686
2687 /* We may have finished stepping over a breakpoint. If so,
2688 we've stopped and suspended all LWPs momentarily except the
2689 stepping one. This is where we resume them all again. We're
2690 going to keep waiting, so use proceed, which handles stepping
2691 over the next breakpoint. */
2692 if (debug_threads)
2693 fprintf (stderr, "proceeding all threads.\n");
2694
2695 if (step_over_finished)
2696 unsuspend_all_lwps (event_child);
2697
2698 proceed_all_lwps ();
2699 goto retry;
2700 }
2701
2702 if (debug_threads)
2703 {
2704 if (current_inferior->last_resume_kind == resume_step)
2705 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2706 if (event_child->stopped_by_watchpoint)
2707 fprintf (stderr, "Stopped by watchpoint.\n");
2708 if (gdb_breakpoint_here (event_child->stop_pc))
2709 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2711 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2712 }
2713
2714 /* Alright, we're going to report a stop. */
2715
2716 if (!non_stop && !stabilizing_threads)
2717 {
2718 /* In all-stop, stop all threads. */
2719 stop_all_lwps (0, NULL);
2720
2721 /* If we're not waiting for a specific LWP, choose an event LWP
2722 from among those that have had events. Giving equal priority
2723 to all LWPs that have had events helps prevent
2724 starvation. */
2725 if (ptid_equal (ptid, minus_one_ptid))
2726 {
2727 event_child->status_pending_p = 1;
2728 event_child->status_pending = w;
2729
2730 select_event_lwp (&event_child);
2731
2732 event_child->status_pending_p = 0;
2733 w = event_child->status_pending;
2734 }
2735
2736 /* Now that we've selected our final event LWP, cancel any
2737 breakpoints in other LWPs that have hit a GDB breakpoint.
2738 See the comment in cancel_breakpoints_callback to find out
2739 why. */
2740 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2741
2742 /* If we were doing a step-over, all other threads but the stepping one
2743 had been paused in start_step_over, with their suspend counts
2744 incremented. We don't want to do a full unstop/unpause, because we're
2745 in all-stop mode (so we want threads stopped), but we still need to
2746 unsuspend the other threads, to decrement their `suspended' count
2747 back. */
2748 if (step_over_finished)
2749 unsuspend_all_lwps (event_child);
2750
2751 /* Stabilize threads (move out of jump pads). */
2752 stabilize_threads ();
2753 }
2754 else
2755 {
2756 /* If we just finished a step-over, then all threads had been
2757 momentarily paused. In all-stop, that's fine, we want
2758 threads stopped by now anyway. In non-stop, we need to
2759 re-resume threads that GDB wanted to be running. */
2760 if (step_over_finished)
2761 unstop_all_lwps (1, event_child);
2762 }
2763
2764 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2765
2766 if (current_inferior->last_resume_kind == resume_stop
2767 && WSTOPSIG (w) == SIGSTOP)
2768 {
2769 /* A thread that has been requested to stop by GDB with vCont;t
2770 stopped cleanly, so report it as SIG0. The use of
2771 SIGSTOP is an implementation detail. */
2772 ourstatus->value.sig = GDB_SIGNAL_0;
2773 }
2774 else if (current_inferior->last_resume_kind == resume_stop
2775 && WSTOPSIG (w) != SIGSTOP)
2776 {
2777 /* A thread that has been requested to stop by GDB with vCont;t,
2778 but it stopped for other reasons. */
2779 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2780 }
2781 else
2782 {
2783 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2784 }
2785
2786 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2787
2788 if (debug_threads)
2789 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2790 target_pid_to_str (ptid_of (event_child)),
2791 ourstatus->kind,
2792 ourstatus->value.sig);
2793
2794 return ptid_of (event_child);
2795 }
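/* Minimal standalone counterpart (assumptions: Linux, glibc) of the
   option translation at the top of linux_wait_1: __WALL makes waitpid
   report both clone()d LWPs and ordinary children, and WNOHANG makes
   the call non-blocking, returning 0 when no child has changed state
   yet -- the "pid == 0" case handled above.  */

#define _GNU_SOURCE
#include <sys/wait.h>

static int
poll_any_child (int *wstat)
{
  /* Returns an LWP id, 0 if nothing is ready, or -1 on error.  */
  return waitpid (-1, wstat, __WALL | WNOHANG);
}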
2796
2797 /* Get rid of any pending event in the pipe. */
2798 static void
2799 async_file_flush (void)
2800 {
2801 int ret;
2802 char buf;
2803
2804 do
2805 ret = read (linux_event_pipe[0], &buf, 1);
2806 while (ret >= 0 || (ret == -1 && errno == EINTR));
2807 }
2808
2809 /* Put something in the pipe, so the event loop wakes up. */
2810 static void
2811 async_file_mark (void)
2812 {
2813 int ret;
2814
2815 async_file_flush ();
2816
2817 do
2818 ret = write (linux_event_pipe[1], "+", 1);
2819 while (ret == 0 || (ret == -1 && errno == EINTR));
2820
2821 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2822 be awakened anyway. */
2823 }
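/* Sketch (plain POSIX, not from this file) of how such an event pipe
   is typically created: this is the classic self-pipe trick.  Both
   ends are non-blocking, so async_file_flush can drain until EAGAIN
   and async_file_mark never blocks when the pipe is full.  */

#include <unistd.h>
#include <fcntl.h>

static int event_pipe_fds[2];

static int
create_event_pipe (void)
{
  if (pipe (event_pipe_fds) != 0)
    return -1;

  /* O_NONBLOCK on both ends: the reader polls, and the writer never
     stalls the path that reports events.  */
  if (fcntl (event_pipe_fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (event_pipe_fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;

  return 0;
}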
2824
2825 static ptid_t
2826 linux_wait (ptid_t ptid,
2827 struct target_waitstatus *ourstatus, int target_options)
2828 {
2829 ptid_t event_ptid;
2830
2831 if (debug_threads)
2832 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2833
2834 /* Flush the async file first. */
2835 if (target_is_async_p ())
2836 async_file_flush ();
2837
2838 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2839
2840 /* If at least one stop was reported, there may be more. A single
2841 SIGCHLD can signal more than one child stop. */
2842 if (target_is_async_p ()
2843 && (target_options & TARGET_WNOHANG) != 0
2844 && !ptid_equal (event_ptid, null_ptid))
2845 async_file_mark ();
2846
2847 return event_ptid;
2848 }
2849
2850 /* Send a signal to an LWP. */
2851
2852 static int
2853 kill_lwp (unsigned long lwpid, int signo)
2854 {
2855 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2856 fails, then we are not using nptl threads and we should be using kill. */
2857
2858 #ifdef __NR_tkill
2859 {
2860 static int tkill_failed;
2861
2862 if (!tkill_failed)
2863 {
2864 int ret;
2865
2866 errno = 0;
2867 ret = syscall (__NR_tkill, lwpid, signo);
2868 if (errno != ENOSYS)
2869 return ret;
2870 tkill_failed = 1;
2871 }
2872 }
2873 #endif
2874
2875 return kill (lwpid, signo);
2876 }
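/* Aside (hedged): kernels 2.5.75 and later also provide tgkill, which
   additionally takes the thread group id, and so cannot hit a thread
   id that was recycled by an unrelated process.  A sketch of calling
   it the same way kill_lwp calls tkill:  */

#include <sys/syscall.h>
#include <unistd.h>

static long
kill_one_thread (pid_t tgid, pid_t tid, int signo)
{
  /* Go through syscall(2), as above; older C libraries provide no
     wrapper for tgkill.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}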
2877
2878 void
2879 linux_stop_lwp (struct lwp_info *lwp)
2880 {
2881 send_sigstop (lwp);
2882 }
2883
2884 static void
2885 send_sigstop (struct lwp_info *lwp)
2886 {
2887 int pid;
2888
2889 pid = lwpid_of (lwp);
2890
2891 /* If we already have a pending stop signal for this process, don't
2892 send another. */
2893 if (lwp->stop_expected)
2894 {
2895 if (debug_threads)
2896 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2897
2898 return;
2899 }
2900
2901 if (debug_threads)
2902 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2903
2904 lwp->stop_expected = 1;
2905 kill_lwp (pid, SIGSTOP);
2906 }
2907
2908 static int
2909 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2910 {
2911 struct lwp_info *lwp = (struct lwp_info *) entry;
2912
2913 /* Ignore EXCEPT. */
2914 if (lwp == except)
2915 return 0;
2916
2917 if (lwp->stopped)
2918 return 0;
2919
2920 send_sigstop (lwp);
2921 return 0;
2922 }
2923
2924 /* Increment the suspend count of an LWP, and stop it, if not stopped
2925 yet. */
2926 static int
2927 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2928 void *except)
2929 {
2930 struct lwp_info *lwp = (struct lwp_info *) entry;
2931
2932 /* Ignore EXCEPT. */
2933 if (lwp == except)
2934 return 0;
2935
2936 lwp->suspended++;
2937
2938 return send_sigstop_callback (entry, except);
2939 }
2940
2941 static void
2942 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2943 {
2944 /* It's dead, really. */
2945 lwp->dead = 1;
2946
2947 /* Store the exit status for later. */
2948 lwp->status_pending_p = 1;
2949 lwp->status_pending = wstat;
2950
2951 /* Prevent trying to stop it. */
2952 lwp->stopped = 1;
2953
2954 /* No further stops are expected from a dead lwp. */
2955 lwp->stop_expected = 0;
2956 }
2957
2958 static void
2959 wait_for_sigstop (struct inferior_list_entry *entry)
2960 {
2961 struct lwp_info *lwp = (struct lwp_info *) entry;
2962 struct thread_info *saved_inferior;
2963 int wstat;
2964 ptid_t saved_tid;
2965 ptid_t ptid;
2966 int pid;
2967
2968 if (lwp->stopped)
2969 {
2970 if (debug_threads)
2971 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2972 lwpid_of (lwp));
2973 return;
2974 }
2975
2976 saved_inferior = current_inferior;
2977 if (saved_inferior != NULL)
2978 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2979 else
2980 saved_tid = null_ptid; /* avoid bogus unused warning */
2981
2982 ptid = lwp->head.id;
2983
2984 if (debug_threads)
2985 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2986
2987 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2988
2989 /* If we stopped with a non-SIGSTOP signal, save it for later
2990 and record the pending SIGSTOP. If the process exited, just
2991 return. */
2992 if (WIFSTOPPED (wstat))
2993 {
2994 if (debug_threads)
2995 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2996 lwpid_of (lwp), WSTOPSIG (wstat));
2997
2998 if (WSTOPSIG (wstat) != SIGSTOP)
2999 {
3000 if (debug_threads)
3001 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
3002 lwpid_of (lwp), wstat);
3003
3004 lwp->status_pending_p = 1;
3005 lwp->status_pending = wstat;
3006 }
3007 }
3008 else
3009 {
3010 if (debug_threads)
3011 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
3012
3013 lwp = find_lwp_pid (pid_to_ptid (pid));
3014 if (lwp)
3015 {
3016 /* Leave this status pending for the next time we're able to
3017 report it. In the meantime, we'll report this lwp as
3018 dead to GDB, so GDB doesn't try to read registers and
3019 memory from it. This can only happen if this was the
3020 last thread of the process; otherwise, PID is removed
3021 from the thread tables before linux_wait_for_event
3022 returns. */
3023 mark_lwp_dead (lwp, wstat);
3024 }
3025 }
3026
3027 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3028 current_inferior = saved_inferior;
3029 else
3030 {
3031 if (debug_threads)
3032 fprintf (stderr, "Previously current thread died.\n");
3033
3034 if (non_stop)
3035 {
3036 /* We can't change the current inferior behind GDB's back,
3037 otherwise, a subsequent command may apply to the wrong
3038 process. */
3039 current_inferior = NULL;
3040 }
3041 else
3042 {
3043 /* Set a valid thread as current. */
3044 set_desired_inferior (0);
3045 }
3046 }
3047 }
3048
3049 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3050 move it out, because we need to report the stop event to GDB. For
3051 example, if the user puts a breakpoint in the jump pad, it's
3052 because she wants to debug it. */
3053
3054 static int
3055 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3056 {
3057 struct lwp_info *lwp = (struct lwp_info *) entry;
3058 struct thread_info *thread = get_lwp_thread (lwp);
3059
3060 gdb_assert (lwp->suspended == 0);
3061 gdb_assert (lwp->stopped);
3062
3063 /* Allow debugging the jump pad, gdb_collect, etc.. */
3064 return (supports_fast_tracepoints ()
3065 && agent_loaded_p ()
3066 && (gdb_breakpoint_here (lwp->stop_pc)
3067 || lwp->stopped_by_watchpoint
3068 || thread->last_resume_kind == resume_step)
3069 && linux_fast_tracepoint_collecting (lwp, NULL));
3070 }
3071
3072 static void
3073 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3074 {
3075 struct lwp_info *lwp = (struct lwp_info *) entry;
3076 struct thread_info *thread = get_lwp_thread (lwp);
3077 int *wstat;
3078
3079 gdb_assert (lwp->suspended == 0);
3080 gdb_assert (lwp->stopped);
3081
3082 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3083
3084 /* Allow debugging the jump pad, gdb_collect, etc. */
3085 if (!gdb_breakpoint_here (lwp->stop_pc)
3086 && !lwp->stopped_by_watchpoint
3087 && thread->last_resume_kind != resume_step
3088 && maybe_move_out_of_jump_pad (lwp, wstat))
3089 {
3090 if (debug_threads)
3091 fprintf (stderr,
3092 "LWP %ld needs stabilizing (in jump pad)\n",
3093 lwpid_of (lwp));
3094
3095 if (wstat)
3096 {
3097 lwp->status_pending_p = 0;
3098 enqueue_one_deferred_signal (lwp, wstat);
3099
3100 if (debug_threads)
3101 fprintf (stderr,
3102 "Signal %d for LWP %ld deferred "
3103 "(in jump pad)\n",
3104 WSTOPSIG (*wstat), lwpid_of (lwp));
3105 }
3106
3107 linux_resume_one_lwp (lwp, 0, 0, NULL);
3108 }
3109 else
3110 lwp->suspended++;
3111 }
3112
3113 static int
3114 lwp_running (struct inferior_list_entry *entry, void *data)
3115 {
3116 struct lwp_info *lwp = (struct lwp_info *) entry;
3117
3118 if (lwp->dead)
3119 return 0;
3120 if (lwp->stopped)
3121 return 0;
3122 return 1;
3123 }
3124
3125 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3126 If SUSPEND, then also increase the suspend count of every LWP,
3127 except EXCEPT. */
3128
3129 static void
3130 stop_all_lwps (int suspend, struct lwp_info *except)
3131 {
3132 /* Should not be called recursively. */
3133 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3134
3135 stopping_threads = (suspend
3136 ? STOPPING_AND_SUSPENDING_THREADS
3137 : STOPPING_THREADS);
3138
3139 if (suspend)
3140 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3141 else
3142 find_inferior (&all_lwps, send_sigstop_callback, except);
3143 for_each_inferior (&all_lwps, wait_for_sigstop);
3144 stopping_threads = NOT_STOPPING_THREADS;
3145 }
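/* Usage sketch (illustration, using this file's own routines): a
   caller that needs every LWP held still momentarily pairs
   stop_all_lwps with its inverse, e.g.:

       stop_all_lwps (1, event_child);     -- stop and suspend others
       ... mutate breakpoints safely ...
       unstop_all_lwps (1, event_child);   -- unsuspend and re-resume

   Passing SUSPEND=1 matters when stops can nest (e.g., around a
   step-over): the per-LWP suspend count records how many callers need
   the LWP to stay parked, and only the last unsuspend lets it run.  */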
3146
3147 /* Resume execution of the inferior process.
3148 If STEP is nonzero, single-step it.
3149 If SIGNAL is nonzero, give it that signal. */
3150
3151 static void
3152 linux_resume_one_lwp (struct lwp_info *lwp,
3153 int step, int signal, siginfo_t *info)
3154 {
3155 struct thread_info *saved_inferior;
3156 int fast_tp_collecting;
3157
3158 if (lwp->stopped == 0)
3159 return;
3160
3161 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3162
3163 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3164
3165 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3166 user used the "jump" command, or "set $pc = foo"). */
3167 if (lwp->stop_pc != get_pc (lwp))
3168 {
3169 /* Collecting 'while-stepping' actions doesn't make sense
3170 anymore. */
3171 release_while_stepping_state_list (get_lwp_thread (lwp));
3172 }
3173
3174 /* If we have pending signals or status, and a new signal, enqueue the
3175 signal. Also enqueue the signal if we are waiting to reinsert a
3176 breakpoint; it will be picked up again below. */
3177 if (signal != 0
3178 && (lwp->status_pending_p
3179 || lwp->pending_signals != NULL
3180 || lwp->bp_reinsert != 0
3181 || fast_tp_collecting))
3182 {
3183 struct pending_signals *p_sig;
3184 p_sig = xmalloc (sizeof (*p_sig));
3185 p_sig->prev = lwp->pending_signals;
3186 p_sig->signal = signal;
3187 if (info == NULL)
3188 memset (&p_sig->info, 0, sizeof (siginfo_t));
3189 else
3190 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3191 lwp->pending_signals = p_sig;
3192 }
3193
3194 if (lwp->status_pending_p)
3195 {
3196 if (debug_threads)
3197 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3198 " has pending status\n",
3199 lwpid_of (lwp), step ? "step" : "continue", signal,
3200 lwp->stop_expected ? "expected" : "not expected");
3201 return;
3202 }
3203
3204 saved_inferior = current_inferior;
3205 current_inferior = get_lwp_thread (lwp);
3206
3207 if (debug_threads)
3208 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3209 lwpid_of (lwp), step ? "step" : "continue", signal,
3210 lwp->stop_expected ? "expected" : "not expected");
3211
3212 /* This bit needs some thinking about. If we get a signal that
3213 we must report while a single-step reinsert is still pending,
3214 we often end up resuming the thread. It might be better to
3215 (ew) allow a stack of pending events; then we could be sure that
3216 the reinsert happened right away and not lose any signals.
3217
3218 Making this stack would also shrink the window in which breakpoints are
3219 uninserted (see comment in linux_wait_for_lwp) but not enough for
3220 complete correctness, so it won't solve that problem. It may be
3221 worthwhile just to solve this one, however. */
3222 if (lwp->bp_reinsert != 0)
3223 {
3224 if (debug_threads)
3225 fprintf (stderr, " pending reinsert at 0x%s\n",
3226 paddress (lwp->bp_reinsert));
3227
3228 if (can_hardware_single_step ())
3229 {
3230 if (fast_tp_collecting == 0)
3231 {
3232 if (step == 0)
3233 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3234 if (lwp->suspended)
3235 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3236 lwp->suspended);
3237 }
3238
3239 step = 1;
3240 }
3241
3242 /* Postpone any pending signal. It was enqueued above. */
3243 signal = 0;
3244 }
3245
3246 if (fast_tp_collecting == 1)
3247 {
3248 if (debug_threads)
3249 fprintf (stderr, "\
3250 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3251 lwpid_of (lwp));
3252
3253 /* Postpone any pending signal. It was enqueued above. */
3254 signal = 0;
3255 }
3256 else if (fast_tp_collecting == 2)
3257 {
3258 if (debug_threads)
3259 fprintf (stderr, "\
3260 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3261 lwpid_of (lwp));
3262
3263 if (can_hardware_single_step ())
3264 step = 1;
3265 else
3266 fatal ("moving out of jump pad single-stepping"
3267 " not implemented on this target");
3268
3269 /* Postpone any pending signal. It was enqueued above. */
3270 signal = 0;
3271 }
3272
3273 /* If we have while-stepping actions in this thread, set it stepping.
3274 If we have a signal to deliver, it may or may not be set to
3275 SIG_IGN, we don't know. Assume so, and allow collecting
3276 while-stepping into a signal handler. A possible smart thing to
3277 do would be to set an internal breakpoint at the signal return
3278 address, continue, and carry on catching this while-stepping
3279 action only when that breakpoint is hit. A future
3280 enhancement. */
3281 if (get_lwp_thread (lwp)->while_stepping != NULL
3282 && can_hardware_single_step ())
3283 {
3284 if (debug_threads)
3285 fprintf (stderr,
3286 "lwp %ld has a while-stepping action -> forcing step.\n",
3287 lwpid_of (lwp));
3288 step = 1;
3289 }
3290
3291 if (debug_threads && the_low_target.get_pc != NULL)
3292 {
3293 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3294 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3295 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3296 }
3297
3298 /* If we have pending signals, consume one unless we are trying to
3299 reinsert a breakpoint or we're trying to finish a fast tracepoint
3300 collect. */
3301 if (lwp->pending_signals != NULL
3302 && lwp->bp_reinsert == 0
3303 && fast_tp_collecting == 0)
3304 {
3305 struct pending_signals **p_sig;
3306
3307 p_sig = &lwp->pending_signals;
3308 while ((*p_sig)->prev != NULL)
3309 p_sig = &(*p_sig)->prev;
3310
3311 signal = (*p_sig)->signal;
3312 if ((*p_sig)->info.si_signo != 0)
3313 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3314 &(*p_sig)->info);
3315
3316 free (*p_sig);
3317 *p_sig = NULL;
3318 }
3319
3320 if (the_low_target.prepare_to_resume != NULL)
3321 the_low_target.prepare_to_resume (lwp);
3322
3323 regcache_invalidate_one ((struct inferior_list_entry *)
3324 get_lwp_thread (lwp));
3325 errno = 0;
3326 lwp->stopped = 0;
3327 lwp->stopped_by_watchpoint = 0;
3328 lwp->stepping = step;
3329 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3330 (PTRACE_ARG3_TYPE) 0,
3331 /* Coerce to a uintptr_t first to avoid potential gcc warning
3332 of coercing an 8 byte integer to a 4 byte pointer. */
3333 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3334
3335 current_inferior = saved_inferior;
3336 if (errno)
3337 {
3338 /* ESRCH from ptrace either means that the thread was already
3339 running (an error) or that it is gone (a race condition). If
3340 it's gone, we will get a notification the next time we wait,
3341 so we can ignore the error. We could differentiate these
3342 two, but it's tricky without waiting; the thread still exists
3343 as a zombie, so sending it signal 0 would succeed. So just
3344 ignore ESRCH. */
3345 if (errno == ESRCH)
3346 return;
3347
3348 perror_with_name ("ptrace");
3349 }
3350 }
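/* Standalone sketch (illustration) of the resume tail above: on
   Linux, PTRACE_CONT and PTRACE_SINGLESTEP take the signal to deliver
   in the data argument, with 0 meaning "no signal".  */

#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_tracee (pid_t tid, int step, int signo)
{
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, tid,
		 (void *) 0, (void *) (long) signo);
}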
3351
3352 struct thread_resume_array
3353 {
3354 struct thread_resume *resume;
3355 size_t n;
3356 };
3357
3358 /* This function is called once per thread. We look up the thread
3359 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3360 resume request.
3361
3362 This algorithm is O(threads * resume elements), but resume elements
3363 is small (and will remain small at least until GDB supports thread
3364 suspension). */
3365 static int
3366 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3367 {
3368 struct lwp_info *lwp;
3369 struct thread_info *thread;
3370 int ndx;
3371 struct thread_resume_array *r;
3372
3373 thread = (struct thread_info *) entry;
3374 lwp = get_thread_lwp (thread);
3375 r = arg;
3376
3377 for (ndx = 0; ndx < r->n; ndx++)
3378 {
3379 ptid_t ptid = r->resume[ndx].thread;
3380 if (ptid_equal (ptid, minus_one_ptid)
3381 || ptid_equal (ptid, entry->id)
3382 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3383 of PID'. */
3384 || (ptid_get_pid (ptid) == pid_of (lwp)
3385 && (ptid_is_pid (ptid)
3386 || ptid_get_lwp (ptid) == -1)))
3387 {
3388 if (r->resume[ndx].kind == resume_stop
3389 && thread->last_resume_kind == resume_stop)
3390 {
3391 if (debug_threads)
3392 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3393 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3394 ? "stopped"
3395 : "stopping",
3396 lwpid_of (lwp));
3397
3398 continue;
3399 }
3400
3401 lwp->resume = &r->resume[ndx];
3402 thread->last_resume_kind = lwp->resume->kind;
3403
3404 /* If we had a deferred signal to report, dequeue one now.
3405 This can happen if LWP gets more than one signal while
3406 trying to get out of a jump pad. */
3407 if (lwp->stopped
3408 && !lwp->status_pending_p
3409 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3410 {
3411 lwp->status_pending_p = 1;
3412
3413 if (debug_threads)
3414 fprintf (stderr,
3415 "Dequeueing deferred signal %d for LWP %ld, "
3416 "leaving status pending.\n",
3417 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3418 }
3419
3420 return 0;
3421 }
3422 }
3423
3424 /* No resume action for this thread. */
3425 lwp->resume = NULL;
3426
3427 return 0;
3428 }
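/* Sketch of the matching rule above, written against a simplified,
   hypothetical (pid, lwp) pair rather than gdbserver's real ptid_t: a
   resume request of 'pPID' (no LWP part) or 'pPID.-1' applies to
   every thread of PID, and minus_one_ptid matches everything.  */

struct simple_ptid { int pid; long lwp; };

static int
resume_request_matches (struct simple_ptid req, struct simple_ptid thr)
{
  /* minus_one_ptid: all threads of all processes.  */
  if (req.pid == -1)
    return 1;

  if (req.pid != thr.pid)
    return 0;

  /* 'pPID' is represented with lwp 0; 'pPID.-1' with lwp -1. Both
     mean "all threads of PID".  */
  return req.lwp == 0 || req.lwp == -1 || req.lwp == thr.lwp;
}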
3429
3430
3431 /* Set *FLAG_P if this lwp has an interesting status pending. */
3432 static int
3433 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3434 {
3435 struct lwp_info *lwp = (struct lwp_info *) entry;
3436
3437 /* LWPs which will not be resumed are not interesting, because
3438 we might not wait for them next time through linux_wait. */
3439 if (lwp->resume == NULL)
3440 return 0;
3441
3442 if (lwp->status_pending_p)
3443 * (int *) flag_p = 1;
3444
3445 return 0;
3446 }
3447
3448 /* Return 1 if this lwp that GDB wants running is stopped at an
3449 internal breakpoint that we need to step over. It assumes that any
3450 required STOP_PC adjustment has already been propagated to the
3451 inferior's regcache. */
3452
3453 static int
3454 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3455 {
3456 struct lwp_info *lwp = (struct lwp_info *) entry;
3457 struct thread_info *thread;
3458 struct thread_info *saved_inferior;
3459 CORE_ADDR pc;
3460
3461 /* LWPs which will not be resumed are not interesting, because we
3462 might not wait for them next time through linux_wait. */
3463
3464 if (!lwp->stopped)
3465 {
3466 if (debug_threads)
3467 fprintf (stderr,
3468 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3469 lwpid_of (lwp));
3470 return 0;
3471 }
3472
3473 thread = get_lwp_thread (lwp);
3474
3475 if (thread->last_resume_kind == resume_stop)
3476 {
3477 if (debug_threads)
3478 fprintf (stderr,
3479 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3480 lwpid_of (lwp));
3481 return 0;
3482 }
3483
3484 gdb_assert (lwp->suspended >= 0);
3485
3486 if (lwp->suspended)
3487 {
3488 if (debug_threads)
3489 fprintf (stderr,
3490 "Need step over [LWP %ld]? Ignoring, suspended\n",
3491 lwpid_of (lwp));
3492 return 0;
3493 }
3494
3495 if (!lwp->need_step_over)
3496 {
3497 if (debug_threads)
3498 fprintf (stderr,
3499 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3500 }
3501
3502 if (lwp->status_pending_p)
3503 {
3504 if (debug_threads)
3505 fprintf (stderr,
3506 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3507 lwpid_of (lwp));
3508 return 0;
3509 }
3510
3511 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3512 or we have. */
3513 pc = get_pc (lwp);
3514
3515 /* If the PC has changed since we stopped, then don't do anything,
3516 and let the breakpoint/tracepoint be hit. This happens if, for
3517 instance, GDB handled the decr_pc_after_break subtraction itself,
3518 GDB is OOL stepping this thread, or the user has issued a "jump"
3519 command, or poked thread's registers herself. */
3520 if (pc != lwp->stop_pc)
3521 {
3522 if (debug_threads)
3523 fprintf (stderr,
3524 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3525 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3526 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3527
3528 lwp->need_step_over = 0;
3529 return 0;
3530 }
3531
3532 saved_inferior = current_inferior;
3533 current_inferior = thread;
3534
3535 /* We can only step over breakpoints we know about. */
3536 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3537 {
3538 /* Don't step over a breakpoint that GDB expects to hit
3539 though. If the condition is being evaluated on the target's side
3540 and it evaluates to false, step over this breakpoint as well. */
3541 if (gdb_breakpoint_here (pc)
3542 && gdb_condition_true_at_breakpoint (pc)
3543 && gdb_no_commands_at_breakpoint (pc))
3544 {
3545 if (debug_threads)
3546 fprintf (stderr,
3547 "Need step over [LWP %ld]? yes, but found"
3548 " GDB breakpoint at 0x%s; skipping step over\n",
3549 lwpid_of (lwp), paddress (pc));
3550
3551 current_inferior = saved_inferior;
3552 return 0;
3553 }
3554 else
3555 {
3556 if (debug_threads)
3557 fprintf (stderr,
3558 "Need step over [LWP %ld]? yes, "
3559 "found breakpoint at 0x%s\n",
3560 lwpid_of (lwp), paddress (pc));
3561
3562 /* We've found an lwp that needs stepping over --- return 1 so
3563 that find_inferior stops looking. */
3564 current_inferior = saved_inferior;
3565
3566 /* If the step over is cancelled, this is set again. */
3567 lwp->need_step_over = 0;
3568 return 1;
3569 }
3570 }
3571
3572 current_inferior = saved_inferior;
3573
3574 if (debug_threads)
3575 fprintf (stderr,
3576 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3577 lwpid_of (lwp), paddress (pc));
3578
3579 return 0;
3580 }
3581
3582 /* Start a step-over operation on LWP. When LWP is stopped at a
3583 breakpoint, to make progress, we need to move the breakpoint out
3584 of the way. If we let other threads run while we do that, they may
3585 pass by the breakpoint location and miss hitting it. To avoid
3586 that, a step-over momentarily stops all threads while LWP is
3587 single-stepped with the breakpoint temporarily uninserted from
3588 the inferior. When the single-step finishes, we reinsert the
3589 breakpoint, and let all threads that are supposed to be running,
3590 run again.
3591
3592 On targets that don't support hardware single-step, we don't
3593 currently support full software single-stepping. Instead, we only
3594 support stepping over the thread event breakpoint, by asking the
3595 low target where to place a reinsert breakpoint. Since this
3596 routine assumes the breakpoint being stepped over is a thread event
3597 breakpoint, it usually assumes the return address of the current
3598 function is a good enough place to set the reinsert breakpoint. */
3599
3600 static int
3601 start_step_over (struct lwp_info *lwp)
3602 {
3603 struct thread_info *saved_inferior;
3604 CORE_ADDR pc;
3605 int step;
3606
3607 if (debug_threads)
3608 fprintf (stderr,
3609 "Starting step-over on LWP %ld. Stopping all threads\n",
3610 lwpid_of (lwp));
3611
3612 stop_all_lwps (1, lwp);
3613 gdb_assert (lwp->suspended == 0);
3614
3615 if (debug_threads)
3616 fprintf (stderr, "Done stopping all threads for step-over.\n");
3617
3618 /* Note, we should always reach here with an already adjusted PC,
3619 either by GDB (if we're resuming due to GDB's request), or by our
3620 caller, if we just finished handling an internal breakpoint GDB
3621 shouldn't care about. */
3622 pc = get_pc (lwp);
3623
3624 saved_inferior = current_inferior;
3625 current_inferior = get_lwp_thread (lwp);
3626
3627 lwp->bp_reinsert = pc;
3628 uninsert_breakpoints_at (pc);
3629 uninsert_fast_tracepoint_jumps_at (pc);
3630
3631 if (can_hardware_single_step ())
3632 {
3633 step = 1;
3634 }
3635 else
3636 {
3637 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3638 set_reinsert_breakpoint (raddr);
3639 step = 0;
3640 }
3641
3642 current_inferior = saved_inferior;
3643
3644 linux_resume_one_lwp (lwp, step, 0, NULL);
3645
3646 /* Require next event from this LWP. */
3647 step_over_bkpt = lwp->head.id;
3648 return 1;
3649 }
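/* Worked sketch (x86 assumed; not this file's implementation) of the
   mechanics driven by start_step_over/finish_step_over.  A software
   breakpoint is an int3 byte (0xCC) patched over the original text;
   stepping over it means restoring the original word, single-stepping
   the one thread while all others are held stopped, then re-patching
   0xCC before anything else is allowed to run.  */

#define _GNU_SOURCE
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <stdint.h>

static void
step_over_sw_breakpoint (pid_t tid, uintptr_t addr, long orig_word)
{
  int status;

  /* Uninsert: put the original instruction bytes back.  */
  ptrace (PTRACE_POKETEXT, tid, (void *) addr, (void *) orig_word);

  /* Single-step just this thread; everyone else stays stopped so
     nobody can sail past ADDR while the trap byte is gone.  */
  ptrace (PTRACE_SINGLESTEP, tid, (void *) 0, (void *) 0);
  waitpid (tid, &status, __WALL);

  /* Reinsert: restore the 0xCC in the low byte of the word.  */
  ptrace (PTRACE_POKETEXT, tid, (void *) addr,
	  (void *) ((orig_word & ~0xffL) | 0xcc));
}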
3650
3651 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3652 start_step_over, if still there, and delete any reinsert
3653 breakpoints we've set, on non hardware single-step targets. */
3654
3655 static int
3656 finish_step_over (struct lwp_info *lwp)
3657 {
3658 if (lwp->bp_reinsert != 0)
3659 {
3660 if (debug_threads)
3661 fprintf (stderr, "Finished step over.\n");
3662
3663 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3664 may be no breakpoint to reinsert there by now. */
3665 reinsert_breakpoints_at (lwp->bp_reinsert);
3666 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3667
3668 lwp->bp_reinsert = 0;
3669
3670 /* Delete any software-single-step reinsert breakpoints. No
3671 longer needed. We don't have to worry about other threads
3672 hitting this trap, and later not being able to explain it,
3673 because we were stepping over a breakpoint, and we hold all
3674 threads but LWP stopped while doing that. */
3675 if (!can_hardware_single_step ())
3676 delete_reinsert_breakpoints ();
3677
3678 step_over_bkpt = null_ptid;
3679 return 1;
3680 }
3681 else
3682 return 0;
3683 }
3684
3685 /* This function is called once per thread. We check the thread's resume
3686 request, which will tell us whether to resume, step, or leave the thread
3687 stopped; and what signal, if any, it should be sent.
3688
3689 For threads which we aren't explicitly told otherwise, we preserve
3690 the stepping flag; this is used for stepping over gdbserver-placed
3691 breakpoints.
3692
3693 If pending_flags was set in any thread, we queue any needed
3694 signals, since we won't actually resume. We already have a pending
3695 event to report, so we don't need to preserve any step requests;
3696 they should be re-issued if necessary. */
3697
3698 static int
3699 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3700 {
3701 struct lwp_info *lwp;
3702 struct thread_info *thread;
3703 int step;
3704 int leave_all_stopped = * (int *) arg;
3705 int leave_pending;
3706
3707 thread = (struct thread_info *) entry;
3708 lwp = get_thread_lwp (thread);
3709
3710 if (lwp->resume == NULL)
3711 return 0;
3712
3713 if (lwp->resume->kind == resume_stop)
3714 {
3715 if (debug_threads)
3716 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3717
3718 if (!lwp->stopped)
3719 {
3720 if (debug_threads)
3721 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3722
3723 /* Stop the thread, and wait for the event asynchronously,
3724 through the event loop. */
3725 send_sigstop (lwp);
3726 }
3727 else
3728 {
3729 if (debug_threads)
3730 fprintf (stderr, "already stopped LWP %ld\n",
3731 lwpid_of (lwp));
3732
3733 /* The LWP may have been stopped in an internal event that
3734 was not meant to be notified back to GDB (e.g., gdbserver
3735 breakpoint), so we should be reporting a stop event in
3736 this case too. */
3737
3738 /* If the thread already has a pending SIGSTOP, this is a
3739 no-op. Otherwise, something later will presumably resume
3740 the thread and this will cause it to cancel any pending
3741 operation, due to last_resume_kind == resume_stop. If
3742 the thread already has a pending status to report, we
3743 will still report it the next time we wait - see
3744 status_pending_p_callback. */
3745
3746 /* If we already have a pending signal to report, then
3747 there's no need to queue a SIGSTOP, as this means we're
3748 midway through moving the LWP out of the jumppad, and we
3749 will report the pending signal as soon as that is
3750 finished. */
3751 if (lwp->pending_signals_to_report == NULL)
3752 send_sigstop (lwp);
3753 }
3754
3755 /* For stop requests, we're done. */
3756 lwp->resume = NULL;
3757 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3758 return 0;
3759 }
3760
3761 /* If this thread which is about to be resumed has a pending status,
3762 then don't resume any threads - we can just report the pending
3763 status. Make sure to queue any signals that would otherwise be
3764 sent. In all-stop mode, we base this decision on whether *any*
3765 thread has a pending status. If there's a thread that needs the
3766 step-over-breakpoint dance, then don't resume any other thread
3767 but that particular one. */
3768 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3769
3770 if (!leave_pending)
3771 {
3772 if (debug_threads)
3773 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3774
3775 step = (lwp->resume->kind == resume_step);
3776 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3777 }
3778 else
3779 {
3780 if (debug_threads)
3781 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3782
3783 /* If we have a new signal, enqueue the signal. */
3784 if (lwp->resume->sig != 0)
3785 {
3786 struct pending_signals *p_sig;
3787 p_sig = xmalloc (sizeof (*p_sig));
3788 p_sig->prev = lwp->pending_signals;
3789 p_sig->signal = lwp->resume->sig;
3790 memset (&p_sig->info, 0, sizeof (siginfo_t));
3791
3792 /* If this is the same signal we were previously stopped by,
3793 make sure to queue its siginfo. We can ignore the return
3794 value of ptrace; if it fails, we'll skip
3795 PTRACE_SETSIGINFO. */
3796 if (WIFSTOPPED (lwp->last_status)
3797 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3798 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3799 &p_sig->info);
3800
3801 lwp->pending_signals = p_sig;
3802 }
3803 }
3804
3805 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3806 lwp->resume = NULL;
3807 return 0;
3808 }
3809
3810 static void
3811 linux_resume (struct thread_resume *resume_info, size_t n)
3812 {
3813 struct thread_resume_array array = { resume_info, n };
3814 struct lwp_info *need_step_over = NULL;
3815 int any_pending;
3816 int leave_all_stopped;
3817
3818 find_inferior (&all_threads, linux_set_resume_request, &array);
3819
3820 /* If there is a thread which would otherwise be resumed, which has
3821 a pending status, then don't resume any threads - we can just
3822 report the pending status. Make sure to queue any signals that
3823 would otherwise be sent. In non-stop mode, we'll apply this
3824 logic to each thread individually. We consume all pending events
3825 before considering starting a step-over (in all-stop). */
3826 any_pending = 0;
3827 if (!non_stop)
3828 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3829
3830 /* If there is a thread which would otherwise be resumed, which is
3831 stopped at a breakpoint that needs stepping over, then don't
3832 resume any threads - have it step over the breakpoint with all
3833 other threads stopped, then resume all threads again. Make sure
3834 to queue any signals that would otherwise be delivered or
3835 queued. */
3836 if (!any_pending && supports_breakpoints ())
3837 need_step_over
3838 = (struct lwp_info *) find_inferior (&all_lwps,
3839 need_step_over_p, NULL);
3840
3841 leave_all_stopped = (need_step_over != NULL || any_pending);
3842
3843 if (debug_threads)
3844 {
3845 if (need_step_over != NULL)
3846 fprintf (stderr, "Not resuming all, need step over\n");
3847 else if (any_pending)
3848 fprintf (stderr,
3849 "Not resuming, all-stop and found "
3850 "an LWP with pending status\n");
3851 else
3852 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3853 }
3854
3855 /* Even if we're leaving threads stopped, queue all signals we'd
3856 otherwise deliver. */
3857 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3858
3859 if (need_step_over)
3860 start_step_over (need_step_over);
3861 }
3862
3863 /* This function is called once per thread. We check the thread's
3864 last resume request, which will tell us whether to resume, step, or
3865 leave the thread stopped. Any signal the client requested to be
3866 delivered has already been enqueued at this point.
3867
3868 If any thread that GDB wants running is stopped at an internal
3869 breakpoint that needs stepping over, we start a step-over operation
3870 on that particular thread, and leave all others stopped. */
3871
3872 static int
3873 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3874 {
3875 struct lwp_info *lwp = (struct lwp_info *) entry;
3876 struct thread_info *thread;
3877 int step;
3878
3879 if (lwp == except)
3880 return 0;
3881
3882 if (debug_threads)
3883 fprintf (stderr,
3884 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3885
3886 if (!lwp->stopped)
3887 {
3888 if (debug_threads)
3889 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3890 return 0;
3891 }
3892
3893 thread = get_lwp_thread (lwp);
3894
3895 if (thread->last_resume_kind == resume_stop
3896 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3897 {
3898 if (debug_threads)
3899 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3900 lwpid_of (lwp));
3901 return 0;
3902 }
3903
3904 if (lwp->status_pending_p)
3905 {
3906 if (debug_threads)
3907 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3908 lwpid_of (lwp));
3909 return 0;
3910 }
3911
3912 gdb_assert (lwp->suspended >= 0);
3913
3914 if (lwp->suspended)
3915 {
3916 if (debug_threads)
3917 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3918 return 0;
3919 }
3920
3921 if (thread->last_resume_kind == resume_stop
3922 && lwp->pending_signals_to_report == NULL
3923 && lwp->collecting_fast_tracepoint == 0)
3924 {
3925 /* We haven't reported this LWP as stopped yet (otherwise, the
3926 last_status.kind check above would catch it, and we wouldn't
3927 reach here). This LWP may have been momentarily paused by a
3928 stop_all_lwps call while handling, for example, another LWP's
3929 step-over. In that case, the pending expected SIGSTOP signal
3930 that was queued at vCont;t handling time will have already
3931 been consumed by wait_for_sigstop, and so we need to requeue
3932 another one here. Note that if the LWP already has a SIGSTOP
3933 pending, this is a no-op. */
3934
3935 if (debug_threads)
3936 fprintf (stderr,
3937 "Client wants LWP %ld to stop. "
3938 "Making sure it has a SIGSTOP pending\n",
3939 lwpid_of (lwp));
3940
3941 send_sigstop (lwp);
3942 }
3943
3944 step = thread->last_resume_kind == resume_step;
3945 linux_resume_one_lwp (lwp, step, 0, NULL);
3946 return 0;
3947 }
3948
3949 static int
3950 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3951 {
3952 struct lwp_info *lwp = (struct lwp_info *) entry;
3953
3954 if (lwp == except)
3955 return 0;
3956
3957 lwp->suspended--;
3958 gdb_assert (lwp->suspended >= 0);
3959
3960 return proceed_one_lwp (entry, except);
3961 }
3962
3963 /* When we finish a step-over, set threads running again. If there's
3964 another thread that may need a step-over, now's the time to start
3965 it. Eventually, we'll move all threads past their breakpoints. */
3966
3967 static void
3968 proceed_all_lwps (void)
3969 {
3970 struct lwp_info *need_step_over;
3971
3972 /* If there is a thread which would otherwise be resumed, which is
3973 stopped at a breakpoint that needs stepping over, then don't
3974 resume any threads - have it step over the breakpoint with all
3975 other threads stopped, then resume all threads again. */
3976
3977 if (supports_breakpoints ())
3978 {
3979 need_step_over
3980 = (struct lwp_info *) find_inferior (&all_lwps,
3981 need_step_over_p, NULL);
3982
3983 if (need_step_over != NULL)
3984 {
3985 if (debug_threads)
3986 fprintf (stderr, "proceed_all_lwps: found "
3987 "thread %ld needing a step-over\n",
3988 lwpid_of (need_step_over));
3989
3990 start_step_over (need_step_over);
3991 return;
3992 }
3993 }
3994
3995 if (debug_threads)
3996 fprintf (stderr, "Proceeding, no step-over needed\n");
3997
3998 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3999 }
4000
4001 /* Stopped LWPs that the client wanted to be running, that don't have
4002 pending statuses, are set to run again, except for EXCEPT, if not
4003 NULL. This undoes a stop_all_lwps call. */
4004
4005 static void
4006 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4007 {
4008 if (debug_threads)
4009 {
4010 if (except)
4011 fprintf (stderr,
4012 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
4013 else
4014 fprintf (stderr,
4015 "unstopping all lwps\n");
4016 }
4017
4018 if (unsuspend)
4019 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
4020 else
4021 find_inferior (&all_lwps, proceed_one_lwp, except);
4022 }
4023
4024
4025 #ifdef HAVE_LINUX_REGSETS
4026
4027 #define use_linux_regsets 1
4028
4029 static int
4030 regsets_fetch_inferior_registers (struct regcache *regcache)
4031 {
4032 struct regset_info *regset;
4033 int saw_general_regs = 0;
4034 int pid;
4035 struct iovec iov;
4036
4037 regset = target_regsets;
4038
4039 pid = lwpid_of (get_thread_lwp (current_inferior));
4040 while (regset->size >= 0)
4041 {
4042 void *buf, *data;
4043 int nt_type, res;
4044
4045 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4046 {
4047 regset++;
4048 continue;
4049 }
4050
4051 buf = xmalloc (regset->size);
4052
4053 nt_type = regset->nt_type;
4054 if (nt_type)
4055 {
4056 iov.iov_base = buf;
4057 iov.iov_len = regset->size;
4058 data = (void *) &iov;
4059 }
4060 else
4061 data = buf;
4062
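/* For regsets that carry a note type, GET_REQUEST is typically
   PTRACE_GETREGSET, which takes an NT_* constant (e.g. NT_PRSTATUS)
   as its third argument and a struct iovec describing the buffer as
   its fourth; older requests such as PTRACE_GETREGS take the buffer
   directly. This describes the general ptrace convention, not any
   one target's regset table. */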
4063 #ifndef __sparc__
4064 res = ptrace (regset->get_request, pid,
4065 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4066 #else
4067 res = ptrace (regset->get_request, pid, data, nt_type);
4068 #endif
4069 if (res < 0)
4070 {
4071 if (errno == EIO)
4072 {
4073 /* If we get EIO on a regset, do not try it again for
4074 this process. */
4075 disabled_regsets[regset - target_regsets] = 1;
4076 free (buf);
4077 continue;
4078 }
4079 else
4080 {
4081 char s[256];
4082 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4083 pid);
4084 perror (s);
4085 }
4086 }
4087 else if (regset->type == GENERAL_REGS)
4088 saw_general_regs = 1;
4089 regset->store_function (regcache, buf);
4090 regset++;
4091 free (buf);
4092 }
4093 if (saw_general_regs)
4094 return 0;
4095 else
4096 return 1;
4097 }
4098
4099 static int
4100 regsets_store_inferior_registers (struct regcache *regcache)
4101 {
4102 struct regset_info *regset;
4103 int saw_general_regs = 0;
4104 int pid;
4105 struct iovec iov;
4106
4107 regset = target_regsets;
4108
4109 pid = lwpid_of (get_thread_lwp (current_inferior));
4110 while (regset->size >= 0)
4111 {
4112 void *buf, *data;
4113 int nt_type, res;
4114
4115 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4116 {
4117 regset++;
4118 continue;
4119 }
4120
4121 buf = xmalloc (regset->size);
4122
4123 /* First fill the buffer with the current register set contents,
4124 in case there are any items in the kernel's regset that are
4125 not in gdbserver's regcache. */
4126
4127 nt_type = regset->nt_type;
4128 if (nt_type)
4129 {
4130 iov.iov_base = buf;
4131 iov.iov_len = regset->size;
4132 data = (void *) &iov;
4133 }
4134 else
4135 data = buf;
4136
4137 #ifndef __sparc__
4138 res = ptrace (regset->get_request, pid,
4139 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4140 #else
4141 res = ptrace (regset->get_request, pid, data, nt_type);
4142 #endif
4143
4144 if (res == 0)
4145 {
4146 /* Then overlay our cached registers on that. */
4147 regset->fill_function (regcache, buf);
4148
4149 /* Only now do we write the register set. */
4150 #ifndef __sparc__
4151 res = ptrace (regset->set_request, pid,
4152 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4153 #else
4154 res = ptrace (regset->set_request, pid, data, nt_type);
4155 #endif
4156 }
4157
4158 if (res < 0)
4159 {
4160 if (errno == EIO)
4161 {
4162 /* If we get EIO on a regset, do not try it again for
4163 this process. */
4164 disabled_regsets[regset - target_regsets] = 1;
4165 free (buf);
4166 continue;
4167 }
4168 else if (errno == ESRCH)
4169 {
4170 /* At this point, ESRCH should mean the process is
4171 already gone, in which case we simply ignore attempts
4172 to change its registers. See also the related
4173 comment in linux_resume_one_lwp. */
4174 free (buf);
4175 return 0;
4176 }
4177 else
4178 {
4179 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4180 }
4181 }
4182 else if (regset->type == GENERAL_REGS)
4183 saw_general_regs = 1;
4184 regset++;
4185 free (buf);
4186 }
4187 if (saw_general_regs)
4188 return 0;
4189 else
4190 return 1;
4191 }
4192
4193 #else /* !HAVE_LINUX_REGSETS */
4194
4195 #define use_linux_regsets 0
4196 #define regsets_fetch_inferior_registers(regcache) 1
4197 #define regsets_store_inferior_registers(regcache) 1
4198
4199 #endif
4200
4201 /* Return 1 if register REGNO is supported by one of the regset ptrace
4202 calls or 0 if it has to be transferred individually. */
4203
4204 static int
4205 linux_register_in_regsets (int regno)
4206 {
4207 unsigned char mask = 1 << (regno % 8);
4208 size_t index = regno / 8;
4209
4210 return (use_linux_regsets
4211 && (the_low_target.regset_bitmap == NULL
4212 || (the_low_target.regset_bitmap[index] & mask) != 0));
4213 }
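/* Worked example only: regno 10 yields index 1 and mask 1 << 2,
   i.e. bit 2 of the second bitmap byte; a NULL regset_bitmap means
   every register is covered by some regset. */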
4214
4215 #ifdef HAVE_LINUX_USRREGS
4216
4217 int
4218 register_addr (int regnum)
4219 {
4220 int addr;
4221
4222 if (regnum < 0 || regnum >= the_low_target.num_regs)
4223 error ("Invalid register number %d.", regnum);
4224
4225 addr = the_low_target.regmap[regnum];
4226
4227 return addr;
4228 }
4229
4230 /* Fetch one register. */
4231 static void
4232 fetch_register (struct regcache *regcache, int regno)
4233 {
4234 CORE_ADDR regaddr;
4235 int i, size;
4236 char *buf;
4237 int pid;
4238
4239 if (regno >= the_low_target.num_regs)
4240 return;
4241 if ((*the_low_target.cannot_fetch_register) (regno))
4242 return;
4243
4244 regaddr = register_addr (regno);
4245 if (regaddr == -1)
4246 return;
4247
4248 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4249 & -sizeof (PTRACE_XFER_TYPE));
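  /* Illustrative arithmetic: with an 8-byte PTRACE_XFER_TYPE, a
     10-byte register rounds up to 16 bytes, (10 + 7) & -8 == 16. */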
4250 buf = alloca (size);
4251
4252 pid = lwpid_of (get_thread_lwp (current_inferior));
4253 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4254 {
4255 errno = 0;
4256 *(PTRACE_XFER_TYPE *) (buf + i) =
4257 ptrace (PTRACE_PEEKUSER, pid,
4258 /* Coerce to a uintptr_t first to avoid potential gcc warning
4259 about coercing an 8 byte integer to a 4 byte pointer. */
4260 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, (PTRACE_ARG4_TYPE) 0);
4261 regaddr += sizeof (PTRACE_XFER_TYPE);
4262 if (errno != 0)
4263 error ("reading register %d: %s", regno, strerror (errno));
4264 }
4265
4266 if (the_low_target.supply_ptrace_register)
4267 the_low_target.supply_ptrace_register (regcache, regno, buf);
4268 else
4269 supply_register (regcache, regno, buf);
4270 }
4271
4272 /* Store one register. */
4273 static void
4274 store_register (struct regcache *regcache, int regno)
4275 {
4276 CORE_ADDR regaddr;
4277 int i, size;
4278 char *buf;
4279 int pid;
4280
4281 if (regno >= the_low_target.num_regs)
4282 return;
4283 if ((*the_low_target.cannot_store_register) (regno))
4284 return;
4285
4286 regaddr = register_addr (regno);
4287 if (regaddr == -1)
4288 return;
4289
4290 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4291 & -sizeof (PTRACE_XFER_TYPE));
4292 buf = alloca (size);
4293 memset (buf, 0, size);
4294
4295 if (the_low_target.collect_ptrace_register)
4296 the_low_target.collect_ptrace_register (regcache, regno, buf);
4297 else
4298 collect_register (regcache, regno, buf);
4299
4300 pid = lwpid_of (get_thread_lwp (current_inferior));
4301 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4302 {
4303 errno = 0;
4304 ptrace (PTRACE_POKEUSER, pid,
4305 /* Coerce to a uintptr_t first to avoid potential gcc warning
4306 about coercing an 8 byte integer to a 4 byte pointer. */
4307 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4308 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4309 if (errno != 0)
4310 {
4311 /* At this point, ESRCH should mean the process is
4312 already gone, in which case we simply ignore attempts
4313 to change its registers. See also the related
4314 comment in linux_resume_one_lwp. */
4315 if (errno == ESRCH)
4316 return;
4317
4318 if ((*the_low_target.cannot_store_register) (regno) == 0)
4319 error ("writing register %d: %s", regno, strerror (errno));
4320 }
4321 regaddr += sizeof (PTRACE_XFER_TYPE);
4322 }
4323 }
4324
4325 /* Fetch all registers, or just one, from the child process.
4326 If REGNO is -1, do this for all registers, skipping any that are
4327 assumed to have been retrieved by regsets_fetch_inferior_registers,
4328 unless ALL is non-zero.
4329 Otherwise, REGNO specifies which register (so we can save time). */
4330 static void
4331 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4332 {
4333 if (regno == -1)
4334 {
4335 for (regno = 0; regno < the_low_target.num_regs; regno++)
4336 if (all || !linux_register_in_regsets (regno))
4337 fetch_register (regcache, regno);
4338 }
4339 else
4340 fetch_register (regcache, regno);
4341 }
4342
4343 /* Store our register values back into the inferior.
4344 If REGNO is -1, do this for all registers, skipping any that are
4345 assumed to have been saved by regsets_store_inferior_registers,
4346 unless ALL is non-zero.
4347 Otherwise, REGNO specifies which register (so we can save time). */
4348 static void
4349 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4350 {
4351 if (regno == -1)
4352 {
4353 for (regno = 0; regno < the_low_target.num_regs; regno++)
4354 if (all || !linux_register_in_regsets (regno))
4355 store_register (regcache, regno);
4356 }
4357 else
4358 store_register (regcache, regno);
4359 }
4360
4361 #else /* !HAVE_LINUX_USRREGS */
4362
4363 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4364 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4365
4366 #endif
4367
4368
4369 void
4370 linux_fetch_registers (struct regcache *regcache, int regno)
4371 {
4372 int use_regsets;
4373 int all = 0;
4374
4375 if (regno == -1)
4376 {
4377 if (the_low_target.fetch_register != NULL)
4378 for (regno = 0; regno < the_low_target.num_regs; regno++)
4379 (*the_low_target.fetch_register) (regcache, regno);
4380
4381 all = regsets_fetch_inferior_registers (regcache);
4382 usr_fetch_inferior_registers (regcache, -1, all);
4383 }
4384 else
4385 {
4386 if (the_low_target.fetch_register != NULL
4387 && (*the_low_target.fetch_register) (regcache, regno))
4388 return;
4389
4390 use_regsets = linux_register_in_regsets (regno);
4391 if (use_regsets)
4392 all = regsets_fetch_inferior_registers (regcache);
4393 if (!use_regsets || all)
4394 usr_fetch_inferior_registers (regcache, regno, 1);
4395 }
4396 }
4397
4398 void
4399 linux_store_registers (struct regcache *regcache, int regno)
4400 {
4401 int use_regsets;
4402 int all = 0;
4403
4404 if (regno == -1)
4405 {
4406 all = regsets_store_inferior_registers (regcache);
4407 usr_store_inferior_registers (regcache, regno, all);
4408 }
4409 else
4410 {
4411 use_regsets = linux_register_in_regsets (regno);
4412 if (use_regsets)
4413 all = regsets_store_inferior_registers (regcache);
4414 if (!use_regsets || all)
4415 usr_store_inferior_registers (regcache, regno, 1);
4416 }
4417 }
4418
4419
4420 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4421 to debugger memory starting at MYADDR. */
4422
4423 static int
4424 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4425 {
4426 int pid = lwpid_of (get_thread_lwp (current_inferior));
4427 register PTRACE_XFER_TYPE *buffer;
4428 register CORE_ADDR addr;
4429 register int count;
4430 char filename[64];
4431 register int i;
4432 int ret;
4433 int fd;
4434
4435 /* Try using /proc. Don't bother for one word. */
4436 if (len >= 3 * sizeof (long))
4437 {
4438 int bytes;
4439
4440 /* We could keep this file open and cache it - possibly one per
4441 thread. That requires some juggling, but is even faster. */
4442 sprintf (filename, "/proc/%d/mem", pid);
4443 fd = open (filename, O_RDONLY | O_LARGEFILE);
4444 if (fd == -1)
4445 goto no_proc;
4446
4447 /* If pread64 is available, use it. It's faster if the kernel
4448 supports it (only one syscall), and it's 64-bit safe even on
4449 32-bit platforms (for instance, SPARC debugging a SPARC64
4450 application). */
4451 #ifdef HAVE_PREAD64
4452 bytes = pread64 (fd, myaddr, len, memaddr);
4453 #else
4454 bytes = -1;
4455 if (lseek (fd, memaddr, SEEK_SET) != -1)
4456 bytes = read (fd, myaddr, len);
4457 #endif
4458
4459 close (fd);
4460 if (bytes == len)
4461 return 0;
4462
4463 /* Some data was read, we'll try to get the rest with ptrace. */
4464 if (bytes > 0)
4465 {
4466 memaddr += bytes;
4467 myaddr += bytes;
4468 len -= bytes;
4469 }
4470 }
4471
4472 no_proc:
4473 /* Round starting address down to longword boundary. */
4474 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4475 /* Round ending address up; get number of longwords that makes. */
4476 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4477 / sizeof (PTRACE_XFER_TYPE));
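  /* Worked example, with 8-byte words and illustrative numbers:
     memaddr == 0x1003 and len == 6 give addr == 0x1000 and
     count == ((0x1009 - 0x1000) + 7) / 8 == 2. */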
4478 /* Allocate buffer of that many longwords. */
4479 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4480
4481 /* Read all the longwords. */
4482 errno = 0;
4483 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4484 {
4485 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4486 about coercing an 8 byte integer to a 4 byte pointer. */
4487 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4488 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4489 (PTRACE_ARG4_TYPE) 0);
4490 if (errno)
4491 break;
4492 }
4493 ret = errno;
4494
4495 /* Copy appropriate bytes out of the buffer. */
4496 if (i > 0)
4497 {
4498 i *= sizeof (PTRACE_XFER_TYPE);
4499 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4500 memcpy (myaddr,
4501 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4502 i < len ? i : len);
4503 }
4504
4505 return ret;
4506 }
4507
4508 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4509 memory at MEMADDR. On failure (cannot write to the inferior)
4510 returns the value of errno. Always succeeds if LEN is zero. */
4511
4512 static int
4513 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4514 {
4515 register int i;
4516 /* Round starting address down to longword boundary. */
4517 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4518 /* Round ending address up; get number of longwords that makes. */
4519 register int count
4520 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4521 / sizeof (PTRACE_XFER_TYPE);
4522
4523 /* Allocate buffer of that many longwords. */
4524 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4525 alloca (count * sizeof (PTRACE_XFER_TYPE));
4526
4527 int pid = lwpid_of (get_thread_lwp (current_inferior));
4528
4529 if (len == 0)
4530 {
4531 /* Zero length write always succeeds. */
4532 return 0;
4533 }
4534
4535 if (debug_threads)
4536 {
4537 /* Dump up to four bytes. */
4538 unsigned int val = * (unsigned int *) myaddr;
4539 if (len == 1)
4540 val = val & 0xff;
4541 else if (len == 2)
4542 val = val & 0xffff;
4543 else if (len == 3)
4544 val = val & 0xffffff;
4545 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4546 val, (long) memaddr);
4547 }
4548
4549 /* Fill start and end extra bytes of buffer with existing memory data. */
4550
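  /* E.g., writing 2 bytes at memaddr 0x1003 with 8-byte words touches
     a single word at 0x1000: we peek that word, splice the 2 bytes in
     at offset 3, and poke the whole word back. (Illustrative
     numbers.) */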
4551 errno = 0;
4552 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4553 about coercing an 8 byte integer to a 4 byte pointer. */
4554 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4555 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4556 (PTRACE_ARG4_TYPE) 0);
4557 if (errno)
4558 return errno;
4559
4560 if (count > 1)
4561 {
4562 errno = 0;
4563 buffer[count - 1]
4564 = ptrace (PTRACE_PEEKTEXT, pid,
4565 /* Coerce to a uintptr_t first to avoid potential gcc warning
4566 about coercing an 8 byte integer to a 4 byte pointer. */
4567 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4568 * sizeof (PTRACE_XFER_TYPE)),
4569 (PTRACE_ARG4_TYPE) 0);
4570 if (errno)
4571 return errno;
4572 }
4573
4574 /* Copy data to be written over corresponding part of buffer. */
4575
4576 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4577 myaddr, len);
4578
4579 /* Write the entire buffer. */
4580
4581 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4582 {
4583 errno = 0;
4584 ptrace (PTRACE_POKETEXT, pid,
4585 /* Coerce to a uintptr_t first to avoid potential gcc warning
4586 about coercing an 8 byte integer to a 4 byte pointer. */
4587 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4588 (PTRACE_ARG4_TYPE) buffer[i]);
4589 if (errno)
4590 return errno;
4591 }
4592
4593 return 0;
4594 }
4595
4596 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4597 static int linux_supports_tracefork_flag;
4598
4599 static void
4600 linux_enable_event_reporting (int pid)
4601 {
4602 if (!linux_supports_tracefork_flag)
4603 return;
4604
4605 ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_ARG3_TYPE) 0,
4606 (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4607 }
4608
4609 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4610
4611 static int
4612 linux_tracefork_grandchild (void *arg)
4613 {
4614 _exit (0);
4615 }
4616
4617 #define STACK_SIZE 4096
4618
4619 static int
4620 linux_tracefork_child (void *arg)
4621 {
4622 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
4623 kill (getpid (), SIGSTOP);
4624
4625 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4626
4627 if (fork () == 0)
4628 linux_tracefork_grandchild (NULL);
4629
4630 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4631
4632 #ifdef __ia64__
4633 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4634 CLONE_VM | SIGCHLD, NULL);
4635 #else
4636 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4637 CLONE_VM | SIGCHLD, NULL);
4638 #endif
4639
4640 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4641
4642 _exit (0);
4643 }
4644
4645 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4646 sure that we can enable the option, and that it had the desired
4647 effect. */
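/* Sketch of the handshake implemented below: the child calls
   PTRACE_TRACEME and stops itself with SIGSTOP; we set
   PTRACE_O_TRACEFORK on it and continue it; the child then forks.
   If the option works, waitpid reports a stop whose high bits encode
   PTRACE_EVENT_FORK, and PTRACE_GETEVENTMSG yields the grandchild's
   PID. */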
4648
4649 static void
4650 linux_test_for_tracefork (void)
4651 {
4652 int child_pid, ret, status;
4653 long second_pid;
4654 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4655 char *stack = xmalloc (STACK_SIZE * 4);
4656 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4657
4658 linux_supports_tracefork_flag = 0;
4659
4660 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4661
4662 child_pid = fork ();
4663 if (child_pid == 0)
4664 linux_tracefork_child (NULL);
4665
4666 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4667
4668 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4669 #ifdef __ia64__
4670 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4671 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4672 #else /* !__ia64__ */
4673 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4674 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4675 #endif /* !__ia64__ */
4676
4677 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4678
4679 if (child_pid == -1)
4680 perror_with_name ("clone");
4681
4682 ret = my_waitpid (child_pid, &status, 0);
4683 if (ret == -1)
4684 perror_with_name ("waitpid");
4685 else if (ret != child_pid)
4686 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4687 if (! WIFSTOPPED (status))
4688 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4689
4690 ret = ptrace (PTRACE_SETOPTIONS, child_pid, (PTRACE_ARG3_TYPE) 0,
4691 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4692 if (ret != 0)
4693 {
4694 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4695 (PTRACE_ARG4_TYPE) 0);
4696 if (ret != 0)
4697 {
4698 warning ("linux_test_for_tracefork: failed to kill child");
4699 return;
4700 }
4701
4702 ret = my_waitpid (child_pid, &status, 0);
4703 if (ret != child_pid)
4704 warning ("linux_test_for_tracefork: failed to wait for killed child");
4705 else if (!WIFSIGNALED (status))
4706 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4707 "killed child", status);
4708
4709 return;
4710 }
4711
4712 ret = ptrace (PTRACE_CONT, child_pid, (PTRACE_ARG3_TYPE) 0,
4713 (PTRACE_ARG4_TYPE) 0);
4714 if (ret != 0)
4715 warning ("linux_test_for_tracefork: failed to resume child");
4716
4717 ret = my_waitpid (child_pid, &status, 0);
4718
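  /* A ptrace event arrives as a SIGTRAP stop with the event code in
     bits 16 and up of the wait status; e.g., on typical Linux
     (SIGTRAP == 5, PTRACE_EVENT_FORK == 1) the status tested below
     would be 0x1057f. (Illustrative encoding.) */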
4719 if (ret == child_pid && WIFSTOPPED (status)
4720 && status >> 16 == PTRACE_EVENT_FORK)
4721 {
4722 second_pid = 0;
4723 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, (PTRACE_ARG3_TYPE) 0,
4724 &second_pid);
4725 if (ret == 0 && second_pid != 0)
4726 {
4727 int second_status;
4728
4729 linux_supports_tracefork_flag = 1;
4730 my_waitpid (second_pid, &second_status, 0);
4731 ret = ptrace (PTRACE_KILL, second_pid, (PTRACE_ARG3_TYPE) 0,
4732 (PTRACE_ARG4_TYPE) 0);
4733 if (ret != 0)
4734 warning ("linux_test_for_tracefork: failed to kill second child");
4735 my_waitpid (second_pid, &status, 0);
4736 }
4737 }
4738 else
4739 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4740 "(%d, status 0x%x)", ret, status);
4741
4742 do
4743 {
4744 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4745 (PTRACE_ARG4_TYPE) 0);
4746 if (ret != 0)
4747 warning ("linux_test_for_tracefork: failed to kill child");
4748 my_waitpid (child_pid, &status, 0);
4749 }
4750 while (WIFSTOPPED (status));
4751
4752 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4753 free (stack);
4754 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4755 }
4756
4757
4758 static void
4759 linux_look_up_symbols (void)
4760 {
4761 #ifdef USE_THREAD_DB
4762 struct process_info *proc = current_process ();
4763
4764 if (proc->private->thread_db != NULL)
4765 return;
4766
4767 /* If the kernel supports tracing forks, then it also supports tracing
4768 clones, and in that case we don't need to use the magic thread event
4769 breakpoint to learn about threads. */
4770 thread_db_init (!linux_supports_tracefork_flag);
4771 #endif
4772 }
4773
4774 static void
4775 linux_request_interrupt (void)
4776 {
4777 extern unsigned long signal_pid;
4778
4779 if (!ptid_equal (cont_thread, null_ptid)
4780 && !ptid_equal (cont_thread, minus_one_ptid))
4781 {
4782 struct lwp_info *lwp;
4783 int lwpid;
4784
4785 lwp = get_thread_lwp (current_inferior);
4786 lwpid = lwpid_of (lwp);
4787 kill_lwp (lwpid, SIGINT);
4788 }
4789 else
4790 kill_lwp (signal_pid, SIGINT);
4791 }
4792
4793 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4794 to debugger memory starting at MYADDR. */
4795
4796 static int
4797 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4798 {
4799 char filename[PATH_MAX];
4800 int fd, n;
4801 int pid = lwpid_of (get_thread_lwp (current_inferior));
4802
4803 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4804
4805 fd = open (filename, O_RDONLY);
4806 if (fd < 0)
4807 return -1;
4808
4809 if (offset != (CORE_ADDR) 0
4810 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4811 n = -1;
4812 else
4813 n = read (fd, myaddr, len);
4814
4815 close (fd);
4816
4817 return n;
4818 }
4819
4820 /* These breakpoint- and watchpoint-related wrapper functions simply
4821 pass on the function call if the target has registered a
4822 corresponding function. */
4823
4824 static int
4825 linux_insert_point (char type, CORE_ADDR addr, int len)
4826 {
4827 if (the_low_target.insert_point != NULL)
4828 return the_low_target.insert_point (type, addr, len);
4829 else
4830 /* Unsupported (see target.h). */
4831 return 1;
4832 }
4833
4834 static int
4835 linux_remove_point (char type, CORE_ADDR addr, int len)
4836 {
4837 if (the_low_target.remove_point != NULL)
4838 return the_low_target.remove_point (type, addr, len);
4839 else
4840 /* Unsupported (see target.h). */
4841 return 1;
4842 }
4843
4844 static int
4845 linux_stopped_by_watchpoint (void)
4846 {
4847 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4848
4849 return lwp->stopped_by_watchpoint;
4850 }
4851
4852 static CORE_ADDR
4853 linux_stopped_data_address (void)
4854 {
4855 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4856
4857 return lwp->stopped_data_address;
4858 }
4859
4860 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4861 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4862 && defined(PT_TEXT_END_ADDR)
4863
4864 /* This is only used for targets that define PT_TEXT_ADDR,
4865 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
4866 target presumably has other ways of acquiring this information, such
4867 as loadmaps. */
4868
4869 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4870 to tell gdb about. */
4871
4872 static int
4873 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4874 {
4875 unsigned long text, text_end, data;
4876 int pid = lwpid_of (get_thread_lwp (current_inferior));
4877
4878 errno = 0;
4879
4880 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_ADDR,
4881 (PTRACE_ARG4_TYPE) 0);
4882 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_END_ADDR,
4883 (PTRACE_ARG4_TYPE) 0);
4884 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_DATA_ADDR,
4885 (PTRACE_ARG4_TYPE) 0);
4886
4887 if (errno == 0)
4888 {
4889 /* Both text and data offsets produced at compile-time (and so
4890 used by gdb) are relative to the beginning of the program,
4891 with the data segment immediately following the text segment.
4892 However, the actual runtime layout in memory may put the data
4893 somewhere else, so when we send gdb a data base-address, we
4894 use the real data base address and subtract the compile-time
4895 data base-address from it (which is just the length of the
4896 text segment). BSS immediately follows data in both
4897 cases. */
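      /* Worked example (illustrative addresses): text == 0x10000000,
         text_end == 0x10008000 and data == 0x20000000 give
         *data_p == 0x1fff8000; gdb then adds its compile-time data
         offset (0x8000, the text length) to land on the real data
         segment. */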
4898 *text_p = text;
4899 *data_p = data - (text_end - text);
4900
4901 return 1;
4902 }
4903 return 0;
4904 }
4905 #endif
4906
4907 static int
4908 linux_qxfer_osdata (const char *annex,
4909 unsigned char *readbuf, unsigned const char *writebuf,
4910 CORE_ADDR offset, int len)
4911 {
4912 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4913 }
4914
4915 /* Convert a native/host siginfo object, into/from the siginfo in the
4916 layout of the inferior's architecture. */
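/* Judging from the memcpy fallback below, DIRECTION is 0 to convert
   from the native layout into INF_SIGINFO, and 1 to convert from
   INF_SIGINFO back into the native layout. */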
4917
4918 static void
4919 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4920 {
4921 int done = 0;
4922
4923 if (the_low_target.siginfo_fixup != NULL)
4924 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4925
4926 /* If there was no callback, or the callback didn't do anything,
4927 then just do a straight memcpy. */
4928 if (!done)
4929 {
4930 if (direction == 1)
4931 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4932 else
4933 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4934 }
4935 }
4936
4937 static int
4938 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4939 unsigned const char *writebuf, CORE_ADDR offset, int len)
4940 {
4941 int pid;
4942 siginfo_t siginfo;
4943 char inf_siginfo[sizeof (siginfo_t)];
4944
4945 if (current_inferior == NULL)
4946 return -1;
4947
4948 pid = lwpid_of (get_thread_lwp (current_inferior));
4949
4950 if (debug_threads)
4951 fprintf (stderr, "%s siginfo for lwp %d.\n",
4952 readbuf != NULL ? "Reading" : "Writing",
4953 pid);
4954
4955 if (offset >= sizeof (siginfo))
4956 return -1;
4957
4958 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4959 return -1;
4960
4961 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4962 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4963 inferior with a 64-bit GDBSERVER should look the same as debugging it
4964 with a 32-bit GDBSERVER, we need to convert it. */
4965 siginfo_fixup (&siginfo, inf_siginfo, 0);
4966
4967 if (offset + len > sizeof (siginfo))
4968 len = sizeof (siginfo) - offset;
4969
4970 if (readbuf != NULL)
4971 memcpy (readbuf, inf_siginfo + offset, len);
4972 else
4973 {
4974 memcpy (inf_siginfo + offset, writebuf, len);
4975
4976 /* Convert back to ptrace layout before flushing it out. */
4977 siginfo_fixup (&siginfo, inf_siginfo, 1);
4978
4979 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4980 return -1;
4981 }
4982
4983 return len;
4984 }
4985
4986 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4987 it lets us notice when children change state; and it acts as the
4988 handler for the sigsuspend in my_waitpid. */
4989
4990 static void
4991 sigchld_handler (int signo)
4992 {
4993 int old_errno = errno;
4994
4995 if (debug_threads)
4996 {
4997 do
4998 {
4999 /* fprintf is not async-signal-safe, so call write
5000 directly. */
5001 if (write (2, "sigchld_handler\n",
5002 sizeof ("sigchld_handler\n") - 1) < 0)
5003 break; /* just ignore */
5004 } while (0);
5005 }
5006
5007 if (target_is_async_p ())
5008 async_file_mark (); /* trigger a linux_wait */
5009
5010 errno = old_errno;
5011 }
5012
5013 static int
5014 linux_supports_non_stop (void)
5015 {
5016 return 1;
5017 }
5018
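/* Enable or disable async event reporting. This appears to use the
   self-pipe pattern: sigchld_handler and async_file_mark write to the
   pipe, and the event loop watches the read end, so a pending child
   event wakes the loop and triggers a linux_wait. */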
5019 static int
5020 linux_async (int enable)
5021 {
5022 int previous = (linux_event_pipe[0] != -1);
5023
5024 if (debug_threads)
5025 fprintf (stderr, "linux_async (%d), previous=%d\n",
5026 enable, previous);
5027
5028 if (previous != enable)
5029 {
5030 sigset_t mask;
5031 sigemptyset (&mask);
5032 sigaddset (&mask, SIGCHLD);
5033
5034 sigprocmask (SIG_BLOCK, &mask, NULL);
5035
5036 if (enable)
5037 {
5038 if (pipe (linux_event_pipe) == -1)
5039 fatal ("creating event pipe failed.");
5040
5041 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5042 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5043
5044 /* Register the event loop handler. */
5045 add_file_handler (linux_event_pipe[0],
5046 handle_target_event, NULL);
5047
5048 /* Always trigger a linux_wait. */
5049 async_file_mark ();
5050 }
5051 else
5052 {
5053 delete_file_handler (linux_event_pipe[0]);
5054
5055 close (linux_event_pipe[0]);
5056 close (linux_event_pipe[1]);
5057 linux_event_pipe[0] = -1;
5058 linux_event_pipe[1] = -1;
5059 }
5060
5061 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5062 }
5063
5064 return previous;
5065 }
5066
5067 static int
5068 linux_start_non_stop (int nonstop)
5069 {
5070 /* Register or unregister from event-loop accordingly. */
5071 linux_async (nonstop);
5072 return 0;
5073 }
5074
5075 static int
5076 linux_supports_multi_process (void)
5077 {
5078 return 1;
5079 }
5080
5081 static int
5082 linux_supports_disable_randomization (void)
5083 {
5084 #ifdef HAVE_PERSONALITY
5085 return 1;
5086 #else
5087 return 0;
5088 #endif
5089 }
5090
5091 static int
5092 linux_supports_agent (void)
5093 {
5094 return 1;
5095 }
5096
5097 /* Enumerate spufs IDs for process PID. */
5098 static int
5099 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5100 {
5101 int pos = 0;
5102 int written = 0;
5103 char path[128];
5104 DIR *dir;
5105 struct dirent *entry;
5106
5107 sprintf (path, "/proc/%ld/fd", pid);
5108 dir = opendir (path);
5109 if (!dir)
5110 return -1;
5111
5112 rewinddir (dir);
5113 while ((entry = readdir (dir)) != NULL)
5114 {
5115 struct stat st;
5116 struct statfs stfs;
5117 int fd;
5118
5119 fd = atoi (entry->d_name);
5120 if (!fd)
5121 continue;
5122
5123 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5124 if (stat (path, &st) != 0)
5125 continue;
5126 if (!S_ISDIR (st.st_mode))
5127 continue;
5128
5129 if (statfs (path, &stfs) != 0)
5130 continue;
5131 if (stfs.f_type != SPUFS_MAGIC)
5132 continue;
5133
5134 if (pos >= offset && pos + 4 <= offset + len)
5135 {
5136 *(unsigned int *)(buf + pos - offset) = fd;
5137 written += 4;
5138 }
5139 pos += 4;
5140 }
5141
5142 closedir (dir);
5143 return written;
5144 }
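/* OFFSET and LEN select a byte window over the 4-byte IDs: e.g., with
   offset 4 and len 8, only the second and third contexts found are
   copied out and WRITTEN comes back as 8. (Worked example only.) */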
5145
5146 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5147 object type, using the /proc file system. */
5148 static int
5149 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5150 unsigned const char *writebuf,
5151 CORE_ADDR offset, int len)
5152 {
5153 long pid = lwpid_of (get_thread_lwp (current_inferior));
5154 char buf[128];
5155 int fd = 0;
5156 int ret = 0;
5157
5158 if (!writebuf && !readbuf)
5159 return -1;
5160
5161 if (!*annex)
5162 {
5163 if (!readbuf)
5164 return -1;
5165 else
5166 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5167 }
5168
5169 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5170 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5171 if (fd <= 0)
5172 return -1;
5173
5174 if (offset != 0
5175 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5176 {
5177 close (fd);
5178 return 0;
5179 }
5180
5181 if (writebuf)
5182 ret = write (fd, writebuf, (size_t) len);
5183 else
5184 ret = read (fd, readbuf, (size_t) len);
5185
5186 close (fd);
5187 return ret;
5188 }
5189
5190 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5191 struct target_loadseg
5192 {
5193 /* Core address to which the segment is mapped. */
5194 Elf32_Addr addr;
5195 /* VMA recorded in the program header. */
5196 Elf32_Addr p_vaddr;
5197 /* Size of this segment in memory. */
5198 Elf32_Word p_memsz;
5199 };
5200
5201 # if defined PT_GETDSBT
5202 struct target_loadmap
5203 {
5204 /* Protocol version number, must be zero. */
5205 Elf32_Word version;
5206 /* Pointer to the DSBT table, its size, and the DSBT index. */
5207 unsigned *dsbt_table;
5208 unsigned dsbt_size, dsbt_index;
5209 /* Number of segments in this map. */
5210 Elf32_Word nsegs;
5211 /* The actual memory map. */
5212 struct target_loadseg segs[/*nsegs*/];
5213 };
5214 # define LINUX_LOADMAP PT_GETDSBT
5215 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5216 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5217 # else
5218 struct target_loadmap
5219 {
5220 /* Protocol version number, must be zero. */
5221 Elf32_Half version;
5222 /* Number of segments in this map. */
5223 Elf32_Half nsegs;
5224 /* The actual memory map. */
5225 struct target_loadseg segs[/*nsegs*/];
5226 };
5227 # define LINUX_LOADMAP PTRACE_GETFDPIC
5228 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5229 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5230 # endif
5231
5232 static int
5233 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5234 unsigned char *myaddr, unsigned int len)
5235 {
5236 int pid = lwpid_of (get_thread_lwp (current_inferior));
5237 int addr = -1;
5238 struct target_loadmap *data = NULL;
5239 unsigned int actual_length, copy_length;
5240
5241 if (strcmp (annex, "exec") == 0)
5242 addr = (int) LINUX_LOADMAP_EXEC;
5243 else if (strcmp (annex, "interp") == 0)
5244 addr = (int) LINUX_LOADMAP_INTERP;
5245 else
5246 return -1;
5247
5248 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5249 return -1;
5250
5251 if (data == NULL)
5252 return -1;
5253
5254 actual_length = sizeof (struct target_loadmap)
5255 + sizeof (struct target_loadseg) * data->nsegs;
5256
5257 if (offset < 0 || offset > actual_length)
5258 return -1;
5259
5260 copy_length = actual_length - offset < len ? actual_length - offset : len;
5261 memcpy (myaddr, (char *) data + offset, copy_length);
5262 return copy_length;
5263 }
5264 #else
5265 # define linux_read_loadmap NULL
5266 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5267
5268 static void
5269 linux_process_qsupported (const char *query)
5270 {
5271 if (the_low_target.process_qsupported != NULL)
5272 the_low_target.process_qsupported (query);
5273 }
5274
5275 static int
5276 linux_supports_tracepoints (void)
5277 {
5278 if (*the_low_target.supports_tracepoints == NULL)
5279 return 0;
5280
5281 return (*the_low_target.supports_tracepoints) ();
5282 }
5283
5284 static CORE_ADDR
5285 linux_read_pc (struct regcache *regcache)
5286 {
5287 if (the_low_target.get_pc == NULL)
5288 return 0;
5289
5290 return (*the_low_target.get_pc) (regcache);
5291 }
5292
5293 static void
5294 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5295 {
5296 gdb_assert (the_low_target.set_pc != NULL);
5297
5298 (*the_low_target.set_pc) (regcache, pc);
5299 }
5300
5301 static int
5302 linux_thread_stopped (struct thread_info *thread)
5303 {
5304 return get_thread_lwp (thread)->stopped;
5305 }
5306
5307 /* This exposes stop-all-threads functionality to other modules. */
5308
5309 static void
5310 linux_pause_all (int freeze)
5311 {
5312 stop_all_lwps (freeze, NULL);
5313 }
5314
5315 /* This exposes unstop-all-threads functionality to other gdbserver
5316 modules. */
5317
5318 static void
5319 linux_unpause_all (int unfreeze)
5320 {
5321 unstop_all_lwps (unfreeze, NULL);
5322 }
5323
5324 static int
5325 linux_prepare_to_access_memory (void)
5326 {
5327 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5328 running LWP. */
5329 if (non_stop)
5330 linux_pause_all (1);
5331 return 0;
5332 }
5333
5334 static void
5335 linux_done_accessing_memory (void)
5336 {
5337 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5338 running LWP. */
5339 if (non_stop)
5340 linux_unpause_all (1);
5341 }
5342
5343 static int
5344 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5345 CORE_ADDR collector,
5346 CORE_ADDR lockaddr,
5347 ULONGEST orig_size,
5348 CORE_ADDR *jump_entry,
5349 CORE_ADDR *trampoline,
5350 ULONGEST *trampoline_size,
5351 unsigned char *jjump_pad_insn,
5352 ULONGEST *jjump_pad_insn_size,
5353 CORE_ADDR *adjusted_insn_addr,
5354 CORE_ADDR *adjusted_insn_addr_end,
5355 char *err)
5356 {
5357 return (*the_low_target.install_fast_tracepoint_jump_pad)
5358 (tpoint, tpaddr, collector, lockaddr, orig_size,
5359 jump_entry, trampoline, trampoline_size,
5360 jjump_pad_insn, jjump_pad_insn_size,
5361 adjusted_insn_addr, adjusted_insn_addr_end,
5362 err);
5363 }
5364
5365 static struct emit_ops *
5366 linux_emit_ops (void)
5367 {
5368 if (the_low_target.emit_ops != NULL)
5369 return (*the_low_target.emit_ops) ();
5370 else
5371 return NULL;
5372 }
5373
5374 static int
5375 linux_get_min_fast_tracepoint_insn_len (void)
5376 {
5377 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5378 }
5379
5380 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
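/* The auxv file is a stream of (a_type, a_val) pairs terminated by an
   AT_NULL entry, e.g. { AT_PHDR, 0x400040 }, { AT_PHNUM, 9 }, ...,
   { AT_NULL, 0 }; entry sizes differ between 32-bit and 64-bit
   inferiors, hence IS_ELF64. (Example values are illustrative.) */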
5381
5382 static int
5383 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5384 CORE_ADDR *phdr_memaddr, int *num_phdr)
5385 {
5386 char filename[PATH_MAX];
5387 int fd;
5388 const int auxv_size = is_elf64
5389 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5390 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5391
5392 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5393
5394 fd = open (filename, O_RDONLY);
5395 if (fd < 0)
5396 return 1;
5397
5398 *phdr_memaddr = 0;
5399 *num_phdr = 0;
5400 while (read (fd, buf, auxv_size) == auxv_size
5401 && (*phdr_memaddr == 0 || *num_phdr == 0))
5402 {
5403 if (is_elf64)
5404 {
5405 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5406
5407 switch (aux->a_type)
5408 {
5409 case AT_PHDR:
5410 *phdr_memaddr = aux->a_un.a_val;
5411 break;
5412 case AT_PHNUM:
5413 *num_phdr = aux->a_un.a_val;
5414 break;
5415 }
5416 }
5417 else
5418 {
5419 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5420
5421 switch (aux->a_type)
5422 {
5423 case AT_PHDR:
5424 *phdr_memaddr = aux->a_un.a_val;
5425 break;
5426 case AT_PHNUM:
5427 *num_phdr = aux->a_un.a_val;
5428 break;
5429 }
5430 }
5431 }
5432
5433 close (fd);
5434
5435 if (*phdr_memaddr == 0 || *num_phdr == 0)
5436 {
5437 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5438 "phdr_memaddr = %ld, phdr_num = %d",
5439 (long) *phdr_memaddr, *num_phdr);
5440 return 2;
5441 }
5442
5443 return 0;
5444 }
5445
5446 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5447
5448 static CORE_ADDR
5449 get_dynamic (const int pid, const int is_elf64)
5450 {
5451 CORE_ADDR phdr_memaddr, relocation;
5452 int num_phdr, i;
5453 unsigned char *phdr_buf;
5454 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5455
5456 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5457 return 0;
5458
5459 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5460 phdr_buf = alloca (num_phdr * phdr_size);
5461
5462 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5463 return 0;
5464
5465 /* Compute relocation: it is expected to be 0 for "regular" executables,
5466 non-zero for PIE ones. */
5467 relocation = -1;
5468 for (i = 0; relocation == -1 && i < num_phdr; i++)
5469 if (is_elf64)
5470 {
5471 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5472
5473 if (p->p_type == PT_PHDR)
5474 relocation = phdr_memaddr - p->p_vaddr;
5475 }
5476 else
5477 {
5478 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5479
5480 if (p->p_type == PT_PHDR)
5481 relocation = phdr_memaddr - p->p_vaddr;
5482 }
5483
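  /* Worked example (illustrative addresses): for a PIE loaded at
     0x7f0000000000 whose PT_PHDR has p_vaddr 0x40, AT_PHDR yields
     phdr_memaddr == 0x7f0000000040, so relocation == 0x7f0000000000,
     the load base; for a fixed-address executable the two coincide
     and relocation is 0. */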
5484 if (relocation == -1)
5485 {
5486 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5487 real-world executables, including PIE executables, always have
5488 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5489 from fpc (Free Pascal 2.4) binaries, but those neither need nor
5490 provide DT_DEBUG anyway (fpc binaries are statically linked).
5491
5492 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5493
5494 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5495
5496 return 0;
5497 }
5498
5499 for (i = 0; i < num_phdr; i++)
5500 {
5501 if (is_elf64)
5502 {
5503 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5504
5505 if (p->p_type == PT_DYNAMIC)
5506 return p->p_vaddr + relocation;
5507 }
5508 else
5509 {
5510 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5511
5512 if (p->p_type == PT_DYNAMIC)
5513 return p->p_vaddr + relocation;
5514 }
5515 }
5516
5517 return 0;
5518 }
5519
5520 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5521 can be 0 if the inferior does not yet have the library list initialized.
5522 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5523 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5524
5525 static CORE_ADDR
5526 get_r_debug (const int pid, const int is_elf64)
5527 {
5528 CORE_ADDR dynamic_memaddr;
5529 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5530 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5531 CORE_ADDR map = -1;
5532
5533 dynamic_memaddr = get_dynamic (pid, is_elf64);
5534 if (dynamic_memaddr == 0)
5535 return map;
5536
5537 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5538 {
5539 if (is_elf64)
5540 {
5541 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5542 #ifdef DT_MIPS_RLD_MAP
5543 union
5544 {
5545 Elf64_Xword map;
5546 unsigned char buf[sizeof (Elf64_Xword)];
5547 }
5548 rld_map;
5549
5550 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5551 {
5552 if (linux_read_memory (dyn->d_un.d_val,
5553 rld_map.buf, sizeof (rld_map.buf)) == 0)
5554 return rld_map.map;
5555 else
5556 break;
5557 }
5558 #endif /* DT_MIPS_RLD_MAP */
5559
5560 if (dyn->d_tag == DT_DEBUG && map == -1)
5561 map = dyn->d_un.d_val;
5562
5563 if (dyn->d_tag == DT_NULL)
5564 break;
5565 }
5566 else
5567 {
5568 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5569 #ifdef DT_MIPS_RLD_MAP
5570 union
5571 {
5572 Elf32_Word map;
5573 unsigned char buf[sizeof (Elf32_Word)];
5574 }
5575 rld_map;
5576
5577 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5578 {
5579 if (linux_read_memory (dyn->d_un.d_val,
5580 rld_map.buf, sizeof (rld_map.buf)) == 0)
5581 return rld_map.map;
5582 else
5583 break;
5584 }
5585 #endif /* DT_MIPS_RLD_MAP */
5586
5587 if (dyn->d_tag == DT_DEBUG && map == -1)
5588 map = dyn->d_un.d_val;
5589
5590 if (dyn->d_tag == DT_NULL)
5591 break;
5592 }
5593
5594 dynamic_memaddr += dyn_size;
5595 }
5596
5597 return map;
5598 }
5599
5600 /* Read one pointer from MEMADDR in the inferior. */
5601
5602 static int
5603 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5604 {
5605 int ret;
5606
5607 /* Go through a union so this works on either big or little endian
5608 hosts, when the inferior's pointer size is smaller than the size
5609 of CORE_ADDR. It is assumed the inferior's endianness is the
5610 same as the superior's. */
5611 union
5612 {
5613 CORE_ADDR core_addr;
5614 unsigned int ui;
5615 unsigned char uc;
5616 } addr;
5617
5618 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5619 if (ret == 0)
5620 {
5621 if (ptr_size == sizeof (CORE_ADDR))
5622 *ptr = addr.core_addr;
5623 else if (ptr_size == sizeof (unsigned int))
5624 *ptr = addr.ui;
5625 else
5626 gdb_assert_not_reached ("unhandled pointer size");
5627 }
5628 return ret;
5629 }
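/* The union matters on big-endian hosts: reading a 4-byte inferior
   pointer into the first bytes of an 8-byte CORE_ADDR would land it
   in the high half, whereas reinterpreting the same bytes through
   ADDR.UI yields the correct value on either endianness. */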
5630
5631 struct link_map_offsets
5632 {
5633 /* Offset and size of r_debug.r_version. */
5634 int r_version_offset;
5635
5636 /* Offset and size of r_debug.r_map. */
5637 int r_map_offset;
5638
5639 /* Offset to l_addr field in struct link_map. */
5640 int l_addr_offset;
5641
5642 /* Offset to l_name field in struct link_map. */
5643 int l_name_offset;
5644
5645 /* Offset to l_ld field in struct link_map. */
5646 int l_ld_offset;
5647
5648 /* Offset to l_next field in struct link_map. */
5649 int l_next_offset;
5650
5651 /* Offset to l_prev field in struct link_map. */
5652 int l_prev_offset;
5653 };
5654
5655 /* Construct qXfer:libraries-svr4:read reply. */
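/* An illustrative reply (values depend on the inferior):
   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib/libc.so.6" lm="0x7ffff7fc4000"
   l_addr="0x7ffff7dd8000" l_ld="0x7ffff7fbc9e8"/>
   </library-list-svr4> */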
5656
5657 static int
5658 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5659 unsigned const char *writebuf,
5660 CORE_ADDR offset, int len)
5661 {
5662 char *document;
5663 unsigned document_len;
5664 struct process_info_private *const priv = current_process ()->private;
5665 char filename[PATH_MAX];
5666 int pid, is_elf64;
5667
5668 static const struct link_map_offsets lmo_32bit_offsets =
5669 {
5670 0, /* r_version offset. */
5671 4, /* r_debug.r_map offset. */
5672 0, /* l_addr offset in link_map. */
5673 4, /* l_name offset in link_map. */
5674 8, /* l_ld offset in link_map. */
5675 12, /* l_next offset in link_map. */
5676 16 /* l_prev offset in link_map. */
5677 };
5678
5679 static const struct link_map_offsets lmo_64bit_offsets =
5680 {
5681 0, /* r_version offset. */
5682 8, /* r_debug.r_map offset. */
5683 0, /* l_addr offset in link_map. */
5684 8, /* l_name offset in link_map. */
5685 16, /* l_ld offset in link_map. */
5686 24, /* l_next offset in link_map. */
5687 32 /* l_prev offset in link_map. */
5688 };
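  /* These offset tables are a sketch of glibc's layouts: on 32-bit
     targets r_debug starts with int r_version followed by the r_map
     pointer at offset 4, and link_map packs the 4-byte l_addr,
     l_name, l_ld, l_next and l_prev fields back to back; doubling
     the pointer size (with 8-byte alignment after r_version) gives
     the 64-bit table. The authoritative layout is glibc's
     <link.h>. */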
5689 const struct link_map_offsets *lmo;
5690 unsigned int machine;
5691
5692 if (writebuf != NULL)
5693 return -2;
5694 if (readbuf == NULL)
5695 return -1;
5696
5697 pid = lwpid_of (get_thread_lwp (current_inferior));
5698 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5699 is_elf64 = elf_64_file_p (filename, &machine);
5700 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5701
5702 if (priv->r_debug == 0)
5703 priv->r_debug = get_r_debug (pid, is_elf64);
5704
5705 /* We failed to find DT_DEBUG. This situation will not change for this
5706 inferior, so do not retry. Report it to GDB as E01; see GDB's
5707 solib-svr4.c for the reasons. */
5708 if (priv->r_debug == (CORE_ADDR) -1)
5709 return -1;
5710
5711 if (priv->r_debug == 0)
5712 {
5713 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5714 }
5715 else
5716 {
5717 int allocated = 1024;
5718 char *p;
5719 const int ptr_size = is_elf64 ? 8 : 4;
5720 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5721 int r_version, header_done = 0;
5722
5723 document = xmalloc (allocated);
5724 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5725 p = document + strlen (document);
5726
5727 r_version = 0;
5728 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5729 (unsigned char *) &r_version,
5730 sizeof (r_version)) != 0
5731 || r_version != 1)
5732 {
5733 warning ("unexpected r_debug version %d", r_version);
5734 goto done;
5735 }
5736
5737 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5738 &lm_addr, ptr_size) != 0)
5739 {
5740 warning ("unable to read r_map from 0x%lx",
5741 (long) priv->r_debug + lmo->r_map_offset);
5742 goto done;
5743 }
5744
      lm_prev = 0;
      while (read_one_ptr (lm_addr + lmo->l_name_offset,
			   &l_name, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_addr_offset,
			      &l_addr, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_ld_offset,
			      &l_ld, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_prev_offset,
			      &l_prev, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_next_offset,
			      &l_next, ptr_size) == 0)
	{
	  unsigned char libname[PATH_MAX];

	  if (lm_prev != l_prev)
	    {
	      warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		       (long) lm_prev, (long) l_prev);
	      break;
	    }

	  /* Ignore read errors here: the name is usually shorter than
	     PATH_MAX, so the read may legitimately stop early.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size: xml_escape_text below expands each
		 character to at most six ("&quot;").  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

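	      /* The extra 200 bytes leave room for the fixed XML markup
		 and the three hex-printed addresses emitted for this
		 entry below.  */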
	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	  else if (lm_prev == 0)
	    {
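	      /* The first entry (lm_prev == 0) with an empty l_name is
		 the main executable itself; report its link map address
		 in the main-lm attribute instead of as a <library>
		 element.  */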
	      sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	      p = p + strlen (p);
	    }

	  if (l_next == 0)
	    break;

	  lm_prev = lm_addr;
	  lm_addr = l_next;
	}
 done:
      if (!header_done)
	{
	  /* Empty list; terminate `<library-list-svr4'.  */
	  strcpy (p, "/>");
	}
      else
	strcpy (p, "</library-list-svr4>");
    }

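  /* GDB fetches the document in chunks: serve only the window starting
     at OFFSET, at most LEN bytes long, and let the caller iterate until
     a short or empty transfer signals the end.  */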
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

#ifdef HAVE_LINUX_BTRACE

/* Enable branch tracing.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

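  /* Delegate to the common Linux btrace code; on success, record the
     target's pointer width in bits, taken from the size of register 0,
     so that trace addresses can be interpreted correctly.  */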
  tinfo = linux_enable_btrace (ptid);
  if (tinfo != NULL)
    tinfo->ptr_bits = register_size (0) * 8;

  return tinfo;
}

/* Read branch trace data as btrace xml document.  */

static void
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  int i;

  btrace = linux_read_btrace (tinfo, type);

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

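  /* Each block covers a range of instructions that was executed
     sequentially; emit one <block> element per range.  */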
  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);
}
#endif /* HAVE_LINUX_BTRACE */

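/* These are positional initializers, so the entries below must stay in
   the same order as the members of struct target_ops (see target.h).  */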
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	       \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
  linux_ptrace_init_warnings ();
#ifdef HAVE_LINUX_REGSETS
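  /* Count the regsets provided by the target (the table ends with an
     entry whose size is negative) and allocate one "disabled" flag per
     regset.  */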
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}