]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
* gdb.dwarf2/gdb-index.exp (add_gdb_index): Use explicit test name
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
28e7fd62 2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
d26e3629 21#include "linux-osdata.h"
58b4daa5 22#include "agent.h"
da6d8c04 23
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
8bdce1ff 26#include "gdb_wait.h"
da6d8c04 27#include <stdio.h>
da6d8c04 28#include <sys/ptrace.h>
af96c192 29#include "linux-ptrace.h"
e3deef73 30#include "linux-procfs.h"
da6d8c04
DJ
31#include <signal.h>
32#include <sys/ioctl.h>
33#include <fcntl.h>
d07c63e7 34#include <string.h>
0a30fbc4
DJ
35#include <stdlib.h>
36#include <unistd.h>
fa6a77dc 37#include <errno.h>
fd500816 38#include <sys/syscall.h>
f9387fc3 39#include <sched.h>
07e059b5
VP
40#include <ctype.h>
41#include <pwd.h>
42#include <sys/types.h>
43#include <dirent.h>
8bdce1ff 44#include "gdb_stat.h"
efcbbd14 45#include <sys/vfs.h>
1570b33e 46#include <sys/uio.h>
957f3f49
DE
47#ifndef ELFMAG0
48/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52#include <elf.h>
53#endif
efcbbd14
UW
54
55#ifndef SPUFS_MAGIC
56#define SPUFS_MAGIC 0x23c9b64e
57#endif
da6d8c04 58
03583c20
UW
59#ifdef HAVE_PERSONALITY
60# include <sys/personality.h>
61# if !HAVE_DECL_ADDR_NO_RANDOMIZE
62# define ADDR_NO_RANDOMIZE 0x0040000
63# endif
64#endif
65
fd462a61
DJ
66#ifndef O_LARGEFILE
67#define O_LARGEFILE 0
68#endif
69
ec8ebe72
DE
70#ifndef W_STOPCODE
71#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72#endif
73
1a981360
PA
74/* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76#ifndef __SIGRTMIN
77#define __SIGRTMIN 32
78#endif
79
db0dfaa0
LM
80/* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83#if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86#if defined(__mcoldfire__)
87/* These are still undefined in 3.10 kernels. */
88#define PT_TEXT_ADDR 49*4
89#define PT_DATA_ADDR 50*4
90#define PT_TEXT_END_ADDR 51*4
91/* BFIN already defines these since at least 2.6.32 kernels. */
92#elif defined(BFIN)
93#define PT_TEXT_ADDR 220
94#define PT_TEXT_END_ADDR 224
95#define PT_DATA_ADDR 228
96/* These are still undefined in 3.10 kernels. */
97#elif defined(__TMS320C6X__)
98#define PT_TEXT_ADDR (0x10000*4)
99#define PT_DATA_ADDR (0x10004*4)
100#define PT_TEXT_END_ADDR (0x10008*4)
101#endif
102#endif
103
9accd112
MM
104#ifdef HAVE_LINUX_BTRACE
105# include "linux-btrace.h"
106#endif
107
8365dcf5
TJB
108#ifndef HAVE_ELF32_AUXV_T
109/* Copied from glibc's elf.h. */
110typedef struct
111{
112 uint32_t a_type; /* Entry type */
113 union
114 {
115 uint32_t a_val; /* Integer value */
116 /* We use to have pointer elements added here. We cannot do that,
117 though, since it does not work when using 32-bit definitions
118 on 64-bit platforms and vice versa. */
119 } a_un;
120} Elf32_auxv_t;
121#endif
122
123#ifndef HAVE_ELF64_AUXV_T
124/* Copied from glibc's elf.h. */
125typedef struct
126{
127 uint64_t a_type; /* Entry type */
128 union
129 {
130 uint64_t a_val; /* Integer value */
131 /* We use to have pointer elements added here. We cannot do that,
132 though, since it does not work when using 32-bit definitions
133 on 64-bit platforms and vice versa. */
134 } a_un;
135} Elf64_auxv_t;
136#endif
137
24a09b5f
DJ
138/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
139 representation of the thread ID.
611cb4a5 140
54a0b537 141 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
142 the same as the LWP ID.
143
144 ``all_processes'' is keyed by the "overall process ID", which
145 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 146
54a0b537 147struct inferior_list all_lwps;
0d62e5e8 148
05044653
PA
149/* A list of all unknown processes which receive stop signals. Some
150 other process will presumably claim each of these as forked
151 children momentarily. */
24a09b5f 152
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list; entries are pushed by add_to_pid_list and
   removed by pull_pid_from_list.  */
struct simple_pid_list *stopped_pids;
165
166/* Trivial list manipulation functions to keep track of a list of new
167 stopped processes. */
168
169static void
170add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
171{
172 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
173
174 new_pid->pid = pid;
175 new_pid->status = status;
176 new_pid->next = *listp;
177 *listp = new_pid;
178}
179
180static int
181pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
182{
183 struct simple_pid_list **p;
184
185 for (p = listp; *p != NULL; p = &(*p)->next)
186 if ((*p)->pid == pid)
187 {
188 struct simple_pid_list *next = (*p)->next;
189
190 *statusp = (*p)->status;
191 xfree (*p);
192 *p = next;
193 return 1;
194 }
195 return 0;
196}
24a09b5f 197
bde24c0a
PA
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  Consulted e.g. by
   handle_extended_wait to decide whether a freshly cloned LWP should
   be left stopped or suspended.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
212
213/* FIXME make into a target method? */
24a09b5f 214int using_threads = 1;
24a09b5f 215
fa593d66
PA
216/* True if we're presently stabilizing threads (moving them out of
217 jump pads). */
218static int stabilizing_threads;
219
2acc282a 220static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 221 int step, int signal, siginfo_t *info);
2bd7c093 222static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
223static void stop_all_lwps (int suspend, struct lwp_info *except);
224static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 225static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 226static void *add_lwp (ptid_t ptid);
c35fafde 227static int linux_stopped_by_watchpoint (void);
95954743 228static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 229static void proceed_all_lwps (void);
d50171e4
PA
230static int finish_step_over (struct lwp_info *lwp);
231static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
232static int kill_lwp (unsigned long lwpid, int signo);
233
234/* True if the low target can hardware single-step. Such targets
235 don't need a BREAKPOINT_REINSERT_ADDR callback. */
236
237static int
238can_hardware_single_step (void)
239{
240 return (the_low_target.breakpoint_reinsert_addr == NULL);
241}
242
243/* True if the low target supports memory breakpoints. If so, we'll
244 have a GET_PC implementation. */
245
246static int
247supports_breakpoints (void)
248{
249 return (the_low_target.get_pc != NULL);
250}
0d62e5e8 251
fa593d66
PA
252/* Returns true if this target can support fast tracepoints. This
253 does not mean that the in-process agent has been loaded in the
254 inferior. */
255
256static int
257supports_fast_tracepoints (void)
258{
259 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
260}
261
c2d6af84
PA
262/* True if LWP is stopped in its stepping range. */
263
264static int
265lwp_in_step_range (struct lwp_info *lwp)
266{
267 CORE_ADDR pc = lwp->stop_pc;
268
269 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
270}
271
0d62e5e8
DJ
/* A signal intercepted for an LWP -- presumably queued for delivery
   when the LWP is next resumed; confirm at the use sites of
   pending_signals elsewhere in this file.  */

struct pending_signals
{
  /* Signal number.  */
  int signal;
  /* The siginfo captured along with the signal.  */
  siginfo_t info;
  /* Link to the previously queued entry.  */
  struct pending_signals *prev;
};
611cb4a5 278
bd99dc85
PA
279/* The read/write ends of the pipe registered as waitable file in the
280 event loop. */
281static int linux_event_pipe[2] = { -1, -1 };
282
283/* True if we're currently in async mode. */
284#define target_is_async_p() (linux_event_pipe[0] != -1)
285
02fc4de7 286static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
287static void wait_for_sigstop (struct inferior_list_entry *entry);
288
d0722149
DE
/* Classify HEADER: return 1 if it is a 64-bit ELF header, 0 if it is
   an ELF header of another class, and -1 if the magic bytes do not
   identify an ELF image at all.  *MACHINE receives e_machine for ELF
   input, EM_NONE otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    {
      /* Not an ELF image.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
306
/* Return nonzero if FILE is a 64-bit ELF file, zero if it is not,
   and -1 if the file cannot be opened.  On ELF input, *MACHINE is
   set to the e_machine field (see elf_64_header_p).  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  ssize_t nbytes;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  nbytes = read (fd, &header, sizeof (header));
  close (fd);

  /* A short read means there cannot be a complete ELF header.  */
  if (nbytes != sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
330
be07f1a2
PA
331/* Accepts an integer PID; Returns true if the executable PID is
332 running is a 64-bit ELF file.. */
333
334int
214d508e 335linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 336{
d8d2a3ee 337 char file[PATH_MAX];
be07f1a2
PA
338
339 sprintf (file, "/proc/%d/exe", pid);
214d508e 340 return elf_64_file_p (file, machine);
be07f1a2
PA
341}
342
bd99dc85
PA
343static void
344delete_lwp (struct lwp_info *lwp)
345{
346 remove_thread (get_lwp_thread (lwp));
347 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 348 free (lwp->arch_private);
bd99dc85
PA
349 free (lwp);
350}
351
95954743
PA
352/* Add a process to the common process list, and set its private
353 data. */
354
static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  /* Private data holds Linux-specific per-process state.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  /* Give the low target a chance to allocate its own per-process
     (architecture) data.  */
  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
371
bd99dc85
PA
372/* Handle a GNU/Linux extended wait response. If we see a clone
373 event, we need to add the new LWP to our list (and not report the
374 trap to higher layers). */
0d62e5e8 375
24a09b5f 376static void
54a0b537 377handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
378{
379 int event = wstat >> 16;
54a0b537 380 struct lwp_info *new_lwp;
24a09b5f
DJ
381
382 if (event == PTRACE_EVENT_CLONE)
383 {
95954743 384 ptid_t ptid;
24a09b5f 385 unsigned long new_pid;
05044653 386 int ret, status;
24a09b5f 387
b8e1b30e 388 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
56f7af9c 389 &new_pid);
24a09b5f
DJ
390
391 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 392 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
393 {
394 /* The new child has a pending SIGSTOP. We can't affect it until it
395 hits the SIGSTOP, but we're already attached. */
396
97438e3f 397 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
398
399 if (ret == -1)
400 perror_with_name ("waiting for new child");
401 else if (ret != new_pid)
402 warning ("wait returned unexpected PID %d", ret);
da5898ce 403 else if (!WIFSTOPPED (status))
24a09b5f
DJ
404 warning ("wait returned unexpected status 0x%x", status);
405 }
406
95954743
PA
407 ptid = ptid_build (pid_of (event_child), new_pid, 0);
408 new_lwp = (struct lwp_info *) add_lwp (ptid);
409 add_thread (ptid, new_lwp);
24a09b5f 410
e27d73f6
DE
411 /* Either we're going to immediately resume the new thread
412 or leave it stopped. linux_resume_one_lwp is a nop if it
413 thinks the thread is currently running, so set this first
414 before calling linux_resume_one_lwp. */
415 new_lwp->stopped = 1;
416
bde24c0a
PA
417 /* If we're suspending all threads, leave this one suspended
418 too. */
419 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
420 new_lwp->suspended = 1;
421
da5898ce
DJ
422 /* Normally we will get the pending SIGSTOP. But in some cases
423 we might get another signal delivered to the group first.
f21cc1a2 424 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
425 if (WSTOPSIG (status) == SIGSTOP)
426 {
bde24c0a 427 if (stopping_threads != NOT_STOPPING_THREADS)
d50171e4
PA
428 new_lwp->stop_pc = get_stop_pc (new_lwp);
429 else
e27d73f6 430 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 431 }
24a09b5f 432 else
da5898ce 433 {
54a0b537 434 new_lwp->stop_expected = 1;
d50171e4 435
bde24c0a 436 if (stopping_threads != NOT_STOPPING_THREADS)
da5898ce 437 {
d50171e4 438 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
439 new_lwp->status_pending_p = 1;
440 new_lwp->status_pending = status;
da5898ce
DJ
441 }
442 else
443 /* Pass the signal on. This is what GDB does - except
444 shouldn't we really report it instead? */
e27d73f6 445 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 446 }
24a09b5f
DJ
447
448 /* Always resume the current thread. If we are stopping
449 threads, it will have a pending SIGSTOP; we may as well
450 collect it now. */
2acc282a 451 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
452 }
453}
454
d50171e4
PA
455/* Return the PC as read from the regcache of LWP, without any
456 adjustment. */
457
458static CORE_ADDR
459get_pc (struct lwp_info *lwp)
460{
461 struct thread_info *saved_inferior;
462 struct regcache *regcache;
463 CORE_ADDR pc;
464
465 if (the_low_target.get_pc == NULL)
466 return 0;
467
468 saved_inferior = current_inferior;
469 current_inferior = get_lwp_thread (lwp);
470
471 regcache = get_thread_regcache (current_inferior, 1);
472 pc = (*the_low_target.get_pc) (regcache);
473
474 if (debug_threads)
475 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
476
477 current_inferior = saved_inferior;
478 return pc;
479}
480
481/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
482 The SIGTRAP could mean several things.
483
484 On i386, where decr_pc_after_break is non-zero:
485 If we were single-stepping this process using PTRACE_SINGLESTEP,
486 we will get only the one SIGTRAP (even if the instruction we
487 stepped over was a breakpoint). The value of $eip will be the
488 next instruction.
489 If we continue the process using PTRACE_CONT, we will get a
490 SIGTRAP when we hit a breakpoint. The value of $eip will be
491 the instruction after the breakpoint (i.e. needs to be
492 decremented). If we report the SIGTRAP to GDB, we must also
493 report the undecremented PC. If we cancel the SIGTRAP, we
494 must resume at the decremented PC.
495
496 (Presumably, not yet tested) On a non-decr_pc_after_break machine
497 with hardware or kernel single-step:
498 If we single-step over a breakpoint instruction, our PC will
499 point at the following instruction. If we continue and hit a
500 breakpoint instruction, our PC will point at the breakpoint
501 instruction. */
502
static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* If this stop was a plain software-breakpoint SIGTRAP -- not a
     single-step, not a watchpoint hit, and not an extended ptrace
     event (high 16 bits of last_status zero) -- undo the PC advance
     caused by executing the breakpoint instruction.  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
ce3a066d 524
0d62e5e8 525static void *
95954743 526add_lwp (ptid_t ptid)
611cb4a5 527{
54a0b537 528 struct lwp_info *lwp;
0d62e5e8 529
54a0b537
PA
530 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
531 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 532
95954743 533 lwp->head.id = ptid;
0d62e5e8 534
aa5ca48f
DE
535 if (the_low_target.new_thread != NULL)
536 lwp->arch_private = the_low_target.new_thread ();
537
54a0b537 538 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 539
54a0b537 540 return lwp;
0d62e5e8 541}
611cb4a5 542
da6d8c04
DJ
543/* Start an inferior process and returns its pid.
544 ALLARGS is a vector of program-name and args. */
545
ce3a066d
DJ
static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  /* If requested, disable address space randomization for the
     inferior by setting ADDR_NO_RANDOMIZE on our own personality;
     the child inherits it across fork/exec and we restore our own
     setting below.  */
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

  /* MMU-less uClibc systems have no fork; use vfork instead.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: arrange to be traced, then exec PROGRAM.  */
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try the name as given first; fall back to a PATH search only
	 if it does not exist as given.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      /* Exec failed; report it and exit with a distinctive status.  */
      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  /* Parent: restore our own personality if we changed it above.  */
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  /* The initial thread's LWP id equals the process id.  */
  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* Ptrace options for this LWP still need setting -- presumably
     done once it first stops; confirm at must_set_ptrace_flags use
     sites.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
636
637/* Attach to an inferior process. */
638
95954743
PA
static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      /* NOTE(review): the buffer_* calls below may clobber errno
	 before it is read in the error call -- consider saving errno
	 right after the failed ptrace; verify.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
752
95954743
PA
/* Attach to the already-running LWP identified by LWPID.  This is a
   non-initial attach: failures produce a warning, not a fatal
   error (see linux_attach_lwp_1).  */
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
758
e3deef73
LM
759/* Attach to PID. If PID is the tgid, attach to it and all
760 of its threads. */
761
static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* Only scan for additional threads when PID is a thread-group
     leader; otherwise we attached to a single LWP only.  */
  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  (Entries "." and ".." parse
		     to zero and are skipped.)  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}
841
/* Helper state for second_thread_of_pid_p: the process being matched
   and how many of its threads have been seen so far.  */

struct counter
{
  /* Process (tgid) to match.  */
  int pid;
  /* Number of matching threads seen so far.  */
  int count;
};
847
848static int
849second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
850{
851 struct counter *counter = args;
852
853 if (ptid_get_pid (entry->id) == counter->pid)
854 {
855 if (++counter->count > 1)
856 return 1;
857 }
d61ddec4 858
da6d8c04
DJ
859 return 0;
860}
861
95954743
PA
862static int
863last_thread_of_process_p (struct thread_info *thread)
864{
865 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
866 int pid = ptid_get_pid (ptid);
867 struct counter counter = { pid , 0 };
da6d8c04 868
95954743
PA
869 return (find_inferior (&all_threads,
870 second_thread_of_pid_p, &counter) == NULL);
871}
872
da84f473
PA
/* Kill LWP, by sending it SIGKILL and then also PTRACE_KILL.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}
910
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Ignore threads belonging to other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  /* An LWP whose id equals the process id is the thread-group
     leader; skip it (linux_kill takes care of it last).  */
  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.
	 (PID is reused here to hold the wait result.)  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
948
95954743
PA
949static int
950linux_kill (int pid)
0d62e5e8 951{
95954743 952 struct process_info *process;
54a0b537 953 struct lwp_info *lwp;
fd500816 954 int wstat;
95954743 955 int lwpid;
fd500816 956
95954743
PA
957 process = find_process_pid (pid);
958 if (process == NULL)
959 return -1;
9d606399 960
f9e39928
PA
961 /* If we're killing a running inferior, make sure it is stopped
962 first, as PTRACE_KILL will not work otherwise. */
7984d532 963 stop_all_lwps (0, NULL);
f9e39928 964
da84f473 965 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 966
54a0b537 967 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 968 thread in the list, so do so now. */
95954743 969 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 970
784867a5 971 if (lwp == NULL)
fd500816 972 {
784867a5
JK
973 if (debug_threads)
974 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
975 lwpid_of (lwp), pid);
976 }
977 else
978 {
979 if (debug_threads)
980 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
981 lwpid_of (lwp), pid);
fd500816 982
784867a5
JK
983 do
984 {
da84f473 985 linux_kill_one_lwp (lwp);
784867a5
JK
986
987 /* Make sure it died. The loop is most likely unnecessary. */
988 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
989 } while (lwpid > 0 && WIFSTOPPED (wstat));
990 }
2d717e4f 991
8336d594 992 the_target->mourn (process);
f9e39928
PA
993
994 /* Since we presently can only stop all lwps of all processes, we
995 need to unstop lwps of other processes. */
7984d532 996 unstop_all_lwps (0, NULL);
95954743 997 return 0;
0d62e5e8
DJ
998}
999
9b224c5e
PA
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.

   Returns the host signal number to re-deliver on detach, or 0 if no
   signal should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Prefer a not-yet-reported pending status; otherwise fall back to
     the status of the last stop we saw.  */
  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s hasn't stopped: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had stopped with extended "
		 "status: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's pass/nopass settings when we know them.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, but it is in nopass state\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, "
		 "but we don't know if we should pass it.  Default to not.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s has pending signal %s: delivering it.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));

      /* Return the raw host signal number, suitable for ptrace.  */
      return WSTOPSIG (status);
    }
}
1083
95954743
PA
/* Callback for find_inferior: detach a single LWP belonging to
   process *ARGS (a pid).  Clears any pending SIGSTOP, flushes the
   register cache, and detaches with the LWP's pending signal (if any)
   so it isn't lost.  Always returns 0 so iteration continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  /* Skip LWPs of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Sending SIGCONT to %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  /* PTRACE_DETACH delivers SIG to the LWP as it resumes.  */
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (lwp)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1125
95954743
PA
/* Detach from process PID and all of its LWPs.  Returns 0 on
   success, -1 if the process is unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to sucessfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach each LWP of this process individually.  */
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1157
/* Remove all LWPs that belong to process PROC from the lwp list.
   find_inferior callback; always returns 0 to keep iterating.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct process_info *owner = proc;
  struct lwp_info *candidate = (struct lwp_info *) entry;

  /* Leave LWPs of other processes alone.  */
  if (pid_of (candidate) != pid_of (owner))
    return 0;

  delete_lwp (candidate);
  return 0;
}
1171
8336d594
PA
/* Clean up after PROCESS has exited or been detached: drop its LWPs,
   free its private (and arch-private) data, and remove it from the
   process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  /* Delete every LWP belonging to PROCESS.  */
  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
1191
/* Wait until process PID is fully gone, reaping its exit status so
   it doesn't linger as a zombie.  Loops until waitpid reports that
   PID exited or was killed by a signal, or fails with ECHILD.

   Fix: only inspect STATUS after a successful waitpid.  Previously
   WIFEXITED/WIFSIGNALED were evaluated even when my_waitpid returned
   -1, reading an indeterminate (never-written) STATUS.  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1203
6ad8ae5c 1204/* Return nonzero if the given thread is still alive. */
0d62e5e8 1205static int
95954743 1206linux_thread_alive (ptid_t ptid)
0d62e5e8 1207{
95954743
PA
1208 struct lwp_info *lwp = find_lwp_pid (ptid);
1209
1210 /* We assume we always know if a thread exits. If a whole process
1211 exited but we still haven't been able to report it to GDB, we'll
1212 hold on to the last lwp of the dead process. */
1213 if (lwp != NULL)
1214 return !lwp->dead;
0d62e5e8
DJ
1215 else
1216 return 0;
1217}
1218
/* Return 1 if this lwp has an interesting status pending.
   find_inferior callback; ARG is the ptid filter (minus_one_ptid
   matches everything).  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
1243
95954743
PA
1244static int
1245same_lwp (struct inferior_list_entry *entry, void *data)
1246{
1247 ptid_t ptid = *(ptid_t *) data;
1248 int lwp;
1249
1250 if (ptid_get_lwp (ptid) != 0)
1251 lwp = ptid_get_lwp (ptid);
1252 else
1253 lwp = ptid_get_pid (ptid);
1254
1255 if (ptid_get_lwp (entry->id) == lwp)
1256 return 1;
1257
1258 return 0;
1259}
1260
1261struct lwp_info *
1262find_lwp_pid (ptid_t ptid)
1263{
1264 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1265}
1266
/* Do one waitpid round for PTID (or any child if PTID is
   minus_one_ptid), storing the raw wait status through WSTATP.
   OPTIONS is ORed with __WALL and passed to waitpid.  Returns the
   lwp_info of the child that reported an event, or NULL when WNOHANG
   was set and nothing was pending.  Also performs first-stop
   arch setup, caches watchpoint hit data, and records the stop PC.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Suppress logging for stops with signals 32/33 (NOTE(review):
     presumably the threading library's internal signals -- confirm).  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  if (WIFSTOPPED (*wstatp))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (child));
      if (proc->private->new_inferior)
	{
	  struct thread_info *saved_inferior;

	  /* arch_setup reads from the current inferior, so switch to
	     CHILD temporarily.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  the_low_target.arch_setup ();

	  current_inferior = saved_inferior;

	  proc->private->new_inferior = 0;
	}
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1409
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Undo the temporary suspension taken above.  */
  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1455
fa593d66
PA
1456/* Convenience wrapper. Returns true if LWP is presently collecting a
1457 fast tracepoint. */
1458
1459static int
1460linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1461 struct fast_tpoint_collect_status *status)
1462{
1463 CORE_ADDR thread_area;
1464
1465 if (the_low_target.get_thread_area == NULL)
1466 return 0;
1467
1468 /* Get the thread area address. This is used to recognize which
1469 thread is which when tracing with the in-process agent library.
1470 We don't read anything from the address, and treat it as opaque;
1471 it's the address itself that we assume is unique per-thread. */
1472 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1473 return 0;
1474
1475 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1476}
1477
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.

   Returns 1 if LWP must be left to finish moving out of the
   fast-tracepoint jump pad (caller should resume it), 0 otherwise.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      /* Restore the selected inferior before returning.  */
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      /* Removing the breakpoint needs all threads stopped.  */
	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}
1606
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  WSTAT is the raw wait status carrying the signal;
   the current siginfo is captured via PTRACE_GETSIGINFO so it can be
   re-injected later.  Non-RT signals already queued are not queued
   twice.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  /* Push a new entry on the (singly linked, prev-chained) list.  */
  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1666
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  The OLDEST queued signal (tail of the
   prev-chain) is taken first.  Stores a synthetic stop status in
   *WSTAT, restores the saved siginfo via PTRACE_SETSIGINFO, and
   returns 1; returns 0 when the queue is empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* Walk to the tail of the chain -- the oldest deferred signal.  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
1711
d50171e4
PA
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.

   Returns 1 if a breakpoint was found at LWP's stop PC (and the PC
   was pushed back over it), 0 otherwise.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}
1760
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;
1764
bd99dc85
PA
/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      /* A pending status from inside the jump pad must be deferred
	 and the child resumed so it can move out first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      /* Consume the pending status and report it.  */
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      /* Event from a different process than requested: stash it as
	 pending (or mark the lwp dead) and keep waiting.  */
      if (ptid_is_pid (ptid)
	  && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
	{
	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	  continue;
	}

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (1, event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      if (event_child->must_set_ptrace_flags)
	{
	  linux_enable_event_reporting (lwpid_of (event_child));
	  event_child->must_set_ptrace_flags = 0;
	}

      /* SIGTRAP with a nonzero high half of the status is a ptrace
	 extended event (fork/clone/exec reporting).  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* A SIGSTOP we ourselves requested: swallow it and resume,
	 unless a stop was actually wanted.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (current_inferior->last_resume_kind == resume_stop
			 || stopping_threads != NOT_STOPPING_THREADS);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1957
6bf5e0ba
PA
1958/* Count the LWP's that have had events. */
1959
1960static int
1961count_events_callback (struct inferior_list_entry *entry, void *data)
1962{
1963 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1964 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1965 int *count = data;
1966
1967 gdb_assert (count != NULL);
1968
1969 /* Count only resumed LWPs that have a SIGTRAP event pending that
1970 should be reported to GDB. */
8336d594
PA
1971 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1972 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1973 && lp->status_pending_p
1974 && WIFSTOPPED (lp->status_pending)
1975 && WSTOPSIG (lp->status_pending) == SIGTRAP
1976 && !breakpoint_inserted_here (lp->stop_pc))
1977 (*count)++;
1978
1979 return 0;
1980}
1981
1982/* Select the LWP (if any) that is currently being single-stepped. */
1983
1984static int
1985select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1986{
1987 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1988 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1989
8336d594
PA
1990 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1991 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1992 && lp->status_pending_p)
1993 return 1;
1994 else
1995 return 0;
1996}
1997
1998/* Select the Nth LWP that has had a SIGTRAP event that should be
1999 reported to GDB. */
2000
2001static int
2002select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2003{
2004 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 2005 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
2006 int *selector = data;
2007
2008 gdb_assert (selector != NULL);
2009
2010 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
2011 if (thread->last_resume_kind != resume_stop
2012 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
2013 && lp->status_pending_p
2014 && WIFSTOPPED (lp->status_pending)
2015 && WSTOPSIG (lp->status_pending) == SIGTRAP
2016 && !breakpoint_inserted_here (lp->stop_pc))
2017 if ((*selector)-- == 0)
2018 return 1;
2019
2020 return 0;
2021}
2022
2023static int
2024cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2025{
2026 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 2027 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
2028 struct lwp_info *event_lp = data;
2029
2030 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2031 if (lp == event_lp)
2032 return 0;
2033
2034 /* If a LWP other than the LWP that we're reporting an event for has
2035 hit a GDB breakpoint (as opposed to some random trap signal),
2036 then just arrange for it to hit it again later. We don't keep
2037 the SIGTRAP status and don't forward the SIGTRAP signal to the
2038 LWP. We will handle the current event, eventually we will resume
2039 all LWPs, and this one will get its breakpoint trap again.
2040
2041 If we do not do this, then we run the risk that the user will
2042 delete or disable the breakpoint, but the LWP will have already
2043 tripped on it. */
2044
8336d594
PA
2045 if (thread->last_resume_kind != resume_stop
2046 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
2047 && lp->status_pending_p
2048 && WIFSTOPPED (lp->status_pending)
2049 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
2050 && !lp->stepping
2051 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
2052 && cancel_breakpoint (lp))
2053 /* Throw away the SIGTRAP. */
2054 lp->status_pending_p = 0;
2055
2056 return 0;
2057}
2058
7984d532
PA
/* Cancel pending breakpoint hits in every LWP; see
   cancel_breakpoints_callback for the rationale.  */

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}
2064
6bf5e0ba
PA
2065/* Select one LWP out of those that have events pending. */
2066
2067static void
2068select_event_lwp (struct lwp_info **orig_lp)
2069{
2070 int num_events = 0;
2071 int random_selector;
2072 struct lwp_info *event_lp;
2073
2074 /* Give preference to any LWP that is being single-stepped. */
2075 event_lp
2076 = (struct lwp_info *) find_inferior (&all_lwps,
2077 select_singlestep_lwp_callback, NULL);
2078 if (event_lp != NULL)
2079 {
2080 if (debug_threads)
2081 fprintf (stderr,
2082 "SEL: Select single-step %s\n",
2083 target_pid_to_str (ptid_of (event_lp)));
2084 }
2085 else
2086 {
2087 /* No single-stepping LWP. Select one at random, out of those
2088 which have had SIGTRAP events. */
2089
2090 /* First see how many SIGTRAP events we have. */
2091 find_inferior (&all_lwps, count_events_callback, &num_events);
2092
2093 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2094 random_selector = (int)
2095 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2096
2097 if (debug_threads && num_events > 1)
2098 fprintf (stderr,
2099 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2100 num_events, random_selector);
2101
2102 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2103 select_event_lwp_callback,
2104 &random_selector);
2105 }
2106
2107 if (event_lp != NULL)
2108 {
2109 /* Switch the event LWP. */
2110 *orig_lp = event_lp;
2111 }
2112}
2113
7984d532
PA
2114/* Decrement the suspend count of an LWP. */
2115
2116static int
2117unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2118{
2119 struct lwp_info *lwp = (struct lwp_info *) entry;
2120
2121 /* Ignore EXCEPT. */
2122 if (lwp == except)
2123 return 0;
2124
2125 lwp->suspended--;
2126
2127 gdb_assert (lwp->suspended >= 0);
2128 return 0;
2129}
2130
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  Undoes a prior suspend_and_send_sigstop_callback walk.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}
2139
fa593d66
PA
2140static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2141static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2142 void *data);
2143static int lwp_running (struct inferior_list_entry *entry, void *data);
2144static ptid_t linux_wait_1 (ptid_t ptid,
2145 struct target_waitstatus *ourstatus,
2146 int target_options);
2147
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  /* If any LWP is wedged in a jump pad (e.g. blocked on an exit-pad
     breakpoint we can't move it past), stabilization cannot proceed.  */
  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
					 stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_inferior);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until the loop below is done; undone further down.  */
	  lwp->suspended++;

	  /* Any real signal (or an explicit stop request) is queued
	     for later delivery rather than reported mid-stabilize.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_inferior->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Release the suspend counts taken in the loop above.  */
  find_inferior (&all_lwps, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_inferior = save_inferior;

  if (debug_threads)
    {
      /* Sanity check: nothing should be left stuck in a jump pad.  */
      lwp_stuck
	= (struct lwp_info *) find_inferior (&all_lwps,
					     stuck_in_jump_pad_callback, NULL);
      if (lwp_stuck != NULL)
	fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
    }
}
2246
/* Wait for process, returns status.

   Core of the linux_wait target op: wait for an event from the LWPs
   selected by PTID (minus_one_ptid means "any"), decide whether GDB
   should see it, and either report it (filling *OURSTATUS and
   returning the event LWP's ptid) or quietly handle it (internal
   breakpoints, ignored signals, tracepoint collection) and loop back
   via the `retry' label.  TARGET_OPTIONS may contain TARGET_WNOHANG,
   in which case null_ptid is returned when no event is pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, wait only for the stepping LWP,
     and always block (mask out WNOHANG) until it finishes.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild exited with retcode = %x \n",
			 WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild terminated with signal = %x \n",
			 WTERMSIG (w));

	    }

	  return ptid_of (event_child);
	}
    }
  else
    {
      /* A non-last thread exited; don't report, wait for more.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "Got signal %d for LWP %ld. Check if we need "
		 "to defer or adjust it.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Allow debugging the jump pad itself.  */
      if (current_inferior->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred (in jump pad)\n",
		     WSTOPSIG (w), lwpid_of (event_child));

	  linux_resume_one_lwp (event_child, 0, 0, NULL);
	  goto retry;
	}
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
	fprintf (stderr, "\
LWP %ld was trying to move out of the jump pad (%d). \
Check if we're already there.\n",
		 lwpid_of (event_child),
		 event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint != 1)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "No longer need exit-jump-pad bkpt; removing it."
			 "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);
	      cancel_breakpoints ();

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint == 0)
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "fast tracepoint finished "
		     "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		fprintf (stderr, "dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		fprintf (stderr, "no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;
		  return ptid_of (event_child);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Re-deliver the signal with its original siginfo, if we can
	 fetch it; otherwise deliver the bare signal number.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_inferior->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !in_step_range
		       && !bp_explains_trap && !trace_event)
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc)));

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
		     paddress (event_child->stop_pc),
		     paddress (event_child->step_range_start),
		     paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    fprintf (stderr, "Out of step range, reporting event.\n");
	}
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop && !stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);

      /* If we were going a step-over, all other threads but the stepping one
	 had been paused in start_step_over, with their suspend counts
	 incremented.  We don't want to do a full unstop/unpause, because we're
	 in all-stop mode (so we want threads stopped), but we still need to
	 unsuspend the other threads, to decrement their `suspended' count
	 back.  */
      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      /* Stabilize threads (move out of jump pads).  */
      stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return ptid_of (event_child);
}
2736
2737/* Get rid of any pending event in the pipe. */
2738static void
2739async_file_flush (void)
2740{
2741 int ret;
2742 char buf;
2743
2744 do
2745 ret = read (linux_event_pipe[0], &buf, 1);
2746 while (ret >= 0 || (ret == -1 && errno == EINTR));
2747}
2748
2749/* Put something in the pipe, so the event loop wakes up. */
2750static void
2751async_file_mark (void)
2752{
2753 int ret;
2754
2755 async_file_flush ();
2756
2757 do
2758 ret = write (linux_event_pipe[1], "+", 1);
2759 while (ret == 0 || (ret == -1 && errno == EINTR));
2760
2761 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2762 be awakened anyway. */
2763}
2764
95954743
PA
/* The linux target's to_wait entry point: wrap linux_wait_1, keeping
   the async event pipe in sync.  Returns the ptid of the LWP whose
   event is reported in *OURSTATUS, or null_ptid if TARGET_WNOHANG was
   given and nothing was pending.  */

static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
2789
/* Send signal SIGNO to the LWP identified by LWPID.  Returns the
   result of the underlying syscall (0 on success, -1 with errno set
   on failure).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Prefer the thread-directed tkill syscall, needed when the
     inferior uses NPTL threads.  If the kernel lacks tkill (ENOSYS),
     remember that and permanently fall back to kill.  */

#ifdef __NR_tkill
  {
    static int tkill_unavailable;

    if (!tkill_unavailable)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;

	/* tkill is missing on this kernel; never try it again.  */
	tkill_unavailable = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
2817
964e4306
PA
/* Exported hook to request that LWP stop; delegates to
   send_sigstop, which delivers a SIGSTOP unless one is already
   expected.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2823
0d62e5e8 2824static void
02fc4de7 2825send_sigstop (struct lwp_info *lwp)
0d62e5e8 2826{
bd99dc85 2827 int pid;
0d62e5e8 2828
bd99dc85
PA
2829 pid = lwpid_of (lwp);
2830
0d62e5e8
DJ
2831 /* If we already have a pending stop signal for this process, don't
2832 send another. */
54a0b537 2833 if (lwp->stop_expected)
0d62e5e8 2834 {
ae13219e 2835 if (debug_threads)
bd99dc85 2836 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2837
0d62e5e8
DJ
2838 return;
2839 }
2840
2841 if (debug_threads)
bd99dc85 2842 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2843
d50171e4 2844 lwp->stop_expected = 1;
bd99dc85 2845 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2846}
2847
7984d532
PA
2848static int
2849send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2850{
2851 struct lwp_info *lwp = (struct lwp_info *) entry;
2852
7984d532
PA
2853 /* Ignore EXCEPT. */
2854 if (lwp == except)
2855 return 0;
2856
02fc4de7 2857 if (lwp->stopped)
7984d532 2858 return 0;
02fc4de7
PA
2859
2860 send_sigstop (lwp);
7984d532
PA
2861 return 0;
2862}
2863
2864/* Increment the suspend count of an LWP, and stop it, if not stopped
2865 yet. */
2866static int
2867suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2868 void *except)
2869{
2870 struct lwp_info *lwp = (struct lwp_info *) entry;
2871
2872 /* Ignore EXCEPT. */
2873 if (lwp == except)
2874 return 0;
2875
2876 lwp->suspended++;
2877
2878 return send_sigstop_callback (entry, except);
02fc4de7
PA
2879}
2880
95954743
PA
/* Record that LWP has died with wait status WSTAT, so the rest of the
   machinery stops touching it and its exit status can be reported
   later.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* It's dead, really.  */
  lwp->dead = 1;

  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
2897
/* Wait until the LWP in ENTRY reports a stop, pulling one event with
   linux_wait_for_event.  If it stopped with a signal other than
   SIGSTOP, that status is recorded as the LWP's pending status.  If
   the process exited instead, the LWP is marked dead with the exit
   status left pending.  Restores (or repairs) current_inferior
   before returning.  */

static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  /* Nothing to wait for if the LWP has already stopped.  */
  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* linux_wait_for_event may switch current_inferior; remember what
     to restore afterwards.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Stash the signal; it will be reported to GDB later.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  /* Restore the previously current thread, unless it died while we
     were waiting.  */
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2988
fa593d66
PA
2989/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2990 move it out, because we need to report the stop event to GDB. For
2991 example, if the user puts a breakpoint in the jump pad, it's
2992 because she wants to debug it. */
2993
2994static int
2995stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2996{
2997 struct lwp_info *lwp = (struct lwp_info *) entry;
2998 struct thread_info *thread = get_lwp_thread (lwp);
2999
3000 gdb_assert (lwp->suspended == 0);
3001 gdb_assert (lwp->stopped);
3002
3003 /* Allow debugging the jump pad, gdb_collect, etc.. */
3004 return (supports_fast_tracepoints ()
58b4daa5 3005 && agent_loaded_p ()
fa593d66
PA
3006 && (gdb_breakpoint_here (lwp->stop_pc)
3007 || lwp->stopped_by_watchpoint
3008 || thread->last_resume_kind == resume_step)
3009 && linux_fast_tracepoint_collecting (lwp, NULL));
3010}
3011
/* for_each_inferior callback: if the stopped LWP in ENTRY is inside
   a fast tracepoint jump pad (and GDB does not want to debug the pad
   itself), defer its pending signal, if any, and resume it so it can
   escape the pad; otherwise leave it stopped and suspended.  */

static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	fprintf (stderr,
		 "LWP %ld needs stabilizing (in jump pad)\n",
		 lwpid_of (lwp));

      if (wstat)
	{
	  /* Defer the pending signal; it is re-reported after the
	     LWP has left the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred "
		     "(in jump pad)\n",
		     WSTOPSIG (*wstat), lwpid_of (lwp));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
3052
3053static int
3054lwp_running (struct inferior_list_entry *entry, void *data)
3055{
3056 struct lwp_info *lwp = (struct lwp_info *) entry;
3057
3058 if (lwp->dead)
3059 return 0;
3060 if (lwp->stopped)
3061 return 0;
3062 return 1;
3063}
3064
7984d532
PA
3065/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3066 If SUSPEND, then also increase the suspend count of every LWP,
3067 except EXCEPT. */
3068
0d62e5e8 3069static void
7984d532 3070stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3071{
bde24c0a
PA
3072 /* Should not be called recursively. */
3073 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3074
3075 stopping_threads = (suspend
3076 ? STOPPING_AND_SUSPENDING_THREADS
3077 : STOPPING_THREADS);
7984d532
PA
3078
3079 if (suspend)
3080 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3081 else
3082 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 3083 for_each_inferior (&all_lwps, wait_for_sigstop);
bde24c0a 3084 stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
3085}
3086
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.
   INFO, if non-NULL, is the siginfo to deliver along with SIGNAL;
   it is copied if the signal has to be queued for later.
   Does nothing if the LWP is not stopped, or if it has a pending
   status that must be reported first.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;
  int fast_tp_collecting;

  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before the LWP can run again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "  pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
		 lwpid_of (lwp));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
		 lwpid_of (lwp));

      if (can_hardware_single_step ())
	step = 1;
      else
	fatal ("moving out of jump pad single-stepping"
	       " not implemented on this target");

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (tail of the list).  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* The inferior will change its registers, so flush our cache.  */
  regcache_invalidate_thread (get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
3290
/* A resume-request array and its length, bundled so the pair can be
   passed through the single void * argument of find_inferior (see
   linux_set_resume_request).  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests from GDB.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31
DJ
3296
3297/* This function is called once per thread. We look up the thread
5544ad89
DJ
3298 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3299 resume request.
3300
3301 This algorithm is O(threads * resume elements), but resume elements
3302 is small (and will remain small at least until GDB supports thread
3303 suspension). */
2bd7c093
PA
3304static int
3305linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3306{
54a0b537 3307 struct lwp_info *lwp;
64386c31 3308 struct thread_info *thread;
5544ad89 3309 int ndx;
2bd7c093 3310 struct thread_resume_array *r;
64386c31
DJ
3311
3312 thread = (struct thread_info *) entry;
54a0b537 3313 lwp = get_thread_lwp (thread);
2bd7c093 3314 r = arg;
64386c31 3315
2bd7c093 3316 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3317 {
3318 ptid_t ptid = r->resume[ndx].thread;
3319 if (ptid_equal (ptid, minus_one_ptid)
3320 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
3321 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3322 of PID'. */
3323 || (ptid_get_pid (ptid) == pid_of (lwp)
3324 && (ptid_is_pid (ptid)
3325 || ptid_get_lwp (ptid) == -1)))
95954743 3326 {
d50171e4 3327 if (r->resume[ndx].kind == resume_stop
8336d594 3328 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3329 {
3330 if (debug_threads)
3331 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3332 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3333 ? "stopped"
3334 : "stopping",
3335 lwpid_of (lwp));
3336
3337 continue;
3338 }
3339
95954743 3340 lwp->resume = &r->resume[ndx];
8336d594 3341 thread->last_resume_kind = lwp->resume->kind;
fa593d66 3342
c2d6af84
PA
3343 lwp->step_range_start = lwp->resume->step_range_start;
3344 lwp->step_range_end = lwp->resume->step_range_end;
3345
fa593d66
PA
3346 /* If we had a deferred signal to report, dequeue one now.
3347 This can happen if LWP gets more than one signal while
3348 trying to get out of a jump pad. */
3349 if (lwp->stopped
3350 && !lwp->status_pending_p
3351 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3352 {
3353 lwp->status_pending_p = 1;
3354
3355 if (debug_threads)
3356 fprintf (stderr,
3357 "Dequeueing deferred signal %d for LWP %ld, "
3358 "leaving status pending.\n",
3359 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3360 }
3361
95954743
PA
3362 return 0;
3363 }
3364 }
2bd7c093
PA
3365
3366 /* No resume action for this thread. */
3367 lwp->resume = NULL;
64386c31 3368
2bd7c093 3369 return 0;
5544ad89
DJ
3370}
3371
5544ad89 3372
bd99dc85
PA
3373/* Set *FLAG_P if this lwp has an interesting status pending. */
3374static int
3375resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3376{
bd99dc85 3377 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3378
bd99dc85
PA
3379 /* LWPs which will not be resumed are not interesting, because
3380 we might not wait for them next time through linux_wait. */
2bd7c093 3381 if (lwp->resume == NULL)
bd99dc85 3382 return 0;
64386c31 3383
bd99dc85 3384 if (lwp->status_pending_p)
d50171e4
PA
3385 * (int *) flag_p = 1;
3386
3387 return 0;
3388}
3389
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Returning 1 stops the find_inferior walk at
   this LWP.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, suspended\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* The breakpoint queries below operate on the current thread.  */
  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, "
		     "found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
3523
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld.  Stopping all threads\n",
	     lwpid_of (lwp));

  /* Stop and suspend every other LWP for the duration.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Remember where the breakpoint goes back in; finish_step_over
     reinserts it there.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
3592
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was actually in progress (LWP->BP_REINSERT
   was set), 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	delete_reinsert_breakpoints ();

      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}
3626
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No resume request for this thread (see linux_set_resume_request).  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
3751
/* Resume (or stop) threads according to RESUME_INFO, an array of N
   resume requests from GDB.  In all-stop mode, if any to-be-resumed
   LWP already has a status pending, or needs a step-over, all threads
   are left stopped; queued signals are still recorded either way.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach each thread to its matching resume request, if any.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
3804
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  /* EXCEPT is left alone (e.g., the LWP doing a step-over).  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, "   client wants LWP to remain %ld stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
3890
3891static int
3892unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3893{
3894 struct lwp_info *lwp = (struct lwp_info *) entry;
3895
3896 if (lwp == except)
3897 return 0;
3898
3899 lwp->suspended--;
3900 gdb_assert (lwp->suspended >= 0);
3901
3902 return proceed_one_lwp (entry, except);
d50171e4
PA
3903}
3904
3905/* When we finish a step-over, set threads running again. If there's
3906 another thread that may need a step-over, now's the time to start
3907 it. Eventually, we'll move all threads past their breakpoints. */
3908
3909static void
3910proceed_all_lwps (void)
3911{
3912 struct lwp_info *need_step_over;
3913
3914 /* If there is a thread which would otherwise be resumed, which is
3915 stopped at a breakpoint that needs stepping over, then don't
3916 resume any threads - have it step over the breakpoint with all
3917 other threads stopped, then resume all threads again. */
3918
3919 if (supports_breakpoints ())
3920 {
3921 need_step_over
3922 = (struct lwp_info *) find_inferior (&all_lwps,
3923 need_step_over_p, NULL);
3924
3925 if (need_step_over != NULL)
3926 {
3927 if (debug_threads)
3928 fprintf (stderr, "proceed_all_lwps: found "
3929 "thread %ld needing a step-over\n",
3930 lwpid_of (need_step_over));
3931
3932 start_step_over (need_step_over);
3933 return;
3934 }
3935 }
5544ad89 3936
d50171e4
PA
3937 if (debug_threads)
3938 fprintf (stderr, "Proceeding, no step-over needed\n");
3939
7984d532 3940 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3941}
3942
3943/* Stopped LWPs that the client wanted to be running, that don't have
3944 pending statuses, are set to run again, except for EXCEPT, if not
3945 NULL. This undoes a stop_all_lwps call. */
3946
3947static void
7984d532 3948unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3949{
5544ad89
DJ
3950 if (debug_threads)
3951 {
d50171e4
PA
3952 if (except)
3953 fprintf (stderr,
3954 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3955 else
d50171e4
PA
3956 fprintf (stderr,
3957 "unstopping all lwps\n");
5544ad89
DJ
3958 }
3959
7984d532
PA
3960 if (unsuspend)
3961 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3962 else
3963 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3964}
3965
58caa3dc
DJ
3966
3967#ifdef HAVE_LINUX_REGSETS
3968
1faeff08
MR
3969#define use_linux_regsets 1
3970
030031ee
PA
3971/* Returns true if REGSET has been disabled. */
3972
3973static int
3974regset_disabled (struct regsets_info *info, struct regset_info *regset)
3975{
3976 return (info->disabled_regsets != NULL
3977 && info->disabled_regsets[regset - info->regsets]);
3978}
3979
3980/* Disable REGSET. */
3981
3982static void
3983disable_regset (struct regsets_info *info, struct regset_info *regset)
3984{
3985 int dr_offset;
3986
3987 dr_offset = regset - info->regsets;
3988 if (info->disabled_regsets == NULL)
3989 info->disabled_regsets = xcalloc (1, info->num_regsets);
3990 info->disabled_regsets[dr_offset] = 1;
3991}
3992
/* Fetch every enabled regset in REGSETS_INFO from the current
   inferior into REGCACHE.  Returns 0 if a GENERAL_REGS regset was
   transferred, 1 otherwise (so the caller knows whether it must fall
   back to fetching general registers another way).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* The regsets array is terminated by an entry with a negative
     size.  */
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip zero-sized entries and regsets already disabled (EIO on
	 a previous attempt).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* NT_-typed regsets use PTRACE_GETREGSET with an iovec; the
	 others pass the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the 3rd and 4th argument roles.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      /* NOTE(review): the store_function is also called on the error
	 path (non-EIO ptrace failure), on whatever is in BUF — this
	 mirrors the upstream code; confirm it is intentional.  */
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4063
/* Write every enabled regset in REGSETS_INFO from REGCACHE back to
   the current inferior.  Returns 0 if a GENERAL_REGS regset was
   written (or the process vanished mid-write), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* The regsets array is terminated by an entry with a negative
     size.  */
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	      free (buf);
	      continue;
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4158
1faeff08 4159#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4160
1faeff08 4161#define use_linux_regsets 0
3aee8918
PA
4162#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4163#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4164
58caa3dc 4165#endif
1faeff08
MR
4166
4167/* Return 1 if register REGNO is supported by one of the regset ptrace
4168 calls or 0 if it has to be transferred individually. */
4169
4170static int
3aee8918 4171linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4172{
4173 unsigned char mask = 1 << (regno % 8);
4174 size_t index = regno / 8;
4175
4176 return (use_linux_regsets
3aee8918
PA
4177 && (regs_info->regset_bitmap == NULL
4178 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4179}
4180
58caa3dc 4181#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4182
4183int
3aee8918 4184register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4185{
4186 int addr;
4187
3aee8918 4188 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4189 error ("Invalid register number %d.", regnum);
4190
3aee8918 4191 addr = usrregs->regmap[regnum];
1faeff08
MR
4192
4193 return addr;
4194}
4195
/* Fetch one register, REGNO, from the inferior with PTRACE_PEEKUSER,
   one PTRACE_XFER_TYPE word at a time, and supply it to REGCACHE.
   Silently returns for registers the low target cannot fetch or that
   have no USER-area address.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PEEKUSER returns the data in the ptrace return value, so
	 errors are detected via errno.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4239
/* Store one register, REGNO, from REGCACHE into the inferior with
   PTRACE_POKEUSER, one PTRACE_XFER_TYPE word at a time.  Silently
   returns for registers the low target cannot store or that have no
   USER-area address; also silently ignores ESRCH (process gone).  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words;
     zero-fill so padding bytes are deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target massage the raw bytes if it needs to.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4294
4295/* Fetch all registers, or just one, from the child process.
4296 If REGNO is -1, do this for all registers, skipping any that are
4297 assumed to have been retrieved by regsets_fetch_inferior_registers,
4298 unless ALL is non-zero.
4299 Otherwise, REGNO specifies which register (so we can save time). */
4300static void
3aee8918
PA
4301usr_fetch_inferior_registers (const struct regs_info *regs_info,
4302 struct regcache *regcache, int regno, int all)
1faeff08 4303{
3aee8918
PA
4304 struct usrregs_info *usr = regs_info->usrregs;
4305
1faeff08
MR
4306 if (regno == -1)
4307 {
3aee8918
PA
4308 for (regno = 0; regno < usr->num_regs; regno++)
4309 if (all || !linux_register_in_regsets (regs_info, regno))
4310 fetch_register (usr, regcache, regno);
1faeff08
MR
4311 }
4312 else
3aee8918 4313 fetch_register (usr, regcache, regno);
1faeff08
MR
4314}
4315
4316/* Store our register values back into the inferior.
4317 If REGNO is -1, do this for all registers, skipping any that are
4318 assumed to have been saved by regsets_store_inferior_registers,
4319 unless ALL is non-zero.
4320 Otherwise, REGNO specifies which register (so we can save time). */
4321static void
3aee8918
PA
4322usr_store_inferior_registers (const struct regs_info *regs_info,
4323 struct regcache *regcache, int regno, int all)
1faeff08 4324{
3aee8918
PA
4325 struct usrregs_info *usr = regs_info->usrregs;
4326
1faeff08
MR
4327 if (regno == -1)
4328 {
3aee8918
PA
4329 for (regno = 0; regno < usr->num_regs; regno++)
4330 if (all || !linux_register_in_regsets (regs_info, regno))
4331 store_register (usr, regcache, regno);
1faeff08
MR
4332 }
4333 else
3aee8918 4334 store_register (usr, regcache, regno);
1faeff08
MR
4335}
4336
4337#else /* !HAVE_LINUX_USRREGS */
4338
3aee8918
PA
4339#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4340#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4341
58caa3dc 4342#endif
1faeff08
MR
4343
4344
/* Fetch registers (all of them when REGNO is -1, else just REGNO)
   into REGCACHE, preferring regset transfers and falling back to
   per-register USER-area reads for anything the regsets missed.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the arch-specific fetch_register hook first crack at
	 every register, if the target provides one.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* regsets_fetch... returns 1 when no general-regs regset was
	 transferred, in which case usrregs must fetch everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A non-zero return from the hook means it handled REGNO.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4377
/* Store registers (all of them when REGNO is -1, else just REGNO)
   from REGCACHE back to the inferior, preferring regset transfers and
   falling back to per-register USER-area writes.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* regsets_store... returns 1 when no general-regs regset was
	 written, in which case usrregs must store everything.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4402
da6d8c04 4403
da6d8c04
DJ
4404/* Copy LEN bytes from inferior's memory starting at MEMADDR
4405 to debugger memory starting at MYADDR. */
4406
/* Copy LEN bytes from the inferior's memory at MEMADDR to MYADDR.
   Tries /proc/PID/mem first (one syscall), then falls back to
   word-at-a-time PTRACE_PEEKTEXT for whatever /proc could not supply.
   Returns 0 on success, else the errno of the failing ptrace call
   (partial data read before the failure is still copied out).  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords; stop at the first failing word.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer: I words were read
     successfully; adjust for the sub-word offset of MEMADDR within
     the first word.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
4491
93ae6fdc
PA
4492/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4493 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 4494 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 4495
ce3a066d 4496static int
f450004a 4497linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4498{
4499 register int i;
4500 /* Round starting address down to longword boundary. */
4501 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4502 /* Round ending address up; get number of longwords that makes. */
4503 register int count
493e2a69
MS
4504 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4505 / sizeof (PTRACE_XFER_TYPE);
4506
da6d8c04 4507 /* Allocate buffer of that many longwords. */
493e2a69
MS
4508 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4509 alloca (count * sizeof (PTRACE_XFER_TYPE));
4510
95954743 4511 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 4512
f0ae6fc3
PA
4513 if (len == 0)
4514 {
4515 /* Zero length write always succeeds. */
4516 return 0;
4517 }
4518
0d62e5e8
DJ
4519 if (debug_threads)
4520 {
58d6951d
DJ
4521 /* Dump up to four bytes. */
4522 unsigned int val = * (unsigned int *) myaddr;
4523 if (len == 1)
4524 val = val & 0xff;
4525 else if (len == 2)
4526 val = val & 0xffff;
4527 else if (len == 3)
4528 val = val & 0xffffff;
4529 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4530 val, (long)memaddr);
0d62e5e8
DJ
4531 }
4532
da6d8c04
DJ
4533 /* Fill start and end extra bytes of buffer with existing memory data. */
4534
93ae6fdc 4535 errno = 0;
14ce3065
DE
4536 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4537 about coercing an 8 byte integer to a 4 byte pointer. */
4538 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
4539 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4540 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4541 if (errno)
4542 return errno;
da6d8c04
DJ
4543
4544 if (count > 1)
4545 {
93ae6fdc 4546 errno = 0;
da6d8c04 4547 buffer[count - 1]
95954743 4548 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4549 /* Coerce to a uintptr_t first to avoid potential gcc warning
4550 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4551 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 4552 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 4553 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4554 if (errno)
4555 return errno;
da6d8c04
DJ
4556 }
4557
93ae6fdc 4558 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4559
493e2a69
MS
4560 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4561 myaddr, len);
da6d8c04
DJ
4562
4563 /* Write the entire buffer. */
4564
4565 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4566 {
4567 errno = 0;
14ce3065
DE
4568 ptrace (PTRACE_POKETEXT, pid,
4569 /* Coerce to a uintptr_t first to avoid potential gcc warning
4570 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
4571 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4572 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
4573 if (errno)
4574 return errno;
4575 }
4576
4577 return 0;
4578}
2f2893d9
DJ
4579
/* Target hook: initialize thread_db support for the current process,
   once, when symbol lookup becomes possible.  No-op when gdbserver
   was built without USE_THREAD_DB.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
4595
e5379b03 4596static void
ef57601b 4597linux_request_interrupt (void)
e5379b03 4598{
a1928bad 4599 extern unsigned long signal_pid;
e5379b03 4600
95954743
PA
4601 if (!ptid_equal (cont_thread, null_ptid)
4602 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 4603 {
54a0b537 4604 struct lwp_info *lwp;
bd99dc85 4605 int lwpid;
e5379b03 4606
54a0b537 4607 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
4608 lwpid = lwpid_of (lwp);
4609 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
4610 }
4611 else
ef57601b 4612 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
4613}
4614
aa691b87
RM
4615/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4616 to debugger memory starting at MYADDR. */
4617
4618static int
f450004a 4619linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4620{
4621 char filename[PATH_MAX];
4622 int fd, n;
95954743 4623 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 4624
6cebaf6e 4625 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4626
4627 fd = open (filename, O_RDONLY);
4628 if (fd < 0)
4629 return -1;
4630
4631 if (offset != (CORE_ADDR) 0
4632 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4633 n = -1;
4634 else
4635 n = read (fd, myaddr, len);
4636
4637 close (fd);
4638
4639 return n;
4640}
4641
d993e290
PA
4642/* These breakpoint and watchpoint related wrapper functions simply
4643 pass on the function call if the target has registered a
4644 corresponding function. */
e013ee27
OF
4645
4646static int
d993e290 4647linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 4648{
d993e290
PA
4649 if (the_low_target.insert_point != NULL)
4650 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
4651 else
4652 /* Unsupported (see target.h). */
4653 return 1;
4654}
4655
4656static int
d993e290 4657linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 4658{
d993e290
PA
4659 if (the_low_target.remove_point != NULL)
4660 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
4661 else
4662 /* Unsupported (see target.h). */
4663 return 1;
4664}
4665
4666static int
4667linux_stopped_by_watchpoint (void)
4668{
c3adc08c
PA
4669 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4670
4671 return lwp->stopped_by_watchpoint;
e013ee27
OF
4672}
4673
4674static CORE_ADDR
4675linux_stopped_data_address (void)
4676{
c3adc08c
PA
4677 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4678
4679 return lwp->stopped_data_address;
e013ee27
OF
4680}
4681
db0dfaa0
LM
4682#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4683 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4684 && defined(PT_TEXT_END_ADDR)
4685
4686/* This is only used for targets that define PT_TEXT_ADDR,
4687 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4688 the target has different ways of acquiring this information, like
4689 loadmaps. */
52fb6437
NS
4690
4691/* Under uClinux, programs are loaded at non-zero offsets, which we need
4692 to tell gdb about. */
4693
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data addresses out of the
   USER area via PTRACE_PEEKUSER.  Returns 1 on success (with *TEXT_P
   and *DATA_P filled in), 0 if any of the peeks failed.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PEEKUSER returns data in the ptrace return value; errno
     accumulates across the three calls and is checked once.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
4727#endif
4728
07e059b5
VP
/* Target hook: handle qXfer:osdata reads by delegating to the shared
   Linux implementation.  WRITEBUF is ignored — osdata is a read-only
   object.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
4736
d0722149
DE
4737/* Convert a native/host siginfo object, into/from the siginfo in the
4738 layout of the inferiors' architecture. */
4739
4740static void
a5362b9a 4741siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
4742{
4743 int done = 0;
4744
4745 if (the_low_target.siginfo_fixup != NULL)
4746 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4747
4748 /* If there was no callback, or the callback didn't do anything,
4749 then just do a straight memcpy. */
4750 if (!done)
4751 {
4752 if (direction == 1)
a5362b9a 4753 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 4754 else
a5362b9a 4755 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
4756 }
4757}
4758
4aa995e1
PA
/* Target hook: read (READBUF != NULL) or write the current LWP's
   siginfo via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, converting between
   the host layout and the inferior's layout.  Returns the number of
   bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Always fetch the current siginfo first, even for writes, so a
     partial write only patches the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4807
bd99dc85
PA
4808/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4809 so we notice when children change state; as the handler for the
4810 sigsuspend in my_waitpid. */
4811
4812static void
4813sigchld_handler (int signo)
4814{
4815 int old_errno = errno;
4816
4817 if (debug_threads)
e581f2b4
PA
4818 {
4819 do
4820 {
4821 /* fprintf is not async-signal-safe, so call write
4822 directly. */
4823 if (write (2, "sigchld_handler\n",
4824 sizeof ("sigchld_handler\n") - 1) < 0)
4825 break; /* just ignore */
4826 } while (0);
4827 }
bd99dc85
PA
4828
4829 if (target_is_async_p ())
4830 async_file_mark (); /* trigger a linux_wait */
4831
4832 errno = old_errno;
4833}
4834
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4840
4841static int
4842linux_async (int enable)
4843{
4844 int previous = (linux_event_pipe[0] != -1);
4845
8336d594
PA
4846 if (debug_threads)
4847 fprintf (stderr, "linux_async (%d), previous=%d\n",
4848 enable, previous);
4849
bd99dc85
PA
4850 if (previous != enable)
4851 {
4852 sigset_t mask;
4853 sigemptyset (&mask);
4854 sigaddset (&mask, SIGCHLD);
4855
4856 sigprocmask (SIG_BLOCK, &mask, NULL);
4857
4858 if (enable)
4859 {
4860 if (pipe (linux_event_pipe) == -1)
4861 fatal ("creating event pipe failed.");
4862
4863 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4864 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4865
4866 /* Register the event loop handler. */
4867 add_file_handler (linux_event_pipe[0],
4868 handle_target_event, NULL);
4869
4870 /* Always trigger a linux_wait. */
4871 async_file_mark ();
4872 }
4873 else
4874 {
4875 delete_file_handler (linux_event_pipe[0]);
4876
4877 close (linux_event_pipe[0]);
4878 close (linux_event_pipe[1]);
4879 linux_event_pipe[0] = -1;
4880 linux_event_pipe[1] = -1;
4881 }
4882
4883 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4884 }
4885
4886 return previous;
4887}
4888
/* Switch non-stop mode on or off, registering or unregistering from
   the event loop accordingly.  Always succeeds.  */

static int
linux_start_non_stop (int nonstop)
{
  linux_async (nonstop);
  return 0;
}
4896
cf8fd78b
PA
/* The Linux target always supports multi-process debugging.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4902
03583c20
UW
/* Address-space randomization can be disabled only when personality(2)
   support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 4912
d1feda86
YQ
/* The in-process agent is always supported on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
4918
c2d6af84
PA
4919static int
4920linux_supports_range_stepping (void)
4921{
4922 if (*the_low_target.supports_range_stepping == NULL)
4923 return 0;
4924
4925 return (*the_low_target.supports_range_stepping) ();
4926}
4927
efcbbd14
UW
4928/* Enumerate spufs IDs for process PID. */
4929static int
4930spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4931{
4932 int pos = 0;
4933 int written = 0;
4934 char path[128];
4935 DIR *dir;
4936 struct dirent *entry;
4937
4938 sprintf (path, "/proc/%ld/fd", pid);
4939 dir = opendir (path);
4940 if (!dir)
4941 return -1;
4942
4943 rewinddir (dir);
4944 while ((entry = readdir (dir)) != NULL)
4945 {
4946 struct stat st;
4947 struct statfs stfs;
4948 int fd;
4949
4950 fd = atoi (entry->d_name);
4951 if (!fd)
4952 continue;
4953
4954 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4955 if (stat (path, &st) != 0)
4956 continue;
4957 if (!S_ISDIR (st.st_mode))
4958 continue;
4959
4960 if (statfs (path, &stfs) != 0)
4961 continue;
4962 if (stfs.f_type != SPUFS_MAGIC)
4963 continue;
4964
4965 if (pos >= offset && pos + 4 <= offset + len)
4966 {
4967 *(unsigned int *)(buf + pos - offset) = fd;
4968 written += 4;
4969 }
4970 pos += 4;
4971 }
4972
4973 closedir (dir);
4974 return written;
4975}
4976
4977/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4978 object type, using the /proc file system. */
4979static int
4980linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4981 unsigned const char *writebuf,
4982 CORE_ADDR offset, int len)
4983{
4984 long pid = lwpid_of (get_thread_lwp (current_inferior));
4985 char buf[128];
4986 int fd = 0;
4987 int ret = 0;
4988
4989 if (!writebuf && !readbuf)
4990 return -1;
4991
4992 if (!*annex)
4993 {
4994 if (!readbuf)
4995 return -1;
4996 else
4997 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4998 }
4999
5000 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5001 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5002 if (fd <= 0)
5003 return -1;
5004
5005 if (offset != 0
5006 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5007 {
5008 close (fd);
5009 return 0;
5010 }
5011
5012 if (writebuf)
5013 ret = write (fd, writebuf, (size_t) len);
5014 else
5015 ret = read (fd, readbuf, (size_t) len);
5016
5017 close (fd);
5018 return ret;
5019}
5020
723b724b 5021#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
5022struct target_loadseg
5023{
5024 /* Core address to which the segment is mapped. */
5025 Elf32_Addr addr;
5026 /* VMA recorded in the program header. */
5027 Elf32_Addr p_vaddr;
5028 /* Size of this segment in memory. */
5029 Elf32_Word p_memsz;
5030};
5031
723b724b 5032# if defined PT_GETDSBT
78d85199
YQ
5033struct target_loadmap
5034{
5035 /* Protocol version number, must be zero. */
5036 Elf32_Word version;
5037 /* Pointer to the DSBT table, its size, and the DSBT index. */
5038 unsigned *dsbt_table;
5039 unsigned dsbt_size, dsbt_index;
5040 /* Number of segments in this map. */
5041 Elf32_Word nsegs;
5042 /* The actual memory map. */
5043 struct target_loadseg segs[/*nsegs*/];
5044};
723b724b
MF
5045# define LINUX_LOADMAP PT_GETDSBT
5046# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5047# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5048# else
5049struct target_loadmap
5050{
5051 /* Protocol version number, must be zero. */
5052 Elf32_Half version;
5053 /* Number of segments in this map. */
5054 Elf32_Half nsegs;
5055 /* The actual memory map. */
5056 struct target_loadseg segs[/*nsegs*/];
5057};
5058# define LINUX_LOADMAP PTRACE_GETFDPIC
5059# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5060# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5061# endif
78d85199 5062
78d85199
YQ
5063static int
5064linux_read_loadmap (const char *annex, CORE_ADDR offset,
5065 unsigned char *myaddr, unsigned int len)
5066{
5067 int pid = lwpid_of (get_thread_lwp (current_inferior));
5068 int addr = -1;
5069 struct target_loadmap *data = NULL;
5070 unsigned int actual_length, copy_length;
5071
5072 if (strcmp (annex, "exec") == 0)
723b724b 5073 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 5074 else if (strcmp (annex, "interp") == 0)
723b724b 5075 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
5076 else
5077 return -1;
5078
723b724b 5079 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
5080 return -1;
5081
5082 if (data == NULL)
5083 return -1;
5084
5085 actual_length = sizeof (struct target_loadmap)
5086 + sizeof (struct target_loadseg) * data->nsegs;
5087
5088 if (offset < 0 || offset > actual_length)
5089 return -1;
5090
5091 copy_length = actual_length - offset < len ? actual_length - offset : len;
5092 memcpy (myaddr, (char *) data + offset, copy_length);
5093 return copy_length;
5094}
723b724b
MF
5095#else
5096# define linux_read_loadmap NULL
5097#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5098
1570b33e
L
5099static void
5100linux_process_qsupported (const char *query)
5101{
5102 if (the_low_target.process_qsupported != NULL)
5103 the_low_target.process_qsupported (query);
5104}
5105
219f2f23
PA
5106static int
5107linux_supports_tracepoints (void)
5108{
5109 if (*the_low_target.supports_tracepoints == NULL)
5110 return 0;
5111
5112 return (*the_low_target.supports_tracepoints) ();
5113}
5114
5115static CORE_ADDR
5116linux_read_pc (struct regcache *regcache)
5117{
5118 if (the_low_target.get_pc == NULL)
5119 return 0;
5120
5121 return (*the_low_target.get_pc) (regcache);
5122}
5123
5124static void
5125linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5126{
5127 gdb_assert (the_low_target.set_pc != NULL);
5128
5129 (*the_low_target.set_pc) (regcache, pc);
5130}
5131
8336d594
PA
5132static int
5133linux_thread_stopped (struct thread_info *thread)
5134{
5135 return get_thread_lwp (thread)->stopped;
5136}
5137
5138/* This exposes stop-all-threads functionality to other modules. */
5139
5140static void
7984d532 5141linux_pause_all (int freeze)
8336d594 5142{
7984d532
PA
5143 stop_all_lwps (freeze, NULL);
5144}
5145
5146/* This exposes unstop-all-threads functionality to other gdbserver
5147 modules. */
5148
5149static void
5150linux_unpause_all (int unfreeze)
5151{
5152 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5153}
5154
90d74c30
PA
5155static int
5156linux_prepare_to_access_memory (void)
5157{
5158 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5159 running LWP. */
5160 if (non_stop)
5161 linux_pause_all (1);
5162 return 0;
5163}
5164
5165static void
0146f85b 5166linux_done_accessing_memory (void)
90d74c30
PA
5167{
5168 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5169 running LWP. */
5170 if (non_stop)
5171 linux_unpause_all (1);
5172}
5173
fa593d66
PA
5174static int
5175linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5176 CORE_ADDR collector,
5177 CORE_ADDR lockaddr,
5178 ULONGEST orig_size,
5179 CORE_ADDR *jump_entry,
405f8e94
SS
5180 CORE_ADDR *trampoline,
5181 ULONGEST *trampoline_size,
fa593d66
PA
5182 unsigned char *jjump_pad_insn,
5183 ULONGEST *jjump_pad_insn_size,
5184 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
5185 CORE_ADDR *adjusted_insn_addr_end,
5186 char *err)
fa593d66
PA
5187{
5188 return (*the_low_target.install_fast_tracepoint_jump_pad)
5189 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
5190 jump_entry, trampoline, trampoline_size,
5191 jjump_pad_insn, jjump_pad_insn_size,
5192 adjusted_insn_addr, adjusted_insn_addr_end,
5193 err);
fa593d66
PA
5194}
5195
6a271cae
PA
5196static struct emit_ops *
5197linux_emit_ops (void)
5198{
5199 if (the_low_target.emit_ops != NULL)
5200 return (*the_low_target.emit_ops) ();
5201 else
5202 return NULL;
5203}
5204
405f8e94
SS
5205static int
5206linux_get_min_fast_tracepoint_insn_len (void)
5207{
5208 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5209}
5210
2268b414
JK
5211/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5212
5213static int
5214get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5215 CORE_ADDR *phdr_memaddr, int *num_phdr)
5216{
5217 char filename[PATH_MAX];
5218 int fd;
5219 const int auxv_size = is_elf64
5220 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5221 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5222
5223 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5224
5225 fd = open (filename, O_RDONLY);
5226 if (fd < 0)
5227 return 1;
5228
5229 *phdr_memaddr = 0;
5230 *num_phdr = 0;
5231 while (read (fd, buf, auxv_size) == auxv_size
5232 && (*phdr_memaddr == 0 || *num_phdr == 0))
5233 {
5234 if (is_elf64)
5235 {
5236 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5237
5238 switch (aux->a_type)
5239 {
5240 case AT_PHDR:
5241 *phdr_memaddr = aux->a_un.a_val;
5242 break;
5243 case AT_PHNUM:
5244 *num_phdr = aux->a_un.a_val;
5245 break;
5246 }
5247 }
5248 else
5249 {
5250 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5251
5252 switch (aux->a_type)
5253 {
5254 case AT_PHDR:
5255 *phdr_memaddr = aux->a_un.a_val;
5256 break;
5257 case AT_PHNUM:
5258 *num_phdr = aux->a_un.a_val;
5259 break;
5260 }
5261 }
5262 }
5263
5264 close (fd);
5265
5266 if (*phdr_memaddr == 0 || *num_phdr == 0)
5267 {
5268 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5269 "phdr_memaddr = %ld, phdr_num = %d",
5270 (long) *phdr_memaddr, *num_phdr);
5271 return 2;
5272 }
5273
5274 return 0;
5275}
5276
5277/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5278
5279static CORE_ADDR
5280get_dynamic (const int pid, const int is_elf64)
5281{
5282 CORE_ADDR phdr_memaddr, relocation;
5283 int num_phdr, i;
5284 unsigned char *phdr_buf;
5285 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5286
5287 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5288 return 0;
5289
5290 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5291 phdr_buf = alloca (num_phdr * phdr_size);
5292
5293 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5294 return 0;
5295
5296 /* Compute relocation: it is expected to be 0 for "regular" executables,
5297 non-zero for PIE ones. */
5298 relocation = -1;
5299 for (i = 0; relocation == -1 && i < num_phdr; i++)
5300 if (is_elf64)
5301 {
5302 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5303
5304 if (p->p_type == PT_PHDR)
5305 relocation = phdr_memaddr - p->p_vaddr;
5306 }
5307 else
5308 {
5309 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5310
5311 if (p->p_type == PT_PHDR)
5312 relocation = phdr_memaddr - p->p_vaddr;
5313 }
5314
5315 if (relocation == -1)
5316 {
e237a7e2
JK
5317 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5318 any real world executables, including PIE executables, have always
5319 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5320 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5321 or present DT_DEBUG anyway (fpc binaries are statically linked).
5322
5323 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5324
5325 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5326
2268b414
JK
5327 return 0;
5328 }
5329
5330 for (i = 0; i < num_phdr; i++)
5331 {
5332 if (is_elf64)
5333 {
5334 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5335
5336 if (p->p_type == PT_DYNAMIC)
5337 return p->p_vaddr + relocation;
5338 }
5339 else
5340 {
5341 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5342
5343 if (p->p_type == PT_DYNAMIC)
5344 return p->p_vaddr + relocation;
5345 }
5346 }
5347
5348 return 0;
5349}
5350
5351/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
5352 can be 0 if the inferior does not yet have the library list initialized.
5353 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5354 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
5355
5356static CORE_ADDR
5357get_r_debug (const int pid, const int is_elf64)
5358{
5359 CORE_ADDR dynamic_memaddr;
5360 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5361 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 5362 CORE_ADDR map = -1;
2268b414
JK
5363
5364 dynamic_memaddr = get_dynamic (pid, is_elf64);
5365 if (dynamic_memaddr == 0)
367ba2c2 5366 return map;
2268b414
JK
5367
5368 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5369 {
5370 if (is_elf64)
5371 {
5372 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
75f62ce7 5373#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
5374 union
5375 {
5376 Elf64_Xword map;
5377 unsigned char buf[sizeof (Elf64_Xword)];
5378 }
5379 rld_map;
5380
5381 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5382 {
5383 if (linux_read_memory (dyn->d_un.d_val,
5384 rld_map.buf, sizeof (rld_map.buf)) == 0)
5385 return rld_map.map;
5386 else
5387 break;
5388 }
75f62ce7 5389#endif /* DT_MIPS_RLD_MAP */
2268b414 5390
367ba2c2
MR
5391 if (dyn->d_tag == DT_DEBUG && map == -1)
5392 map = dyn->d_un.d_val;
2268b414
JK
5393
5394 if (dyn->d_tag == DT_NULL)
5395 break;
5396 }
5397 else
5398 {
5399 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
75f62ce7 5400#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
5401 union
5402 {
5403 Elf32_Word map;
5404 unsigned char buf[sizeof (Elf32_Word)];
5405 }
5406 rld_map;
5407
5408 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5409 {
5410 if (linux_read_memory (dyn->d_un.d_val,
5411 rld_map.buf, sizeof (rld_map.buf)) == 0)
5412 return rld_map.map;
5413 else
5414 break;
5415 }
75f62ce7 5416#endif /* DT_MIPS_RLD_MAP */
2268b414 5417
367ba2c2
MR
5418 if (dyn->d_tag == DT_DEBUG && map == -1)
5419 map = dyn->d_un.d_val;
2268b414
JK
5420
5421 if (dyn->d_tag == DT_NULL)
5422 break;
5423 }
5424
5425 dynamic_memaddr += dyn_size;
5426 }
5427
367ba2c2 5428 return map;
2268b414
JK
5429}
5430
5431/* Read one pointer from MEMADDR in the inferior. */
5432
5433static int
5434read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5435{
485f1ee4
PA
5436 int ret;
5437
5438 /* Go through a union so this works on either big or little endian
5439 hosts, when the inferior's pointer size is smaller than the size
5440 of CORE_ADDR. It is assumed the inferior's endianness is the
5441 same of the superior's. */
5442 union
5443 {
5444 CORE_ADDR core_addr;
5445 unsigned int ui;
5446 unsigned char uc;
5447 } addr;
5448
5449 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5450 if (ret == 0)
5451 {
5452 if (ptr_size == sizeof (CORE_ADDR))
5453 *ptr = addr.core_addr;
5454 else if (ptr_size == sizeof (unsigned int))
5455 *ptr = addr.ui;
5456 else
5457 gdb_assert_not_reached ("unhandled pointer size");
5458 }
5459 return ret;
2268b414
JK
5460}
5461
/* Per-word-size field offsets into the inferior's struct r_debug and
   struct link_map, used when walking the shared-library list.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5485
fb723180 5486/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
5487
5488static int
5489linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5490 unsigned const char *writebuf,
5491 CORE_ADDR offset, int len)
5492{
5493 char *document;
5494 unsigned document_len;
5495 struct process_info_private *const priv = current_process ()->private;
5496 char filename[PATH_MAX];
5497 int pid, is_elf64;
5498
5499 static const struct link_map_offsets lmo_32bit_offsets =
5500 {
5501 0, /* r_version offset. */
5502 4, /* r_debug.r_map offset. */
5503 0, /* l_addr offset in link_map. */
5504 4, /* l_name offset in link_map. */
5505 8, /* l_ld offset in link_map. */
5506 12, /* l_next offset in link_map. */
5507 16 /* l_prev offset in link_map. */
5508 };
5509
5510 static const struct link_map_offsets lmo_64bit_offsets =
5511 {
5512 0, /* r_version offset. */
5513 8, /* r_debug.r_map offset. */
5514 0, /* l_addr offset in link_map. */
5515 8, /* l_name offset in link_map. */
5516 16, /* l_ld offset in link_map. */
5517 24, /* l_next offset in link_map. */
5518 32 /* l_prev offset in link_map. */
5519 };
5520 const struct link_map_offsets *lmo;
214d508e 5521 unsigned int machine;
b1fbec62
GB
5522 int ptr_size;
5523 CORE_ADDR lm_addr = 0, lm_prev = 0;
5524 int allocated = 1024;
5525 char *p;
5526 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5527 int header_done = 0;
2268b414
JK
5528
5529 if (writebuf != NULL)
5530 return -2;
5531 if (readbuf == NULL)
5532 return -1;
5533
5534 pid = lwpid_of (get_thread_lwp (current_inferior));
5535 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 5536 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 5537 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 5538 ptr_size = is_elf64 ? 8 : 4;
2268b414 5539
b1fbec62
GB
5540 while (annex[0] != '\0')
5541 {
5542 const char *sep;
5543 CORE_ADDR *addrp;
5544 int len;
2268b414 5545
b1fbec62
GB
5546 sep = strchr (annex, '=');
5547 if (sep == NULL)
5548 break;
0c5bf5a9 5549
b1fbec62
GB
5550 len = sep - annex;
5551 if (len == 5 && strncmp (annex, "start", 5) == 0)
5552 addrp = &lm_addr;
5553 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5554 addrp = &lm_prev;
5555 else
5556 {
5557 annex = strchr (sep, ';');
5558 if (annex == NULL)
5559 break;
5560 annex++;
5561 continue;
5562 }
5563
5564 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 5565 }
b1fbec62
GB
5566
5567 if (lm_addr == 0)
2268b414 5568 {
b1fbec62
GB
5569 int r_version = 0;
5570
5571 if (priv->r_debug == 0)
5572 priv->r_debug = get_r_debug (pid, is_elf64);
5573
5574 /* We failed to find DT_DEBUG. Such situation will not change
5575 for this inferior - do not retry it. Report it to GDB as
5576 E01, see for the reasons at the GDB solib-svr4.c side. */
5577 if (priv->r_debug == (CORE_ADDR) -1)
5578 return -1;
5579
5580 if (priv->r_debug != 0)
2268b414 5581 {
b1fbec62
GB
5582 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5583 (unsigned char *) &r_version,
5584 sizeof (r_version)) != 0
5585 || r_version != 1)
5586 {
5587 warning ("unexpected r_debug version %d", r_version);
5588 }
5589 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5590 &lm_addr, ptr_size) != 0)
5591 {
5592 warning ("unable to read r_map from 0x%lx",
5593 (long) priv->r_debug + lmo->r_map_offset);
5594 }
2268b414 5595 }
b1fbec62 5596 }
2268b414 5597
b1fbec62
GB
5598 document = xmalloc (allocated);
5599 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5600 p = document + strlen (document);
5601
5602 while (lm_addr
5603 && read_one_ptr (lm_addr + lmo->l_name_offset,
5604 &l_name, ptr_size) == 0
5605 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5606 &l_addr, ptr_size) == 0
5607 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5608 &l_ld, ptr_size) == 0
5609 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5610 &l_prev, ptr_size) == 0
5611 && read_one_ptr (lm_addr + lmo->l_next_offset,
5612 &l_next, ptr_size) == 0)
5613 {
5614 unsigned char libname[PATH_MAX];
5615
5616 if (lm_prev != l_prev)
2268b414 5617 {
b1fbec62
GB
5618 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5619 (long) lm_prev, (long) l_prev);
5620 break;
2268b414
JK
5621 }
5622
d878444c
JK
5623 /* Ignore the first entry even if it has valid name as the first entry
5624 corresponds to the main executable. The first entry should not be
5625 skipped if the dynamic loader was loaded late by a static executable
5626 (see solib-svr4.c parameter ignore_first). But in such case the main
5627 executable does not have PT_DYNAMIC present and this function already
5628 exited above due to failed get_r_debug. */
5629 if (lm_prev == 0)
2268b414 5630 {
d878444c
JK
5631 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5632 p = p + strlen (p);
5633 }
5634 else
5635 {
5636 /* Not checking for error because reading may stop before
5637 we've got PATH_MAX worth of characters. */
5638 libname[0] = '\0';
5639 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5640 libname[sizeof (libname) - 1] = '\0';
5641 if (libname[0] != '\0')
2268b414 5642 {
d878444c
JK
5643 /* 6x the size for xml_escape_text below. */
5644 size_t len = 6 * strlen ((char *) libname);
5645 char *name;
2268b414 5646
d878444c
JK
5647 if (!header_done)
5648 {
5649 /* Terminate `<library-list-svr4'. */
5650 *p++ = '>';
5651 header_done = 1;
5652 }
2268b414 5653
d878444c
JK
5654 while (allocated < p - document + len + 200)
5655 {
5656 /* Expand to guarantee sufficient storage. */
5657 uintptr_t document_len = p - document;
2268b414 5658
d878444c
JK
5659 document = xrealloc (document, 2 * allocated);
5660 allocated *= 2;
5661 p = document + document_len;
5662 }
5663
5664 name = xml_escape_text ((char *) libname);
5665 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5666 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5667 name, (unsigned long) lm_addr,
5668 (unsigned long) l_addr, (unsigned long) l_ld);
5669 free (name);
5670 }
0afae3cf 5671 }
b1fbec62
GB
5672
5673 lm_prev = lm_addr;
5674 lm_addr = l_next;
2268b414
JK
5675 }
5676
b1fbec62
GB
5677 if (!header_done)
5678 {
5679 /* Empty list; terminate `<library-list-svr4'. */
5680 strcpy (p, "/>");
5681 }
5682 else
5683 strcpy (p, "</library-list-svr4>");
5684
2268b414
JK
5685 document_len = strlen (document);
5686 if (offset < document_len)
5687 document_len -= offset;
5688 else
5689 document_len = 0;
5690 if (len > document_len)
5691 len = document_len;
5692
5693 memcpy (readbuf, document + offset, len);
5694 xfree (document);
5695
5696 return len;
5697}
5698
9accd112
MM
5699#ifdef HAVE_LINUX_BTRACE
5700
5701/* Enable branch tracing. */
5702
5703static struct btrace_target_info *
5704linux_low_enable_btrace (ptid_t ptid)
5705{
5706 struct btrace_target_info *tinfo;
5707
5708 tinfo = linux_enable_btrace (ptid);
3aee8918 5709
9accd112 5710 if (tinfo != NULL)
3aee8918
PA
5711 {
5712 struct thread_info *thread = find_thread_ptid (ptid);
5713 struct regcache *regcache = get_thread_regcache (thread, 0);
5714
5715 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5716 }
9accd112
MM
5717
5718 return tinfo;
5719}
5720
5721/* Read branch trace data as btrace xml document. */
5722
5723static void
5724linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5725 int type)
5726{
5727 VEC (btrace_block_s) *btrace;
5728 struct btrace_block *block;
5729 int i;
5730
5731 btrace = linux_read_btrace (tinfo, type);
5732
5733 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5734 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5735
5736 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5737 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5738 paddress (block->begin), paddress (block->end));
5739
5740 buffer_grow_str (buffer, "</btrace>\n");
5741
5742 VEC_free (btrace_block_s, btrace);
5743}
5744#endif /* HAVE_LINUX_BTRACE */
5745
ce3a066d
DJ
5746static struct target_ops linux_target_ops = {
5747 linux_create_inferior,
5748 linux_attach,
5749 linux_kill,
6ad8ae5c 5750 linux_detach,
8336d594 5751 linux_mourn,
444d6139 5752 linux_join,
ce3a066d
DJ
5753 linux_thread_alive,
5754 linux_resume,
5755 linux_wait,
5756 linux_fetch_registers,
5757 linux_store_registers,
90d74c30 5758 linux_prepare_to_access_memory,
0146f85b 5759 linux_done_accessing_memory,
ce3a066d
DJ
5760 linux_read_memory,
5761 linux_write_memory,
2f2893d9 5762 linux_look_up_symbols,
ef57601b 5763 linux_request_interrupt,
aa691b87 5764 linux_read_auxv,
d993e290
PA
5765 linux_insert_point,
5766 linux_remove_point,
e013ee27
OF
5767 linux_stopped_by_watchpoint,
5768 linux_stopped_data_address,
db0dfaa0
LM
5769#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5770 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5771 && defined(PT_TEXT_END_ADDR)
52fb6437 5772 linux_read_offsets,
dae5f5cf
DJ
5773#else
5774 NULL,
5775#endif
5776#ifdef USE_THREAD_DB
5777 thread_db_get_tls_address,
5778#else
5779 NULL,
52fb6437 5780#endif
efcbbd14 5781 linux_qxfer_spu,
59a016f0 5782 hostio_last_error_from_errno,
07e059b5 5783 linux_qxfer_osdata,
4aa995e1 5784 linux_xfer_siginfo,
bd99dc85
PA
5785 linux_supports_non_stop,
5786 linux_async,
5787 linux_start_non_stop,
cdbfd419
PP
5788 linux_supports_multi_process,
5789#ifdef USE_THREAD_DB
dc146f7c 5790 thread_db_handle_monitor_command,
cdbfd419 5791#else
dc146f7c 5792 NULL,
cdbfd419 5793#endif
d26e3629 5794 linux_common_core_of_thread,
78d85199 5795 linux_read_loadmap,
219f2f23
PA
5796 linux_process_qsupported,
5797 linux_supports_tracepoints,
5798 linux_read_pc,
8336d594
PA
5799 linux_write_pc,
5800 linux_thread_stopped,
7984d532 5801 NULL,
711e434b 5802 linux_pause_all,
7984d532 5803 linux_unpause_all,
fa593d66
PA
5804 linux_cancel_breakpoints,
5805 linux_stabilize_threads,
6a271cae 5806 linux_install_fast_tracepoint_jump_pad,
03583c20
UW
5807 linux_emit_ops,
5808 linux_supports_disable_randomization,
405f8e94 5809 linux_get_min_fast_tracepoint_insn_len,
2268b414 5810 linux_qxfer_libraries_svr4,
d1feda86 5811 linux_supports_agent,
9accd112
MM
5812#ifdef HAVE_LINUX_BTRACE
5813 linux_supports_btrace,
5814 linux_low_enable_btrace,
5815 linux_disable_btrace,
5816 linux_low_read_btrace,
5817#else
5818 NULL,
5819 NULL,
5820 NULL,
5821 NULL,
9accd112 5822#endif
c2d6af84 5823 linux_supports_range_stepping,
ce3a066d
DJ
5824};
5825
0d62e5e8
DJ
/* Set up signal dispositions needed by the Linux backend.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
5835
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's table; the table is terminated by an
   entry with a negative size.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  info->num_regsets = 0;
  while (info->regsets[info->num_regsets].size >= 0)
    info->num_regsets++;
}
#endif
5846
da6d8c04
DJ
5847void
5848initialize_low (void)
5849{
bd99dc85
PA
5850 struct sigaction sigchld_action;
5851 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 5852 set_target_ops (&linux_target_ops);
611cb4a5
DJ
5853 set_breakpoint_data (the_low_target.breakpoint,
5854 the_low_target.breakpoint_len);
0d62e5e8 5855 linux_init_signals ();
aa7c7447 5856 linux_ptrace_init_warnings ();
bd99dc85
PA
5857
5858 sigchld_action.sa_handler = sigchld_handler;
5859 sigemptyset (&sigchld_action.sa_mask);
5860 sigchld_action.sa_flags = SA_RESTART;
5861 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
5862
5863 initialize_low_arch ();
da6d8c04 5864}