/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

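/* A linked list of signals pending for an LWP, each with the siginfo
   captured at the time the signal was intercepted.  */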
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF header at all.  Store
   the machine type in *MACHINE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

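/* Remove the thread associated with LWP from our lists, and free the
   LWP itself along with its architecture-specific data.  */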
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && !linux_is_extended_waitstatus (lwp->last_status))
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

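/* Allocate a new LWP for PTID, set up its architecture-specific data
   if the low target provides any, and register a thread for it.  */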
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

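/* Build a human-readable string explaining why attaching to the LWP
   given by PTID failed with errno value ERR.  The result is valid
   only until the next call.  */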
char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

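/* Counter state used by second_thread_of_pid_p below to determine
   whether a process has more than one thread.  */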
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

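/* Kill process PID and all of its LWPs, then mourn it.  */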
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

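/* Callback for `find_inferior'.  Detach from one LWP of process PID,
   delivering any pending signal to it.  */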
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

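/* Detach from process PID, first stopping and stabilizing all of its
   threads.  */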
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

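/* Clean up the data structures of a process that has exited or been
   killed.  */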
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

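/* Wait until process PID is reaped, i.e. until waitpid reports that
   it exited or was terminated by a signal.  */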
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

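/* Callback for `find_inferior'.  Return true if ENTRY's LWP ID
   matches the LWP of PTID (or PTID's pid, if it carries no LWP).  */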
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

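/* Return the LWP whose ID matches PTID, or NULL if none is found.  */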
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	debug_printf ("CB: Push back breakpoint for %s\n",
		      target_pid_to_str (ptid_of (current_thread)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_thread = saved_thread;
      return 1;
    }
  else
    {
      if (debug_threads)
	debug_printf ("CB: No breakpoint found at %s for [%s]\n",
		      paddress (lwp->stop_pc),
		      target_pid_to_str (ptid_of (current_thread)));
    }

  current_thread = saved_thread;
  return 0;
}

fa96cb38
PA
1742/* Do low-level handling of the event, and check if we should go on
1743 and pass it to caller code. Return the affected lwp if we are, or
1744 NULL otherwise. */
1745
1746static struct lwp_info *
1747linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1748{
1749 struct lwp_info *child;
1750 struct thread_info *thread;
1751
1752 child = find_lwp_pid (pid_to_ptid (lwpid));
1753
1754 /* If we didn't find a process, one of two things presumably happened:
1755 - A process we started and then detached from has exited. Ignore it.
1756 - A process we are controlling has forked and the new child's stop
1757 was reported to us by the kernel. Save its PID. */
1758 if (child == NULL && WIFSTOPPED (wstat))
1759 {
1760 add_to_pid_list (&stopped_pids, lwpid, wstat);
1761 return NULL;
1762 }
1763 else if (child == NULL)
1764 return NULL;
1765
1766 thread = get_lwp_thread (child);
1767
1768 child->stopped = 1;
1769
1770 child->last_status = wstat;
1771
1772 if (WIFSTOPPED (wstat))
1773 {
1774 struct process_info *proc;
1775
1776 /* Architecture-specific setup after inferior is running. This
1777 needs to happen after we have attached to the inferior and it
1778 is stopped for the first time, but before we access any
1779 inferior registers. */
1780 proc = find_process_pid (pid_of (thread));
1781 if (proc->private->new_inferior)
1782 {
0bfdf32f 1783 struct thread_info *saved_thread;
fa96cb38 1784
0bfdf32f
GB
1785 saved_thread = current_thread;
1786 current_thread = thread;
fa96cb38
PA
1787
1788 the_low_target.arch_setup ();
1789
0bfdf32f 1790 current_thread = saved_thread;
fa96cb38
PA
1791
1792 proc->private->new_inferior = 0;
1793 }
1794 }
1795
1796 /* Store the STOP_PC, with adjustment applied. This depends on the
1797 architecture being defined already (so that CHILD has a valid
1798 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1799 not). */
1800 if (WIFSTOPPED (wstat))
1801 {
1802 if (debug_threads
1803 && the_low_target.get_pc != NULL)
1804 {
0bfdf32f 1805 struct thread_info *saved_thread;
fa96cb38
PA
1806 struct regcache *regcache;
1807 CORE_ADDR pc;
1808
0bfdf32f
GB
1809 saved_thread = current_thread;
1810 current_thread = thread;
1811 regcache = get_thread_regcache (current_thread, 1);
fa96cb38
PA
1812 pc = (*the_low_target.get_pc) (regcache);
1813 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
0bfdf32f 1814 current_thread = saved_thread;
fa96cb38
PA
1815 }
1816
1817 child->stop_pc = get_stop_pc (child);
1818 }
1819
1820 /* Fetch the possibly triggered data watchpoint info and store it in
1821 CHILD.
1822
1823 On some archs, like x86, that use debug registers to set
1824 watchpoints, it's possible that the way to know which watched
1825 address trapped, is to check the register that is used to select
1826 which address to watch. Problem is, between setting the
1827 watchpoint and reading back which data address trapped, the user
1828 may change the set of watchpoints, and, as a consequence, GDB
1829 changes the debug registers in the inferior. To avoid reading
1830 back a stale stopped-data-address when that happens, we cache in
1831 LP the fact that a watchpoint trapped, and the corresponding data
1832 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1833 changes the debug registers meanwhile, we have the cached data we
1834 can rely on. */
1835
1836 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1837 {
1838 if (the_low_target.stopped_by_watchpoint == NULL)
1839 {
1840 child->stopped_by_watchpoint = 0;
1841 }
1842 else
1843 {
0bfdf32f 1844 struct thread_info *saved_thread;
fa96cb38 1845
0bfdf32f
GB
1846 saved_thread = current_thread;
1847 current_thread = thread;
fa96cb38
PA
1848
1849 child->stopped_by_watchpoint
1850 = the_low_target.stopped_by_watchpoint ();
1851
1852 if (child->stopped_by_watchpoint)
1853 {
1854 if (the_low_target.stopped_data_address != NULL)
1855 child->stopped_data_address
1856 = the_low_target.stopped_data_address ();
1857 else
1858 child->stopped_data_address = 0;
1859 }
1860
0bfdf32f 1861 current_thread = saved_thread;
fa96cb38
PA
1862 }
1863 }
1864
1865 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1866 {
1867 linux_enable_event_reporting (lwpid);
1868 child->must_set_ptrace_flags = 0;
1869 }
1870
1871 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 1872 && linux_is_extended_waitstatus (wstat))
fa96cb38
PA
1873 {
1874 handle_extended_wait (child, wstat);
1875 return NULL;
1876 }
1877
1878 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1879 && child->stop_expected)
1880 {
1881 if (debug_threads)
1882 debug_printf ("Expected stop.\n");
1883 child->stop_expected = 0;
1884
1885 if (thread->last_resume_kind == resume_stop)
1886 {
1887 /* We want to report the stop to the core. Treat the
1888 SIGSTOP as a normal event. */
1889 }
1890 else if (stopping_threads != NOT_STOPPING_THREADS)
1891 {
1892 /* Stopping threads. We don't want this SIGSTOP to end up
1893 pending in the FILTER_PTID handling below. */
1894 return NULL;
1895 }
1896 else
1897 {
1898 /* Filter out the event. */
1899 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1900 return NULL;
1901 }
1902 }
1903
1904 /* Check if the thread has exited. */
1905 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1906 && num_lwps (pid_of (thread)) > 1)
1907 {
1908 if (debug_threads)
1909 debug_printf ("LLW: %d exited.\n", lwpid);
1910
1911 /* If there is at least one more LWP, then the exit signal
1912 was not the end of the debugged application and should be
1913 ignored. */
1914 delete_lwp (child);
1915 return NULL;
1916 }
1917
1918 if (!ptid_match (ptid_of (thread), filter_ptid))
1919 {
1920 if (debug_threads)
1921 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1922 lwpid, wstat);
1923
1924 if (WIFSTOPPED (wstat))
1925 {
1926 child->status_pending_p = 1;
1927 child->status_pending = wstat;
1928
1929 if (WSTOPSIG (wstat) != SIGSTOP)
1930 {
1931 /* Cancel breakpoint hits. The breakpoint may be
1932 removed before we fetch events from this process to
1933 report to the core. It is best not to assume the
1934 moribund breakpoints heuristic always handles these
1935 cases --- it could be too many events go through to
1936 the core before this one is handled. All-stop always
1937 cancels breakpoint hits in all threads. */
1938 if (non_stop
1939 && WSTOPSIG (wstat) == SIGTRAP
1940 && cancel_breakpoint (child))
1941 {
1942 /* Throw away the SIGTRAP. */
1943 child->status_pending_p = 0;
1944
1945 if (debug_threads)
1946 debug_printf ("LLW: LWP %d hit a breakpoint while"
1947 " waiting for another process;"
1948 " cancelled it\n", lwpid);
1949 }
1950 }
1951 }
1952 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1953 {
1954 if (debug_threads)
1955 debug_printf ("LLWE: process %d exited while fetching "
1956 "event from another LWP\n", lwpid);
1957
1958 /* This was the last lwp in the process. Since events are
1959 serialized to GDB core, and we can't report this one
1960 right now, but GDB core and the other target layers will
1961 want to be notified about the exit code/signal, leave the
1962 status pending for the next time we're able to report
1963 it. */
1964 mark_lwp_dead (child, wstat);
1965 }
1966
1967 return NULL;
1968 }
1969
1970 return child;
1971}
1972
d50171e4
PA
1973/* When the event-loop is doing a step-over, this points at the thread
1974 being stepped. */
1975ptid_t step_over_bkpt;
1976
fa96cb38
PA
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   were found.  Return the PID of the stopped child otherwise.  */
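/* For example, wait_for_sigstop below passes minus_one_ptid as
   WAIT_PTID and null_ptid as FILTER_PTID, which pulls events from
   every child while leaving all of them pending, and so only
   returns once no unwaited-for children are left.  */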

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      event_thread = (struct thread_info *)
        find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
        event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
        debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        {
          internal_error (__FILE__, __LINE__,
                          "requesting an event out of a"
                          " suspended child?");
        }

      if (requested_child->status_pending_p)
        {
          event_child = requested_child;
          event_thread = get_lwp_thread (event_child);
        }
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        debug_printf ("Got an event from pending child %ld (%04x)\n",
                      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

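  /* Blocking SIGCHLD here closes the classic wakeup race: if a child
     changed state after our last waitpid but before the sigsuspend
     below, the SIGCHLD would otherwise be delivered in the gap and
     the sigsuspend would then block forever.  With the signal held
     until sigsuspend atomically restores PREV_MASK and waits, the
     notification cannot be lost.  */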
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
         quirks:

         - If the thread group leader exits while other threads in the
           thread group still exist, waitpid(TGID, ...) hangs.  That
           waitpid won't return an exit status until the other threads
           in the group are reaped.

         - When a non-leader thread execs, that thread just vanishes
           without reporting an exit (so we'd hang if we waited for it
           explicitly in that case).  The exec event is reported to
           the TGID pid (although we don't currently enable exec
           events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
        debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
                      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
        {
          if (debug_threads)
            {
              debug_printf ("LLW: waitpid %ld received %s\n",
                            (long) ret, status_to_str (*wstatp));
            }

          event_child = linux_low_filter_event (filter_ptid,
                                                ret, *wstatp);
          if (event_child != NULL)
            {
              /* We got an event to report to the core.  */
              event_thread = get_lwp_thread (event_child);
              break;
            }

          /* Retry until nothing comes out of waitpid.  A single
             SIGCHLD can indicate more than one child stopped.  */
          continue;
        }

      /* Check for zombie thread group leaders.  Those can't be reaped
         until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
         want to wait for, bail.  We can't just block in
         waitpid/sigsuspend, because lwps might have been left stopped
         in trace-stop state, and we'd be stuck forever waiting for
         their status to change (which would only happen if we resumed
         them).  Even if WNOHANG is set, this return code is preferred
         over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
                          not_stopped_callback,
                          &wait_ptid) == NULL))
        {
          if (debug_threads)
            debug_printf ("LLW: exit (no unwaited-for LWP)\n");
          sigprocmask (SIG_SETMASK, &prev_mask, NULL);
          return -1;
        }

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
        {
          if (debug_threads)
            debug_printf ("WNOHANG set, no event found\n");

          sigprocmask (SIG_SETMASK, &prev_mask, NULL);
          return 0;
        }

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
        debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
        debug_printf ("LWP %ld is the last lwp of process.  "
                      "Process %d exiting.\n",
                      lwpid_of (event_thread), pid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}

/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   were found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}

/* Count the LWP's that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct thread_info *event_thread;

  /* Give preference to any LWP that is being single-stepped.  */
  event_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            select_singlestep_lwp_callback,
                                            NULL);
  if (event_thread != NULL)
    {
      if (debug_threads)
        debug_printf ("SEL: Select single-step %s\n",
                      target_pid_to_str (ptid_of (event_thread)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_threads, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
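      /* rand () / (RAND_MAX + 1.0) is uniform over [0, 1), so with
         e.g. NUM_EVENTS == 3 the expression below picks 0, 1 or 2
         with (essentially) equal probability; scaling this way also
         avoids relying on rand ()'s low-order bits, which are weak
         in some C libraries.  */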
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
                      num_events, random_selector);

      event_thread
        = (struct thread_info *) find_inferior (&all_threads,
                                                select_event_lwp_callback,
                                                &random_selector);
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_threads, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callees).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
                                            stuck_in_jump_pad_callback,
                                            NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
        debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
                      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
         moving threads out of jump pad, we need to be able to step
         over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
        {
          lwp = get_thread_lwp (current_thread);

          /* Lock it.  */
          lwp->suspended++;

          if (ourstatus.value.sig != GDB_SIGNAL_0
              || current_thread->last_resume_kind == resume_stop)
            {
              wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
              enqueue_one_deferred_signal (lwp, &wstat);
            }
        }
    }

  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck
        = (struct thread_info *) find_inferior (&all_threads,
                                                stuck_in_jump_pad_callback,
                                                NULL);
      if (thread_stuck != NULL)
        debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
                      lwpid_of (thread_stuck));
    }
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
                      target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0)
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
        {
          debug_printf ("linux_wait_1 ret = null_ptid, "
                        "TARGET_WAITKIND_IGNORE\n");
          debug_exit ();
        }

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
        {
          debug_printf ("linux_wait_1 ret = null_ptid, "
                        "TARGET_WAITKIND_NO_RESUMED\n");
          debug_exit ();
        }

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* linux_wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
        {
          ourstatus->kind = TARGET_WAITKIND_EXITED;
          ourstatus->value.integer = WEXITSTATUS (w);

          if (debug_threads)
            {
              debug_printf ("linux_wait_1 ret = %s, exited with "
                            "retcode %d\n",
                            target_pid_to_str (ptid_of (current_thread)),
                            WEXITSTATUS (w));
              debug_exit ();
            }
        }
      else
        {
          ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
          ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

          if (debug_threads)
            {
              debug_printf ("linux_wait_1 ret = %s, terminated with "
                            "signal %d\n",
                            target_pid_to_str (ptid_of (current_thread)),
                            WTERMSIG (w));
              debug_exit ();
            }
        }

      return ptid_of (current_thread);
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
         trace buffer, and cause a tracing stop, removing
         breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            debug_printf ("Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
        debug_printf ("Got signal %d for LWP %ld.  Check if we need "
                      "to defer or adjust it.\n",
                      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
          && maybe_move_out_of_jump_pad (event_child, &w))
        {
          enqueue_one_deferred_signal (event_child, &w);

          if (debug_threads)
            debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
                          WSTOPSIG (w), lwpid_of (current_thread));

          linux_resume_one_lwp (event_child, 0, 0, NULL);
          goto retry;
        }
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
        debug_printf ("LWP %ld was trying to move out of the jump pad (%d).  "
                      "Check if we're already there.\n",
                      lwpid_of (current_thread),
                      event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
        = linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint != 1)
        {
          /* No longer need this breakpoint.  */
          if (event_child->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("No longer need exit-jump-pad bkpt; removing it, "
                              "stopping all threads momentarily.\n");

              /* Other running threads could hit this breakpoint.
                 We don't handle moribund locations like GDB does,
                 instead we always pause all threads when removing
                 breakpoints, so that any step-over or
                 decr_pc_after_break adjustment is always taken
                 care of while the breakpoint is still
                 inserted.  */
              stop_all_lwps (1, event_child);
              cancel_breakpoints ();

              delete_breakpoint (event_child->exit_jump_pad_bkpt);
              event_child->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, event_child);

              gdb_assert (event_child->suspended >= 0);
            }
        }

      if (event_child->collecting_fast_tracepoint == 0)
        {
          if (debug_threads)
            debug_printf ("fast tracepoint finished "
                          "collecting successfully.\n");

          /* We may have a deferred signal to report.  */
          if (dequeue_one_deferred_signal (event_child, &w))
            {
              if (debug_threads)
                debug_printf ("dequeued one signal.\n");
            }
          else
            {
              if (debug_threads)
                debug_printf ("no deferred signals.\n");

              if (stabilizing_threads)
                {
                  ourstatus->kind = TARGET_WAITKIND_STOPPED;
                  ourstatus->value.sig = GDB_SIGNAL_0;

                  if (debug_threads)
                    {
                      debug_printf ("linux_wait_1 ret = %s, stopped "
                                    "while stabilizing threads\n",
                                    target_pid_to_str (ptid_of (current_thread)));
                      debug_exit ();
                    }

                  return ptid_of (current_thread);
                }
            }
        }
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
          (current_process ()->private->thread_db != NULL
           && (WSTOPSIG (w) == __SIGRTMIN
               || WSTOPSIG (w) == __SIGRTMIN + 1))
          ||
#endif
          (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
           && !(WSTOPSIG (w) == SIGSTOP
                && current_thread->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
        debug_printf ("Ignored signal %d for LWP %ld.\n",
                      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                  (PTRACE_TYPE_ARG3) 0, &info) == 0)
        info_p = &info;
      else
        info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
                            WSTOPSIG (w), info_p);
      goto retry;
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or may not support Z0 breakpoints.  If
     we do, we'd be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
                   || (current_thread->last_resume_kind == resume_step
                       && !in_step_range)
                   || event_child->stopped_by_watchpoint
                   || (!step_over_finished && !in_step_range
                       && !bp_explains_trap && !trace_event)
                   || (gdb_breakpoint_here (event_child->stop_pc)
                       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
                       && gdb_no_commands_at_breakpoint (event_child->stop_pc)));

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
        {
          if (bp_explains_trap)
            debug_printf ("Hit a gdbserver breakpoint.\n");
          if (step_over_finished)
            debug_printf ("Step-over finished.\n");
          if (trace_event)
            debug_printf ("Tracepoint event.\n");
          if (lwp_in_step_range (event_child))
            debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
                          paddress (event_child->stop_pc),
                          paddress (event_child->step_range_start),
                          paddress (event_child->step_range_end));
        }

      /* We're not reporting this breakpoint to GDB, so apply the
         decr_pc_after_break adjustment to the inferior's regcache
         ourselves.  */

      if (the_low_target.set_pc != NULL)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }

      /* We may have finished stepping over a breakpoint.  If so,
         we've stopped and suspended all LWPs momentarily except the
         stepping one.  This is where we resume them all again.  We're
         going to keep waiting, so use proceed, which handles stepping
         over the next breakpoint.  */
      if (debug_threads)
        debug_printf ("proceeding all threads.\n");

      if (step_over_finished)
        unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_thread->last_resume_kind == resume_step)
        {
          if (event_child->step_range_start == event_child->step_range_end)
            debug_printf ("GDB wanted to single-step, reporting event.\n");
          else if (!lwp_in_step_range (event_child))
            debug_printf ("Out of step range, reporting event.\n");
        }
      if (event_child->stopped_by_watchpoint)
        debug_printf ("Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
        debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
        debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop && !stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
         from among those that have had events.  Giving equal priority
         to all LWPs that have had events helps prevent
         starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
        {
          event_child->status_pending_p = 1;
          event_child->status_pending = w;

          select_event_lwp (&event_child);

          /* current_thread and event_child must stay in sync.  */
          current_thread = get_lwp_thread (event_child);

          event_child->status_pending_p = 0;
          w = event_child->status_pending;
        }

      /* Now that we've selected our final event LWP, cancel any
         breakpoints in other LWPs that have hit a GDB breakpoint.
         See the comment in cancel_breakpoints_callback to find out
         why.  */
      find_inferior (&all_threads, cancel_breakpoints_callback, event_child);

      /* If we were doing a step-over, all other threads but the stepping one
         had been paused in start_step_over, with their suspend counts
         incremented.  We don't want to do a full unstop/unpause, because we're
         in all-stop mode (so we want threads stopped), but we still need to
         unsuspend the other threads, to decrement their `suspended' count
         back.  */
      if (step_over_finished)
        unsuspend_all_lwps (event_child);

      /* Stabilize threads (move out of jump pads).  */
      stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
         momentarily paused.  In all-stop, that's fine, we want
         threads stopped by now anyway.  In non-stop, we need to
         re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
        unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (current_thread->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         and it stopped cleanly, so report as SIG0.  The use of
         SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
           && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         but it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (debug_threads)
    {
      debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
                    target_pid_to_str (ptid_of (current_thread)),
                    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  return ptid_of (current_thread);
}

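/* The pipe helpers below implement a self-pipe wakeup: marking the
   pipe makes its read end readable, which is what the event loop
   watches for, while flushing drains any stale wakeup bytes before a
   new wait.  linux_wait below marks the pipe again whenever more stop
   events may still be pending, so the event loop calls back in
   promptly.  */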
/* Get rid of any pending event in the pipe.  */
static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    ret = read (linux_event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something in the pipe, so the event loop wakes up.  */
static void
async_file_mark (void)
{
  int ret;

  async_file_flush ();

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static ptid_t
linux_wait (ptid_t ptid,
            struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}

/* Send a signal to an LWP.  */
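/* tkill is preferred because with NPTL all the threads of a process
   share one process id, so kill(2) cannot name an individual thread,
   while tkill takes the kernel thread (lwp) id directly.  Under the
   older LinuxThreads model each thread had its own pid, which is why
   falling back to kill still works there.  */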
fd500816
DJ
3048
3049static int
a1928bad 3050kill_lwp (unsigned long lwpid, int signo)
fd500816 3051{
c5f62d5f
DE
3052 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3053 fails, then we are not using nptl threads and we should be using kill. */
fd500816 3054
c5f62d5f
DE
3055#ifdef __NR_tkill
3056 {
3057 static int tkill_failed;
fd500816 3058
c5f62d5f
DE
3059 if (!tkill_failed)
3060 {
3061 int ret;
3062
3063 errno = 0;
3064 ret = syscall (__NR_tkill, lwpid, signo);
3065 if (errno != ENOSYS)
3066 return ret;
3067 tkill_failed = 1;
3068 }
3069 }
fd500816
DJ
3070#endif
3071
3072 return kill (lwpid, signo);
3073}
3074
964e4306
PA
3075void
3076linux_stop_lwp (struct lwp_info *lwp)
3077{
3078 send_sigstop (lwp);
3079}
3080
0d62e5e8 3081static void
02fc4de7 3082send_sigstop (struct lwp_info *lwp)
0d62e5e8 3083{
bd99dc85 3084 int pid;
0d62e5e8 3085
d86d4aaf 3086 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3087
0d62e5e8
DJ
3088 /* If we already have a pending stop signal for this process, don't
3089 send another. */
54a0b537 3090 if (lwp->stop_expected)
0d62e5e8 3091 {
ae13219e 3092 if (debug_threads)
87ce2a04 3093 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3094
0d62e5e8
DJ
3095 return;
3096 }
3097
3098 if (debug_threads)
87ce2a04 3099 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3100
d50171e4 3101 lwp->stop_expected = 1;
bd99dc85 3102 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3103}
3104
7984d532
PA
3105static int
3106send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3107{
d86d4aaf
DE
3108 struct thread_info *thread = (struct thread_info *) entry;
3109 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3110
7984d532
PA
3111 /* Ignore EXCEPT. */
3112 if (lwp == except)
3113 return 0;
3114
02fc4de7 3115 if (lwp->stopped)
7984d532 3116 return 0;
02fc4de7
PA
3117
3118 send_sigstop (lwp);
7984d532
PA
3119 return 0;
3120}
3121
3122/* Increment the suspend count of an LWP, and stop it, if not stopped
3123 yet. */
3124static int
3125suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3126 void *except)
3127{
d86d4aaf
DE
3128 struct thread_info *thread = (struct thread_info *) entry;
3129 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3130
3131 /* Ignore EXCEPT. */
3132 if (lwp == except)
3133 return 0;
3134
3135 lwp->suspended++;
3136
3137 return send_sigstop_callback (entry, except);
02fc4de7
PA
3138}
3139
95954743
PA
3140static void
3141mark_lwp_dead (struct lwp_info *lwp, int wstat)
3142{
3143 /* It's dead, really. */
3144 lwp->dead = 1;
3145
3146 /* Store the exit status for later. */
3147 lwp->status_pending_p = 1;
3148 lwp->status_pending = wstat;
3149
95954743
PA
3150 /* Prevent trying to stop it. */
3151 lwp->stopped = 1;
3152
3153 /* No further stops are expected from a dead lwp. */
3154 lwp->stop_expected = 0;
3155}
3156
fa96cb38
PA
3157/* Wait for all children to stop for the SIGSTOPs we just queued. */
3158
0d62e5e8 3159static void
fa96cb38 3160wait_for_sigstop (void)
0d62e5e8 3161{
0bfdf32f 3162 struct thread_info *saved_thread;
95954743 3163 ptid_t saved_tid;
fa96cb38
PA
3164 int wstat;
3165 int ret;
0d62e5e8 3166
0bfdf32f
GB
3167 saved_thread = current_thread;
3168 if (saved_thread != NULL)
3169 saved_tid = saved_thread->entry.id;
bd99dc85 3170 else
95954743 3171 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3172
d50171e4 3173 if (debug_threads)
fa96cb38 3174 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3175
fa96cb38
PA
3176 /* Passing NULL_PTID as filter indicates we want all events to be
3177 left pending. Eventually this returns when there are no
3178 unwaited-for children left. */
3179 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3180 &wstat, __WALL);
3181 gdb_assert (ret == -1);
0d62e5e8 3182
0bfdf32f
GB
3183 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3184 current_thread = saved_thread;
0d62e5e8
DJ
3185 else
3186 {
3187 if (debug_threads)
87ce2a04 3188 debug_printf ("Previously current thread died.\n");
0d62e5e8 3189
bd99dc85
PA
3190 if (non_stop)
3191 {
3192 /* We can't change the current inferior behind GDB's back,
3193 otherwise, a subsequent command may apply to the wrong
3194 process. */
0bfdf32f 3195 current_thread = NULL;
bd99dc85
PA
3196 }
3197 else
3198 {
3199 /* Set a valid thread as current. */
0bfdf32f 3200 set_desired_thread (0);
bd99dc85 3201 }
0d62e5e8
DJ
3202 }
3203}
3204
fa593d66
PA
3205/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3206 move it out, because we need to report the stop event to GDB. For
3207 example, if the user puts a breakpoint in the jump pad, it's
3208 because she wants to debug it. */
3209
3210static int
3211stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3212{
d86d4aaf
DE
3213 struct thread_info *thread = (struct thread_info *) entry;
3214 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3215
3216 gdb_assert (lwp->suspended == 0);
3217 gdb_assert (lwp->stopped);
3218
3219 /* Allow debugging the jump pad, gdb_collect, etc.. */
3220 return (supports_fast_tracepoints ()
58b4daa5 3221 && agent_loaded_p ()
fa593d66
PA
3222 && (gdb_breakpoint_here (lwp->stop_pc)
3223 || lwp->stopped_by_watchpoint
3224 || thread->last_resume_kind == resume_step)
3225 && linux_fast_tracepoint_collecting (lwp, NULL));
3226}
3227
3228static void
3229move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3230{
d86d4aaf
DE
3231 struct thread_info *thread = (struct thread_info *) entry;
3232 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3233 int *wstat;
3234
3235 gdb_assert (lwp->suspended == 0);
3236 gdb_assert (lwp->stopped);
3237
3238 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3239
3240 /* Allow debugging the jump pad, gdb_collect, etc. */
3241 if (!gdb_breakpoint_here (lwp->stop_pc)
3242 && !lwp->stopped_by_watchpoint
3243 && thread->last_resume_kind != resume_step
3244 && maybe_move_out_of_jump_pad (lwp, wstat))
3245 {
3246 if (debug_threads)
87ce2a04 3247 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3248 lwpid_of (thread));
fa593d66
PA
3249
3250 if (wstat)
3251 {
3252 lwp->status_pending_p = 0;
3253 enqueue_one_deferred_signal (lwp, wstat);
3254
3255 if (debug_threads)
87ce2a04
DE
3256 debug_printf ("Signal %d for LWP %ld deferred "
3257 "(in jump pad)\n",
d86d4aaf 3258 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3259 }
3260
3261 linux_resume_one_lwp (lwp, 0, 0, NULL);
3262 }
3263 else
3264 lwp->suspended++;
3265}
3266
3267static int
3268lwp_running (struct inferior_list_entry *entry, void *data)
3269{
d86d4aaf
DE
3270 struct thread_info *thread = (struct thread_info *) entry;
3271 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3272
3273 if (lwp->dead)
3274 return 0;
3275 if (lwp->stopped)
3276 return 0;
3277 return 1;
3278}
3279
7984d532
PA
3280/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3281 If SUSPEND, then also increase the suspend count of every LWP,
3282 except EXCEPT. */
3283
0d62e5e8 3284static void
7984d532 3285stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3286{
bde24c0a
PA
3287 /* Should not be called recursively. */
3288 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3289
87ce2a04
DE
3290 if (debug_threads)
3291 {
3292 debug_enter ();
3293 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3294 suspend ? "stop-and-suspend" : "stop",
3295 except != NULL
d86d4aaf 3296 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3297 : "none");
3298 }
3299
bde24c0a
PA
3300 stopping_threads = (suspend
3301 ? STOPPING_AND_SUSPENDING_THREADS
3302 : STOPPING_THREADS);
7984d532
PA
3303
3304 if (suspend)
d86d4aaf 3305 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3306 else
d86d4aaf 3307 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3308 wait_for_sigstop ();
bde24c0a 3309 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3310
3311 if (debug_threads)
3312 {
3313 debug_printf ("stop_all_lwps done, setting stopping_threads "
3314 "back to !stopping\n");
3315 debug_exit ();
3316 }
0d62e5e8
DJ
3317}

/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int fast_tp_collecting;

  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue", signal,
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      debug_printf ("  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
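
/* Illustration, not part of the original file: the pending_signals
   list above is pushed at the head but consumed from the tail of the
   PREV chain, so signals are delivered in the order they were queued
   (FIFO).  A minimal sketch of that tail walk, assuming the same
   struct pending_signals layout; the helper is hypothetical and
   guarded out of the build.  */
#if 0
static int
example_oldest_pending_signal (struct lwp_info *lwp)
{
  struct pending_signals *p = lwp->pending_signals;

  if (p == NULL)
    return 0;

  /* Newest entries sit at the head; the oldest one, which
     linux_resume_one_lwp dequeues first, is at the end of the PREV
     chain.  */
  while (p->prev != NULL)
    p = p->prev;

  return p->signal;
}
#endif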

struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};

/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}

/* find_inferior callback for linux_resume.
   Set *FLAG_P if this lwp has an interesting status pending.  */

static int
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* LWPs which will not be resumed are not interesting, because
     we might not wait for them next time through linux_wait.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->status_pending_p)
    * (int *) flag_p = 1;

  return 0;
}

/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's
	 side and it evaluates to false, step over this breakpoint as
	 well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}

/* Start a step-over operation on LWP.  When LWP is stopped at a
   breakpoint, to make progress, we need to move the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}

/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	delete_reinsert_breakpoints ();

      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we make this decision based on whether
     *any* thread has a pending status.  If there's a thread that needs
     the step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
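
/* Illustration, not part of the original file: a vCont-style request
   expressed as the thread_resume array linux_resume consumes.  Per
   linux_set_resume_request, the leftmost matching entry wins for each
   thread, so specific ptids go first and the minus_one_ptid wildcard
   last.  The helper below is hypothetical and guarded out of the
   build.  */
#if 0
static void
example_step_one_continue_rest (ptid_t stepper)
{
  struct thread_resume req[2];

  /* The "vCont;s:<stepper>" part: single-step one thread.  */
  req[0].thread = stepper;
  req[0].kind = resume_step;
  req[0].sig = 0;
  req[0].step_range_start = req[0].step_range_end = 0;

  /* The ";c" part: continue everything else.  */
  req[1].thread = minus_one_ptid;
  req[1].kind = resume_continue;
  req[1].sig = 0;
  req[1].step_range_start = req[1].step_range_end = 0;

  linux_resume (req, 2);
}
#endif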

/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP %ld to remain stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp == except)
    return 0;

  lwp->suspended--;
  gdb_assert (lwp->suspended >= 0);

  return proceed_one_lwp (entry, except);
}

/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct thread_info *) find_inferior (&all_threads,
						need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  find_inferior (&all_threads, proceed_one_lwp, NULL);
}

/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
  else
    find_inferior (&all_threads, proceed_one_lwp, except);

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}

#ifdef HAVE_LINUX_REGSETS

#define use_linux_regsets 1

/* Returns true if REGSET has been disabled.  */

static int
regset_disabled (struct regsets_info *info, struct regset_info *regset)
{
  return (info->disabled_regsets != NULL
	  && info->disabled_regsets[regset - info->regsets]);
}

/* Disable REGSET.  */

static void
disable_regset (struct regsets_info *info, struct regset_info *regset)
{
  int dr_offset;

  dr_offset = regset - info->regsets;
  if (info->disabled_regsets == NULL)
    info->disabled_regsets = xcalloc (1, info->num_regsets);
  info->disabled_regsets[dr_offset] = 1;
}

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (current_thread);
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (current_thread);
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	      free (buf);
	      continue;
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

#else /* !HAVE_LINUX_REGSETS */

#define use_linux_regsets 0
#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
#define regsets_store_inferior_registers(regsets_info, regcache) 1

#endif

/* Return 1 if register REGNO is supported by one of the regset ptrace
   calls or 0 if it has to be transferred individually.  */

static int
linux_register_in_regsets (const struct regs_info *regs_info, int regno)
{
  unsigned char mask = 1 << (regno % 8);
  size_t index = regno / 8;

  return (use_linux_regsets
	  && (regs_info->regset_bitmap == NULL
	      || (regs_info->regset_bitmap[index] & mask) != 0));
}
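
/* Illustration, not part of the original file: the bitmap test above
   packs one bit per register, eight to a byte.  For regno == 10,
   index == 10 / 8 == 1 and mask == 1 << (10 % 8) == 0x04, i.e. bit 2
   of byte 1 of regset_bitmap.  The standalone helper below repeats
   the same arithmetic and is guarded out of the build.  */
#if 0
static int
example_regset_bitmap_test (const unsigned char *bitmap, int regno)
{
  return (bitmap[regno / 8] & (1 << (regno % 8))) != 0;
}
#endif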

#ifdef HAVE_LINUX_USRREGS

int
register_addr (const struct usrregs_info *usrregs, int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= usrregs->num_regs)
    error ("Invalid register number %d.", regnum);

  addr = usrregs->regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}

/* Store one register.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}

/* Fetch all registers, or just one, from the child process.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been retrieved by regsets_fetch_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_fetch_inferior_registers (const struct regs_info *regs_info,
			      struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been saved by regsets_store_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (const struct regs_info *regs_info,
			      struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
}

#else /* !HAVE_LINUX_USRREGS */

#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)

#endif

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}

/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
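
/* Illustration, not part of the original file: the rounding used
   above, worked through.  With an 8-byte PTRACE_XFER_TYPE, a request
   for len == 5 bytes at memaddr == 0x1003 rounds the start down to
   addr == 0x1000 and needs count == ((0x1008 - 0x1000) + 7) / 8 == 1
   peeked word; the memcpy then skips the low 0x1003 & 7 == 3 bytes of
   the buffer.  The hypothetical helper below just factors out that
   arithmetic and is guarded out of the build.  */
#if 0
static void
example_peek_rounding (CORE_ADDR memaddr, int len,
		       CORE_ADDR *addr_out, int *count_out)
{
  /* Same expressions as linux_read_memory above.  */
  *addr_out = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  *count_out = ((((memaddr + len) - *addr_out)
		 + sizeof (PTRACE_XFER_TYPE) - 1)
		/ sizeof (PTRACE_XFER_TYPE));
}
#endif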

/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
    alloca (count * sizeof (PTRACE_XFER_TYPE));

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
	val = val & 0xff;
      else if (len == 2)
	val = val & 0xffff;
      else if (len == 3)
	val = val & 0xffffff;
      debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
		    val, (long) memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      int lwpid;

      lwpid = lwpid_of (current_thread);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_thread);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
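
/* Illustration, not part of the original file: /proc/PID/auxv is a
   flat array of { a_type, a_val } pairs the size of the inferior's
   longs.  A sketch of scanning it through linux_read_auxv, assuming a
   64-bit inferior (pairs of 8-byte words) and an AT_* constant from
   <elf.h>; the helper is hypothetical and guarded out of the
   build.  */
#if 0
static unsigned long
example_find_auxv_entry (unsigned long type)
{
  unsigned char buf[2 * sizeof (unsigned long)];
  CORE_ADDR offset = 0;

  while (linux_read_auxv (offset, buf, sizeof buf) == sizeof buf)
    {
      unsigned long *entry = (unsigned long *) buf;

      if (entry[0] == type)	/* e.g. AT_PHDR */
	return entry[1];
      offset += sizeof buf;
    }
  return 0;
}
#endif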

/* These breakpoint and watchpoint related wrapper functions simply
   pass on the function call if the target has registered a
   corresponding function.  */

static int
linux_supports_z_point_type (char z_type)
{
  return (the_low_target.supports_z_point_type != NULL
	  && the_low_target.supports_z_point_type (z_type));
}

static int
linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, struct raw_breakpoint *bp)
{
  if (the_low_target.insert_point != NULL)
    return the_low_target.insert_point (type, addr, size, bp);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, struct raw_breakpoint *bp)
{
  if (the_low_target.remove_point != NULL)
    return the_low_target.remove_point (type, addr, size, bp);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_by_watchpoint;
}

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}

#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
d0722149
DE
4986/* Convert a native/host siginfo object, into/from the siginfo in the
4987 layout of the inferiors' architecture. */
4988
4989static void
a5362b9a 4990siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
4991{
4992 int done = 0;
4993
4994 if (the_low_target.siginfo_fixup != NULL)
4995 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4996
4997 /* If there was no callback, or the callback didn't do anything,
4998 then just do a straight memcpy. */
4999 if (!done)
5000 {
5001 if (direction == 1)
a5362b9a 5002 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5003 else
a5362b9a 5004 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5005 }
5006}
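
/* Note on DIRECTION, as inferred from the memcpy fallback above and
   the caller below: direction 0 converts the native SIGINFO into the
   inferior-layout buffer INF_SIGINFO (used when reading siginfo out to
   GDB), while direction 1 converts INF_SIGINFO back into the native
   SIGINFO (used before flushing it with PTRACE_SETSIGINFO).  */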

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
                  readbuf != NULL ? "Reading" : "Writing",
                  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
        return -1;
    }

  return len;
}

/* SIGCHLD handler that serves two purposes: in non-stop/async mode it
   lets us notice when children change state, and it acts as the
   handler for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
        {
          /* fprintf is not async-signal-safe, so call write
             directly.  */
          if (write (2, "sigchld_handler\n",
                     sizeof ("sigchld_handler\n") - 1) < 0)
            break; /* just ignore */
        } while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
                  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
        {
          if (pipe (linux_event_pipe) == -1)
            {
              linux_event_pipe[0] = -1;
              linux_event_pipe[1] = -1;
              sigprocmask (SIG_UNBLOCK, &mask, NULL);

              warning ("creating event pipe failed.");
              return previous;
            }

          fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

          /* Register the event loop handler.  */
          add_file_handler (linux_event_pipe[0],
                            handle_target_event, NULL);

          /* Always trigger a linux_wait.  */
          async_file_mark ();
        }
      else
        {
          delete_file_handler (linux_event_pipe[0]);

          close (linux_event_pipe[0]);
          close (linux_event_pipe[1]);
          linux_event_pipe[0] = -1;
          linux_event_pipe[1] = -1;
        }

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
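
/* Sketch of the async flow implied above: sigchld_handler calls
   async_file_mark, which (as defined elsewhere in this file) makes the
   read end of linux_event_pipe readable, so the event loop invokes
   handle_target_event.  Both pipe ends are O_NONBLOCK so that marking
   the pipe from a signal handler can never block.  */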

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

static int
linux_supports_agent (void)
{
  return 1;
}

static int
linux_supports_range_stepping (void)
{
  if (*the_low_target.supports_range_stepping == NULL)
    return 0;

  return (*the_low_target.supports_range_stepping) ();
}

/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
        continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
        continue;
      if (!S_ISDIR (st.st_mode))
        continue;

      if (statfs (path, &stfs) != 0)
        continue;
      if (stfs.f_type != SPUFS_MAGIC)
        continue;

      if (pos >= offset && pos + 4 <= offset + len)
        {
          *(unsigned int *)(buf + pos - offset) = fd;
          written += 4;
        }
      pos += 4;
    }

  closedir (dir);
  return written;
}
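
/* Worked example of the OFFSET/LEN window above, with hypothetical
   values: if three spufs fds are found, they occupy positions 0, 4 and
   8 in the ID stream.  A request with offset = 4 and len = 8 copies
   the second fd to buf[0..3] and the third to buf[4..7], and returns
   written = 8.  */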

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
                 unsigned const char *writebuf,
                 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_thread);
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
        return -1;
      else
        return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP PT_GETDSBT
#  define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}

static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}

static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}

static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}

/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
                               CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
         && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
        {
          Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

          switch (aux->a_type)
            {
            case AT_PHDR:
              *phdr_memaddr = aux->a_un.a_val;
              break;
            case AT_PHNUM:
              *num_phdr = aux->a_un.a_val;
              break;
            }
        }
      else
        {
          Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

          switch (aux->a_type)
            {
            case AT_PHDR:
              *phdr_memaddr = aux->a_un.a_val;
              break;
            case AT_PHNUM:
              *num_phdr = aux->a_un.a_val;
              break;
            }
        }
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
               "phdr_memaddr = %ld, phdr_num = %d",
               (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
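
/* Each auxv entry is a (type, value) pair: AT_PHDR holds the address
   of the program headers in the inferior's address space, and AT_PHNUM
   their count, so an entry { AT_PHNUM, 9 } (hypothetical value) makes
   *num_phdr 9.  The vector ends with an AT_NULL entry, which simply
   lets the read loop above run to EOF if either value is missing.  */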

/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
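  /* For example (hypothetical values): a PIE executable whose PT_PHDR
     records p_vaddr = 0x40, but whose program headers were observed at
     phdr_memaddr = 0x7f0000000040 via auxv, has been relocated by
     0x7f0000000040 - 0x40 = 0x7f0000000000; that same displacement is
     applied to PT_DYNAMIC's p_vaddr below.  */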
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
        Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
        Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.
         Fortunately, all real world executables, including PIE
         executables, always have PT_PHDR present.  PT_PHDR is not
         present in some shared libraries or in fpc (Free Pascal 2.4)
         binaries, but neither of those needs or provides DT_DEBUG
         anyway (fpc binaries are statically linked).

         Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.

         GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
        {
          Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
      else
        {
          Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
    }

  return 0;
}

/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
        {
          Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf64_Xword map;
              unsigned char buf[sizeof (Elf64_Xword)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif  /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }
      else
        {
          Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf32_Word map;
              unsigned char buf[sizeof (Elf32_Word)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif  /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }

      dynamic_memaddr += dyn_size;
    }

  return map;
}

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
        *ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
        *ptr = addr.ui;
      else
        gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
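
/* Why the union matters, sketched with hypothetical sizes: with a
   4-byte inferior pointer and an 8-byte CORE_ADDR, linux_read_memory
   fills only the first four bytes of the union.  Reading them back
   through addr.ui reinterprets exactly those four bytes as a
   host-byte-order unsigned int, which is correct on both little and
   big endian hosts, whereas reading addr.core_addr would pick up the
   bytes in the wrong position on a big endian host.  */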

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
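
/* For instance, with the 32-bit offsets used below, r_debug.r_map
   lives 4 bytes into struct r_debug, and walking the chain reads
   link_map's l_next at offset 12 from each lm address; the 64-bit
   table simply doubles every offset, since all the fields involved
   (apart from r_version) are pointer-sized.  */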

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
        break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
        addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
        addrp = &lm_prev;
      else
        {
          annex = strchr (sep, ';');
          if (annex == NULL)
            break;
          annex++;
          continue;
        }

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }
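
  /* So an annex such as "start=dead0000;prev=0" (hypothetical form and
     addresses) resumes the walk at link_map 0xdead0000 with an
     expected previous entry of 0, while unrecognized "key=value;"
     pairs are simply skipped.  */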

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
        priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  That situation will not change
         for this inferior, so do not retry.  Report it to GDB as E01;
         see GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
        return -1;

      if (priv->r_debug != 0)
        {
          if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                                 (unsigned char *) &r_version,
                                 sizeof (r_version)) != 0
              || r_version != 1)
            {
              warning ("unexpected r_debug version %d", r_version);
            }
          else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                                 &lm_addr, ptr_size) != 0)
            {
              warning ("unable to read r_map from 0x%lx",
                       (long) priv->r_debug + lmo->r_map_offset);
            }
        }
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
         && read_one_ptr (lm_addr + lmo->l_name_offset,
                          &l_name, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_addr_offset,
                          &l_addr, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_ld_offset,
                          &l_ld, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_prev_offset,
                          &l_prev, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_next_offset,
                          &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
        {
          warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                   (long) lm_prev, (long) l_prev);
          break;
        }

      /* Ignore the first entry even if it has a valid name, as the
         first entry corresponds to the main executable.  The first
         entry should not be skipped if the dynamic loader was loaded
         late by a static executable (see solib-svr4.c parameter
         ignore_first).  But in that case the main executable does not
         have PT_DYNAMIC present, and this function has already exited
         above due to a failed get_r_debug.  */
      if (lm_prev == 0)
        {
          sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
          p = p + strlen (p);
        }
      else
        {
          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
        }

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
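
/* Shape of the reply assembled above, with hypothetical addresses:

   <library-list-svr4 version="1.0" main-lm="0x400000">
     <library name="/lib/libc.so.6" lm="0x7f0000001000"
              l_addr="0x7f0000000000" l_ld="0x7f00001b0e80"/>
   </library-list-svr4>

   (The real document has no extra whitespace between elements.)  */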

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
        buffer_grow_str0 (buffer, "E.Overflow.");
      else
        buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
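
/* Example of the document built above, with hypothetical addresses:

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400500" end="0x400520"/>
   <block begin="0x400530" end="0x400550"/>
   </btrace>  */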
#endif /* HAVE_LINUX_BTRACE */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
  /* Count the regsets: the array is terminated by a sentinel entry
     with a negative size.  */
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}