]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
linux-nat.c: better starvation avoidance, handle non-stop mode too
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
da6d8c04 23
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
8bdce1ff 26#include "gdb_wait.h"
da6d8c04 27#include <sys/ptrace.h>
125f8a3d
GB
28#include "nat/linux-ptrace.h"
29#include "nat/linux-procfs.h"
da6d8c04
DJ
30#include <signal.h>
31#include <sys/ioctl.h>
32#include <fcntl.h>
0a30fbc4 33#include <unistd.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
53ce3c39 40#include <sys/stat.h>
efcbbd14 41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
602e3198 43#include "filestuff.h"
c144c7a0 44#include "tracepoint.h"
533b0600 45#include "hostio.h"
957f3f49
DE
46#ifndef ELFMAG0
47/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51#include <elf.h>
52#endif
efcbbd14
UW
53
54#ifndef SPUFS_MAGIC
55#define SPUFS_MAGIC 0x23c9b64e
56#endif
da6d8c04 57
03583c20
UW
58#ifdef HAVE_PERSONALITY
59# include <sys/personality.h>
60# if !HAVE_DECL_ADDR_NO_RANDOMIZE
61# define ADDR_NO_RANDOMIZE 0x0040000
62# endif
63#endif
64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
68
ec8ebe72
DE
69#ifndef W_STOPCODE
70#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71#endif
72
1a981360
PA
73/* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75#ifndef __SIGRTMIN
76#define __SIGRTMIN 32
77#endif
78
db0dfaa0
LM
79/* Some targets did not define these ptrace constants from the start,
80 so gdbserver defines them locally here. In the future, these may
81 be removed after they are added to asm/ptrace.h. */
82#if !(defined(PT_TEXT_ADDR) \
83 || defined(PT_DATA_ADDR) \
84 || defined(PT_TEXT_END_ADDR))
85#if defined(__mcoldfire__)
86/* These are still undefined in 3.10 kernels. */
87#define PT_TEXT_ADDR 49*4
88#define PT_DATA_ADDR 50*4
89#define PT_TEXT_END_ADDR 51*4
90/* BFIN already defines these since at least 2.6.32 kernels. */
91#elif defined(BFIN)
92#define PT_TEXT_ADDR 220
93#define PT_TEXT_END_ADDR 224
94#define PT_DATA_ADDR 228
95/* These are still undefined in 3.10 kernels. */
96#elif defined(__TMS320C6X__)
97#define PT_TEXT_ADDR (0x10000*4)
98#define PT_DATA_ADDR (0x10004*4)
99#define PT_TEXT_END_ADDR (0x10008*4)
100#endif
101#endif
102
9accd112 103#ifdef HAVE_LINUX_BTRACE
125f8a3d 104# include "nat/linux-btrace.h"
9accd112
MM
105#endif
106
8365dcf5
TJB
107#ifndef HAVE_ELF32_AUXV_T
108/* Copied from glibc's elf.h. */
109typedef struct
110{
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119} Elf32_auxv_t;
120#endif
121
122#ifndef HAVE_ELF64_AUXV_T
123/* Copied from glibc's elf.h. */
124typedef struct
125{
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134} Elf64_auxv_t;
135#endif
136
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stop events not yet claimed by any tracked
   process; entries are consumed by handle_extended_wait via
   pull_pid_from_list.  */
struct simple_pid_list *stopped_pids;
153
154/* Trivial list manipulation functions to keep track of a list of new
155 stopped processes. */
156
157static void
158add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
159{
160 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
161
162 new_pid->pid = pid;
163 new_pid->status = status;
164 new_pid->next = *listp;
165 *listp = new_pid;
166}
167
168static int
169pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
170{
171 struct simple_pid_list **p;
172
173 for (p = listp; *p != NULL; p = &(*p)->next)
174 if ((*p)->pid == pid)
175 {
176 struct simple_pid_list *next = (*p)->next;
177
178 *statusp = (*p)->status;
179 xfree (*p);
180 *p = next;
181 return 1;
182 }
183 return 0;
184}
24a09b5f 185
bde24c0a
PA
/* The possible states of the "stop all LWPs" machinery; see
   stop_all_lwps.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
200
201/* FIXME make into a target method? */
24a09b5f 202int using_threads = 1;
24a09b5f 203
fa593d66
PA
204/* True if we're presently stabilizing threads (moving them out of
205 jump pads). */
206static int stabilizing_threads;
207
2acc282a 208static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 209 int step, int signal, siginfo_t *info);
2bd7c093 210static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
211static void stop_all_lwps (int suspend, struct lwp_info *except);
212static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
213static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
214 int *wstat, int options);
95954743 215static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 216static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 217static int linux_stopped_by_watchpoint (void);
95954743 218static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 219static void proceed_all_lwps (void);
d50171e4
PA
220static int finish_step_over (struct lwp_info *lwp);
221static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
222static int kill_lwp (unsigned long lwpid, int signo);
223
224/* True if the low target can hardware single-step. Such targets
225 don't need a BREAKPOINT_REINSERT_ADDR callback. */
226
227static int
228can_hardware_single_step (void)
229{
230 return (the_low_target.breakpoint_reinsert_addr == NULL);
231}
232
233/* True if the low target supports memory breakpoints. If so, we'll
234 have a GET_PC implementation. */
235
236static int
237supports_breakpoints (void)
238{
239 return (the_low_target.get_pc != NULL);
240}
0d62e5e8 241
fa593d66
PA
242/* Returns true if this target can support fast tracepoints. This
243 does not mean that the in-process agent has been loaded in the
244 inferior. */
245
246static int
247supports_fast_tracepoints (void)
248{
249 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
250}
251
c2d6af84
PA
252/* True if LWP is stopped in its stepping range. */
253
254static int
255lwp_in_step_range (struct lwp_info *lwp)
256{
257 CORE_ADDR pc = lwp->stop_pc;
258
259 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
260}
261
0d62e5e8
DJ
/* One signal intercepted from an inferior thread, queued so it is
   not lost (NOTE(review): the code that queues and re-delivers these
   entries is outside this chunk — confirm against the resume
   path).  */

struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The associated siginfo.  */
  siginfo_t info;
  /* The previously queued pending signal.  */
  struct pending_signals *prev;
};
611cb4a5 268
bd99dc85
PA
269/* The read/write ends of the pipe registered as waitable file in the
270 event loop. */
271static int linux_event_pipe[2] = { -1, -1 };
272
273/* True if we're currently in async mode. */
274#define target_is_async_p() (linux_event_pipe[0] != -1)
275
02fc4de7 276static void send_sigstop (struct lwp_info *lwp);
fa96cb38 277static void wait_for_sigstop (void);
bd99dc85 278
d0722149
DE
/* Examine HEADER.  If it carries a valid ELF magic, store the ELF
   machine in *MACHINE and return 1 when the file class is ELFCLASS64,
   0 otherwise.  If the magic bytes do not match, store EM_NONE in
   *MACHINE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
296
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int result;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* A file shorter than an ELF header cannot be a 64-bit ELF.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header, machine);
  else
    result = 0;

  close (fd);
  return result;
}
320
be07f1a2
PA
321/* Accepts an integer PID; Returns true if the executable PID is
322 running is a 64-bit ELF file.. */
323
324int
214d508e 325linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 326{
d8d2a3ee 327 char file[PATH_MAX];
be07f1a2
PA
328
329 sprintf (file, "/proc/%d/exe", pid);
214d508e 330 return elf_64_file_p (file, machine);
be07f1a2
PA
331}
332
bd99dc85
PA
333static void
334delete_lwp (struct lwp_info *lwp)
335{
fa96cb38
PA
336 struct thread_info *thr = get_lwp_thread (lwp);
337
338 if (debug_threads)
339 debug_printf ("deleting %ld\n", lwpid_of (thr));
340
341 remove_thread (thr);
aa5ca48f 342 free (lwp->arch_private);
bd99dc85
PA
343 free (lwp);
344}
345
95954743
PA
346/* Add a process to the common process list, and set its private
347 data. */
348
349static struct process_info *
350linux_add_process (int pid, int attached)
351{
352 struct process_info *proc;
353
95954743
PA
354 proc = add_process (pid, attached);
355 proc->private = xcalloc (1, sizeof (*proc->private));
356
3aee8918
PA
357 /* Set the arch when the first LWP stops. */
358 proc->private->new_inferior = 1;
359
aa5ca48f
DE
360 if (the_low_target.new_process != NULL)
361 proc->private->arch_private = the_low_target.new_process ();
362
95954743
PA
363 return proc;
364}
365
bd99dc85
PA
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).

   EVENT_CHILD is the LWP that reported the event with wait status
   WSTAT; it is always resumed again before returning.  Events other
   than PTRACE_EVENT_CLONE are ignored here.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* The event message holds the LWP id of the new clone child.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	  /* NOTE(review): in the two warning cases above we fall
	     through with a STATUS that may not describe NEW_PID's
	     stop — confirm whether those paths can occur in
	     practice.  */
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  /* Got the expected SIGSTOP.  If a full stop is in progress,
	     record the child's stop PC and leave it stopped; otherwise
	     resume it immediately.  */
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  /* Some other signal arrived first; the attach SIGSTOP is
	     still pending and will be collected later.  */
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
453
d50171e4
PA
454/* Return the PC as read from the regcache of LWP, without any
455 adjustment. */
456
457static CORE_ADDR
458get_pc (struct lwp_info *lwp)
459{
0bfdf32f 460 struct thread_info *saved_thread;
d50171e4
PA
461 struct regcache *regcache;
462 CORE_ADDR pc;
463
464 if (the_low_target.get_pc == NULL)
465 return 0;
466
0bfdf32f
GB
467 saved_thread = current_thread;
468 current_thread = get_lwp_thread (lwp);
d50171e4 469
0bfdf32f 470 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
471 pc = (*the_low_target.get_pc) (regcache);
472
473 if (debug_threads)
87ce2a04 474 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 475
0bfdf32f 476 current_thread = saved_thread;
d50171e4
PA
477 return pc;
478}
479
480/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
481 The SIGTRAP could mean several things.
482
483 On i386, where decr_pc_after_break is non-zero:
484 If we were single-stepping this process using PTRACE_SINGLESTEP,
485 we will get only the one SIGTRAP (even if the instruction we
486 stepped over was a breakpoint). The value of $eip will be the
487 next instruction.
488 If we continue the process using PTRACE_CONT, we will get a
489 SIGTRAP when we hit a breakpoint. The value of $eip will be
490 the instruction after the breakpoint (i.e. needs to be
491 decremented). If we report the SIGTRAP to GDB, we must also
492 report the undecremented PC. If we cancel the SIGTRAP, we
493 must resume at the decremented PC.
494
495 (Presumably, not yet tested) On a non-decr_pc_after_break machine
496 with hardware or kernel single-step:
497 If we single-step over a breakpoint instruction, our PC will
498 point at the following instruction. If we continue and hit a
499 breakpoint instruction, our PC will point at the breakpoint
500 instruction. */
501
502static CORE_ADDR
d50171e4 503get_stop_pc (struct lwp_info *lwp)
0d62e5e8 504{
d50171e4
PA
505 CORE_ADDR stop_pc;
506
507 if (the_low_target.get_pc == NULL)
508 return 0;
0d62e5e8 509
d50171e4
PA
510 stop_pc = get_pc (lwp);
511
bdabb078
PA
512 if (WSTOPSIG (lwp->last_status) == SIGTRAP
513 && !lwp->stepping
514 && !lwp->stopped_by_watchpoint
89a5711c 515 && !linux_is_extended_waitstatus (lwp->last_status))
47c0c975
DE
516 stop_pc -= the_low_target.decr_pc_after_break;
517
518 if (debug_threads)
87ce2a04 519 debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);
47c0c975
DE
520
521 return stop_pc;
0d62e5e8 522}
ce3a066d 523
b3312d80 524static struct lwp_info *
95954743 525add_lwp (ptid_t ptid)
611cb4a5 526{
54a0b537 527 struct lwp_info *lwp;
0d62e5e8 528
54a0b537
PA
529 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
530 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 531
aa5ca48f
DE
532 if (the_low_target.new_thread != NULL)
533 lwp->arch_private = the_low_target.new_thread ();
534
f7667f0d 535 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 536
54a0b537 537 return lwp;
0d62e5e8 538}
611cb4a5 539
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.

   The child is started under PTRACE_TRACEME, so its first exec
   reports a stop to us.  On fork failure, perror_with_name does not
   return.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  /* Optionally disable address space randomization for the child by
     setting ADDR_NO_RANDOMIZE in our own personality; the setting is
     inherited across fork/exec and restored below.  */
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

  /* MMU-less uClibc targets have no fork; use vfork there.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child.  Don't leak gdbserver's descriptors into the inferior,
	 and ask to be traced by our parent.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      /* If PROGRAM wasn't found as given, retry with a PATH search.  */
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  /* Parent: undo the personality tweak made above.  */
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* The ptrace options still need to be set once the LWP stops.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
633
8784d563
PA
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  PTID's lwp field identifies the LWP to PTRACE_ATTACH to;
   an lwp_info/thread entry is added for it, marked as expecting the
   attach SIGSTOP.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
718
8784d563
PA
719/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
720 already attached. Returns true if a new LWP is found, false
721 otherwise. */
722
723static int
724attach_proc_task_lwp_callback (ptid_t ptid)
725{
726 /* Is this a new thread? */
727 if (find_thread_ptid (ptid) == NULL)
728 {
729 int lwpid = ptid_get_lwp (ptid);
730 int err;
731
732 if (debug_threads)
733 debug_printf ("Found new lwp %d\n", lwpid);
734
735 err = linux_attach_lwp (ptid);
736
737 /* Be quiet if we simply raced with the thread exiting. EPERM
738 is returned if the thread's task still exists, and is marked
739 as exited or zombie, as well as other conditions, so in that
740 case, confirm the status in /proc/PID/status. */
741 if (err == ESRCH
742 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
743 {
744 if (debug_threads)
745 {
746 debug_printf ("Cannot attach to lwp %d: "
747 "thread is gone (%d: %s)\n",
748 lwpid, err, strerror (err));
749 }
750 }
751 else if (err != 0)
752 {
753 warning (_("Cannot attach to lwp %d: %s"),
754 lwpid,
755 linux_ptrace_attach_fail_reason_string (ptid, err));
756 }
757
758 return 1;
759 }
760 return 0;
761}
762
e3deef73
LM
763/* Attach to PID. If PID is the tgid, attach to it and all
764 of its threads. */
765
c52daf70 766static int
a1928bad 767linux_attach (unsigned long pid)
0d62e5e8 768{
7ae1a6a6
PA
769 ptid_t ptid = ptid_build (pid, pid, 0);
770 int err;
771
e3deef73
LM
772 /* Attach to PID. We will check for other threads
773 soon. */
7ae1a6a6
PA
774 err = linux_attach_lwp (ptid);
775 if (err != 0)
776 error ("Cannot attach to process %ld: %s",
8784d563 777 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
7ae1a6a6 778
95954743 779 linux_add_process (pid, 1);
0d62e5e8 780
bd99dc85
PA
781 if (!non_stop)
782 {
8336d594
PA
783 struct thread_info *thread;
784
785 /* Don't ignore the initial SIGSTOP if we just attached to this
786 process. It will be collected by wait shortly. */
787 thread = find_thread_ptid (ptid_build (pid, pid, 0));
788 thread->last_resume_kind = resume_stop;
bd99dc85 789 }
0d62e5e8 790
8784d563
PA
791 /* We must attach to every LWP. If /proc is mounted, use that to
792 find them now. On the one hand, the inferior may be using raw
793 clone instead of using pthreads. On the other hand, even if it
794 is using pthreads, GDB may not be connected yet (thread_db needs
795 to do symbol lookups, through qSymbol). Also, thread_db walks
796 structures in the inferior's address space to find the list of
797 threads/LWPs, and those structures may well be corrupted. Note
798 that once thread_db is loaded, we'll still use it to list threads
799 and associate pthread info with each LWP. */
800 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
95954743
PA
801 return 0;
802}
803
804struct counter
805{
806 int pid;
807 int count;
808};
809
810static int
811second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
812{
813 struct counter *counter = args;
814
815 if (ptid_get_pid (entry->id) == counter->pid)
816 {
817 if (++counter->count > 1)
818 return 1;
819 }
d61ddec4 820
da6d8c04
DJ
821 return 0;
822}
823
95954743 824static int
fa96cb38 825last_thread_of_process_p (int pid)
95954743 826{
95954743 827 struct counter counter = { pid , 0 };
da6d8c04 828
95954743
PA
829 return (find_inferior (&all_threads,
830 second_thread_of_pid_p, &counter) == NULL);
831}
832
da84f473
PA
833/* Kill LWP. */
834
835static void
836linux_kill_one_lwp (struct lwp_info *lwp)
837{
d86d4aaf
DE
838 struct thread_info *thr = get_lwp_thread (lwp);
839 int pid = lwpid_of (thr);
da84f473
PA
840
841 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
842 there is no signal context, and ptrace(PTRACE_KILL) (or
843 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
844 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
845 alternative is to kill with SIGKILL. We only need one SIGKILL
846 per process, not one for each thread. But since we still support
847 linuxthreads, and we also support debugging programs using raw
848 clone without CLONE_THREAD, we send one for each thread. For
849 years, we used PTRACE_KILL only, so we're being a bit paranoid
850 about some old kernels where PTRACE_KILL might work better
851 (dubious if there are any such, but that's why it's paranoia), so
852 we try SIGKILL first, PTRACE_KILL second, and so we're fine
853 everywhere. */
854
855 errno = 0;
69ff6be5 856 kill_lwp (pid, SIGKILL);
da84f473 857 if (debug_threads)
ce9e3fe7
PA
858 {
859 int save_errno = errno;
860
861 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
862 target_pid_to_str (ptid_of (thr)),
863 save_errno ? strerror (save_errno) : "OK");
864 }
da84f473
PA
865
866 errno = 0;
b8e1b30e 867 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 868 if (debug_threads)
ce9e3fe7
PA
869 {
870 int save_errno = errno;
871
872 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
873 target_pid_to_str (ptid_of (thr)),
874 save_errno ? strerror (save_errno) : "OK");
875 }
da84f473
PA
876}
877
e76126e8
PA
878/* Kill LWP and wait for it to die. */
879
880static void
881kill_wait_lwp (struct lwp_info *lwp)
882{
883 struct thread_info *thr = get_lwp_thread (lwp);
884 int pid = ptid_get_pid (ptid_of (thr));
885 int lwpid = ptid_get_lwp (ptid_of (thr));
886 int wstat;
887 int res;
888
889 if (debug_threads)
890 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
891
892 do
893 {
894 linux_kill_one_lwp (lwp);
895
896 /* Make sure it died. Notes:
897
898 - The loop is most likely unnecessary.
899
900 - We don't use linux_wait_for_event as that could delete lwps
901 while we're iterating over them. We're not interested in
902 any pending status at this point, only in making sure all
903 wait status on the kernel side are collected until the
904 process is reaped.
905
906 - We don't use __WALL here as the __WALL emulation relies on
907 SIGCHLD, and killing a stopped process doesn't generate
908 one, nor an exit status.
909 */
910 res = my_waitpid (lwpid, &wstat, 0);
911 if (res == -1 && errno == ECHILD)
912 res = my_waitpid (lwpid, &wstat, __WCLONE);
913 } while (res > 0 && WIFSTOPPED (wstat));
914
915 gdb_assert (res > 0);
916}
917
da84f473
PA
918/* Callback for `find_inferior'. Kills an lwp of a given process,
919 except the leader. */
95954743
PA
920
921static int
da84f473 922kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 923{
0d62e5e8 924 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 925 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
926 int pid = * (int *) args;
927
928 if (ptid_get_pid (entry->id) != pid)
929 return 0;
0d62e5e8 930
fd500816
DJ
931 /* We avoid killing the first thread here, because of a Linux kernel (at
932 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
933 the children get a chance to be reaped, it will remain a zombie
934 forever. */
95954743 935
d86d4aaf 936 if (lwpid_of (thread) == pid)
95954743
PA
937 {
938 if (debug_threads)
87ce2a04
DE
939 debug_printf ("lkop: is last of process %s\n",
940 target_pid_to_str (entry->id));
95954743
PA
941 return 0;
942 }
fd500816 943
e76126e8 944 kill_wait_lwp (lwp);
95954743 945 return 0;
da6d8c04
DJ
946}
947
95954743
PA
948static int
949linux_kill (int pid)
0d62e5e8 950{
95954743 951 struct process_info *process;
54a0b537 952 struct lwp_info *lwp;
fd500816 953
95954743
PA
954 process = find_process_pid (pid);
955 if (process == NULL)
956 return -1;
9d606399 957
f9e39928
PA
958 /* If we're killing a running inferior, make sure it is stopped
959 first, as PTRACE_KILL will not work otherwise. */
7984d532 960 stop_all_lwps (0, NULL);
f9e39928 961
da84f473 962 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 963
54a0b537 964 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 965 thread in the list, so do so now. */
95954743 966 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 967
784867a5 968 if (lwp == NULL)
fd500816 969 {
784867a5 970 if (debug_threads)
d86d4aaf
DE
971 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
972 pid);
784867a5
JK
973 }
974 else
e76126e8 975 kill_wait_lwp (lwp);
2d717e4f 976
8336d594 977 the_target->mourn (process);
f9e39928
PA
978
979 /* Since we presently can only stop all lwps of all processes, we
980 need to unstop lwps of other processes. */
7984d532 981 unstop_all_lwps (0, NULL);
95954743 982 return 0;
0d62e5e8
DJ
983}
984
9b224c5e
PA
985/* Get pending signal of THREAD, for detaching purposes. This is the
986 signal the thread last stopped for, which we need to deliver to the
987 thread when detaching, otherwise, it'd be suppressed/lost. */
988
989static int
990get_detach_signal (struct thread_info *thread)
991{
a493e3e2 992 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
993 int status;
994 struct lwp_info *lp = get_thread_lwp (thread);
995
996 if (lp->status_pending_p)
997 status = lp->status_pending;
998 else
999 {
1000 /* If the thread had been suspended by gdbserver, and it stopped
1001 cleanly, then it'll have stopped with SIGSTOP. But we don't
1002 want to deliver that SIGSTOP. */
1003 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
a493e3e2 1004 || thread->last_status.value.sig == GDB_SIGNAL_0)
9b224c5e
PA
1005 return 0;
1006
1007 /* Otherwise, we may need to deliver the signal we
1008 intercepted. */
1009 status = lp->last_status;
1010 }
1011
1012 if (!WIFSTOPPED (status))
1013 {
1014 if (debug_threads)
87ce2a04 1015 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1016 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1017 return 0;
1018 }
1019
1020 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1021 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1022 {
1023 if (debug_threads)
87ce2a04
DE
1024 debug_printf ("GPS: lwp %s had stopped with extended "
1025 "status: no pending signal\n",
d86d4aaf 1026 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1027 return 0;
1028 }
1029
2ea28649 1030 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e
PA
1031
1032 if (program_signals_p && !program_signals[signo])
1033 {
1034 if (debug_threads)
87ce2a04 1035 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1036 target_pid_to_str (ptid_of (thread)),
87ce2a04 1037 gdb_signal_to_string (signo));
9b224c5e
PA
1038 return 0;
1039 }
1040 else if (!program_signals_p
1041 /* If we have no way to know which signals GDB does not
1042 want to have passed to the program, assume
1043 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1044 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1045 {
1046 if (debug_threads)
87ce2a04
DE
1047 debug_printf ("GPS: lwp %s had signal %s, "
1048 "but we don't know if we should pass it. "
1049 "Default to not.\n",
d86d4aaf 1050 target_pid_to_str (ptid_of (thread)),
87ce2a04 1051 gdb_signal_to_string (signo));
9b224c5e
PA
1052 return 0;
1053 }
1054 else
1055 {
1056 if (debug_threads)
87ce2a04 1057 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1058 target_pid_to_str (ptid_of (thread)),
87ce2a04 1059 gdb_signal_to_string (signo));
9b224c5e
PA
1060
1061 return WSTOPSIG (status);
1062 }
1063}
1064
95954743
PA
1065static int
1066linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
1067{
1068 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1069 struct lwp_info *lwp = get_thread_lwp (thread);
95954743 1070 int pid = * (int *) args;
9b224c5e 1071 int sig;
95954743
PA
1072
1073 if (ptid_get_pid (entry->id) != pid)
1074 return 0;
6ad8ae5c 1075
9b224c5e 1076 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1077 if (lwp->stop_expected)
ae13219e 1078 {
9b224c5e 1079 if (debug_threads)
87ce2a04 1080 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1081 target_pid_to_str (ptid_of (thread)));
9b224c5e 1082
d86d4aaf 1083 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1084 lwp->stop_expected = 0;
ae13219e
DJ
1085 }
1086
1087 /* Flush any pending changes to the process's registers. */
d86d4aaf 1088 regcache_invalidate_thread (thread);
ae13219e 1089
9b224c5e
PA
1090 /* Pass on any pending signal for this thread. */
1091 sig = get_detach_signal (thread);
1092
ae13219e 1093 /* Finally, let it resume. */
82bfbe7e
PA
1094 if (the_low_target.prepare_to_resume != NULL)
1095 the_low_target.prepare_to_resume (lwp);
d86d4aaf 1096 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1097 (PTRACE_TYPE_ARG4) (long) sig) < 0)
9b224c5e 1098 error (_("Can't detach %s: %s"),
d86d4aaf 1099 target_pid_to_str (ptid_of (thread)),
9b224c5e 1100 strerror (errno));
bd99dc85
PA
1101
1102 delete_lwp (lwp);
95954743 1103 return 0;
6ad8ae5c
DJ
1104}
1105
95954743
PA
1106static int
1107linux_detach (int pid)
1108{
1109 struct process_info *process;
1110
1111 process = find_process_pid (pid);
1112 if (process == NULL)
1113 return -1;
1114
f9e39928
PA
1115 /* Stop all threads before detaching. First, ptrace requires that
1116 the thread is stopped to sucessfully detach. Second, thread_db
1117 may need to uninstall thread event breakpoints from memory, which
1118 only works with a stopped process anyway. */
7984d532 1119 stop_all_lwps (0, NULL);
f9e39928 1120
ca5c370d 1121#ifdef USE_THREAD_DB
8336d594 1122 thread_db_detach (process);
ca5c370d
PA
1123#endif
1124
fa593d66
PA
1125 /* Stabilize threads (move out of jump pads). */
1126 stabilize_threads ();
1127
95954743 1128 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1129
1130 the_target->mourn (process);
f9e39928
PA
1131
1132 /* Since we presently can only stop all lwps of all processes, we
1133 need to unstop lwps of other processes. */
7984d532 1134 unstop_all_lwps (0, NULL);
f9e39928
PA
1135 return 0;
1136}
1137
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (get_thread_lwp (thread));

  return 0;
}
1152
8336d594
PA
1153static void
1154linux_mourn (struct process_info *process)
1155{
1156 struct process_info_private *priv;
1157
1158#ifdef USE_THREAD_DB
1159 thread_db_mourn (process);
1160#endif
1161
d86d4aaf 1162 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1163
8336d594
PA
1164 /* Freeing all private data. */
1165 priv = process->private;
1166 free (priv->arch_private);
1167 free (priv);
1168 process->private = NULL;
505106cd
PA
1169
1170 remove_process (process);
8336d594
PA
1171}
1172
/* Wait for process PID to exit, draining its wait statuses.  Loops
   until waitpid reports an exit/termination status or says there are
   no children left (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      /* Only inspect STATUS when waitpid actually filled it in;
	 on failure (ret == -1) it is uninitialized garbage.  */
      if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}
1184
6ad8ae5c 1185/* Return nonzero if the given thread is still alive. */
0d62e5e8 1186static int
95954743 1187linux_thread_alive (ptid_t ptid)
0d62e5e8 1188{
95954743
PA
1189 struct lwp_info *lwp = find_lwp_pid (ptid);
1190
1191 /* We assume we always know if a thread exits. If a whole process
1192 exited but we still haven't been able to report it to GDB, we'll
1193 hold on to the last lwp of the dead process. */
1194 if (lwp != NULL)
1195 return !lwp->dead;
0d62e5e8
DJ
1196 else
1197 return 0;
1198}
1199
6bf5e0ba 1200/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1201static int
d50171e4 1202status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1203{
d86d4aaf
DE
1204 struct thread_info *thread = (struct thread_info *) entry;
1205 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1206 ptid_t ptid = * (ptid_t *) arg;
1207
1208 /* Check if we're only interested in events from a specific process
1209 or its lwps. */
1210 if (!ptid_equal (minus_one_ptid, ptid)
d86d4aaf 1211 && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
95954743 1212 return 0;
0d62e5e8 1213
d50171e4
PA
1214 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1215 report any status pending the LWP may have. */
8336d594 1216 if (thread->last_resume_kind == resume_stop
7984d532 1217 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4 1218 return 0;
0d62e5e8 1219
d50171e4 1220 return lwp->status_pending_p;
0d62e5e8
DJ
1221}
1222
95954743
PA
1223static int
1224same_lwp (struct inferior_list_entry *entry, void *data)
1225{
1226 ptid_t ptid = *(ptid_t *) data;
1227 int lwp;
1228
1229 if (ptid_get_lwp (ptid) != 0)
1230 lwp = ptid_get_lwp (ptid);
1231 else
1232 lwp = ptid_get_pid (ptid);
1233
1234 if (ptid_get_lwp (entry->id) == lwp)
1235 return 1;
1236
1237 return 0;
1238}
1239
1240struct lwp_info *
1241find_lwp_pid (ptid_t ptid)
1242{
d86d4aaf
DE
1243 struct inferior_list_entry *thread
1244 = find_inferior (&all_threads, same_lwp, &ptid);
1245
1246 if (thread == NULL)
1247 return NULL;
1248
1249 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1250}
1251
fa96cb38 1252/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1253
fa96cb38
PA
1254static int
1255num_lwps (int pid)
1256{
1257 struct inferior_list_entry *inf, *tmp;
1258 int count = 0;
0d62e5e8 1259
fa96cb38 1260 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1261 {
fa96cb38
PA
1262 if (ptid_get_pid (inf->id) == pid)
1263 count++;
24a09b5f 1264 }
3aee8918 1265
fa96cb38
PA
1266 return count;
1267}
d61ddec4 1268
fa96cb38
PA
1269/* Detect zombie thread group leaders, and "exit" them. We can't reap
1270 their exits until all other threads in the group have exited. */
c3adc08c 1271
fa96cb38
PA
1272static void
1273check_zombie_leaders (void)
1274{
1275 struct process_info *proc, *tmp;
c3adc08c 1276
fa96cb38 1277 ALL_PROCESSES (proc, tmp)
c3adc08c 1278 {
fa96cb38
PA
1279 pid_t leader_pid = pid_of (proc);
1280 struct lwp_info *leader_lp;
c3adc08c 1281
fa96cb38 1282 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1283
fa96cb38
PA
1284 if (debug_threads)
1285 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1286 "num_lwps=%d, zombie=%d\n",
1287 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1288 linux_proc_pid_is_zombie (leader_pid));
1289
1290 if (leader_lp != NULL
1291 /* Check if there are other threads in the group, as we may
1292 have raced with the inferior simply exiting. */
1293 && !last_thread_of_process_p (leader_pid)
1294 && linux_proc_pid_is_zombie (leader_pid))
1295 {
1296 /* A leader zombie can mean one of two things:
1297
1298 - It exited, and there's an exit status pending
1299 available, or only the leader exited (not the whole
1300 program). In the latter case, we can't waitpid the
1301 leader's exit status until all other threads are gone.
1302
1303 - There are 3 or more threads in the group, and a thread
1304 other than the leader exec'd. On an exec, the Linux
1305 kernel destroys all other threads (except the execing
1306 one) in the thread group, and resets the execing thread's
1307 tid to the tgid. No exit notification is sent for the
1308 execing thread -- from the ptracer's perspective, it
1309 appears as though the execing thread just vanishes.
1310 Until we reap all other threads except the leader and the
1311 execing thread, the leader will be zombie, and the
1312 execing thread will be in `D (disc sleep)'. As soon as
1313 all other threads are reaped, the execing thread changes
1314 it's tid to the tgid, and the previous (zombie) leader
1315 vanishes, giving place to the "new" leader. We could try
1316 distinguishing the exit and exec cases, by waiting once
1317 more, and seeing if something comes out, but it doesn't
1318 sound useful. The previous leader _does_ go away, and
1319 we'll re-add the new one once we see the exec event
1320 (which is just the same as what would happen if the
1321 previous leader did exit voluntarily before some other
1322 thread execs). */
c3adc08c 1323
fa96cb38
PA
1324 if (debug_threads)
1325 fprintf (stderr,
1326 "CZL: Thread group leader %d zombie "
1327 "(it exited, or another thread execd).\n",
1328 leader_pid);
c3adc08c 1329
fa96cb38 1330 delete_lwp (leader_lp);
c3adc08c
PA
1331 }
1332 }
fa96cb38 1333}
c3adc08c 1334
fa96cb38
PA
1335/* Callback for `find_inferior'. Returns the first LWP that is not
1336 stopped. ARG is a PTID filter. */
d50171e4 1337
fa96cb38
PA
1338static int
1339not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1340{
1341 struct thread_info *thr = (struct thread_info *) entry;
1342 struct lwp_info *lwp;
1343 ptid_t filter = *(ptid_t *) arg;
47c0c975 1344
fa96cb38
PA
1345 if (!ptid_match (ptid_of (thr), filter))
1346 return 0;
bd99dc85 1347
fa96cb38
PA
1348 lwp = get_thread_lwp (thr);
1349 if (!lwp->stopped)
1350 return 1;
1351
1352 return 0;
0d62e5e8 1353}
611cb4a5 1354
219f2f23
PA
1355/* This function should only be called if the LWP got a SIGTRAP.
1356
1357 Handle any tracepoint steps or hits. Return true if a tracepoint
1358 event was handled, 0 otherwise. */
1359
1360static int
1361handle_tracepoints (struct lwp_info *lwp)
1362{
1363 struct thread_info *tinfo = get_lwp_thread (lwp);
1364 int tpoint_related_event = 0;
1365
7984d532
PA
1366 /* If this tracepoint hit causes a tracing stop, we'll immediately
1367 uninsert tracepoints. To do this, we temporarily pause all
1368 threads, unpatch away, and then unpause threads. We need to make
1369 sure the unpausing doesn't resume LWP too. */
1370 lwp->suspended++;
1371
219f2f23
PA
1372 /* And we need to be sure that any all-threads-stopping doesn't try
1373 to move threads out of the jump pads, as it could deadlock the
1374 inferior (LWP could be in the jump pad, maybe even holding the
1375 lock.) */
1376
1377 /* Do any necessary step collect actions. */
1378 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1379
fa593d66
PA
1380 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1381
219f2f23
PA
1382 /* See if we just hit a tracepoint and do its main collect
1383 actions. */
1384 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1385
7984d532
PA
1386 lwp->suspended--;
1387
1388 gdb_assert (lwp->suspended == 0);
fa593d66 1389 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1390
219f2f23
PA
1391 if (tpoint_related_event)
1392 {
1393 if (debug_threads)
87ce2a04 1394 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1395 return 1;
1396 }
1397
1398 return 0;
1399}
1400
fa593d66
PA
1401/* Convenience wrapper. Returns true if LWP is presently collecting a
1402 fast tracepoint. */
1403
1404static int
1405linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1406 struct fast_tpoint_collect_status *status)
1407{
1408 CORE_ADDR thread_area;
d86d4aaf 1409 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1410
1411 if (the_low_target.get_thread_area == NULL)
1412 return 0;
1413
1414 /* Get the thread area address. This is used to recognize which
1415 thread is which when tracing with the in-process agent library.
1416 We don't read anything from the address, and treat it as opaque;
1417 it's the address itself that we assume is unique per-thread. */
d86d4aaf 1418 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
1419 return 0;
1420
1421 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1422}
1423
1424/* The reason we resume in the caller, is because we want to be able
1425 to pass lwp->status_pending as WSTAT, and we need to clear
1426 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1427 refuses to resume. */
1428
1429static int
1430maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1431{
0bfdf32f 1432 struct thread_info *saved_thread;
fa593d66 1433
0bfdf32f
GB
1434 saved_thread = current_thread;
1435 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1436
1437 if ((wstat == NULL
1438 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1439 && supports_fast_tracepoints ()
58b4daa5 1440 && agent_loaded_p ())
fa593d66
PA
1441 {
1442 struct fast_tpoint_collect_status status;
1443 int r;
1444
1445 if (debug_threads)
87ce2a04
DE
1446 debug_printf ("Checking whether LWP %ld needs to move out of the "
1447 "jump pad.\n",
0bfdf32f 1448 lwpid_of (current_thread));
fa593d66
PA
1449
1450 r = linux_fast_tracepoint_collecting (lwp, &status);
1451
1452 if (wstat == NULL
1453 || (WSTOPSIG (*wstat) != SIGILL
1454 && WSTOPSIG (*wstat) != SIGFPE
1455 && WSTOPSIG (*wstat) != SIGSEGV
1456 && WSTOPSIG (*wstat) != SIGBUS))
1457 {
1458 lwp->collecting_fast_tracepoint = r;
1459
1460 if (r != 0)
1461 {
1462 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1463 {
1464 /* Haven't executed the original instruction yet.
1465 Set breakpoint there, and wait till it's hit,
1466 then single-step until exiting the jump pad. */
1467 lwp->exit_jump_pad_bkpt
1468 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1469 }
1470
1471 if (debug_threads)
87ce2a04
DE
1472 debug_printf ("Checking whether LWP %ld needs to move out of "
1473 "the jump pad...it does\n",
0bfdf32f
GB
1474 lwpid_of (current_thread));
1475 current_thread = saved_thread;
fa593d66
PA
1476
1477 return 1;
1478 }
1479 }
1480 else
1481 {
1482 /* If we get a synchronous signal while collecting, *and*
1483 while executing the (relocated) original instruction,
1484 reset the PC to point at the tpoint address, before
1485 reporting to GDB. Otherwise, it's an IPA lib bug: just
1486 report the signal to GDB, and pray for the best. */
1487
1488 lwp->collecting_fast_tracepoint = 0;
1489
1490 if (r != 0
1491 && (status.adjusted_insn_addr <= lwp->stop_pc
1492 && lwp->stop_pc < status.adjusted_insn_addr_end))
1493 {
1494 siginfo_t info;
1495 struct regcache *regcache;
1496
1497 /* The si_addr on a few signals references the address
1498 of the faulting instruction. Adjust that as
1499 well. */
1500 if ((WSTOPSIG (*wstat) == SIGILL
1501 || WSTOPSIG (*wstat) == SIGFPE
1502 || WSTOPSIG (*wstat) == SIGBUS
1503 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 1504 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1505 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
1506 /* Final check just to make sure we don't clobber
1507 the siginfo of non-kernel-sent signals. */
1508 && (uintptr_t) info.si_addr == lwp->stop_pc)
1509 {
1510 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 1511 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1512 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
1513 }
1514
0bfdf32f 1515 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
1516 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1517 lwp->stop_pc = status.tpoint_addr;
1518
1519 /* Cancel any fast tracepoint lock this thread was
1520 holding. */
1521 force_unlock_trace_buffer ();
1522 }
1523
1524 if (lwp->exit_jump_pad_bkpt != NULL)
1525 {
1526 if (debug_threads)
87ce2a04
DE
1527 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1528 "stopping all threads momentarily.\n");
fa593d66
PA
1529
1530 stop_all_lwps (1, lwp);
1531 cancel_breakpoints ();
1532
1533 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1534 lwp->exit_jump_pad_bkpt = NULL;
1535
1536 unstop_all_lwps (1, lwp);
1537
1538 gdb_assert (lwp->suspended >= 0);
1539 }
1540 }
1541 }
1542
1543 if (debug_threads)
87ce2a04
DE
1544 debug_printf ("Checking whether LWP %ld needs to move out of the "
1545 "jump pad...no\n",
0bfdf32f 1546 lwpid_of (current_thread));
0cccb683 1547
0bfdf32f 1548 current_thread = saved_thread;
fa593d66
PA
1549 return 0;
1550}
1551
1552/* Enqueue one signal in the "signals to report later when out of the
1553 jump pad" list. */
1554
1555static void
1556enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1557{
1558 struct pending_signals *p_sig;
d86d4aaf 1559 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1560
1561 if (debug_threads)
87ce2a04 1562 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 1563 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1564
1565 if (debug_threads)
1566 {
1567 struct pending_signals *sig;
1568
1569 for (sig = lwp->pending_signals_to_report;
1570 sig != NULL;
1571 sig = sig->prev)
87ce2a04
DE
1572 debug_printf (" Already queued %d\n",
1573 sig->signal);
fa593d66 1574
87ce2a04 1575 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
1576 }
1577
1a981360
PA
1578 /* Don't enqueue non-RT signals if they are already in the deferred
1579 queue. (SIGSTOP being the easiest signal to see ending up here
1580 twice) */
1581 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1582 {
1583 struct pending_signals *sig;
1584
1585 for (sig = lwp->pending_signals_to_report;
1586 sig != NULL;
1587 sig = sig->prev)
1588 {
1589 if (sig->signal == WSTOPSIG (*wstat))
1590 {
1591 if (debug_threads)
87ce2a04
DE
1592 debug_printf ("Not requeuing already queued non-RT signal %d"
1593 " for LWP %ld\n",
1594 sig->signal,
d86d4aaf 1595 lwpid_of (thread));
1a981360
PA
1596 return;
1597 }
1598 }
1599 }
1600
fa593d66
PA
1601 p_sig = xmalloc (sizeof (*p_sig));
1602 p_sig->prev = lwp->pending_signals_to_report;
1603 p_sig->signal = WSTOPSIG (*wstat);
1604 memset (&p_sig->info, 0, sizeof (siginfo_t));
d86d4aaf 1605 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1606 &p_sig->info);
fa593d66
PA
1607
1608 lwp->pending_signals_to_report = p_sig;
1609}
1610
1611/* Dequeue one signal from the "signals to report later when out of
1612 the jump pad" list. */
1613
1614static int
1615dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1616{
d86d4aaf
DE
1617 struct thread_info *thread = get_lwp_thread (lwp);
1618
fa593d66
PA
1619 if (lwp->pending_signals_to_report != NULL)
1620 {
1621 struct pending_signals **p_sig;
1622
1623 p_sig = &lwp->pending_signals_to_report;
1624 while ((*p_sig)->prev != NULL)
1625 p_sig = &(*p_sig)->prev;
1626
1627 *wstat = W_STOPCODE ((*p_sig)->signal);
1628 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 1629 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1630 &(*p_sig)->info);
fa593d66
PA
1631 free (*p_sig);
1632 *p_sig = NULL;
1633
1634 if (debug_threads)
87ce2a04 1635 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 1636 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1637
1638 if (debug_threads)
1639 {
1640 struct pending_signals *sig;
1641
1642 for (sig = lwp->pending_signals_to_report;
1643 sig != NULL;
1644 sig = sig->prev)
87ce2a04
DE
1645 debug_printf (" Still queued %d\n",
1646 sig->signal);
fa593d66 1647
87ce2a04 1648 debug_printf (" (no more queued signals)\n");
fa593d66
PA
1649 }
1650
1651 return 1;
1652 }
1653
1654 return 0;
1655}
1656
d50171e4
PA
1657/* Arrange for a breakpoint to be hit again later. We don't keep the
1658 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1659 will handle the current event, eventually we will resume this LWP,
1660 and this breakpoint will trap again. */
1661
1662static int
1663cancel_breakpoint (struct lwp_info *lwp)
1664{
0bfdf32f 1665 struct thread_info *saved_thread;
d50171e4
PA
1666
1667 /* There's nothing to do if we don't support breakpoints. */
1668 if (!supports_breakpoints ())
1669 return 0;
1670
d50171e4 1671 /* breakpoint_at reads from current inferior. */
0bfdf32f
GB
1672 saved_thread = current_thread;
1673 current_thread = get_lwp_thread (lwp);
d50171e4
PA
1674
1675 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1676 {
1677 if (debug_threads)
87ce2a04 1678 debug_printf ("CB: Push back breakpoint for %s\n",
0bfdf32f 1679 target_pid_to_str (ptid_of (current_thread)));
d50171e4
PA
1680
1681 /* Back up the PC if necessary. */
1682 if (the_low_target.decr_pc_after_break)
1683 {
1684 struct regcache *regcache
0bfdf32f 1685 = get_thread_regcache (current_thread, 1);
d50171e4
PA
1686 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1687 }
1688
0bfdf32f 1689 current_thread = saved_thread;
d50171e4
PA
1690 return 1;
1691 }
1692 else
1693 {
1694 if (debug_threads)
87ce2a04
DE
1695 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1696 paddress (lwp->stop_pc),
0bfdf32f 1697 target_pid_to_str (ptid_of (current_thread)));
d50171e4
PA
1698 }
1699
0bfdf32f 1700 current_thread = saved_thread;
d50171e4
PA
1701 return 0;
1702}
1703
c4d9ceb6
YQ
1704/* Return true if the event in LP may be caused by breakpoint. */
1705
1706static int
1707lp_status_maybe_breakpoint (struct lwp_info *lp)
1708{
1709 return (lp->status_pending_p
1710 && WIFSTOPPED (lp->status_pending)
1711 && (WSTOPSIG (lp->status_pending) == SIGTRAP
1712 /* SIGILL and SIGSEGV are also treated as traps in case a
1713 breakpoint is inserted at the current PC. */
1714 || WSTOPSIG (lp->status_pending) == SIGILL
1715 || WSTOPSIG (lp->status_pending) == SIGSEGV));
1716}
1717
fa96cb38
PA
1718/* Do low-level handling of the event, and check if we should go on
1719 and pass it to caller code. Return the affected lwp if we are, or
1720 NULL otherwise. */
1721
1722static struct lwp_info *
1723linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1724{
1725 struct lwp_info *child;
1726 struct thread_info *thread;
1727
1728 child = find_lwp_pid (pid_to_ptid (lwpid));
1729
1730 /* If we didn't find a process, one of two things presumably happened:
1731 - A process we started and then detached from has exited. Ignore it.
1732 - A process we are controlling has forked and the new child's stop
1733 was reported to us by the kernel. Save its PID. */
1734 if (child == NULL && WIFSTOPPED (wstat))
1735 {
1736 add_to_pid_list (&stopped_pids, lwpid, wstat);
1737 return NULL;
1738 }
1739 else if (child == NULL)
1740 return NULL;
1741
1742 thread = get_lwp_thread (child);
1743
1744 child->stopped = 1;
1745
1746 child->last_status = wstat;
1747
1748 if (WIFSTOPPED (wstat))
1749 {
1750 struct process_info *proc;
1751
1752 /* Architecture-specific setup after inferior is running. This
1753 needs to happen after we have attached to the inferior and it
1754 is stopped for the first time, but before we access any
1755 inferior registers. */
1756 proc = find_process_pid (pid_of (thread));
1757 if (proc->private->new_inferior)
1758 {
0bfdf32f 1759 struct thread_info *saved_thread;
fa96cb38 1760
0bfdf32f
GB
1761 saved_thread = current_thread;
1762 current_thread = thread;
fa96cb38
PA
1763
1764 the_low_target.arch_setup ();
1765
0bfdf32f 1766 current_thread = saved_thread;
fa96cb38
PA
1767
1768 proc->private->new_inferior = 0;
1769 }
1770 }
1771
1772 /* Store the STOP_PC, with adjustment applied. This depends on the
1773 architecture being defined already (so that CHILD has a valid
1774 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1775 not). */
1776 if (WIFSTOPPED (wstat))
1777 {
1778 if (debug_threads
1779 && the_low_target.get_pc != NULL)
1780 {
0bfdf32f 1781 struct thread_info *saved_thread;
fa96cb38
PA
1782 struct regcache *regcache;
1783 CORE_ADDR pc;
1784
0bfdf32f
GB
1785 saved_thread = current_thread;
1786 current_thread = thread;
1787 regcache = get_thread_regcache (current_thread, 1);
fa96cb38
PA
1788 pc = (*the_low_target.get_pc) (regcache);
1789 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
0bfdf32f 1790 current_thread = saved_thread;
fa96cb38
PA
1791 }
1792
1793 child->stop_pc = get_stop_pc (child);
1794 }
1795
1796 /* Fetch the possibly triggered data watchpoint info and store it in
1797 CHILD.
1798
1799 On some archs, like x86, that use debug registers to set
1800 watchpoints, it's possible that the way to know which watched
1801 address trapped, is to check the register that is used to select
1802 which address to watch. Problem is, between setting the
1803 watchpoint and reading back which data address trapped, the user
1804 may change the set of watchpoints, and, as a consequence, GDB
1805 changes the debug registers in the inferior. To avoid reading
1806 back a stale stopped-data-address when that happens, we cache in
1807 LP the fact that a watchpoint trapped, and the corresponding data
1808 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1809 changes the debug registers meanwhile, we have the cached data we
1810 can rely on. */
1811
1812 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1813 {
1814 if (the_low_target.stopped_by_watchpoint == NULL)
1815 {
1816 child->stopped_by_watchpoint = 0;
1817 }
1818 else
1819 {
0bfdf32f 1820 struct thread_info *saved_thread;
fa96cb38 1821
0bfdf32f
GB
1822 saved_thread = current_thread;
1823 current_thread = thread;
fa96cb38
PA
1824
1825 child->stopped_by_watchpoint
1826 = the_low_target.stopped_by_watchpoint ();
1827
1828 if (child->stopped_by_watchpoint)
1829 {
1830 if (the_low_target.stopped_data_address != NULL)
1831 child->stopped_data_address
1832 = the_low_target.stopped_data_address ();
1833 else
1834 child->stopped_data_address = 0;
1835 }
1836
0bfdf32f 1837 current_thread = saved_thread;
fa96cb38
PA
1838 }
1839 }
1840
1841 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1842 {
beed38b8
JB
1843 struct process_info *proc = find_process_pid (pid_of (thread));
1844
1845 linux_enable_event_reporting (lwpid, proc->attached);
fa96cb38
PA
1846 child->must_set_ptrace_flags = 0;
1847 }
1848
1849 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 1850 && linux_is_extended_waitstatus (wstat))
fa96cb38
PA
1851 {
1852 handle_extended_wait (child, wstat);
1853 return NULL;
1854 }
1855
1856 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1857 && child->stop_expected)
1858 {
1859 if (debug_threads)
1860 debug_printf ("Expected stop.\n");
1861 child->stop_expected = 0;
1862
1863 if (thread->last_resume_kind == resume_stop)
1864 {
1865 /* We want to report the stop to the core. Treat the
1866 SIGSTOP as a normal event. */
1867 }
1868 else if (stopping_threads != NOT_STOPPING_THREADS)
1869 {
1870 /* Stopping threads. We don't want this SIGSTOP to end up
1871 pending in the FILTER_PTID handling below. */
1872 return NULL;
1873 }
1874 else
1875 {
1876 /* Filter out the event. */
1877 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1878 return NULL;
1879 }
1880 }
1881
1882 /* Check if the thread has exited. */
1883 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1884 && num_lwps (pid_of (thread)) > 1)
1885 {
1886 if (debug_threads)
1887 debug_printf ("LLW: %d exited.\n", lwpid);
1888
1889 /* If there is at least one more LWP, then the exit signal
1890 was not the end of the debugged application and should be
1891 ignored. */
1892 delete_lwp (child);
1893 return NULL;
1894 }
1895
1896 if (!ptid_match (ptid_of (thread), filter_ptid))
1897 {
1898 if (debug_threads)
1899 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1900 lwpid, wstat);
1901
1902 if (WIFSTOPPED (wstat))
1903 {
1904 child->status_pending_p = 1;
1905 child->status_pending = wstat;
1906
1907 if (WSTOPSIG (wstat) != SIGSTOP)
1908 {
1909 /* Cancel breakpoint hits. The breakpoint may be
1910 removed before we fetch events from this process to
1911 report to the core. It is best not to assume the
1912 moribund breakpoints heuristic always handles these
1913 cases --- it could be too many events go through to
1914 the core before this one is handled. All-stop always
1915 cancels breakpoint hits in all threads. */
1916 if (non_stop
c4d9ceb6 1917 && lp_status_maybe_breakpoint (child)
fa96cb38
PA
1918 && cancel_breakpoint (child))
1919 {
1920 /* Throw away the SIGTRAP. */
1921 child->status_pending_p = 0;
1922
1923 if (debug_threads)
1924 debug_printf ("LLW: LWP %d hit a breakpoint while"
1925 " waiting for another process;"
1926 " cancelled it\n", lwpid);
1927 }
1928 }
1929 }
1930 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1931 {
1932 if (debug_threads)
1933 debug_printf ("LLWE: process %d exited while fetching "
1934 "event from another LWP\n", lwpid);
1935
1936 /* This was the last lwp in the process. Since events are
1937 serialized to GDB core, and we can't report this one
1938 right now, but GDB core and the other target layers will
1939 want to be notified about the exit code/signal, leave the
1940 status pending for the next time we're able to report
1941 it. */
1942 mark_lwp_dead (child, wstat);
1943 }
1944
1945 return NULL;
1946 }
1947
1948 return child;
1949}
1950
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  It is null_ptid when no step-over is in progress
   (linux_wait_1 compares it against null_ptid to decide whether to do
   a blocking wait on just this thread).  */
ptid_t step_over_bkpt;
1954
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.

   On a successful return, current_thread is left pointing at the
   thread that reported the event.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      /* Any LWP (or any LWP of a given process) will do; pick the
	 first one with a pending status.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* A specific LWP was requested.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* A status left pending while the LWP was collecting a fast
	 tracepoint must be re-queued as a deferred signal and the LWP
	 resumed, so the collection can finish first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      /* An already-pending status satisfies the wait; consume it.  */
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid (although we don't currently enable exec
	   events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter the event: statuses for LWPs outside FILTER_PTID
	     are left pending on their LWP and NULL is returned.  */
	  event_child = linux_low_filter_event (filter_ptid,
						ret, *wstatp);
	  if (event_child != NULL)
	    {
	      /* We got an event to report to the core.  */
	      event_thread = get_lwp_thread (event_child);
	      break;
	    }

	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  /* Restore the caller's signal mask before returning.  */
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.
	 sigsuspend atomically restores PREV_MASK (which has SIGCHLD
	 unblocked) and waits, so no wakeup can be lost between the
	 waitpid above and here.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      /* Non-stop statuses only make it here for the last LWP of a
	 process (others are reaped in linux_low_filter_event).  */
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
	debug_printf ("LWP %d is the last lwp of process.  "
		      "Process %ld exiting.\n",
		      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
2148
/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.

   Convenience wrapper: waits for and filters on the same PTID.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2162
6bf5e0ba
PA
2163/* Count the LWP's that have had events. */
2164
2165static int
2166count_events_callback (struct inferior_list_entry *entry, void *data)
2167{
d86d4aaf
DE
2168 struct thread_info *thread = (struct thread_info *) entry;
2169 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2170 int *count = data;
2171
2172 gdb_assert (count != NULL);
2173
2174 /* Count only resumed LWPs that have a SIGTRAP event pending that
2175 should be reported to GDB. */
8336d594
PA
2176 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2177 && thread->last_resume_kind != resume_stop
c4d9ceb6 2178 && lp_status_maybe_breakpoint (lp)
6bf5e0ba
PA
2179 && !breakpoint_inserted_here (lp->stop_pc))
2180 (*count)++;
2181
2182 return 0;
2183}
2184
2185/* Select the LWP (if any) that is currently being single-stepped. */
2186
2187static int
2188select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2189{
d86d4aaf
DE
2190 struct thread_info *thread = (struct thread_info *) entry;
2191 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2192
8336d594
PA
2193 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2194 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2195 && lp->status_pending_p)
2196 return 1;
2197 else
2198 return 0;
2199}
2200
2201/* Select the Nth LWP that has had a SIGTRAP event that should be
2202 reported to GDB. */
2203
2204static int
2205select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2206{
d86d4aaf
DE
2207 struct thread_info *thread = (struct thread_info *) entry;
2208 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2209 int *selector = data;
2210
2211 gdb_assert (selector != NULL);
2212
2213 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
2214 if (thread->last_resume_kind != resume_stop
2215 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
c4d9ceb6 2216 && lp_status_maybe_breakpoint (lp)
6bf5e0ba
PA
2217 && !breakpoint_inserted_here (lp->stop_pc))
2218 if ((*selector)-- == 0)
2219 return 1;
2220
2221 return 0;
2222}
2223
2224static int
2225cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2226{
d86d4aaf
DE
2227 struct thread_info *thread = (struct thread_info *) entry;
2228 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2229 struct lwp_info *event_lp = data;
2230
2231 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2232 if (lp == event_lp)
2233 return 0;
2234
2235 /* If a LWP other than the LWP that we're reporting an event for has
2236 hit a GDB breakpoint (as opposed to some random trap signal),
2237 then just arrange for it to hit it again later. We don't keep
2238 the SIGTRAP status and don't forward the SIGTRAP signal to the
2239 LWP. We will handle the current event, eventually we will resume
2240 all LWPs, and this one will get its breakpoint trap again.
2241
2242 If we do not do this, then we run the risk that the user will
2243 delete or disable the breakpoint, but the LWP will have already
2244 tripped on it. */
2245
8336d594
PA
2246 if (thread->last_resume_kind != resume_stop
2247 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
c4d9ceb6 2248 && lp_status_maybe_breakpoint (lp)
bdabb078
PA
2249 && !lp->stepping
2250 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
2251 && cancel_breakpoint (lp))
2252 /* Throw away the SIGTRAP. */
2253 lp->status_pending_p = 0;
2254
2255 return 0;
2256}
2257
/* Cancel pending breakpoint hits in all LWPs except the event LWP
   (the callback receives NULL, so no LWP is exempted here).  */

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
}
2263
6bf5e0ba
PA
2264/* Select one LWP out of those that have events pending. */
2265
2266static void
2267select_event_lwp (struct lwp_info **orig_lp)
2268{
2269 int num_events = 0;
2270 int random_selector;
d86d4aaf 2271 struct thread_info *event_thread;
6bf5e0ba
PA
2272
2273 /* Give preference to any LWP that is being single-stepped. */
d86d4aaf
DE
2274 event_thread
2275 = (struct thread_info *) find_inferior (&all_threads,
2276 select_singlestep_lwp_callback,
2277 NULL);
2278 if (event_thread != NULL)
6bf5e0ba
PA
2279 {
2280 if (debug_threads)
87ce2a04 2281 debug_printf ("SEL: Select single-step %s\n",
d86d4aaf 2282 target_pid_to_str (ptid_of (event_thread)));
6bf5e0ba
PA
2283 }
2284 else
2285 {
2286 /* No single-stepping LWP. Select one at random, out of those
2287 which have had SIGTRAP events. */
2288
2289 /* First see how many SIGTRAP events we have. */
d86d4aaf 2290 find_inferior (&all_threads, count_events_callback, &num_events);
6bf5e0ba
PA
2291
2292 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2293 random_selector = (int)
2294 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2295
2296 if (debug_threads && num_events > 1)
87ce2a04
DE
2297 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2298 num_events, random_selector);
6bf5e0ba 2299
d86d4aaf
DE
2300 event_thread
2301 = (struct thread_info *) find_inferior (&all_threads,
2302 select_event_lwp_callback,
2303 &random_selector);
6bf5e0ba
PA
2304 }
2305
d86d4aaf 2306 if (event_thread != NULL)
6bf5e0ba 2307 {
d86d4aaf
DE
2308 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2309
6bf5e0ba
PA
2310 /* Switch the event LWP. */
2311 *orig_lp = event_lp;
2312 }
2313}
2314
7984d532
PA
2315/* Decrement the suspend count of an LWP. */
2316
2317static int
2318unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2319{
d86d4aaf
DE
2320 struct thread_info *thread = (struct thread_info *) entry;
2321 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2322
2323 /* Ignore EXCEPT. */
2324 if (lwp == except)
2325 return 0;
2326
2327 lwp->suspended--;
2328
2329 gdb_assert (lwp->suspended >= 0);
2330 return 0;
2331}
2332
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_threads, unsuspend_one_lwp, except);
}
2341
/* Forward declarations for helpers used by linux_stabilize_threads
   below; their definitions appear later in this file.  */
static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
				       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
			    struct target_waitstatus *ourstatus,
			    int target_options);
2349
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread is stuck in its jump pad, we can't stabilize at
     all; bail out early.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Suspended LWPs are not resumed again until the
	     matching unsuspend below.  */
	  lwp->suspended++;

	  /* Remember interesting signals so they can be re-delivered
	     once the thread is safely out of the jump pad.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspends acquired in the loop above (no LWP is
     excepted).  */
  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2450
0d62e5e8 2451/* Wait for process, returns status. */
da6d8c04 2452
95954743
PA
2453static ptid_t
2454linux_wait_1 (ptid_t ptid,
2455 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2456{
e5f1222d 2457 int w;
fc7238bb 2458 struct lwp_info *event_child;
bd99dc85 2459 int options;
bd99dc85 2460 int pid;
6bf5e0ba
PA
2461 int step_over_finished;
2462 int bp_explains_trap;
2463 int maybe_internal_trap;
2464 int report_to_gdb;
219f2f23 2465 int trace_event;
c2d6af84 2466 int in_step_range;
bd99dc85 2467
87ce2a04
DE
2468 if (debug_threads)
2469 {
2470 debug_enter ();
2471 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2472 }
2473
bd99dc85
PA
2474 /* Translate generic target options into linux options. */
2475 options = __WALL;
2476 if (target_options & TARGET_WNOHANG)
2477 options |= WNOHANG;
0d62e5e8
DJ
2478
2479retry:
fa593d66
PA
2480 bp_explains_trap = 0;
2481 trace_event = 0;
c2d6af84 2482 in_step_range = 0;
bd99dc85
PA
2483 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2484
6bf5e0ba
PA
2485 if (ptid_equal (step_over_bkpt, null_ptid))
2486 pid = linux_wait_for_event (ptid, &w, options);
2487 else
2488 {
2489 if (debug_threads)
87ce2a04
DE
2490 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2491 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2492 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2493 }
2494
fa96cb38 2495 if (pid == 0)
87ce2a04 2496 {
fa96cb38
PA
2497 gdb_assert (target_options & TARGET_WNOHANG);
2498
87ce2a04
DE
2499 if (debug_threads)
2500 {
fa96cb38
PA
2501 debug_printf ("linux_wait_1 ret = null_ptid, "
2502 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2503 debug_exit ();
2504 }
fa96cb38
PA
2505
2506 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2507 return null_ptid;
2508 }
fa96cb38
PA
2509 else if (pid == -1)
2510 {
2511 if (debug_threads)
2512 {
2513 debug_printf ("linux_wait_1 ret = null_ptid, "
2514 "TARGET_WAITKIND_NO_RESUMED\n");
2515 debug_exit ();
2516 }
bd99dc85 2517
fa96cb38
PA
2518 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2519 return null_ptid;
2520 }
0d62e5e8 2521
0bfdf32f 2522 event_child = get_thread_lwp (current_thread);
0d62e5e8 2523
fa96cb38
PA
2524 /* linux_wait_for_event only returns an exit status for the last
2525 child of a process. Report it. */
2526 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2527 {
fa96cb38 2528 if (WIFEXITED (w))
0d62e5e8 2529 {
fa96cb38
PA
2530 ourstatus->kind = TARGET_WAITKIND_EXITED;
2531 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2532
fa96cb38 2533 if (debug_threads)
bd99dc85 2534 {
fa96cb38
PA
2535 debug_printf ("linux_wait_1 ret = %s, exited with "
2536 "retcode %d\n",
0bfdf32f 2537 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2538 WEXITSTATUS (w));
2539 debug_exit ();
bd99dc85 2540 }
fa96cb38
PA
2541 }
2542 else
2543 {
2544 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2545 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2546
fa96cb38
PA
2547 if (debug_threads)
2548 {
2549 debug_printf ("linux_wait_1 ret = %s, terminated with "
2550 "signal %d\n",
0bfdf32f 2551 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2552 WTERMSIG (w));
2553 debug_exit ();
2554 }
0d62e5e8 2555 }
fa96cb38 2556
0bfdf32f 2557 return ptid_of (current_thread);
da6d8c04
DJ
2558 }
2559
6bf5e0ba
PA
2560 /* If this event was not handled before, and is not a SIGTRAP, we
2561 report it. SIGILL and SIGSEGV are also treated as traps in case
2562 a breakpoint is inserted at the current PC. If this target does
2563 not support internal breakpoints at all, we also report the
2564 SIGTRAP without further processing; it's of no concern to us. */
2565 maybe_internal_trap
2566 = (supports_breakpoints ()
2567 && (WSTOPSIG (w) == SIGTRAP
2568 || ((WSTOPSIG (w) == SIGILL
2569 || WSTOPSIG (w) == SIGSEGV)
2570 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2571
2572 if (maybe_internal_trap)
2573 {
2574 /* Handle anything that requires bookkeeping before deciding to
2575 report the event or continue waiting. */
2576
2577 /* First check if we can explain the SIGTRAP with an internal
2578 breakpoint, or if we should possibly report the event to GDB.
2579 Do this before anything that may remove or insert a
2580 breakpoint. */
2581 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2582
2583 /* We have a SIGTRAP, possibly a step-over dance has just
2584 finished. If so, tweak the state machine accordingly,
2585 reinsert breakpoints and delete any reinsert (software
2586 single-step) breakpoints. */
2587 step_over_finished = finish_step_over (event_child);
2588
2589 /* Now invoke the callbacks of any internal breakpoints there. */
2590 check_breakpoints (event_child->stop_pc);
2591
219f2f23
PA
2592 /* Handle tracepoint data collecting. This may overflow the
2593 trace buffer, and cause a tracing stop, removing
2594 breakpoints. */
2595 trace_event = handle_tracepoints (event_child);
2596
6bf5e0ba
PA
2597 if (bp_explains_trap)
2598 {
2599 /* If we stepped or ran into an internal breakpoint, we've
2600 already handled it. So next time we resume (from this
2601 PC), we should step over it. */
2602 if (debug_threads)
87ce2a04 2603 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2604
8b07ae33
PA
2605 if (breakpoint_here (event_child->stop_pc))
2606 event_child->need_step_over = 1;
6bf5e0ba
PA
2607 }
2608 }
2609 else
2610 {
2611 /* We have some other signal, possibly a step-over dance was in
2612 progress, and it should be cancelled too. */
2613 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2614 }
2615
2616 /* We have all the data we need. Either report the event to GDB, or
2617 resume threads and keep waiting for more. */
2618
2619 /* If we're collecting a fast tracepoint, finish the collection and
2620 move out of the jump pad before delivering a signal. See
2621 linux_stabilize_threads. */
2622
2623 if (WIFSTOPPED (w)
2624 && WSTOPSIG (w) != SIGTRAP
2625 && supports_fast_tracepoints ()
58b4daa5 2626 && agent_loaded_p ())
fa593d66
PA
2627 {
2628 if (debug_threads)
87ce2a04
DE
2629 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2630 "to defer or adjust it.\n",
0bfdf32f 2631 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2632
2633 /* Allow debugging the jump pad itself. */
0bfdf32f 2634 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2635 && maybe_move_out_of_jump_pad (event_child, &w))
2636 {
2637 enqueue_one_deferred_signal (event_child, &w);
2638
2639 if (debug_threads)
87ce2a04 2640 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2641 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2642
2643 linux_resume_one_lwp (event_child, 0, 0, NULL);
2644 goto retry;
2645 }
2646 }
219f2f23 2647
fa593d66
PA
2648 if (event_child->collecting_fast_tracepoint)
2649 {
2650 if (debug_threads)
87ce2a04
DE
2651 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2652 "Check if we're already there.\n",
0bfdf32f 2653 lwpid_of (current_thread),
87ce2a04 2654 event_child->collecting_fast_tracepoint);
fa593d66
PA
2655
2656 trace_event = 1;
2657
2658 event_child->collecting_fast_tracepoint
2659 = linux_fast_tracepoint_collecting (event_child, NULL);
2660
2661 if (event_child->collecting_fast_tracepoint != 1)
2662 {
2663 /* No longer need this breakpoint. */
2664 if (event_child->exit_jump_pad_bkpt != NULL)
2665 {
2666 if (debug_threads)
87ce2a04
DE
2667 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2668 "stopping all threads momentarily.\n");
fa593d66
PA
2669
2670 /* Other running threads could hit this breakpoint.
2671 We don't handle moribund locations like GDB does,
2672 instead we always pause all threads when removing
2673 breakpoints, so that any step-over or
2674 decr_pc_after_break adjustment is always taken
2675 care of while the breakpoint is still
2676 inserted. */
2677 stop_all_lwps (1, event_child);
2678 cancel_breakpoints ();
2679
2680 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2681 event_child->exit_jump_pad_bkpt = NULL;
2682
2683 unstop_all_lwps (1, event_child);
2684
2685 gdb_assert (event_child->suspended >= 0);
2686 }
2687 }
2688
2689 if (event_child->collecting_fast_tracepoint == 0)
2690 {
2691 if (debug_threads)
87ce2a04
DE
2692 debug_printf ("fast tracepoint finished "
2693 "collecting successfully.\n");
fa593d66
PA
2694
2695 /* We may have a deferred signal to report. */
2696 if (dequeue_one_deferred_signal (event_child, &w))
2697 {
2698 if (debug_threads)
87ce2a04 2699 debug_printf ("dequeued one signal.\n");
fa593d66 2700 }
3c11dd79 2701 else
fa593d66 2702 {
3c11dd79 2703 if (debug_threads)
87ce2a04 2704 debug_printf ("no deferred signals.\n");
fa593d66
PA
2705
2706 if (stabilizing_threads)
2707 {
2708 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 2709 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
2710
2711 if (debug_threads)
2712 {
2713 debug_printf ("linux_wait_1 ret = %s, stopped "
2714 "while stabilizing threads\n",
0bfdf32f 2715 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
2716 debug_exit ();
2717 }
2718
0bfdf32f 2719 return ptid_of (current_thread);
fa593d66
PA
2720 }
2721 }
2722 }
6bf5e0ba
PA
2723 }
2724
e471f25b
PA
2725 /* Check whether GDB would be interested in this event. */
2726
2727 /* If GDB is not interested in this signal, don't stop other
2728 threads, and don't report it to GDB. Just resume the inferior
2729 right away. We do this for threading-related signals as well as
2730 any that GDB specifically requested we ignore. But never ignore
2731 SIGSTOP if we sent it ourselves, and do not ignore signals when
2732 stepping - they may require special handling to skip the signal
2733 handler. */
2734 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2735 thread library? */
2736 if (WIFSTOPPED (w)
0bfdf32f 2737 && current_thread->last_resume_kind != resume_step
e471f25b 2738 && (
1a981360 2739#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
e471f25b
PA
2740 (current_process ()->private->thread_db != NULL
2741 && (WSTOPSIG (w) == __SIGRTMIN
2742 || WSTOPSIG (w) == __SIGRTMIN + 1))
2743 ||
2744#endif
2ea28649 2745 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 2746 && !(WSTOPSIG (w) == SIGSTOP
0bfdf32f 2747 && current_thread->last_resume_kind == resume_stop))))
e471f25b
PA
2748 {
2749 siginfo_t info, *info_p;
2750
2751 if (debug_threads)
87ce2a04 2752 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 2753 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 2754
0bfdf32f 2755 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2756 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
2757 info_p = &info;
2758 else
2759 info_p = NULL;
2760 linux_resume_one_lwp (event_child, event_child->stepping,
2761 WSTOPSIG (w), info_p);
2762 goto retry;
2763 }
2764
c2d6af84
PA
2765 /* Note that all addresses are always "out of the step range" when
2766 there's no range to begin with. */
2767 in_step_range = lwp_in_step_range (event_child);
2768
2769 /* If GDB wanted this thread to single step, and the thread is out
2770 of the step range, we always want to report the SIGTRAP, and let
2771 GDB handle it. Watchpoints should always be reported. So should
2772 signals we can't explain. A SIGTRAP we can't explain could be a
2773 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2774 do, we're be able to handle GDB breakpoints on top of internal
2775 breakpoints, by handling the internal breakpoint and still
2776 reporting the event to GDB. If we don't, we're out of luck, GDB
2777 won't see the breakpoint hit. */
6bf5e0ba 2778 report_to_gdb = (!maybe_internal_trap
0bfdf32f 2779 || (current_thread->last_resume_kind == resume_step
c2d6af84 2780 && !in_step_range)
6bf5e0ba 2781 || event_child->stopped_by_watchpoint
c2d6af84 2782 || (!step_over_finished && !in_step_range
493e2a69 2783 && !bp_explains_trap && !trace_event)
9f3a5c85 2784 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5
SS
2785 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2786 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2787
2788 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
2789
2790 /* We found no reason GDB would want us to stop. We either hit one
2791 of our own breakpoints, or finished an internal step GDB
2792 shouldn't know about. */
2793 if (!report_to_gdb)
2794 {
2795 if (debug_threads)
2796 {
2797 if (bp_explains_trap)
87ce2a04 2798 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2799 if (step_over_finished)
87ce2a04 2800 debug_printf ("Step-over finished.\n");
219f2f23 2801 if (trace_event)
87ce2a04 2802 debug_printf ("Tracepoint event.\n");
c2d6af84 2803 if (lwp_in_step_range (event_child))
87ce2a04
DE
2804 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2805 paddress (event_child->stop_pc),
2806 paddress (event_child->step_range_start),
2807 paddress (event_child->step_range_end));
6bf5e0ba
PA
2808 }
2809
2810 /* We're not reporting this breakpoint to GDB, so apply the
2811 decr_pc_after_break adjustment to the inferior's regcache
2812 ourselves. */
2813
2814 if (the_low_target.set_pc != NULL)
2815 {
2816 struct regcache *regcache
0bfdf32f 2817 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
2818 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2819 }
2820
7984d532
PA
2821 /* We may have finished stepping over a breakpoint. If so,
2822 we've stopped and suspended all LWPs momentarily except the
2823 stepping one. This is where we resume them all again. We're
2824 going to keep waiting, so use proceed, which handles stepping
2825 over the next breakpoint. */
6bf5e0ba 2826 if (debug_threads)
87ce2a04 2827 debug_printf ("proceeding all threads.\n");
7984d532
PA
2828
2829 if (step_over_finished)
2830 unsuspend_all_lwps (event_child);
2831
6bf5e0ba
PA
2832 proceed_all_lwps ();
2833 goto retry;
2834 }
2835
2836 if (debug_threads)
2837 {
0bfdf32f 2838 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
2839 {
2840 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 2841 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 2842 else if (!lwp_in_step_range (event_child))
87ce2a04 2843 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 2844 }
6bf5e0ba 2845 if (event_child->stopped_by_watchpoint)
87ce2a04 2846 debug_printf ("Stopped by watchpoint.\n");
8b07ae33 2847 if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 2848 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 2849 if (debug_threads)
87ce2a04 2850 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
2851 }
2852
2853 /* Alright, we're going to report a stop. */
2854
fa593d66 2855 if (!non_stop && !stabilizing_threads)
6bf5e0ba
PA
2856 {
2857 /* In all-stop, stop all threads. */
7984d532 2858 stop_all_lwps (0, NULL);
6bf5e0ba
PA
2859
2860 /* If we're not waiting for a specific LWP, choose an event LWP
2861 from among those that have had events. Giving equal priority
2862 to all LWPs that have had events helps prevent
2863 starvation. */
2864 if (ptid_equal (ptid, minus_one_ptid))
2865 {
2866 event_child->status_pending_p = 1;
2867 event_child->status_pending = w;
2868
2869 select_event_lwp (&event_child);
2870
0bfdf32f
GB
2871 /* current_thread and event_child must stay in sync. */
2872 current_thread = get_lwp_thread (event_child);
ee1e2d4f 2873
6bf5e0ba
PA
2874 event_child->status_pending_p = 0;
2875 w = event_child->status_pending;
2876 }
2877
2878 /* Now that we've selected our final event LWP, cancel any
2879 breakpoints in other LWPs that have hit a GDB breakpoint.
2880 See the comment in cancel_breakpoints_callback to find out
2881 why. */
d86d4aaf 2882 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
fa593d66 2883
c03e6ccc
YQ
2884 /* If we were going a step-over, all other threads but the stepping one
2885 had been paused in start_step_over, with their suspend counts
2886 incremented. We don't want to do a full unstop/unpause, because we're
2887 in all-stop mode (so we want threads stopped), but we still need to
2888 unsuspend the other threads, to decrement their `suspended' count
2889 back. */
2890 if (step_over_finished)
2891 unsuspend_all_lwps (event_child);
2892
fa593d66
PA
2893 /* Stabilize threads (move out of jump pads). */
2894 stabilize_threads ();
6bf5e0ba
PA
2895 }
2896 else
2897 {
2898 /* If we just finished a step-over, then all threads had been
2899 momentarily paused. In all-stop, that's fine, we want
2900 threads stopped by now anyway. In non-stop, we need to
2901 re-resume threads that GDB wanted to be running. */
2902 if (step_over_finished)
7984d532 2903 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
2904 }
2905
5b1c542e 2906 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2907
0bfdf32f 2908 if (current_thread->last_resume_kind == resume_stop
8336d594 2909 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
2910 {
2911 /* A thread that has been requested to stop by GDB with vCont;t,
2912 and it stopped cleanly, so report as SIG0. The use of
2913 SIGSTOP is an implementation detail. */
a493e3e2 2914 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 2915 }
0bfdf32f 2916 else if (current_thread->last_resume_kind == resume_stop
8336d594 2917 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
2918 {
2919 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2920 but, it stopped for other reasons. */
2ea28649 2921 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
2922 }
2923 else
2924 {
2ea28649 2925 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
2926 }
2927
d50171e4
PA
2928 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2929
bd99dc85 2930 if (debug_threads)
87ce2a04
DE
2931 {
2932 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 2933 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
2934 ourstatus->kind, ourstatus->value.sig);
2935 debug_exit ();
2936 }
bd99dc85 2937
0bfdf32f 2938 return ptid_of (current_thread);
bd99dc85
PA
2939}
2940
2941/* Get rid of any pending event in the pipe. */
2942static void
2943async_file_flush (void)
2944{
2945 int ret;
2946 char buf;
2947
2948 do
2949 ret = read (linux_event_pipe[0], &buf, 1);
2950 while (ret >= 0 || (ret == -1 && errno == EINTR));
2951}
2952
2953/* Put something in the pipe, so the event loop wakes up. */
2954static void
2955async_file_mark (void)
2956{
2957 int ret;
2958
2959 async_file_flush ();
2960
2961 do
2962 ret = write (linux_event_pipe[1], "+", 1);
2963 while (ret == 0 || (ret == -1 && errno == EINTR));
2964
2965 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2966 be awakened anyway. */
2967}
2968
/* Wait for an event from inferior(s) matching PTID, storing the
   result in *OURSTATUS.  Thin wrapper around linux_wait_1 that keeps
   the async event pipe consistent with the events reported.  Returns
   the ptid of the LWP the event is for.  */

static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
2990
/* Send signal SIGNO to the LWP with id LWPID.  Returns 0 on success,
   -1 (with errno set) on failure, mirroring kill/tkill.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_unavailable;

    if (!tkill_unavailable)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;

	/* The running kernel lacks tkill; remember that and fall back
	   to plain kill from now on.  */
	tkill_unavailable = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3018
/* Externally-visible entry point for stopping an LWP; simply requests
   a stop by queueing a SIGSTOP via send_sigstop.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3024
0d62e5e8 3025static void
02fc4de7 3026send_sigstop (struct lwp_info *lwp)
0d62e5e8 3027{
bd99dc85 3028 int pid;
0d62e5e8 3029
d86d4aaf 3030 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3031
0d62e5e8
DJ
3032 /* If we already have a pending stop signal for this process, don't
3033 send another. */
54a0b537 3034 if (lwp->stop_expected)
0d62e5e8 3035 {
ae13219e 3036 if (debug_threads)
87ce2a04 3037 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3038
0d62e5e8
DJ
3039 return;
3040 }
3041
3042 if (debug_threads)
87ce2a04 3043 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3044
d50171e4 3045 lwp->stop_expected = 1;
bd99dc85 3046 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3047}
3048
7984d532
PA
3049static int
3050send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3051{
d86d4aaf
DE
3052 struct thread_info *thread = (struct thread_info *) entry;
3053 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3054
7984d532
PA
3055 /* Ignore EXCEPT. */
3056 if (lwp == except)
3057 return 0;
3058
02fc4de7 3059 if (lwp->stopped)
7984d532 3060 return 0;
02fc4de7
PA
3061
3062 send_sigstop (lwp);
7984d532
PA
3063 return 0;
3064}
3065
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  Iteration callback; EXCEPT identifies one LWP to leave
   entirely alone (neither suspended nor signalled).  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
				   void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  /* Note the suspend count is bumped even for LWPs that are already
     stopped; send_sigstop_callback only skips the actual signal.  */
  lwp->suspended++;

  return send_sigstop_callback (entry, except);
}
3083
/* Record that LWP has died with wait status WSTAT.  The status is
   left pending so it can still be reported, and all stop bookkeeping
   is cleared so no further stop attempts target the dead LWP.  */
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* It's dead, really.  */
  lwp->dead = 1;

  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3100
/* Wait for all children to stop for the SIGSTOPs we just queued.  All
   events are pulled in and left pending; on return there are no
   unwaited-for children.  If the previously current thread died while
   waiting, a replacement (or NULL, in non-stop mode) is selected.  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember which thread was current so it can be restored after the
     wait, which may switch threads internally.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->entry.id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_thread = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_thread (0);
	}
    }
}
3148
fa593d66
PA
3149/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3150 move it out, because we need to report the stop event to GDB. For
3151 example, if the user puts a breakpoint in the jump pad, it's
3152 because she wants to debug it. */
3153
3154static int
3155stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3156{
d86d4aaf
DE
3157 struct thread_info *thread = (struct thread_info *) entry;
3158 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3159
3160 gdb_assert (lwp->suspended == 0);
3161 gdb_assert (lwp->stopped);
3162
3163 /* Allow debugging the jump pad, gdb_collect, etc.. */
3164 return (supports_fast_tracepoints ()
58b4daa5 3165 && agent_loaded_p ()
fa593d66
PA
3166 && (gdb_breakpoint_here (lwp->stop_pc)
3167 || lwp->stopped_by_watchpoint
3168 || thread->last_resume_kind == resume_step)
3169 && linux_fast_tracepoint_collecting (lwp, NULL));
3170}
3171
/* Iteration callback applied to each stopped LWP: if the LWP is
   inside a fast tracepoint jump pad and can safely be moved out of
   it, defer any pending signal and resume it so it finishes the
   collect; otherwise leave it where it is, suspended.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Consume the pending status here; the signal it carried is
	     re-queued as a deferred signal for later delivery.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
3210
3211static int
3212lwp_running (struct inferior_list_entry *entry, void *data)
3213{
d86d4aaf
DE
3214 struct thread_info *thread = (struct thread_info *) entry;
3215 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3216
3217 if (lwp->dead)
3218 return 0;
3219 if (lwp->stopped)
3220 return 0;
3221 return 1;
3222}
3223
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* The global state is raised before any SIGSTOP is sent and only
     reset after wait_for_sigstop has pulled in every event.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
  else
    find_inferior (&all_threads, send_sigstop_callback, except);
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
3262
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.
   INFO, if non-NULL, is the siginfo to deliver along with SIGNAL.

   Does nothing if the LWP is not stopped.  The signal may be queued
   rather than delivered immediately when the LWP already has pending
   work (pending status/signals, a breakpoint reinsert, or a fast
   tracepoint collect in progress).  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int fast_tp_collecting;

  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue", signal,
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      debug_printf ("  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (the tail of the list) so
	 signals are delivered in the order they arrived.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
3469
/* A set of resume requests, handed to linux_set_resume_request (via
   find_inferior) to match threads against.  */
struct thread_resume_array
{
  /* Array of resume requests.  */
  struct thread_resume *resume;
  /* Number of elements in RESUME.  */
  size_t n;
};
64386c31 3475
/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  /* A redundant stop request for an already stopped/stopping
	     thread is skipped; keep scanning for a better match.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
3551
20ad9378
DE
3552/* find_inferior callback for linux_resume.
3553 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 3554
bd99dc85
PA
3555static int
3556resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3557{
d86d4aaf
DE
3558 struct thread_info *thread = (struct thread_info *) entry;
3559 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3560
bd99dc85
PA
3561 /* LWPs which will not be resumed are not interesting, because
3562 we might not wait for them next time through linux_wait. */
2bd7c093 3563 if (lwp->resume == NULL)
bd99dc85 3564 return 0;
64386c31 3565
bd99dc85 3566 if (lwp->status_pending_p)
d50171e4
PA
3567 * (int *) flag_p = 1;
3568
3569 return 0;
3570}
3571
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }
  /* NOTE(review): there is no early return when NEED_STEP_OVER is
     clear -- the breakpoint checks below may still request a
     step-over based solely on the current PC.  Confirm this
     fall-through is intentional.  */

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
3698
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Returns 1 (the step-over has been started).  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Pause and suspend every other LWP while the breakpoint is
     uninserted; they are unsuspended once the step-over finishes.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Record where to reinsert the breakpoint once the step is done,
     then take it (and any fast tracepoint jump) out of the way.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
3767
3768/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3769 start_step_over, if still there, and delete any reinsert
3770 breakpoints we've set, on non hardware single-step targets. */
3771
3772static int
3773finish_step_over (struct lwp_info *lwp)
3774{
3775 if (lwp->bp_reinsert != 0)
3776 {
3777 if (debug_threads)
87ce2a04 3778 debug_printf ("Finished step over.\n");
d50171e4
PA
3779
3780 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3781 may be no breakpoint to reinsert there by now. */
3782 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 3783 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
3784
3785 lwp->bp_reinsert = 0;
3786
3787 /* Delete any software-single-step reinsert breakpoints. No
3788 longer needed. We don't have to worry about other threads
3789 hitting this trap, and later not being able to explain it,
3790 because we were stepping over a breakpoint, and we hold all
3791 threads but LWP stopped while doing that. */
3792 if (!can_hardware_single_step ())
3793 delete_reinsert_breakpoints ();
3794
3795 step_over_bkpt = null_ptid;
3796 return 1;
3797 }
3798 else
3799 return 0;
3800}
3801
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG points at the leave_all_stopped flag computed by
     linux_resume: when set, no thread is actually resumed here.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* Threads with no resume request are left untouched.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
3923
/* Resume (or queue) all threads according to RESUME_INFO, an array of
   N resume requests from GDB.  Decides globally whether any thread
   may actually run: if some thread has a pending status to report, or
   some thread must first step over a breakpoint, all threads are left
   stopped and only signals are queued.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Match each thread with its resume request, if any.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
3987
3988/* This function is called once per thread. We check the thread's
3989 last resume request, which will tell us whether to resume, step, or
3990 leave the thread stopped. Any signal the client requested to be
3991 delivered has already been enqueued at this point.
3992
3993 If any thread that GDB wants running is stopped at an internal
3994 breakpoint that needs stepping over, we start a step-over operation
3995 on that particular thread, and leave all others stopped. */
3996
7984d532
PA
3997static int
3998proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 3999{
d86d4aaf
DE
4000 struct thread_info *thread = (struct thread_info *) entry;
4001 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4002 int step;
4003
7984d532
PA
4004 if (lwp == except)
4005 return 0;
d50171e4
PA
4006
4007 if (debug_threads)
d86d4aaf 4008 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4009
4010 if (!lwp->stopped)
4011 {
4012 if (debug_threads)
d86d4aaf 4013 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
7984d532 4014 return 0;
d50171e4
PA
4015 }
4016
02fc4de7
PA
4017 if (thread->last_resume_kind == resume_stop
4018 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4019 {
4020 if (debug_threads)
87ce2a04 4021 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4022 lwpid_of (thread));
7984d532 4023 return 0;
d50171e4
PA
4024 }
4025
4026 if (lwp->status_pending_p)
4027 {
4028 if (debug_threads)
87ce2a04 4029 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4030 lwpid_of (thread));
7984d532 4031 return 0;
d50171e4
PA
4032 }
4033
7984d532
PA
4034 gdb_assert (lwp->suspended >= 0);
4035
d50171e4
PA
4036 if (lwp->suspended)
4037 {
4038 if (debug_threads)
d86d4aaf 4039 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
7984d532 4040 return 0;
d50171e4
PA
4041 }
4042
1a981360
PA
4043 if (thread->last_resume_kind == resume_stop
4044 && lwp->pending_signals_to_report == NULL
4045 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
4046 {
4047 /* We haven't reported this LWP as stopped yet (otherwise, the
4048 last_status.kind check above would catch it, and we wouldn't
4049 reach here. This LWP may have been momentarily paused by a
4050 stop_all_lwps call while handling for example, another LWP's
4051 step-over. In that case, the pending expected SIGSTOP signal
4052 that was queued at vCont;t handling time will have already
4053 been consumed by wait_for_sigstop, and so we need to requeue
4054 another one here. Note that if the LWP already has a SIGSTOP
4055 pending, this is a no-op. */
4056
4057 if (debug_threads)
87ce2a04
DE
4058 debug_printf ("Client wants LWP %ld to stop. "
4059 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 4060 lwpid_of (thread));
02fc4de7
PA
4061
4062 send_sigstop (lwp);
4063 }
4064
8336d594 4065 step = thread->last_resume_kind == resume_step;
d50171e4 4066 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4067 return 0;
4068}
4069
4070static int
4071unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4072{
d86d4aaf
DE
4073 struct thread_info *thread = (struct thread_info *) entry;
4074 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4075
4076 if (lwp == except)
4077 return 0;
4078
4079 lwp->suspended--;
4080 gdb_assert (lwp->suspended >= 0);
4081
4082 return proceed_one_lwp (entry, except);
d50171e4
PA
4083}
4084
4085/* When we finish a step-over, set threads running again. If there's
4086 another thread that may need a step-over, now's the time to start
4087 it. Eventually, we'll move all threads past their breakpoints. */
4088
4089static void
4090proceed_all_lwps (void)
4091{
d86d4aaf 4092 struct thread_info *need_step_over;
d50171e4
PA
4093
4094 /* If there is a thread which would otherwise be resumed, which is
4095 stopped at a breakpoint that needs stepping over, then don't
4096 resume any threads - have it step over the breakpoint with all
4097 other threads stopped, then resume all threads again. */
4098
4099 if (supports_breakpoints ())
4100 {
4101 need_step_over
d86d4aaf
DE
4102 = (struct thread_info *) find_inferior (&all_threads,
4103 need_step_over_p, NULL);
d50171e4
PA
4104
4105 if (need_step_over != NULL)
4106 {
4107 if (debug_threads)
87ce2a04
DE
4108 debug_printf ("proceed_all_lwps: found "
4109 "thread %ld needing a step-over\n",
4110 lwpid_of (need_step_over));
d50171e4 4111
d86d4aaf 4112 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4113 return;
4114 }
4115 }
5544ad89 4116
d50171e4 4117 if (debug_threads)
87ce2a04 4118 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4119
d86d4aaf 4120 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4121}
4122
4123/* Stopped LWPs that the client wanted to be running, that don't have
4124 pending statuses, are set to run again, except for EXCEPT, if not
4125 NULL. This undoes a stop_all_lwps call. */
4126
4127static void
7984d532 4128unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4129{
5544ad89
DJ
4130 if (debug_threads)
4131 {
87ce2a04 4132 debug_enter ();
d50171e4 4133 if (except)
87ce2a04 4134 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4135 lwpid_of (get_lwp_thread (except)));
5544ad89 4136 else
87ce2a04 4137 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4138 }
4139
7984d532 4140 if (unsuspend)
d86d4aaf 4141 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4142 else
d86d4aaf 4143 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4144
4145 if (debug_threads)
4146 {
4147 debug_printf ("unstop_all_lwps done\n");
4148 debug_exit ();
4149 }
0d62e5e8
DJ
4150}
4151
58caa3dc
DJ
4152
4153#ifdef HAVE_LINUX_REGSETS
4154
1faeff08
MR
4155#define use_linux_regsets 1
4156
030031ee
PA
4157/* Returns true if REGSET has been disabled. */
4158
4159static int
4160regset_disabled (struct regsets_info *info, struct regset_info *regset)
4161{
4162 return (info->disabled_regsets != NULL
4163 && info->disabled_regsets[regset - info->regsets]);
4164}
4165
4166/* Disable REGSET. */
4167
4168static void
4169disable_regset (struct regsets_info *info, struct regset_info *regset)
4170{
4171 int dr_offset;
4172
4173 dr_offset = regset - info->regsets;
4174 if (info->disabled_regsets == NULL)
4175 info->disabled_regsets = xcalloc (1, info->num_regsets);
4176 info->disabled_regsets[dr_offset] = 1;
4177}
4178
58caa3dc 4179static int
3aee8918
PA
4180regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4181 struct regcache *regcache)
58caa3dc
DJ
4182{
4183 struct regset_info *regset;
e9d25b98 4184 int saw_general_regs = 0;
95954743 4185 int pid;
1570b33e 4186 struct iovec iov;
58caa3dc 4187
0bfdf32f 4188 pid = lwpid_of (current_thread);
28eef672 4189 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4190 {
1570b33e
L
4191 void *buf, *data;
4192 int nt_type, res;
58caa3dc 4193
030031ee 4194 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 4195 continue;
58caa3dc 4196
bca929d3 4197 buf = xmalloc (regset->size);
1570b33e
L
4198
4199 nt_type = regset->nt_type;
4200 if (nt_type)
4201 {
4202 iov.iov_base = buf;
4203 iov.iov_len = regset->size;
4204 data = (void *) &iov;
4205 }
4206 else
4207 data = buf;
4208
dfb64f85 4209#ifndef __sparc__
f15f9948 4210 res = ptrace (regset->get_request, pid,
b8e1b30e 4211 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4212#else
1570b33e 4213 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4214#endif
58caa3dc
DJ
4215 if (res < 0)
4216 {
4217 if (errno == EIO)
4218 {
52fa2412 4219 /* If we get EIO on a regset, do not try it again for
3aee8918 4220 this process mode. */
030031ee 4221 disable_regset (regsets_info, regset);
58caa3dc 4222 }
e5a9158d
AA
4223 else if (errno == ENODATA)
4224 {
4225 /* ENODATA may be returned if the regset is currently
4226 not "active". This can happen in normal operation,
4227 so suppress the warning in this case. */
4228 }
58caa3dc
DJ
4229 else
4230 {
0d62e5e8 4231 char s[256];
95954743
PA
4232 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4233 pid);
0d62e5e8 4234 perror (s);
58caa3dc
DJ
4235 }
4236 }
098dbe61
AA
4237 else
4238 {
4239 if (regset->type == GENERAL_REGS)
4240 saw_general_regs = 1;
4241 regset->store_function (regcache, buf);
4242 }
fdeb2a12 4243 free (buf);
58caa3dc 4244 }
e9d25b98
DJ
4245 if (saw_general_regs)
4246 return 0;
4247 else
4248 return 1;
58caa3dc
DJ
4249}
4250
/* Write back all registers covered by REGSETS_INFO from REGCACHE to
   the inferior via ptrace regset requests.  Each regset is first
   read, overlaid with the cached values, then written back, so
   kernel-side fields not tracked in the regcache are preserved.
   Returns 0 if the general registers were among the regsets written,
   1 otherwise (the caller uses this as the ALL argument to the
   PTRACE_POKEUSER fallback path).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty or disabled regsets, and ones with no
	 fill_function (nothing to write back).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      /* NT_* typed regsets are transferred through an iovec;
	 otherwise the buffer is passed directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4338
1faeff08 4339#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4340
1faeff08 4341#define use_linux_regsets 0
3aee8918
PA
4342#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4343#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4344
58caa3dc 4345#endif
1faeff08
MR
4346
4347/* Return 1 if register REGNO is supported by one of the regset ptrace
4348 calls or 0 if it has to be transferred individually. */
4349
4350static int
3aee8918 4351linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4352{
4353 unsigned char mask = 1 << (regno % 8);
4354 size_t index = regno / 8;
4355
4356 return (use_linux_regsets
3aee8918
PA
4357 && (regs_info->regset_bitmap == NULL
4358 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4359}
4360
58caa3dc 4361#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4362
4363int
3aee8918 4364register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4365{
4366 int addr;
4367
3aee8918 4368 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4369 error ("Invalid register number %d.", regnum);
4370
3aee8918 4371 addr = usrregs->regmap[regnum];
1faeff08
MR
4372
4373 return addr;
4374}
4375
/* Fetch one register (REGNO) from the inferior's "user area" into
   REGCACHE, one PTRACE_PEEKUSER word at a time.  Silently returns for
   registers that are out of range, unfetchable per the low target, or
   have no user-area offset.  Errors out on a ptrace failure.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target post-process the raw bytes if it wants to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4419
/* Store one register (REGNO) from REGCACHE into the inferior's "user
   area", one PTRACE_POKEUSER word at a time.  Silently returns for
   registers that are out of range, unstorable per the low target, or
   have no user-area offset.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; zero-fill so the padding bytes are deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target pre-process the bytes if it wants to.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4474
4475/* Fetch all registers, or just one, from the child process.
4476 If REGNO is -1, do this for all registers, skipping any that are
4477 assumed to have been retrieved by regsets_fetch_inferior_registers,
4478 unless ALL is non-zero.
4479 Otherwise, REGNO specifies which register (so we can save time). */
4480static void
3aee8918
PA
4481usr_fetch_inferior_registers (const struct regs_info *regs_info,
4482 struct regcache *regcache, int regno, int all)
1faeff08 4483{
3aee8918
PA
4484 struct usrregs_info *usr = regs_info->usrregs;
4485
1faeff08
MR
4486 if (regno == -1)
4487 {
3aee8918
PA
4488 for (regno = 0; regno < usr->num_regs; regno++)
4489 if (all || !linux_register_in_regsets (regs_info, regno))
4490 fetch_register (usr, regcache, regno);
1faeff08
MR
4491 }
4492 else
3aee8918 4493 fetch_register (usr, regcache, regno);
1faeff08
MR
4494}
4495
4496/* Store our register values back into the inferior.
4497 If REGNO is -1, do this for all registers, skipping any that are
4498 assumed to have been saved by regsets_store_inferior_registers,
4499 unless ALL is non-zero.
4500 Otherwise, REGNO specifies which register (so we can save time). */
4501static void
3aee8918
PA
4502usr_store_inferior_registers (const struct regs_info *regs_info,
4503 struct regcache *regcache, int regno, int all)
1faeff08 4504{
3aee8918
PA
4505 struct usrregs_info *usr = regs_info->usrregs;
4506
1faeff08
MR
4507 if (regno == -1)
4508 {
3aee8918
PA
4509 for (regno = 0; regno < usr->num_regs; regno++)
4510 if (all || !linux_register_in_regsets (regs_info, regno))
4511 store_register (usr, regcache, regno);
1faeff08
MR
4512 }
4513 else
3aee8918 4514 store_register (usr, regcache, regno);
1faeff08
MR
4515}
4516
4517#else /* !HAVE_LINUX_USRREGS */
4518
3aee8918
PA
4519#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4520#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4521
58caa3dc 4522#endif
1faeff08
MR
4523
4524
/* Fetch REGCACHE's registers (all if REGNO is -1, else just REGNO)
   from the inferior.  Tries the low target's per-register hook and
   regset requests first, falling back to per-register user-area
   transfers where needed.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the low target's per-register fetch hook a crack at
	 every register first, if it has one.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL is non-zero if the regsets did not cover the general
	 registers; the usrregs path then fetches everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A non-zero return from the hook means it handled the
	 register entirely.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4557
/* Store REGCACHE's registers (all if REGNO is -1, else just REGNO)
   back into the inferior.  Tries regset requests first, falling back
   to per-register user-area transfers where needed.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL is non-zero if the regsets did not cover the general
	 registers; the usrregs path then stores everything.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4582
da6d8c04 4583
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno from a failing PTRACE_PEEKTEXT otherwise (in which case
   the bytes successfully read so far are still copied out).  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords, stopping at the first failure.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer, discarding the
     alignment padding at the front of the first word.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
4671
93ae6fdc
PA
4672/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4673 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 4674 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 4675
ce3a066d 4676static int
f450004a 4677linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4678{
4679 register int i;
4680 /* Round starting address down to longword boundary. */
4681 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4682 /* Round ending address up; get number of longwords that makes. */
4683 register int count
493e2a69
MS
4684 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4685 / sizeof (PTRACE_XFER_TYPE);
4686
da6d8c04 4687 /* Allocate buffer of that many longwords. */
493e2a69
MS
4688 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4689 alloca (count * sizeof (PTRACE_XFER_TYPE));
4690
0bfdf32f 4691 int pid = lwpid_of (current_thread);
da6d8c04 4692
f0ae6fc3
PA
4693 if (len == 0)
4694 {
4695 /* Zero length write always succeeds. */
4696 return 0;
4697 }
4698
0d62e5e8
DJ
4699 if (debug_threads)
4700 {
58d6951d
DJ
4701 /* Dump up to four bytes. */
4702 unsigned int val = * (unsigned int *) myaddr;
4703 if (len == 1)
4704 val = val & 0xff;
4705 else if (len == 2)
4706 val = val & 0xffff;
4707 else if (len == 3)
4708 val = val & 0xffffff;
87ce2a04
DE
4709 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4710 val, (long)memaddr);
0d62e5e8
DJ
4711 }
4712
da6d8c04
DJ
4713 /* Fill start and end extra bytes of buffer with existing memory data. */
4714
93ae6fdc 4715 errno = 0;
14ce3065
DE
4716 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4717 about coercing an 8 byte integer to a 4 byte pointer. */
4718 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
4719 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4720 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4721 if (errno)
4722 return errno;
da6d8c04
DJ
4723
4724 if (count > 1)
4725 {
93ae6fdc 4726 errno = 0;
da6d8c04 4727 buffer[count - 1]
95954743 4728 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4729 /* Coerce to a uintptr_t first to avoid potential gcc warning
4730 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4731 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 4732 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 4733 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4734 if (errno)
4735 return errno;
da6d8c04
DJ
4736 }
4737
93ae6fdc 4738 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4739
493e2a69
MS
4740 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4741 myaddr, len);
da6d8c04
DJ
4742
4743 /* Write the entire buffer. */
4744
4745 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4746 {
4747 errno = 0;
14ce3065
DE
4748 ptrace (PTRACE_POKETEXT, pid,
4749 /* Coerce to a uintptr_t first to avoid potential gcc warning
4750 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
4751 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4752 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
4753 if (errno)
4754 return errno;
4755 }
4756
4757 return 0;
4758}
2f2893d9
DJ
4759
4760static void
4761linux_look_up_symbols (void)
4762{
0d62e5e8 4763#ifdef USE_THREAD_DB
95954743
PA
4764 struct process_info *proc = current_process ();
4765
cdbfd419 4766 if (proc->private->thread_db != NULL)
0d62e5e8
DJ
4767 return;
4768
96d7229d
LM
4769 /* If the kernel supports tracing clones, then we don't need to
4770 use the magic thread event breakpoint to learn about
4771 threads. */
4772 thread_db_init (!linux_supports_traceclone ());
0d62e5e8
DJ
4773#endif
4774}
4775
e5379b03 4776static void
ef57601b 4777linux_request_interrupt (void)
e5379b03 4778{
a1928bad 4779 extern unsigned long signal_pid;
e5379b03 4780
78708b7c
PA
4781 /* Send a SIGINT to the process group. This acts just like the user
4782 typed a ^C on the controlling terminal. */
4783 kill (-signal_pid, SIGINT);
e5379b03
DJ
4784}
4785
aa691b87
RM
4786/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4787 to debugger memory starting at MYADDR. */
4788
4789static int
f450004a 4790linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4791{
4792 char filename[PATH_MAX];
4793 int fd, n;
0bfdf32f 4794 int pid = lwpid_of (current_thread);
aa691b87 4795
6cebaf6e 4796 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4797
4798 fd = open (filename, O_RDONLY);
4799 if (fd < 0)
4800 return -1;
4801
4802 if (offset != (CORE_ADDR) 0
4803 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4804 n = -1;
4805 else
4806 n = read (fd, myaddr, len);
4807
4808 close (fd);
4809
4810 return n;
4811}
4812
d993e290
PA
4813/* These breakpoint and watchpoint related wrapper functions simply
4814 pass on the function call if the target has registered a
4815 corresponding function. */
e013ee27
OF
4816
4817static int
802e8e6d
PA
4818linux_supports_z_point_type (char z_type)
4819{
4820 return (the_low_target.supports_z_point_type != NULL
4821 && the_low_target.supports_z_point_type (z_type));
4822}
4823
4824static int
4825linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4826 int size, struct raw_breakpoint *bp)
e013ee27 4827{
d993e290 4828 if (the_low_target.insert_point != NULL)
802e8e6d 4829 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
4830 else
4831 /* Unsupported (see target.h). */
4832 return 1;
4833}
4834
4835static int
802e8e6d
PA
4836linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4837 int size, struct raw_breakpoint *bp)
e013ee27 4838{
d993e290 4839 if (the_low_target.remove_point != NULL)
802e8e6d 4840 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
4841 else
4842 /* Unsupported (see target.h). */
4843 return 1;
4844}
4845
4846static int
4847linux_stopped_by_watchpoint (void)
4848{
0bfdf32f 4849 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
4850
4851 return lwp->stopped_by_watchpoint;
e013ee27
OF
4852}
4853
4854static CORE_ADDR
4855linux_stopped_data_address (void)
4856{
0bfdf32f 4857 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
4858
4859 return lwp->stopped_data_address;
e013ee27
OF
4860}
4861
db0dfaa0
LM
4862#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4863 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4864 && defined(PT_TEXT_END_ADDR)
4865
4866/* This is only used for targets that define PT_TEXT_ADDR,
4867 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4868 the target has different ways of acquiring this information, like
4869 loadmaps. */
52fb6437
NS
4870
4871/* Under uClinux, programs are loaded at non-zero offsets, which we need
4872 to tell gdb about. */
4873
4874static int
4875linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4876{
52fb6437 4877 unsigned long text, text_end, data;
0bfdf32f 4878 int pid = lwpid_of (get_thread_lwp (current_thread));
52fb6437
NS
4879
4880 errno = 0;
4881
b8e1b30e
LM
4882 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4883 (PTRACE_TYPE_ARG4) 0);
4884 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4885 (PTRACE_TYPE_ARG4) 0);
4886 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4887 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
4888
4889 if (errno == 0)
4890 {
4891 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
4892 used by gdb) are relative to the beginning of the program,
4893 with the data segment immediately following the text segment.
4894 However, the actual runtime layout in memory may put the data
4895 somewhere else, so when we send gdb a data base-address, we
4896 use the real data base address and subtract the compile-time
4897 data base-address from it (which is just the length of the
4898 text segment). BSS immediately follows data in both
4899 cases. */
52fb6437
NS
4900 *text_p = text;
4901 *data_p = data - (text_end - text);
1b3f6016 4902
52fb6437
NS
4903 return 1;
4904 }
52fb6437
NS
4905 return 0;
4906}
4907#endif
4908
07e059b5
VP
4909static int
4910linux_qxfer_osdata (const char *annex,
1b3f6016
PA
4911 unsigned char *readbuf, unsigned const char *writebuf,
4912 CORE_ADDR offset, int len)
07e059b5 4913{
d26e3629 4914 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4915}
4916
d0722149
DE
4917/* Convert a native/host siginfo object, into/from the siginfo in the
4918 layout of the inferiors' architecture. */
4919
4920static void
a5362b9a 4921siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
4922{
4923 int done = 0;
4924
4925 if (the_low_target.siginfo_fixup != NULL)
4926 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4927
4928 /* If there was no callback, or the callback didn't do anything,
4929 then just do a straight memcpy. */
4930 if (!done)
4931 {
4932 if (direction == 1)
a5362b9a 4933 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 4934 else
a5362b9a 4935 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
4936 }
4937}
4938
4aa995e1
PA
4939static int
4940linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4941 unsigned const char *writebuf, CORE_ADDR offset, int len)
4942{
d0722149 4943 int pid;
a5362b9a
TS
4944 siginfo_t siginfo;
4945 char inf_siginfo[sizeof (siginfo_t)];
4aa995e1 4946
0bfdf32f 4947 if (current_thread == NULL)
4aa995e1
PA
4948 return -1;
4949
0bfdf32f 4950 pid = lwpid_of (current_thread);
4aa995e1
PA
4951
4952 if (debug_threads)
87ce2a04
DE
4953 debug_printf ("%s siginfo for lwp %d.\n",
4954 readbuf != NULL ? "Reading" : "Writing",
4955 pid);
4aa995e1 4956
0adea5f7 4957 if (offset >= sizeof (siginfo))
4aa995e1
PA
4958 return -1;
4959
b8e1b30e 4960 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
4961 return -1;
4962
d0722149
DE
4963 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4964 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4965 inferior with a 64-bit GDBSERVER should look the same as debugging it
4966 with a 32-bit GDBSERVER, we need to convert it. */
4967 siginfo_fixup (&siginfo, inf_siginfo, 0);
4968
4aa995e1
PA
4969 if (offset + len > sizeof (siginfo))
4970 len = sizeof (siginfo) - offset;
4971
4972 if (readbuf != NULL)
d0722149 4973 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4974 else
4975 {
d0722149
DE
4976 memcpy (inf_siginfo + offset, writebuf, len);
4977
4978 /* Convert back to ptrace layout before flushing it out. */
4979 siginfo_fixup (&siginfo, inf_siginfo, 1);
4980
b8e1b30e 4981 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
4982 return -1;
4983 }
4984
4985 return len;
4986}
4987
bd99dc85
PA
4988/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4989 so we notice when children change state; as the handler for the
4990 sigsuspend in my_waitpid. */
4991
4992static void
4993sigchld_handler (int signo)
4994{
4995 int old_errno = errno;
4996
4997 if (debug_threads)
e581f2b4
PA
4998 {
4999 do
5000 {
5001 /* fprintf is not async-signal-safe, so call write
5002 directly. */
5003 if (write (2, "sigchld_handler\n",
5004 sizeof ("sigchld_handler\n") - 1) < 0)
5005 break; /* just ignore */
5006 } while (0);
5007 }
bd99dc85
PA
5008
5009 if (target_is_async_p ())
5010 async_file_mark (); /* trigger a linux_wait */
5011
5012 errno = old_errno;
5013}
5014
5015static int
5016linux_supports_non_stop (void)
5017{
5018 return 1;
5019}
5020
5021static int
5022linux_async (int enable)
5023{
7089dca4 5024 int previous = target_is_async_p ();
bd99dc85 5025
8336d594 5026 if (debug_threads)
87ce2a04
DE
5027 debug_printf ("linux_async (%d), previous=%d\n",
5028 enable, previous);
8336d594 5029
bd99dc85
PA
5030 if (previous != enable)
5031 {
5032 sigset_t mask;
5033 sigemptyset (&mask);
5034 sigaddset (&mask, SIGCHLD);
5035
5036 sigprocmask (SIG_BLOCK, &mask, NULL);
5037
5038 if (enable)
5039 {
5040 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
5041 {
5042 linux_event_pipe[0] = -1;
5043 linux_event_pipe[1] = -1;
5044 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5045
5046 warning ("creating event pipe failed.");
5047 return previous;
5048 }
bd99dc85
PA
5049
5050 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5051 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5052
5053 /* Register the event loop handler. */
5054 add_file_handler (linux_event_pipe[0],
5055 handle_target_event, NULL);
5056
5057 /* Always trigger a linux_wait. */
5058 async_file_mark ();
5059 }
5060 else
5061 {
5062 delete_file_handler (linux_event_pipe[0]);
5063
5064 close (linux_event_pipe[0]);
5065 close (linux_event_pipe[1]);
5066 linux_event_pipe[0] = -1;
5067 linux_event_pipe[1] = -1;
5068 }
5069
5070 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5071 }
5072
5073 return previous;
5074}
5075
5076static int
5077linux_start_non_stop (int nonstop)
5078{
5079 /* Register or unregister from event-loop accordingly. */
5080 linux_async (nonstop);
aa96c426
GB
5081
5082 if (target_is_async_p () != (nonstop != 0))
5083 return -1;
5084
bd99dc85
PA
5085 return 0;
5086}
5087
cf8fd78b
PA
5088static int
5089linux_supports_multi_process (void)
5090{
5091 return 1;
5092}
5093
03583c20
UW
5094static int
5095linux_supports_disable_randomization (void)
5096{
5097#ifdef HAVE_PERSONALITY
5098 return 1;
5099#else
5100 return 0;
5101#endif
5102}
efcbbd14 5103
d1feda86
YQ
5104static int
5105linux_supports_agent (void)
5106{
5107 return 1;
5108}
5109
c2d6af84
PA
5110static int
5111linux_supports_range_stepping (void)
5112{
5113 if (*the_low_target.supports_range_stepping == NULL)
5114 return 0;
5115
5116 return (*the_low_target.supports_range_stepping) ();
5117}
5118
efcbbd14
UW
5119/* Enumerate spufs IDs for process PID. */
5120static int
5121spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5122{
5123 int pos = 0;
5124 int written = 0;
5125 char path[128];
5126 DIR *dir;
5127 struct dirent *entry;
5128
5129 sprintf (path, "/proc/%ld/fd", pid);
5130 dir = opendir (path);
5131 if (!dir)
5132 return -1;
5133
5134 rewinddir (dir);
5135 while ((entry = readdir (dir)) != NULL)
5136 {
5137 struct stat st;
5138 struct statfs stfs;
5139 int fd;
5140
5141 fd = atoi (entry->d_name);
5142 if (!fd)
5143 continue;
5144
5145 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5146 if (stat (path, &st) != 0)
5147 continue;
5148 if (!S_ISDIR (st.st_mode))
5149 continue;
5150
5151 if (statfs (path, &stfs) != 0)
5152 continue;
5153 if (stfs.f_type != SPUFS_MAGIC)
5154 continue;
5155
5156 if (pos >= offset && pos + 4 <= offset + len)
5157 {
5158 *(unsigned int *)(buf + pos - offset) = fd;
5159 written += 4;
5160 }
5161 pos += 4;
5162 }
5163
5164 closedir (dir);
5165 return written;
5166}
5167
5168/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5169 object type, using the /proc file system. */
5170static int
5171linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5172 unsigned const char *writebuf,
5173 CORE_ADDR offset, int len)
5174{
0bfdf32f 5175 long pid = lwpid_of (current_thread);
efcbbd14
UW
5176 char buf[128];
5177 int fd = 0;
5178 int ret = 0;
5179
5180 if (!writebuf && !readbuf)
5181 return -1;
5182
5183 if (!*annex)
5184 {
5185 if (!readbuf)
5186 return -1;
5187 else
5188 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5189 }
5190
5191 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5192 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5193 if (fd <= 0)
5194 return -1;
5195
5196 if (offset != 0
5197 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5198 {
5199 close (fd);
5200 return 0;
5201 }
5202
5203 if (writebuf)
5204 ret = write (fd, writebuf, (size_t) len);
5205 else
5206 ret = read (fd, readbuf, (size_t) len);
5207
5208 close (fd);
5209 return ret;
5210}
5211
723b724b 5212#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
5213struct target_loadseg
5214{
5215 /* Core address to which the segment is mapped. */
5216 Elf32_Addr addr;
5217 /* VMA recorded in the program header. */
5218 Elf32_Addr p_vaddr;
5219 /* Size of this segment in memory. */
5220 Elf32_Word p_memsz;
5221};
5222
723b724b 5223# if defined PT_GETDSBT
78d85199
YQ
5224struct target_loadmap
5225{
5226 /* Protocol version number, must be zero. */
5227 Elf32_Word version;
5228 /* Pointer to the DSBT table, its size, and the DSBT index. */
5229 unsigned *dsbt_table;
5230 unsigned dsbt_size, dsbt_index;
5231 /* Number of segments in this map. */
5232 Elf32_Word nsegs;
5233 /* The actual memory map. */
5234 struct target_loadseg segs[/*nsegs*/];
5235};
723b724b
MF
5236# define LINUX_LOADMAP PT_GETDSBT
5237# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5238# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5239# else
5240struct target_loadmap
5241{
5242 /* Protocol version number, must be zero. */
5243 Elf32_Half version;
5244 /* Number of segments in this map. */
5245 Elf32_Half nsegs;
5246 /* The actual memory map. */
5247 struct target_loadseg segs[/*nsegs*/];
5248};
5249# define LINUX_LOADMAP PTRACE_GETFDPIC
5250# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5251# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5252# endif
78d85199 5253
78d85199
YQ
5254static int
5255linux_read_loadmap (const char *annex, CORE_ADDR offset,
5256 unsigned char *myaddr, unsigned int len)
5257{
0bfdf32f 5258 int pid = lwpid_of (current_thread);
78d85199
YQ
5259 int addr = -1;
5260 struct target_loadmap *data = NULL;
5261 unsigned int actual_length, copy_length;
5262
5263 if (strcmp (annex, "exec") == 0)
723b724b 5264 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 5265 else if (strcmp (annex, "interp") == 0)
723b724b 5266 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
5267 else
5268 return -1;
5269
723b724b 5270 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
5271 return -1;
5272
5273 if (data == NULL)
5274 return -1;
5275
5276 actual_length = sizeof (struct target_loadmap)
5277 + sizeof (struct target_loadseg) * data->nsegs;
5278
5279 if (offset < 0 || offset > actual_length)
5280 return -1;
5281
5282 copy_length = actual_length - offset < len ? actual_length - offset : len;
5283 memcpy (myaddr, (char *) data + offset, copy_length);
5284 return copy_length;
5285}
723b724b
MF
5286#else
5287# define linux_read_loadmap NULL
5288#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5289
1570b33e
L
5290static void
5291linux_process_qsupported (const char *query)
5292{
5293 if (the_low_target.process_qsupported != NULL)
5294 the_low_target.process_qsupported (query);
5295}
5296
219f2f23
PA
5297static int
5298linux_supports_tracepoints (void)
5299{
5300 if (*the_low_target.supports_tracepoints == NULL)
5301 return 0;
5302
5303 return (*the_low_target.supports_tracepoints) ();
5304}
5305
5306static CORE_ADDR
5307linux_read_pc (struct regcache *regcache)
5308{
5309 if (the_low_target.get_pc == NULL)
5310 return 0;
5311
5312 return (*the_low_target.get_pc) (regcache);
5313}
5314
5315static void
5316linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5317{
5318 gdb_assert (the_low_target.set_pc != NULL);
5319
5320 (*the_low_target.set_pc) (regcache, pc);
5321}
5322
8336d594
PA
5323static int
5324linux_thread_stopped (struct thread_info *thread)
5325{
5326 return get_thread_lwp (thread)->stopped;
5327}
5328
5329/* This exposes stop-all-threads functionality to other modules. */
5330
5331static void
7984d532 5332linux_pause_all (int freeze)
8336d594 5333{
7984d532
PA
5334 stop_all_lwps (freeze, NULL);
5335}
5336
5337/* This exposes unstop-all-threads functionality to other gdbserver
5338 modules. */
5339
5340static void
5341linux_unpause_all (int unfreeze)
5342{
5343 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5344}
5345
90d74c30
PA
5346static int
5347linux_prepare_to_access_memory (void)
5348{
5349 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5350 running LWP. */
5351 if (non_stop)
5352 linux_pause_all (1);
5353 return 0;
5354}
5355
5356static void
0146f85b 5357linux_done_accessing_memory (void)
90d74c30
PA
5358{
5359 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5360 running LWP. */
5361 if (non_stop)
5362 linux_unpause_all (1);
5363}
5364
fa593d66
PA
5365static int
5366linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5367 CORE_ADDR collector,
5368 CORE_ADDR lockaddr,
5369 ULONGEST orig_size,
5370 CORE_ADDR *jump_entry,
405f8e94
SS
5371 CORE_ADDR *trampoline,
5372 ULONGEST *trampoline_size,
fa593d66
PA
5373 unsigned char *jjump_pad_insn,
5374 ULONGEST *jjump_pad_insn_size,
5375 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
5376 CORE_ADDR *adjusted_insn_addr_end,
5377 char *err)
fa593d66
PA
5378{
5379 return (*the_low_target.install_fast_tracepoint_jump_pad)
5380 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
5381 jump_entry, trampoline, trampoline_size,
5382 jjump_pad_insn, jjump_pad_insn_size,
5383 adjusted_insn_addr, adjusted_insn_addr_end,
5384 err);
fa593d66
PA
5385}
5386
6a271cae
PA
5387static struct emit_ops *
5388linux_emit_ops (void)
5389{
5390 if (the_low_target.emit_ops != NULL)
5391 return (*the_low_target.emit_ops) ();
5392 else
5393 return NULL;
5394}
5395
405f8e94
SS
5396static int
5397linux_get_min_fast_tracepoint_insn_len (void)
5398{
5399 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5400}
5401
2268b414
JK
5402/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5403
5404static int
5405get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5406 CORE_ADDR *phdr_memaddr, int *num_phdr)
5407{
5408 char filename[PATH_MAX];
5409 int fd;
5410 const int auxv_size = is_elf64
5411 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5412 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5413
5414 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5415
5416 fd = open (filename, O_RDONLY);
5417 if (fd < 0)
5418 return 1;
5419
5420 *phdr_memaddr = 0;
5421 *num_phdr = 0;
5422 while (read (fd, buf, auxv_size) == auxv_size
5423 && (*phdr_memaddr == 0 || *num_phdr == 0))
5424 {
5425 if (is_elf64)
5426 {
5427 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5428
5429 switch (aux->a_type)
5430 {
5431 case AT_PHDR:
5432 *phdr_memaddr = aux->a_un.a_val;
5433 break;
5434 case AT_PHNUM:
5435 *num_phdr = aux->a_un.a_val;
5436 break;
5437 }
5438 }
5439 else
5440 {
5441 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5442
5443 switch (aux->a_type)
5444 {
5445 case AT_PHDR:
5446 *phdr_memaddr = aux->a_un.a_val;
5447 break;
5448 case AT_PHNUM:
5449 *num_phdr = aux->a_un.a_val;
5450 break;
5451 }
5452 }
5453 }
5454
5455 close (fd);
5456
5457 if (*phdr_memaddr == 0 || *num_phdr == 0)
5458 {
5459 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5460 "phdr_memaddr = %ld, phdr_num = %d",
5461 (long) *phdr_memaddr, *num_phdr);
5462 return 2;
5463 }
5464
5465 return 0;
5466}
5467
5468/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5469
5470static CORE_ADDR
5471get_dynamic (const int pid, const int is_elf64)
5472{
5473 CORE_ADDR phdr_memaddr, relocation;
5474 int num_phdr, i;
5475 unsigned char *phdr_buf;
5476 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5477
5478 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5479 return 0;
5480
5481 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5482 phdr_buf = alloca (num_phdr * phdr_size);
5483
5484 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5485 return 0;
5486
5487 /* Compute relocation: it is expected to be 0 for "regular" executables,
5488 non-zero for PIE ones. */
5489 relocation = -1;
5490 for (i = 0; relocation == -1 && i < num_phdr; i++)
5491 if (is_elf64)
5492 {
5493 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5494
5495 if (p->p_type == PT_PHDR)
5496 relocation = phdr_memaddr - p->p_vaddr;
5497 }
5498 else
5499 {
5500 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5501
5502 if (p->p_type == PT_PHDR)
5503 relocation = phdr_memaddr - p->p_vaddr;
5504 }
5505
5506 if (relocation == -1)
5507 {
e237a7e2
JK
5508 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5509 any real world executables, including PIE executables, have always
5510 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5511 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5512 or present DT_DEBUG anyway (fpc binaries are statically linked).
5513
5514 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5515
5516 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5517
2268b414
JK
5518 return 0;
5519 }
5520
5521 for (i = 0; i < num_phdr; i++)
5522 {
5523 if (is_elf64)
5524 {
5525 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5526
5527 if (p->p_type == PT_DYNAMIC)
5528 return p->p_vaddr + relocation;
5529 }
5530 else
5531 {
5532 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5533
5534 if (p->p_type == PT_DYNAMIC)
5535 return p->p_vaddr + relocation;
5536 }
5537 }
5538
5539 return 0;
5540}
5541
5542/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
5543 can be 0 if the inferior does not yet have the library list initialized.
5544 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5545 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
5546
5547static CORE_ADDR
5548get_r_debug (const int pid, const int is_elf64)
5549{
5550 CORE_ADDR dynamic_memaddr;
5551 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5552 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 5553 CORE_ADDR map = -1;
2268b414
JK
5554
5555 dynamic_memaddr = get_dynamic (pid, is_elf64);
5556 if (dynamic_memaddr == 0)
367ba2c2 5557 return map;
2268b414
JK
5558
5559 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5560 {
5561 if (is_elf64)
5562 {
5563 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
75f62ce7 5564#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
5565 union
5566 {
5567 Elf64_Xword map;
5568 unsigned char buf[sizeof (Elf64_Xword)];
5569 }
5570 rld_map;
5571
5572 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5573 {
5574 if (linux_read_memory (dyn->d_un.d_val,
5575 rld_map.buf, sizeof (rld_map.buf)) == 0)
5576 return rld_map.map;
5577 else
5578 break;
5579 }
75f62ce7 5580#endif /* DT_MIPS_RLD_MAP */
2268b414 5581
367ba2c2
MR
5582 if (dyn->d_tag == DT_DEBUG && map == -1)
5583 map = dyn->d_un.d_val;
2268b414
JK
5584
5585 if (dyn->d_tag == DT_NULL)
5586 break;
5587 }
5588 else
5589 {
5590 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
75f62ce7 5591#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
5592 union
5593 {
5594 Elf32_Word map;
5595 unsigned char buf[sizeof (Elf32_Word)];
5596 }
5597 rld_map;
5598
5599 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5600 {
5601 if (linux_read_memory (dyn->d_un.d_val,
5602 rld_map.buf, sizeof (rld_map.buf)) == 0)
5603 return rld_map.map;
5604 else
5605 break;
5606 }
75f62ce7 5607#endif /* DT_MIPS_RLD_MAP */
2268b414 5608
367ba2c2
MR
5609 if (dyn->d_tag == DT_DEBUG && map == -1)
5610 map = dyn->d_un.d_val;
2268b414
JK
5611
5612 if (dyn->d_tag == DT_NULL)
5613 break;
5614 }
5615
5616 dynamic_memaddr += dyn_size;
5617 }
5618
367ba2c2 5619 return map;
2268b414
JK
5620}
5621
5622/* Read one pointer from MEMADDR in the inferior. */
5623
5624static int
5625read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5626{
485f1ee4
PA
5627 int ret;
5628
5629 /* Go through a union so this works on either big or little endian
5630 hosts, when the inferior's pointer size is smaller than the size
5631 of CORE_ADDR. It is assumed the inferior's endianness is the
5632 same of the superior's. */
5633 union
5634 {
5635 CORE_ADDR core_addr;
5636 unsigned int ui;
5637 unsigned char uc;
5638 } addr;
5639
5640 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5641 if (ret == 0)
5642 {
5643 if (ptr_size == sizeof (CORE_ADDR))
5644 *ptr = addr.core_addr;
5645 else if (ptr_size == sizeof (unsigned int))
5646 *ptr = addr.ui;
5647 else
5648 gdb_assert_not_reached ("unhandled pointer size");
5649 }
5650 return ret;
2268b414
JK
5651}
5652
5653struct link_map_offsets
5654 {
5655 /* Offset and size of r_debug.r_version. */
5656 int r_version_offset;
5657
5658 /* Offset and size of r_debug.r_map. */
5659 int r_map_offset;
5660
5661 /* Offset to l_addr field in struct link_map. */
5662 int l_addr_offset;
5663
5664 /* Offset to l_name field in struct link_map. */
5665 int l_name_offset;
5666
5667 /* Offset to l_ld field in struct link_map. */
5668 int l_ld_offset;
5669
5670 /* Offset to l_next field in struct link_map. */
5671 int l_next_offset;
5672
5673 /* Offset to l_prev field in struct link_map. */
5674 int l_prev_offset;
5675 };
5676
fb723180 5677/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
5678
5679static int
5680linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5681 unsigned const char *writebuf,
5682 CORE_ADDR offset, int len)
5683{
5684 char *document;
5685 unsigned document_len;
5686 struct process_info_private *const priv = current_process ()->private;
5687 char filename[PATH_MAX];
5688 int pid, is_elf64;
5689
5690 static const struct link_map_offsets lmo_32bit_offsets =
5691 {
5692 0, /* r_version offset. */
5693 4, /* r_debug.r_map offset. */
5694 0, /* l_addr offset in link_map. */
5695 4, /* l_name offset in link_map. */
5696 8, /* l_ld offset in link_map. */
5697 12, /* l_next offset in link_map. */
5698 16 /* l_prev offset in link_map. */
5699 };
5700
5701 static const struct link_map_offsets lmo_64bit_offsets =
5702 {
5703 0, /* r_version offset. */
5704 8, /* r_debug.r_map offset. */
5705 0, /* l_addr offset in link_map. */
5706 8, /* l_name offset in link_map. */
5707 16, /* l_ld offset in link_map. */
5708 24, /* l_next offset in link_map. */
5709 32 /* l_prev offset in link_map. */
5710 };
5711 const struct link_map_offsets *lmo;
214d508e 5712 unsigned int machine;
b1fbec62
GB
5713 int ptr_size;
5714 CORE_ADDR lm_addr = 0, lm_prev = 0;
5715 int allocated = 1024;
5716 char *p;
5717 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5718 int header_done = 0;
2268b414
JK
5719
5720 if (writebuf != NULL)
5721 return -2;
5722 if (readbuf == NULL)
5723 return -1;
5724
0bfdf32f 5725 pid = lwpid_of (current_thread);
2268b414 5726 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 5727 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 5728 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 5729 ptr_size = is_elf64 ? 8 : 4;
2268b414 5730
b1fbec62
GB
5731 while (annex[0] != '\0')
5732 {
5733 const char *sep;
5734 CORE_ADDR *addrp;
5735 int len;
2268b414 5736
b1fbec62
GB
5737 sep = strchr (annex, '=');
5738 if (sep == NULL)
5739 break;
0c5bf5a9 5740
b1fbec62
GB
5741 len = sep - annex;
5742 if (len == 5 && strncmp (annex, "start", 5) == 0)
5743 addrp = &lm_addr;
5744 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5745 addrp = &lm_prev;
5746 else
5747 {
5748 annex = strchr (sep, ';');
5749 if (annex == NULL)
5750 break;
5751 annex++;
5752 continue;
5753 }
5754
5755 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 5756 }
b1fbec62
GB
5757
5758 if (lm_addr == 0)
2268b414 5759 {
b1fbec62
GB
5760 int r_version = 0;
5761
5762 if (priv->r_debug == 0)
5763 priv->r_debug = get_r_debug (pid, is_elf64);
5764
5765 /* We failed to find DT_DEBUG. Such situation will not change
5766 for this inferior - do not retry it. Report it to GDB as
5767 E01, see for the reasons at the GDB solib-svr4.c side. */
5768 if (priv->r_debug == (CORE_ADDR) -1)
5769 return -1;
5770
5771 if (priv->r_debug != 0)
2268b414 5772 {
b1fbec62
GB
5773 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5774 (unsigned char *) &r_version,
5775 sizeof (r_version)) != 0
5776 || r_version != 1)
5777 {
5778 warning ("unexpected r_debug version %d", r_version);
5779 }
5780 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5781 &lm_addr, ptr_size) != 0)
5782 {
5783 warning ("unable to read r_map from 0x%lx",
5784 (long) priv->r_debug + lmo->r_map_offset);
5785 }
2268b414 5786 }
b1fbec62 5787 }
2268b414 5788
b1fbec62
GB
5789 document = xmalloc (allocated);
5790 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5791 p = document + strlen (document);
5792
5793 while (lm_addr
5794 && read_one_ptr (lm_addr + lmo->l_name_offset,
5795 &l_name, ptr_size) == 0
5796 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5797 &l_addr, ptr_size) == 0
5798 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5799 &l_ld, ptr_size) == 0
5800 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5801 &l_prev, ptr_size) == 0
5802 && read_one_ptr (lm_addr + lmo->l_next_offset,
5803 &l_next, ptr_size) == 0)
5804 {
5805 unsigned char libname[PATH_MAX];
5806
5807 if (lm_prev != l_prev)
2268b414 5808 {
b1fbec62
GB
5809 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5810 (long) lm_prev, (long) l_prev);
5811 break;
2268b414
JK
5812 }
5813
d878444c
JK
5814 /* Ignore the first entry even if it has valid name as the first entry
5815 corresponds to the main executable. The first entry should not be
5816 skipped if the dynamic loader was loaded late by a static executable
5817 (see solib-svr4.c parameter ignore_first). But in such case the main
5818 executable does not have PT_DYNAMIC present and this function already
5819 exited above due to failed get_r_debug. */
5820 if (lm_prev == 0)
2268b414 5821 {
d878444c
JK
5822 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5823 p = p + strlen (p);
5824 }
5825 else
5826 {
5827 /* Not checking for error because reading may stop before
5828 we've got PATH_MAX worth of characters. */
5829 libname[0] = '\0';
5830 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5831 libname[sizeof (libname) - 1] = '\0';
5832 if (libname[0] != '\0')
2268b414 5833 {
d878444c
JK
5834 /* 6x the size for xml_escape_text below. */
5835 size_t len = 6 * strlen ((char *) libname);
5836 char *name;
2268b414 5837
d878444c
JK
5838 if (!header_done)
5839 {
5840 /* Terminate `<library-list-svr4'. */
5841 *p++ = '>';
5842 header_done = 1;
5843 }
2268b414 5844
d878444c
JK
5845 while (allocated < p - document + len + 200)
5846 {
5847 /* Expand to guarantee sufficient storage. */
5848 uintptr_t document_len = p - document;
2268b414 5849
d878444c
JK
5850 document = xrealloc (document, 2 * allocated);
5851 allocated *= 2;
5852 p = document + document_len;
5853 }
5854
5855 name = xml_escape_text ((char *) libname);
5856 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5857 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5858 name, (unsigned long) lm_addr,
5859 (unsigned long) l_addr, (unsigned long) l_ld);
5860 free (name);
5861 }
0afae3cf 5862 }
b1fbec62
GB
5863
5864 lm_prev = lm_addr;
5865 lm_addr = l_next;
2268b414
JK
5866 }
5867
b1fbec62
GB
5868 if (!header_done)
5869 {
5870 /* Empty list; terminate `<library-list-svr4'. */
5871 strcpy (p, "/>");
5872 }
5873 else
5874 strcpy (p, "</library-list-svr4>");
5875
2268b414
JK
5876 document_len = strlen (document);
5877 if (offset < document_len)
5878 document_len -= offset;
5879 else
5880 document_len = 0;
5881 if (len > document_len)
5882 len = document_len;
5883
5884 memcpy (readbuf, document + offset, len);
5885 xfree (document);
5886
5887 return len;
5888}
5889
9accd112
MM
5890#ifdef HAVE_LINUX_BTRACE
5891
969c39fb 5892/* See to_enable_btrace target method. */
9accd112
MM
5893
5894static struct btrace_target_info *
5895linux_low_enable_btrace (ptid_t ptid)
5896{
5897 struct btrace_target_info *tinfo;
5898
5899 tinfo = linux_enable_btrace (ptid);
3aee8918 5900
9accd112 5901 if (tinfo != NULL)
3aee8918
PA
5902 {
5903 struct thread_info *thread = find_thread_ptid (ptid);
5904 struct regcache *regcache = get_thread_regcache (thread, 0);
5905
5906 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5907 }
9accd112
MM
5908
5909 return tinfo;
5910}
5911
969c39fb 5912/* See to_disable_btrace target method. */
9accd112 5913
969c39fb
MM
5914static int
5915linux_low_disable_btrace (struct btrace_target_info *tinfo)
5916{
5917 enum btrace_error err;
5918
5919 err = linux_disable_btrace (tinfo);
5920 return (err == BTRACE_ERR_NONE ? 0 : -1);
5921}
5922
5923/* See to_read_btrace target method. */
5924
5925static int
9accd112
MM
5926linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5927 int type)
5928{
5929 VEC (btrace_block_s) *btrace;
5930 struct btrace_block *block;
969c39fb 5931 enum btrace_error err;
9accd112
MM
5932 int i;
5933
969c39fb
MM
5934 btrace = NULL;
5935 err = linux_read_btrace (&btrace, tinfo, type);
5936 if (err != BTRACE_ERR_NONE)
5937 {
5938 if (err == BTRACE_ERR_OVERFLOW)
5939 buffer_grow_str0 (buffer, "E.Overflow.");
5940 else
5941 buffer_grow_str0 (buffer, "E.Generic Error.");
5942
5943 return -1;
5944 }
9accd112
MM
5945
5946 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5947 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5948
5949 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5950 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5951 paddress (block->begin), paddress (block->end));
5952
969c39fb 5953 buffer_grow_str0 (buffer, "</btrace>\n");
9accd112
MM
5954
5955 VEC_free (btrace_block_s, btrace);
969c39fb
MM
5956
5957 return 0;
9accd112
MM
5958}
5959#endif /* HAVE_LINUX_BTRACE */
5960
/* The GNU/Linux implementation of the target operations vector.
   NOTE(review): the initializers are positional — each entry must stay
   in exactly the same order as the member declarations of struct
   target_ops in target.h; do not reorder or insert entries here
   without updating that header.  Bare NULL entries mark hooks this
   target does not implement (consult target.h to see which hook each
   slot corresponds to).  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* Reading text/data offsets is only meaningful on no-MMU uClibc
     configurations that define the PT_*_ADDR ptrace requests.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,  /* hook not implemented on GNU/Linux — see target.h for its name.  */
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
  /* Branch tracing hooks are only available when built with btrace
     support; otherwise all four slots are left unimplemented.  */
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};
6041
/* Set up the signal dispositions gdbserver needs on GNU/Linux:
   ignore the real-time signal used internally by the threading
   library so it does not kill gdbserver.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6051
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries in INFO's regset table — the table is terminated
   by an entry whose size is negative — and record the count in
   INFO->num_regsets.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
6062
da6d8c04
DJ
6063void
6064initialize_low (void)
6065{
bd99dc85
PA
6066 struct sigaction sigchld_action;
6067 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 6068 set_target_ops (&linux_target_ops);
611cb4a5
DJ
6069 set_breakpoint_data (the_low_target.breakpoint,
6070 the_low_target.breakpoint_len);
0d62e5e8 6071 linux_init_signals ();
aa7c7447 6072 linux_ptrace_init_warnings ();
bd99dc85
PA
6073
6074 sigchld_action.sa_handler = sigchld_handler;
6075 sigemptyset (&sigchld_action.sa_mask);
6076 sigchld_action.sa_flags = SA_RESTART;
6077 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
6078
6079 initialize_low_arch ();
da6d8c04 6080}