]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Automatic date update in version.in
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
da6d8c04 29#include <sys/ptrace.h>
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
957f3f49
DE
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
14d2069a 56#include "nat/linux-namespaces.h"
efcbbd14
UW
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
da6d8c04 61
03583c20
UW
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
fd462a61
DJ
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
ec8ebe72
DE
73#ifndef W_STOPCODE
74#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75#endif
76
1a981360
PA
77/* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79#ifndef __SIGRTMIN
80#define __SIGRTMIN 32
81#endif
82
db0dfaa0
LM
83/* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86#if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89#if defined(__mcoldfire__)
90/* These are still undefined in 3.10 kernels. */
91#define PT_TEXT_ADDR 49*4
92#define PT_DATA_ADDR 50*4
93#define PT_TEXT_END_ADDR 51*4
94/* BFIN already defines these since at least 2.6.32 kernels. */
95#elif defined(BFIN)
96#define PT_TEXT_ADDR 220
97#define PT_TEXT_END_ADDR 224
98#define PT_DATA_ADDR 228
99/* These are still undefined in 3.10 kernels. */
100#elif defined(__TMS320C6X__)
101#define PT_TEXT_ADDR (0x10000*4)
102#define PT_DATA_ADDR (0x10004*4)
103#define PT_TEXT_END_ADDR (0x10008*4)
104#endif
105#endif
106
9accd112 107#ifdef HAVE_LINUX_BTRACE
125f8a3d 108# include "nat/linux-btrace.h"
734b0e4b 109# include "btrace-common.h"
9accd112
MM
110#endif
111
8365dcf5
TJB
112#ifndef HAVE_ELF32_AUXV_T
113/* Copied from glibc's elf.h. */
114typedef struct
115{
116 uint32_t a_type; /* Entry type */
117 union
118 {
119 uint32_t a_val; /* Integer value */
120 /* We use to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
123 } a_un;
124} Elf32_auxv_t;
125#endif
126
127#ifndef HAVE_ELF64_AUXV_T
128/* Copied from glibc's elf.h. */
129typedef struct
130{
131 uint64_t a_type; /* Entry type */
132 union
133 {
134 uint64_t a_val; /* Integer value */
135 /* We use to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
138 } a_un;
139} Elf64_auxv_t;
140#endif
141
cff068da
GB
142/* LWP accessors. */
143
144/* See nat/linux-nat.h. */
145
146ptid_t
147ptid_of_lwp (struct lwp_info *lwp)
148{
149 return ptid_of (get_lwp_thread (lwp));
150}
151
152/* See nat/linux-nat.h. */
153
4b134ca1
GB
154void
155lwp_set_arch_private_info (struct lwp_info *lwp,
156 struct arch_lwp_info *info)
157{
158 lwp->arch_private = info;
159}
160
161/* See nat/linux-nat.h. */
162
163struct arch_lwp_info *
164lwp_arch_private_info (struct lwp_info *lwp)
165{
166 return lwp->arch_private;
167}
168
169/* See nat/linux-nat.h. */
170
cff068da
GB
171int
172lwp_is_stopped (struct lwp_info *lwp)
173{
174 return lwp->stopped;
175}
176
177/* See nat/linux-nat.h. */
178
179enum target_stop_reason
180lwp_stop_reason (struct lwp_info *lwp)
181{
182 return lwp->stop_reason;
183}
184
05044653
PA
185/* A list of all unknown processes which receive stop signals. Some
186 other process will presumably claim each of these as forked
187 children momentarily. */
24a09b5f 188
05044653
PA
189struct simple_pid_list
190{
191 /* The process ID. */
192 int pid;
193
194 /* The status as reported by waitpid. */
195 int status;
196
197 /* Next in chain. */
198 struct simple_pid_list *next;
199};
200struct simple_pid_list *stopped_pids;
201
202/* Trivial list manipulation functions to keep track of a list of new
203 stopped processes. */
204
205static void
206add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
207{
208 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
209
210 new_pid->pid = pid;
211 new_pid->status = status;
212 new_pid->next = *listp;
213 *listp = new_pid;
214}
215
216static int
217pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
218{
219 struct simple_pid_list **p;
220
221 for (p = listp; *p != NULL; p = &(*p)->next)
222 if ((*p)->pid == pid)
223 {
224 struct simple_pid_list *next = (*p)->next;
225
226 *statusp = (*p)->status;
227 xfree (*p);
228 *p = next;
229 return 1;
230 }
231 return 0;
232}
24a09b5f 233
bde24c0a
PA
/* The possible states of the thread-stopping machinery; see
   stop_all_lwps.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
248
249/* FIXME make into a target method? */
24a09b5f 250int using_threads = 1;
24a09b5f 251
fa593d66
PA
252/* True if we're presently stabilizing threads (moving them out of
253 jump pads). */
254static int stabilizing_threads;
255
2acc282a 256static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 257 int step, int signal, siginfo_t *info);
2bd7c093 258static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
259static void stop_all_lwps (int suspend, struct lwp_info *except);
260static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
261static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
262 int *wstat, int options);
95954743 263static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 264static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 265static int linux_stopped_by_watchpoint (void);
95954743 266static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 267static void proceed_all_lwps (void);
d50171e4 268static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
269static int kill_lwp (unsigned long lwpid, int signo);
270
582511be
PA
271/* When the event-loop is doing a step-over, this points at the thread
272 being stepped. */
273ptid_t step_over_bkpt;
274
d50171e4
PA
275/* True if the low target can hardware single-step. Such targets
276 don't need a BREAKPOINT_REINSERT_ADDR callback. */
277
278static int
279can_hardware_single_step (void)
280{
281 return (the_low_target.breakpoint_reinsert_addr == NULL);
282}
283
284/* True if the low target supports memory breakpoints. If so, we'll
285 have a GET_PC implementation. */
286
287static int
288supports_breakpoints (void)
289{
290 return (the_low_target.get_pc != NULL);
291}
0d62e5e8 292
fa593d66
PA
293/* Returns true if this target can support fast tracepoints. This
294 does not mean that the in-process agent has been loaded in the
295 inferior. */
296
297static int
298supports_fast_tracepoints (void)
299{
300 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
301}
302
c2d6af84
PA
303/* True if LWP is stopped in its stepping range. */
304
305static int
306lwp_in_step_range (struct lwp_info *lwp)
307{
308 CORE_ADDR pc = lwp->stop_pc;
309
310 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
311}
312
0d62e5e8
DJ
/* One entry in a per-LWP chain of signals that have been intercepted
   but not yet delivered to the inferior.  */

struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The siginfo captured when the signal was intercepted, so it can
     be re-delivered unchanged.  */
  siginfo_t info;
  /* Link to the previously queued entry (the chain grows at the
     head).  */
  struct pending_signals *prev;
};
611cb4a5 319
bd99dc85
PA
320/* The read/write ends of the pipe registered as waitable file in the
321 event loop. */
322static int linux_event_pipe[2] = { -1, -1 };
323
324/* True if we're currently in async mode. */
325#define target_is_async_p() (linux_event_pipe[0] != -1)
326
02fc4de7 327static void send_sigstop (struct lwp_info *lwp);
fa96cb38 328static void wait_for_sigstop (void);
bd99dc85 329
d0722149
DE
/* Examine HEADER as an ELF file header.  Store the e_machine value
   in *MACHINE (EM_NONE if the magic bytes don't match).  Return 1
   for a 64-bit ELF header, 0 for a 32-bit one, and -1 if HEADER is
   not an ELF header at all.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  int magic_ok = (header->e_ident[EI_MAG0] == ELFMAG0
		  && header->e_ident[EI_MAG1] == ELFMAG1
		  && header->e_ident[EI_MAG2] == ELFMAG2
		  && header->e_ident[EI_MAG3] == ELFMAG3);

  if (!magic_ok)
    {
      /* Not ELF; no meaningful machine to report.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
347
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  ssize_t nread;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Retry reads interrupted by a signal.  gdbserver receives
     asynchronous signals (e.g. SIGCHLD), and an unguarded read could
     fail with EINTR, spuriously classifying FILE as non-ELF.  A
     short read (file smaller than the header) still reports 0.  */
  do
    nread = read (fd, &header, sizeof (header));
  while (nread == -1 && errno == EINTR);

  close (fd);

  if (nread != (ssize_t) sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
371
be07f1a2
PA
372/* Accepts an integer PID; Returns true if the executable PID is
373 running is a 64-bit ELF file.. */
374
375int
214d508e 376linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 377{
d8d2a3ee 378 char file[PATH_MAX];
be07f1a2
PA
379
380 sprintf (file, "/proc/%d/exe", pid);
214d508e 381 return elf_64_file_p (file, machine);
be07f1a2
PA
382}
383
bd99dc85
PA
384static void
385delete_lwp (struct lwp_info *lwp)
386{
fa96cb38
PA
387 struct thread_info *thr = get_lwp_thread (lwp);
388
389 if (debug_threads)
390 debug_printf ("deleting %ld\n", lwpid_of (thr));
391
392 remove_thread (thr);
aa5ca48f 393 free (lwp->arch_private);
bd99dc85
PA
394 free (lwp);
395}
396
95954743
PA
397/* Add a process to the common process list, and set its private
398 data. */
399
400static struct process_info *
401linux_add_process (int pid, int attached)
402{
403 struct process_info *proc;
404
95954743 405 proc = add_process (pid, attached);
fe978cb0 406 proc->priv = xcalloc (1, sizeof (*proc->priv));
95954743 407
3aee8918 408 /* Set the arch when the first LWP stops. */
fe978cb0 409 proc->priv->new_inferior = 1;
3aee8918 410
aa5ca48f 411 if (the_low_target.new_process != NULL)
fe978cb0 412 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 413
95954743
PA
414 return proc;
415}
416
582511be
PA
417static CORE_ADDR get_pc (struct lwp_info *lwp);
418
bd99dc85 419/* Handle a GNU/Linux extended wait response. If we see a clone
de0d863e
DB
420 event, we need to add the new LWP to our list (and return 0 so as
421 not to report the trap to higher layers). */
0d62e5e8 422
de0d863e
DB
423static int
424handle_extended_wait (struct lwp_info *event_lwp, int wstat)
24a09b5f 425{
89a5711c 426 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 427 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 428 struct lwp_info *new_lwp;
24a09b5f 429
c269dbdb
DB
430 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
431 || (event == PTRACE_EVENT_CLONE))
24a09b5f 432 {
95954743 433 ptid_t ptid;
24a09b5f 434 unsigned long new_pid;
05044653 435 int ret, status;
24a09b5f 436
de0d863e 437 /* Get the pid of the new lwp. */
d86d4aaf 438 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 439 &new_pid);
24a09b5f
DJ
440
441 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 442 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
443 {
444 /* The new child has a pending SIGSTOP. We can't affect it until it
445 hits the SIGSTOP, but we're already attached. */
446
97438e3f 447 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
448
449 if (ret == -1)
450 perror_with_name ("waiting for new child");
451 else if (ret != new_pid)
452 warning ("wait returned unexpected PID %d", ret);
da5898ce 453 else if (!WIFSTOPPED (status))
24a09b5f
DJ
454 warning ("wait returned unexpected status 0x%x", status);
455 }
456
c269dbdb 457 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
458 {
459 struct process_info *parent_proc;
460 struct process_info *child_proc;
461 struct lwp_info *child_lwp;
bfacd19d 462 struct thread_info *child_thr;
de0d863e
DB
463 struct target_desc *tdesc;
464
465 ptid = ptid_build (new_pid, new_pid, 0);
466
467 if (debug_threads)
468 {
469 debug_printf ("HEW: Got fork event from LWP %ld, "
470 "new child is %d\n",
471 ptid_get_lwp (ptid_of (event_thr)),
472 ptid_get_pid (ptid));
473 }
474
475 /* Add the new process to the tables and clone the breakpoint
476 lists of the parent. We need to do this even if the new process
477 will be detached, since we will need the process object and the
478 breakpoints to remove any breakpoints from memory when we
479 detach, and the client side will access registers. */
480 child_proc = linux_add_process (new_pid, 0);
481 gdb_assert (child_proc != NULL);
482 child_lwp = add_lwp (ptid);
483 gdb_assert (child_lwp != NULL);
484 child_lwp->stopped = 1;
bfacd19d
DB
485 child_lwp->must_set_ptrace_flags = 1;
486 child_lwp->status_pending_p = 0;
487 child_thr = get_lwp_thread (child_lwp);
488 child_thr->last_resume_kind = resume_stop;
de0d863e
DB
489 parent_proc = get_thread_process (event_thr);
490 child_proc->attached = parent_proc->attached;
491 clone_all_breakpoints (&child_proc->breakpoints,
492 &child_proc->raw_breakpoints,
493 parent_proc->breakpoints);
494
495 tdesc = xmalloc (sizeof (struct target_desc));
496 copy_target_description (tdesc, parent_proc->tdesc);
497 child_proc->tdesc = tdesc;
de0d863e 498
3a8a0396
DB
499 /* Clone arch-specific process data. */
500 if (the_low_target.new_fork != NULL)
501 the_low_target.new_fork (parent_proc, child_proc);
502
de0d863e 503 /* Save fork info in the parent thread. */
c269dbdb
DB
504 if (event == PTRACE_EVENT_FORK)
505 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
506 else if (event == PTRACE_EVENT_VFORK)
507 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
508
de0d863e 509 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 510
de0d863e
DB
511 /* The status_pending field contains bits denoting the
512 extended event, so when the pending event is handled,
513 the handler will look at lwp->waitstatus. */
514 event_lwp->status_pending_p = 1;
515 event_lwp->status_pending = wstat;
516
517 /* Report the event. */
518 return 0;
519 }
520
fa96cb38
PA
521 if (debug_threads)
522 debug_printf ("HEW: Got clone event "
523 "from LWP %ld, new child is LWP %ld\n",
524 lwpid_of (event_thr), new_pid);
525
d86d4aaf 526 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
b3312d80 527 new_lwp = add_lwp (ptid);
24a09b5f 528
e27d73f6
DE
529 /* Either we're going to immediately resume the new thread
530 or leave it stopped. linux_resume_one_lwp is a nop if it
531 thinks the thread is currently running, so set this first
532 before calling linux_resume_one_lwp. */
533 new_lwp->stopped = 1;
534
bde24c0a
PA
535 /* If we're suspending all threads, leave this one suspended
536 too. */
537 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
538 new_lwp->suspended = 1;
539
da5898ce
DJ
540 /* Normally we will get the pending SIGSTOP. But in some cases
541 we might get another signal delivered to the group first.
f21cc1a2 542 If we do get another signal, be sure not to lose it. */
20ba1ce6 543 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 544 {
54a0b537 545 new_lwp->stop_expected = 1;
20ba1ce6
PA
546 new_lwp->status_pending_p = 1;
547 new_lwp->status_pending = status;
da5898ce 548 }
de0d863e
DB
549
550 /* Don't report the event. */
551 return 1;
24a09b5f 552 }
c269dbdb
DB
553 else if (event == PTRACE_EVENT_VFORK_DONE)
554 {
555 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
556
557 /* Report the event. */
558 return 0;
559 }
de0d863e
DB
560
561 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
562}
563
d50171e4
PA
564/* Return the PC as read from the regcache of LWP, without any
565 adjustment. */
566
567static CORE_ADDR
568get_pc (struct lwp_info *lwp)
569{
0bfdf32f 570 struct thread_info *saved_thread;
d50171e4
PA
571 struct regcache *regcache;
572 CORE_ADDR pc;
573
574 if (the_low_target.get_pc == NULL)
575 return 0;
576
0bfdf32f
GB
577 saved_thread = current_thread;
578 current_thread = get_lwp_thread (lwp);
d50171e4 579
0bfdf32f 580 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
581 pc = (*the_low_target.get_pc) (regcache);
582
583 if (debug_threads)
87ce2a04 584 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 585
0bfdf32f 586 current_thread = saved_thread;
d50171e4
PA
587 return pc;
588}
589
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.

   Returns 1 when the stop was caused by a breakpoint (updating
   LWP's stop_pc and stop_reason, and rewinding the PC if needed),
   0 otherwise.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  /* Where the PC would be if a just-hit software breakpoint caused
     this stop (the kernel reports PC past the trap instruction).  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: the kernel tells us the trap cause directly via
     siginfo.si_code.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware traps report the exact PC; no rewind.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* Single-step trap: logged but not treated as a
		 breakpoint stop; falls through to return 0.  */
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* Fallback path: infer the cause by looking for breakpoints
     inserted at the (adjusted) PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 741
b3312d80 742static struct lwp_info *
95954743 743add_lwp (ptid_t ptid)
611cb4a5 744{
54a0b537 745 struct lwp_info *lwp;
0d62e5e8 746
54a0b537
PA
747 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
748 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 749
aa5ca48f 750 if (the_low_target.new_thread != NULL)
34c703da 751 the_low_target.new_thread (lwp);
aa5ca48f 752
f7667f0d 753 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 754
54a0b537 755 return lwp;
0d62e5e8 756}
611cb4a5 757
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.
   PROGRAM is exec'd in a fork()ed (or, on no-MMU uClibc, vfork()ed)
   child that has requested PTRACE_TRACEME; errors in the child are
   reported on stderr and exit with status 0177.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; restored below via
     do_cleanups.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: become traceable and exec the program.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try the literal path first; fall back to a PATH search only
	 when the file itself was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: restore personality and register the new process.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
824
8784d563
PA
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  PTID names the LWP to attach to; a new lwp_info/thread is
   registered for it on success.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
909
8784d563
PA
910/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
911 already attached. Returns true if a new LWP is found, false
912 otherwise. */
913
914static int
915attach_proc_task_lwp_callback (ptid_t ptid)
916{
917 /* Is this a new thread? */
918 if (find_thread_ptid (ptid) == NULL)
919 {
920 int lwpid = ptid_get_lwp (ptid);
921 int err;
922
923 if (debug_threads)
924 debug_printf ("Found new lwp %d\n", lwpid);
925
926 err = linux_attach_lwp (ptid);
927
928 /* Be quiet if we simply raced with the thread exiting. EPERM
929 is returned if the thread's task still exists, and is marked
930 as exited or zombie, as well as other conditions, so in that
931 case, confirm the status in /proc/PID/status. */
932 if (err == ESRCH
933 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
934 {
935 if (debug_threads)
936 {
937 debug_printf ("Cannot attach to lwp %d: "
938 "thread is gone (%d: %s)\n",
939 lwpid, err, strerror (err));
940 }
941 }
942 else if (err != 0)
943 {
944 warning (_("Cannot attach to lwp %d: %s"),
945 lwpid,
946 linux_ptrace_attach_fail_reason_string (ptid, err));
947 }
948
949 return 1;
950 }
951 return 0;
952}
953
e3deef73
LM
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0; raises an error if the initial
   attach fails.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
994
/* State for second_thread_of_pid_p: tracks how many threads of a
   given process have been seen during iteration.  */

struct counter
{
  int pid;	/* Process (tgid) whose threads are being counted.  */
  int count;	/* Number of matching threads seen so far.  */
};
1000
1001static int
1002second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1003{
1004 struct counter *counter = args;
1005
1006 if (ptid_get_pid (entry->id) == counter->pid)
1007 {
1008 if (++counter->count > 1)
1009 return 1;
1010 }
d61ddec4 1011
da6d8c04
DJ
1012 return 0;
1013}
1014
95954743 1015static int
fa96cb38 1016last_thread_of_process_p (int pid)
95954743 1017{
95954743 1018 struct counter counter = { pid , 0 };
da6d8c04 1019
95954743
PA
1020 return (find_inferior (&all_threads,
1021 second_thread_of_pid_p, &counter) == NULL);
1022}
1023
/* Kill LWP.  Sends SIGKILL followed by PTRACE_KILL, but does not wait
   for the LWP to die -- see kill_wait_lwp for that.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno right away; debug_printf may clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1068
/* Kill LWP and wait for it to die.  Asserts if the LWP could not be
   reaped.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* We should always have been able to collect the LWP's death.  */
  gdb_assert (res > 0);
}
1108
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  ARGS points at the pid of the process.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
1138
/* Kill process PID and all of its LWPs.  Returns 0 on success, -1 if
   the process is unknown.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1175
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when no signal should be delivered, otherwise the host signal
   number.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      /* GDB told us explicitly this signal must not be passed.  */
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1255
/* Callback for `find_inferior'.  Detaches from the LWP if it belongs
   to the process whose pid ARGS points at.  Delivers any pending
   signal on detach so it is not lost.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1296
/* Detach from process PID and all of its LWPs.  Returns 0 on
   success, -1 if the process is unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1328
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct process_info *process = proc;
  struct thread_info *thr = (struct thread_info *) entry;

  /* Delete the LWP only when this thread belongs to PROCESS.  */
  if (pid_of (thr) == pid_of (process))
    delete_lwp (get_thread_lwp (thr));

  /* Always keep iterating over the remaining threads.  */
  return 0;
}
1343
/* Forget everything about PROCESS: its LWPs, thread_db state, and
   private data, then remove it from the process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1363
/* Wait until process PID has exited or been killed.  Loops over
   waitpid until the process reports an exit or termination status, or
   until there is nothing left to wait for (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually filled it in; on
       failure (ret == -1) it is left uninitialized, and reading it
       would be undefined behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1375
6ad8ae5c 1376/* Return nonzero if the given thread is still alive. */
0d62e5e8 1377static int
95954743 1378linux_thread_alive (ptid_t ptid)
0d62e5e8 1379{
95954743
PA
1380 struct lwp_info *lwp = find_lwp_pid (ptid);
1381
1382 /* We assume we always know if a thread exits. If a whole process
1383 exited but we still haven't been able to report it to GDB, we'll
1384 hold on to the last lwp of the dead process. */
1385 if (lwp != NULL)
1386 return !lwp->dead;
0d62e5e8
DJ
1387 else
1388 return 0;
1389}
1390
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  As a side effect, a stale breakpoint stop is discarded and
   the LWP resumed.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch to THREAD; the breakpoint checks below
	 operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1464
6bf5e0ba 1465/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1466static int
d50171e4 1467status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1468{
d86d4aaf 1469 struct thread_info *thread = (struct thread_info *) entry;
582511be 1470 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1471 ptid_t ptid = * (ptid_t *) arg;
1472
1473 /* Check if we're only interested in events from a specific process
afa8d396
PA
1474 or a specific LWP. */
1475 if (!ptid_match (ptid_of (thread), ptid))
95954743 1476 return 0;
0d62e5e8 1477
582511be
PA
1478 if (lp->status_pending_p
1479 && !thread_still_has_status_pending_p (thread))
1480 {
1481 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1482 return 0;
1483 }
0d62e5e8 1484
582511be 1485 return lp->status_pending_p;
0d62e5e8
DJ
1486}
1487
95954743
PA
1488static int
1489same_lwp (struct inferior_list_entry *entry, void *data)
1490{
1491 ptid_t ptid = *(ptid_t *) data;
1492 int lwp;
1493
1494 if (ptid_get_lwp (ptid) != 0)
1495 lwp = ptid_get_lwp (ptid);
1496 else
1497 lwp = ptid_get_pid (ptid);
1498
1499 if (ptid_get_lwp (entry->id) == lwp)
1500 return 1;
1501
1502 return 0;
1503}
1504
1505struct lwp_info *
1506find_lwp_pid (ptid_t ptid)
1507{
d86d4aaf
DE
1508 struct inferior_list_entry *thread
1509 = find_inferior (&all_threads, same_lwp, &ptid);
1510
1511 if (thread == NULL)
1512 return NULL;
1513
1514 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1515}
1516
fa96cb38 1517/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1518
fa96cb38
PA
1519static int
1520num_lwps (int pid)
1521{
1522 struct inferior_list_entry *inf, *tmp;
1523 int count = 0;
0d62e5e8 1524
fa96cb38 1525 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1526 {
fa96cb38
PA
1527 if (ptid_get_pid (inf->id) == pid)
1528 count++;
24a09b5f 1529 }
3aee8918 1530
fa96cb38
PA
1531 return count;
1532}
d61ddec4 1533
/* The arguments passed to iterate_over_lwps.  Bundled into one struct
   so they can travel through find_inferior's single void* slot.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
1547
/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      /* Delegate to the user callback; its return value controls
	 whether the iteration stops here.  */
      return (*args->callback) (lwp, args->data);
    }

  return 0;
}
1570
1571/* See nat/linux-nat.h. */
1572
1573struct lwp_info *
1574iterate_over_lwps (ptid_t filter,
1575 iterate_over_lwps_ftype callback,
1576 void *data)
1577{
1578 struct iterate_over_lwps_args args = {filter, callback, data};
1579 struct inferior_list_entry *entry;
1580
1581 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1582 if (entry == NULL)
1583 return NULL;
1584
1585 return get_thread_lwp ((struct thread_info *) entry);
1586}
1587
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
c3adc08c 1653
fa96cb38
PA
1654/* Callback for `find_inferior'. Returns the first LWP that is not
1655 stopped. ARG is a PTID filter. */
d50171e4 1656
fa96cb38
PA
1657static int
1658not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1659{
1660 struct thread_info *thr = (struct thread_info *) entry;
1661 struct lwp_info *lwp;
1662 ptid_t filter = *(ptid_t *) arg;
47c0c975 1663
fa96cb38
PA
1664 if (!ptid_match (ptid_of (thr), filter))
1665 return 0;
bd99dc85 1666
fa96cb38
PA
1667 lwp = get_thread_lwp (thr);
1668 if (!lwp->stopped)
1669 return 1;
1670
1671 return 0;
0d62e5e8 1672}
611cb4a5 1673
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  /* The temporary suspension above must be perfectly balanced.  */
  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1721
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On a match, *STATUS is filled in with the
   collection state.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Targets without get_thread_area cannot use fast tracepoints.  */
  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1744
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.

   Returns 1 if LWP is being moved out of a fast-tracepoint jump pad
   (and must not be reported to GDB yet), 0 otherwise.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
1871
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  Non-RT signals are only queued once; RT signals
   may be queued multiple times.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Push the signal (with its siginfo) onto the deferred list.  */
  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1930
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and sets *WSTAT if a signal was
   dequeued (restoring its siginfo into the kernel), 0 if the list is
   empty.  Signals are dequeued oldest-first (the list is kept
   newest-first, so we walk to the tail).  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
1976
582511be
PA
1977/* Fetch the possibly triggered data watchpoint info and store it in
1978 CHILD.
d50171e4 1979
582511be
PA
1980 On some archs, like x86, that use debug registers to set
1981 watchpoints, it's possible that the way to know which watched
1982 address trapped, is to check the register that is used to select
1983 which address to watch. Problem is, between setting the watchpoint
1984 and reading back which data address trapped, the user may change
1985 the set of watchpoints, and, as a consequence, GDB changes the
1986 debug registers in the inferior. To avoid reading back a stale
1987 stopped-data-address when that happens, we cache in LP the fact
1988 that a watchpoint trapped, and the corresponding data address, as
1989 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1990 registers meanwhile, we have the cached data we can rely on. */
d50171e4 1991
582511be
PA
1992static int
1993check_stopped_by_watchpoint (struct lwp_info *child)
1994{
1995 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 1996 {
582511be 1997 struct thread_info *saved_thread;
d50171e4 1998
582511be
PA
1999 saved_thread = current_thread;
2000 current_thread = get_lwp_thread (child);
2001
2002 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2003 {
15c66dd6 2004 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2005
2006 if (the_low_target.stopped_data_address != NULL)
2007 child->stopped_data_address
2008 = the_low_target.stopped_data_address ();
2009 else
2010 child->stopped_data_address = 0;
d50171e4
PA
2011 }
2012
0bfdf32f 2013 current_thread = saved_thread;
d50171e4
PA
2014 }
2015
15c66dd6 2016 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2017}
2018
de0d863e
DB
2019/* Return the ptrace options that we want to try to enable. */
2020
2021static int
2022linux_low_ptrace_options (int attached)
2023{
2024 int options = 0;
2025
2026 if (!attached)
2027 options |= PTRACE_O_EXITKILL;
2028
2029 if (report_fork_events)
2030 options |= PTRACE_O_TRACEFORK;
2031
c269dbdb
DB
2032 if (report_vfork_events)
2033 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2034
de0d863e
DB
2035 return options;
2036}
2037
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{

	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  /* From here on, the event is known to be a stop (exits and
     terminations were handled above).  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->priv->new_inferior)
	{
	  struct thread_info *saved_thread;

	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->priv->new_inferior = 0;
	}
    }

  /* Enable the extended ptrace event options on the first stop, if
     not done yet for this LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	  || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    check_stopped_by_watchpoint (child);

  /* check_stopped_by_breakpoint may have already adjusted and
     recorded the PC; otherwise record it now.  */
  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* The event is interesting; leave it pending for the caller to
     pick up.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2208
20ba1ce6
PA
2209/* Resume LWPs that are currently stopped without any pending status
2210 to report, but are resumed from the core's perspective. */
2211
2212static void
2213resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2214{
2215 struct thread_info *thread = (struct thread_info *) entry;
2216 struct lwp_info *lp = get_thread_lwp (thread);
2217
2218 if (lp->stopped
2219 && !lp->status_pending_p
2220 && thread->last_resume_kind != resume_stop
2221 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2222 {
2223 int step = thread->last_resume_kind == resume_step;
2224
2225 if (debug_threads)
2226 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2227 target_pid_to_str (ptid_of (thread)),
2228 paddress (lp->stop_pc),
2229 step);
2230
2231 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2232 }
2233}
2234
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Waiting on a specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* If the LWP stopped mid fast-tracepoint collection, defer the
	 pending status and let it finish the collection first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      /* A pending event satisfies the request without calling
	 waitpid at all.  */
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid (although we don't currently enable exec
	   events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
	debug_printf ("LWP %d is the last lwp of process.  "
		      "Process %ld exiting.\n",
		      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
2444
2445/* Wait for an event from child(ren) PTID. PTIDs can be:
2446 minus_one_ptid, to specify any child; a pid PTID, specifying all
2447 lwps of a thread group; or a PTID representing a single lwp. Store
2448 the stop status through the status pointer WSTAT. OPTIONS is
2449 passed to the waitpid call. Return 0 if no event was found and
2450 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2451 was found. Return the PID of the stopped child otherwise. */
2452
2453static int
2454linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2455{
2456 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2457}
2458
6bf5e0ba
PA
2459/* Count the LWP's that have had events. */
2460
2461static int
2462count_events_callback (struct inferior_list_entry *entry, void *data)
2463{
d86d4aaf 2464 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2465 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2466 int *count = data;
2467
2468 gdb_assert (count != NULL);
2469
582511be 2470 /* Count only resumed LWPs that have an event pending. */
8336d594 2471 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2472 && lp->status_pending_p)
6bf5e0ba
PA
2473 (*count)++;
2474
2475 return 0;
2476}
2477
2478/* Select the LWP (if any) that is currently being single-stepped. */
2479
2480static int
2481select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2482{
d86d4aaf
DE
2483 struct thread_info *thread = (struct thread_info *) entry;
2484 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2485
8336d594
PA
2486 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2487 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2488 && lp->status_pending_p)
2489 return 1;
2490 else
2491 return 0;
2492}
2493
b90fc188 2494/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2495
2496static int
2497select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2498{
d86d4aaf 2499 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2500 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2501 int *selector = data;
2502
2503 gdb_assert (selector != NULL);
2504
582511be 2505 /* Select only resumed LWPs that have an event pending. */
91baf43f 2506 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2507 && lp->status_pending_p)
6bf5e0ba
PA
2508 if ((*selector)-- == 0)
2509 return 1;
2510
2511 return 0;
2512}
2513
/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_singlestep_lwp_callback,
						NULL);
      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had events.  */

      /* First see how many events we have.  */
      find_inferior (&all_threads, count_events_callback, &num_events);
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  The formula maps rand()'s [0, RAND_MAX] range onto
	 [0, num_events), approximately uniformly.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
		      num_events, random_selector);

      /* select_event_lwp_callback decrements RANDOM_SELECTOR once per
	 candidate and matches when it hits zero.  */
      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_event_lwp_callback,
						&random_selector);
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2575}
2576
7984d532
PA
2577/* Decrement the suspend count of an LWP. */
2578
2579static int
2580unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2581{
d86d4aaf
DE
2582 struct thread_info *thread = (struct thread_info *) entry;
2583 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2584
2585 /* Ignore EXCEPT. */
2586 if (lwp == except)
2587 return 0;
2588
2589 lwp->suspended--;
2590
2591 gdb_assert (lwp->suspended >= 0);
2592 return 0;
2593}
2594
2595/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2596 NULL. */
2597
2598static void
2599unsuspend_all_lwps (struct lwp_info *except)
2600{
d86d4aaf 2601 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2602}
2603
/* Forward declarations for helpers used by linux_stabilize_threads
   below, which are defined later in this file.  */
static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
				       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
			    struct target_waitstatus *ourstatus,
			    int target_options);
2611
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread is already wedged in a jump pad, we can't
     stabilize; bail out early.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp->suspended++;

	  /* Defer any signal the stop carried (or a requested stop)
	     so it is reported once the thread is out of the pad.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspend counts bumped in the loop above.  */
  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2712
582511be
PA
2713static void async_file_mark (void);
2714
2715/* Convenience function that is called when the kernel reports an
2716 event that is not passed out to GDB. */
2717
2718static ptid_t
2719ignore_event (struct target_waitstatus *ourstatus)
2720{
2721 /* If we got an event, there may still be others, as a single
2722 SIGCHLD can indicate more than one child stopped. This forces
2723 another target_wait call. */
2724 async_file_mark ();
2725
2726 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2727 return null_ptid;
2728}
2729
de0d863e
DB
2730/* Return non-zero if WAITSTATUS reflects an extended linux
2731 event. Otherwise, return zero. */
2732
2733static int
2734extended_event_reported (const struct target_waitstatus *waitstatus)
2735{
2736 if (waitstatus == NULL)
2737 return 0;
2738
c269dbdb
DB
2739 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2740 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2741 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
de0d863e
DB
2742}
2743
0d62e5e8 2744/* Wait for process, returns status. */
da6d8c04 2745
95954743
PA
2746static ptid_t
2747linux_wait_1 (ptid_t ptid,
2748 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2749{
e5f1222d 2750 int w;
fc7238bb 2751 struct lwp_info *event_child;
bd99dc85 2752 int options;
bd99dc85 2753 int pid;
6bf5e0ba
PA
2754 int step_over_finished;
2755 int bp_explains_trap;
2756 int maybe_internal_trap;
2757 int report_to_gdb;
219f2f23 2758 int trace_event;
c2d6af84 2759 int in_step_range;
bd99dc85 2760
87ce2a04
DE
2761 if (debug_threads)
2762 {
2763 debug_enter ();
2764 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2765 }
2766
bd99dc85
PA
2767 /* Translate generic target options into linux options. */
2768 options = __WALL;
2769 if (target_options & TARGET_WNOHANG)
2770 options |= WNOHANG;
0d62e5e8 2771
fa593d66
PA
2772 bp_explains_trap = 0;
2773 trace_event = 0;
c2d6af84 2774 in_step_range = 0;
bd99dc85
PA
2775 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2776
6bf5e0ba
PA
2777 if (ptid_equal (step_over_bkpt, null_ptid))
2778 pid = linux_wait_for_event (ptid, &w, options);
2779 else
2780 {
2781 if (debug_threads)
87ce2a04
DE
2782 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2783 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2784 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2785 }
2786
fa96cb38 2787 if (pid == 0)
87ce2a04 2788 {
fa96cb38
PA
2789 gdb_assert (target_options & TARGET_WNOHANG);
2790
87ce2a04
DE
2791 if (debug_threads)
2792 {
fa96cb38
PA
2793 debug_printf ("linux_wait_1 ret = null_ptid, "
2794 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2795 debug_exit ();
2796 }
fa96cb38
PA
2797
2798 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2799 return null_ptid;
2800 }
fa96cb38
PA
2801 else if (pid == -1)
2802 {
2803 if (debug_threads)
2804 {
2805 debug_printf ("linux_wait_1 ret = null_ptid, "
2806 "TARGET_WAITKIND_NO_RESUMED\n");
2807 debug_exit ();
2808 }
bd99dc85 2809
fa96cb38
PA
2810 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2811 return null_ptid;
2812 }
0d62e5e8 2813
0bfdf32f 2814 event_child = get_thread_lwp (current_thread);
0d62e5e8 2815
fa96cb38
PA
2816 /* linux_wait_for_event only returns an exit status for the last
2817 child of a process. Report it. */
2818 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2819 {
fa96cb38 2820 if (WIFEXITED (w))
0d62e5e8 2821 {
fa96cb38
PA
2822 ourstatus->kind = TARGET_WAITKIND_EXITED;
2823 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2824
fa96cb38 2825 if (debug_threads)
bd99dc85 2826 {
fa96cb38
PA
2827 debug_printf ("linux_wait_1 ret = %s, exited with "
2828 "retcode %d\n",
0bfdf32f 2829 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2830 WEXITSTATUS (w));
2831 debug_exit ();
bd99dc85 2832 }
fa96cb38
PA
2833 }
2834 else
2835 {
2836 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2837 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2838
fa96cb38
PA
2839 if (debug_threads)
2840 {
2841 debug_printf ("linux_wait_1 ret = %s, terminated with "
2842 "signal %d\n",
0bfdf32f 2843 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2844 WTERMSIG (w));
2845 debug_exit ();
2846 }
0d62e5e8 2847 }
fa96cb38 2848
0bfdf32f 2849 return ptid_of (current_thread);
da6d8c04
DJ
2850 }
2851
8090aef2
PA
2852 /* If step-over executes a breakpoint instruction, it means a
2853 gdb/gdbserver breakpoint had been planted on top of a permanent
2854 breakpoint. The PC has been adjusted by
2855 check_stopped_by_breakpoint to point at the breakpoint address.
2856 Advance the PC manually past the breakpoint, otherwise the
2857 program would keep trapping the permanent breakpoint forever. */
2858 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2859 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2860 {
9beb7c4e 2861 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2862
2863 if (debug_threads)
2864 {
2865 debug_printf ("step-over for %s executed software breakpoint\n",
2866 target_pid_to_str (ptid_of (current_thread)));
2867 }
2868
2869 if (increment_pc != 0)
2870 {
2871 struct regcache *regcache
2872 = get_thread_regcache (current_thread, 1);
2873
2874 event_child->stop_pc += increment_pc;
2875 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2876
2877 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2878 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2879 }
2880 }
2881
6bf5e0ba
PA
2882 /* If this event was not handled before, and is not a SIGTRAP, we
2883 report it. SIGILL and SIGSEGV are also treated as traps in case
2884 a breakpoint is inserted at the current PC. If this target does
2885 not support internal breakpoints at all, we also report the
2886 SIGTRAP without further processing; it's of no concern to us. */
2887 maybe_internal_trap
2888 = (supports_breakpoints ()
2889 && (WSTOPSIG (w) == SIGTRAP
2890 || ((WSTOPSIG (w) == SIGILL
2891 || WSTOPSIG (w) == SIGSEGV)
2892 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2893
2894 if (maybe_internal_trap)
2895 {
2896 /* Handle anything that requires bookkeeping before deciding to
2897 report the event or continue waiting. */
2898
2899 /* First check if we can explain the SIGTRAP with an internal
2900 breakpoint, or if we should possibly report the event to GDB.
2901 Do this before anything that may remove or insert a
2902 breakpoint. */
2903 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2904
2905 /* We have a SIGTRAP, possibly a step-over dance has just
2906 finished. If so, tweak the state machine accordingly,
2907 reinsert breakpoints and delete any reinsert (software
2908 single-step) breakpoints. */
2909 step_over_finished = finish_step_over (event_child);
2910
2911 /* Now invoke the callbacks of any internal breakpoints there. */
2912 check_breakpoints (event_child->stop_pc);
2913
219f2f23
PA
2914 /* Handle tracepoint data collecting. This may overflow the
2915 trace buffer, and cause a tracing stop, removing
2916 breakpoints. */
2917 trace_event = handle_tracepoints (event_child);
2918
6bf5e0ba
PA
2919 if (bp_explains_trap)
2920 {
2921 /* If we stepped or ran into an internal breakpoint, we've
2922 already handled it. So next time we resume (from this
2923 PC), we should step over it. */
2924 if (debug_threads)
87ce2a04 2925 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2926
8b07ae33
PA
2927 if (breakpoint_here (event_child->stop_pc))
2928 event_child->need_step_over = 1;
6bf5e0ba
PA
2929 }
2930 }
2931 else
2932 {
2933 /* We have some other signal, possibly a step-over dance was in
2934 progress, and it should be cancelled too. */
2935 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2936 }
2937
2938 /* We have all the data we need. Either report the event to GDB, or
2939 resume threads and keep waiting for more. */
2940
2941 /* If we're collecting a fast tracepoint, finish the collection and
2942 move out of the jump pad before delivering a signal. See
2943 linux_stabilize_threads. */
2944
2945 if (WIFSTOPPED (w)
2946 && WSTOPSIG (w) != SIGTRAP
2947 && supports_fast_tracepoints ()
58b4daa5 2948 && agent_loaded_p ())
fa593d66
PA
2949 {
2950 if (debug_threads)
87ce2a04
DE
2951 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2952 "to defer or adjust it.\n",
0bfdf32f 2953 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2954
2955 /* Allow debugging the jump pad itself. */
0bfdf32f 2956 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2957 && maybe_move_out_of_jump_pad (event_child, &w))
2958 {
2959 enqueue_one_deferred_signal (event_child, &w);
2960
2961 if (debug_threads)
87ce2a04 2962 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2963 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2964
2965 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2966
2967 return ignore_event (ourstatus);
fa593d66
PA
2968 }
2969 }
219f2f23 2970
fa593d66
PA
2971 if (event_child->collecting_fast_tracepoint)
2972 {
2973 if (debug_threads)
87ce2a04
DE
2974 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2975 "Check if we're already there.\n",
0bfdf32f 2976 lwpid_of (current_thread),
87ce2a04 2977 event_child->collecting_fast_tracepoint);
fa593d66
PA
2978
2979 trace_event = 1;
2980
2981 event_child->collecting_fast_tracepoint
2982 = linux_fast_tracepoint_collecting (event_child, NULL);
2983
2984 if (event_child->collecting_fast_tracepoint != 1)
2985 {
2986 /* No longer need this breakpoint. */
2987 if (event_child->exit_jump_pad_bkpt != NULL)
2988 {
2989 if (debug_threads)
87ce2a04
DE
2990 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2991 "stopping all threads momentarily.\n");
fa593d66
PA
2992
2993 /* Other running threads could hit this breakpoint.
2994 We don't handle moribund locations like GDB does,
2995 instead we always pause all threads when removing
2996 breakpoints, so that any step-over or
2997 decr_pc_after_break adjustment is always taken
2998 care of while the breakpoint is still
2999 inserted. */
3000 stop_all_lwps (1, event_child);
fa593d66
PA
3001
3002 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3003 event_child->exit_jump_pad_bkpt = NULL;
3004
3005 unstop_all_lwps (1, event_child);
3006
3007 gdb_assert (event_child->suspended >= 0);
3008 }
3009 }
3010
3011 if (event_child->collecting_fast_tracepoint == 0)
3012 {
3013 if (debug_threads)
87ce2a04
DE
3014 debug_printf ("fast tracepoint finished "
3015 "collecting successfully.\n");
fa593d66
PA
3016
3017 /* We may have a deferred signal to report. */
3018 if (dequeue_one_deferred_signal (event_child, &w))
3019 {
3020 if (debug_threads)
87ce2a04 3021 debug_printf ("dequeued one signal.\n");
fa593d66 3022 }
3c11dd79 3023 else
fa593d66 3024 {
3c11dd79 3025 if (debug_threads)
87ce2a04 3026 debug_printf ("no deferred signals.\n");
fa593d66
PA
3027
3028 if (stabilizing_threads)
3029 {
3030 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3031 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3032
3033 if (debug_threads)
3034 {
3035 debug_printf ("linux_wait_1 ret = %s, stopped "
3036 "while stabilizing threads\n",
0bfdf32f 3037 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3038 debug_exit ();
3039 }
3040
0bfdf32f 3041 return ptid_of (current_thread);
fa593d66
PA
3042 }
3043 }
3044 }
6bf5e0ba
PA
3045 }
3046
e471f25b
PA
3047 /* Check whether GDB would be interested in this event. */
3048
3049 /* If GDB is not interested in this signal, don't stop other
3050 threads, and don't report it to GDB. Just resume the inferior
3051 right away. We do this for threading-related signals as well as
3052 any that GDB specifically requested we ignore. But never ignore
3053 SIGSTOP if we sent it ourselves, and do not ignore signals when
3054 stepping - they may require special handling to skip the signal
c9587f88
AT
3055 handler. Also never ignore signals that could be caused by a
3056 breakpoint. */
e471f25b
PA
3057 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3058 thread library? */
3059 if (WIFSTOPPED (w)
0bfdf32f 3060 && current_thread->last_resume_kind != resume_step
e471f25b 3061 && (
1a981360 3062#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3063 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3064 && (WSTOPSIG (w) == __SIGRTMIN
3065 || WSTOPSIG (w) == __SIGRTMIN + 1))
3066 ||
3067#endif
2ea28649 3068 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3069 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3070 && current_thread->last_resume_kind == resume_stop)
3071 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3072 {
3073 siginfo_t info, *info_p;
3074
3075 if (debug_threads)
87ce2a04 3076 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3077 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3078
0bfdf32f 3079 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3080 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3081 info_p = &info;
3082 else
3083 info_p = NULL;
3084 linux_resume_one_lwp (event_child, event_child->stepping,
3085 WSTOPSIG (w), info_p);
582511be 3086 return ignore_event (ourstatus);
e471f25b
PA
3087 }
3088
c2d6af84
PA
3089 /* Note that all addresses are always "out of the step range" when
3090 there's no range to begin with. */
3091 in_step_range = lwp_in_step_range (event_child);
3092
3093 /* If GDB wanted this thread to single step, and the thread is out
3094 of the step range, we always want to report the SIGTRAP, and let
3095 GDB handle it. Watchpoints should always be reported. So should
3096 signals we can't explain. A SIGTRAP we can't explain could be a
3097 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3098 do, we're be able to handle GDB breakpoints on top of internal
3099 breakpoints, by handling the internal breakpoint and still
3100 reporting the event to GDB. If we don't, we're out of luck, GDB
3101 won't see the breakpoint hit. */
6bf5e0ba 3102 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3103 || (current_thread->last_resume_kind == resume_step
c2d6af84 3104 && !in_step_range)
15c66dd6 3105 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 3106 || (!step_over_finished && !in_step_range
493e2a69 3107 && !bp_explains_trap && !trace_event)
9f3a5c85 3108 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3109 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e
DB
3110 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3111 || extended_event_reported (&event_child->waitstatus));
d3ce09f5
SS
3112
3113 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3114
3115 /* We found no reason GDB would want us to stop. We either hit one
3116 of our own breakpoints, or finished an internal step GDB
3117 shouldn't know about. */
3118 if (!report_to_gdb)
3119 {
3120 if (debug_threads)
3121 {
3122 if (bp_explains_trap)
87ce2a04 3123 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3124 if (step_over_finished)
87ce2a04 3125 debug_printf ("Step-over finished.\n");
219f2f23 3126 if (trace_event)
87ce2a04 3127 debug_printf ("Tracepoint event.\n");
c2d6af84 3128 if (lwp_in_step_range (event_child))
87ce2a04
DE
3129 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3130 paddress (event_child->stop_pc),
3131 paddress (event_child->step_range_start),
3132 paddress (event_child->step_range_end));
de0d863e
DB
3133 if (extended_event_reported (&event_child->waitstatus))
3134 {
3135 char *str = target_waitstatus_to_string (ourstatus);
3136 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3137 lwpid_of (get_lwp_thread (event_child)), str);
3138 xfree (str);
3139 }
6bf5e0ba
PA
3140 }
3141
3142 /* We're not reporting this breakpoint to GDB, so apply the
3143 decr_pc_after_break adjustment to the inferior's regcache
3144 ourselves. */
3145
3146 if (the_low_target.set_pc != NULL)
3147 {
3148 struct regcache *regcache
0bfdf32f 3149 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3150 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3151 }
3152
7984d532
PA
3153 /* We may have finished stepping over a breakpoint. If so,
3154 we've stopped and suspended all LWPs momentarily except the
3155 stepping one. This is where we resume them all again. We're
3156 going to keep waiting, so use proceed, which handles stepping
3157 over the next breakpoint. */
6bf5e0ba 3158 if (debug_threads)
87ce2a04 3159 debug_printf ("proceeding all threads.\n");
7984d532
PA
3160
3161 if (step_over_finished)
3162 unsuspend_all_lwps (event_child);
3163
6bf5e0ba 3164 proceed_all_lwps ();
582511be 3165 return ignore_event (ourstatus);
6bf5e0ba
PA
3166 }
3167
3168 if (debug_threads)
3169 {
0bfdf32f 3170 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3171 {
3172 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3173 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3174 else if (!lwp_in_step_range (event_child))
87ce2a04 3175 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3176 }
15c66dd6 3177 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3178 debug_printf ("Stopped by watchpoint.\n");
582511be 3179 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3180 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3181 if (debug_threads)
87ce2a04 3182 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3183 }
3184
3185 /* Alright, we're going to report a stop. */
3186
582511be 3187 if (!stabilizing_threads)
6bf5e0ba
PA
3188 {
3189 /* In all-stop, stop all threads. */
582511be
PA
3190 if (!non_stop)
3191 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3192
3193 /* If we're not waiting for a specific LWP, choose an event LWP
3194 from among those that have had events. Giving equal priority
3195 to all LWPs that have had events helps prevent
3196 starvation. */
3197 if (ptid_equal (ptid, minus_one_ptid))
3198 {
3199 event_child->status_pending_p = 1;
3200 event_child->status_pending = w;
3201
3202 select_event_lwp (&event_child);
3203
0bfdf32f
GB
3204 /* current_thread and event_child must stay in sync. */
3205 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3206
6bf5e0ba
PA
3207 event_child->status_pending_p = 0;
3208 w = event_child->status_pending;
3209 }
3210
c03e6ccc 3211 if (step_over_finished)
582511be
PA
3212 {
3213 if (!non_stop)
3214 {
3215 /* If we were doing a step-over, all other threads but
3216 the stepping one had been paused in start_step_over,
3217 with their suspend counts incremented. We don't want
3218 to do a full unstop/unpause, because we're in
3219 all-stop mode (so we want threads stopped), but we
3220 still need to unsuspend the other threads, to
3221 decrement their `suspended' count back. */
3222 unsuspend_all_lwps (event_child);
3223 }
3224 else
3225 {
3226 /* If we just finished a step-over, then all threads had
3227 been momentarily paused. In all-stop, that's fine,
3228 we want threads stopped by now anyway. In non-stop,
3229 we need to re-resume threads that GDB wanted to be
3230 running. */
3231 unstop_all_lwps (1, event_child);
3232 }
3233 }
c03e6ccc 3234
fa593d66 3235 /* Stabilize threads (move out of jump pads). */
582511be
PA
3236 if (!non_stop)
3237 stabilize_threads ();
6bf5e0ba
PA
3238 }
3239 else
3240 {
3241 /* If we just finished a step-over, then all threads had been
3242 momentarily paused. In all-stop, that's fine, we want
3243 threads stopped by now anyway. In non-stop, we need to
3244 re-resume threads that GDB wanted to be running. */
3245 if (step_over_finished)
7984d532 3246 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3247 }
3248
de0d863e
DB
3249 if (extended_event_reported (&event_child->waitstatus))
3250 {
3251 /* If the reported event is a fork, vfork or exec, let GDB know. */
3252 ourstatus->kind = event_child->waitstatus.kind;
3253 ourstatus->value = event_child->waitstatus.value;
3254
3255 /* Clear the event lwp's waitstatus since we handled it already. */
3256 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3257 }
3258 else
3259 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3260
582511be 3261 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3262 it was a software breakpoint, and the client doesn't know we can
3263 adjust the breakpoint ourselves. */
3264 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3265 && !swbreak_feature)
582511be
PA
3266 {
3267 int decr_pc = the_low_target.decr_pc_after_break;
3268
3269 if (decr_pc != 0)
3270 {
3271 struct regcache *regcache
3272 = get_thread_regcache (current_thread, 1);
3273 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3274 }
3275 }
3276
0bfdf32f 3277 if (current_thread->last_resume_kind == resume_stop
8336d594 3278 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3279 {
3280 /* A thread that has been requested to stop by GDB with vCont;t,
3281 and it stopped cleanly, so report as SIG0. The use of
3282 SIGSTOP is an implementation detail. */
a493e3e2 3283 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3284 }
0bfdf32f 3285 else if (current_thread->last_resume_kind == resume_stop
8336d594 3286 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3287 {
3288 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3289 but, it stopped for other reasons. */
2ea28649 3290 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3291 }
de0d863e 3292 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3293 {
2ea28649 3294 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3295 }
3296
d50171e4
PA
3297 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3298
bd99dc85 3299 if (debug_threads)
87ce2a04
DE
3300 {
3301 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3302 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3303 ourstatus->kind, ourstatus->value.sig);
3304 debug_exit ();
3305 }
bd99dc85 3306
0bfdf32f 3307 return ptid_of (current_thread);
bd99dc85
PA
3308}
3309
3310/* Get rid of any pending event in the pipe. */
3311static void
3312async_file_flush (void)
3313{
3314 int ret;
3315 char buf;
3316
3317 do
3318 ret = read (linux_event_pipe[0], &buf, 1);
3319 while (ret >= 0 || (ret == -1 && errno == EINTR));
3320}
3321
3322/* Put something in the pipe, so the event loop wakes up. */
3323static void
3324async_file_mark (void)
3325{
3326 int ret;
3327
3328 async_file_flush ();
3329
3330 do
3331 ret = write (linux_event_pipe[1], "+", 1);
3332 while (ret == 0 || (ret == -1 && errno == EINTR));
3333
3334 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3335 be awakened anyway. */
3336}
3337
95954743
PA
3338static ptid_t
3339linux_wait (ptid_t ptid,
3340 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3341{
95954743 3342 ptid_t event_ptid;
bd99dc85 3343
bd99dc85
PA
3344 /* Flush the async file first. */
3345 if (target_is_async_p ())
3346 async_file_flush ();
3347
582511be
PA
3348 do
3349 {
3350 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3351 }
3352 while ((target_options & TARGET_WNOHANG) == 0
3353 && ptid_equal (event_ptid, null_ptid)
3354 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3355
3356 /* If at least one stop was reported, there may be more. A single
3357 SIGCHLD can signal more than one child stop. */
3358 if (target_is_async_p ()
3359 && (target_options & TARGET_WNOHANG) != 0
95954743 3360 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3361 async_file_mark ();
3362
3363 return event_ptid;
da6d8c04
DJ
3364}
3365
/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    /* Remember the first ENOSYS so we only probe the syscall once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;

	/* tkill isn't available on this kernel; fall back to kill.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3393
/* Public entry point for stopping an LWP; delegates to send_sigstop,
   which queues a SIGSTOP unless one is already pending for it.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3399
0d62e5e8 3400static void
02fc4de7 3401send_sigstop (struct lwp_info *lwp)
0d62e5e8 3402{
bd99dc85 3403 int pid;
0d62e5e8 3404
d86d4aaf 3405 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3406
0d62e5e8
DJ
3407 /* If we already have a pending stop signal for this process, don't
3408 send another. */
54a0b537 3409 if (lwp->stop_expected)
0d62e5e8 3410 {
ae13219e 3411 if (debug_threads)
87ce2a04 3412 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3413
0d62e5e8
DJ
3414 return;
3415 }
3416
3417 if (debug_threads)
87ce2a04 3418 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3419
d50171e4 3420 lwp->stop_expected = 1;
bd99dc85 3421 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3422}
3423
7984d532
PA
3424static int
3425send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3426{
d86d4aaf
DE
3427 struct thread_info *thread = (struct thread_info *) entry;
3428 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3429
7984d532
PA
3430 /* Ignore EXCEPT. */
3431 if (lwp == except)
3432 return 0;
3433
02fc4de7 3434 if (lwp->stopped)
7984d532 3435 return 0;
02fc4de7
PA
3436
3437 send_sigstop (lwp);
7984d532
PA
3438 return 0;
3439}
3440
3441/* Increment the suspend count of an LWP, and stop it, if not stopped
3442 yet. */
3443static int
3444suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3445 void *except)
3446{
d86d4aaf
DE
3447 struct thread_info *thread = (struct thread_info *) entry;
3448 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3449
3450 /* Ignore EXCEPT. */
3451 if (lwp == except)
3452 return 0;
3453
3454 lwp->suspended++;
3455
3456 return send_sigstop_callback (entry, except);
02fc4de7
PA
3457}
3458
95954743
PA
3459static void
3460mark_lwp_dead (struct lwp_info *lwp, int wstat)
3461{
3462 /* It's dead, really. */
3463 lwp->dead = 1;
3464
3465 /* Store the exit status for later. */
3466 lwp->status_pending_p = 1;
3467 lwp->status_pending = wstat;
3468
95954743
PA
3469 /* Prevent trying to stop it. */
3470 lwp->stopped = 1;
3471
3472 /* No further stops are expected from a dead lwp. */
3473 lwp->stop_expected = 0;
3474}
3475
fa96cb38
PA
3476/* Wait for all children to stop for the SIGSTOPs we just queued. */
3477
0d62e5e8 3478static void
fa96cb38 3479wait_for_sigstop (void)
0d62e5e8 3480{
0bfdf32f 3481 struct thread_info *saved_thread;
95954743 3482 ptid_t saved_tid;
fa96cb38
PA
3483 int wstat;
3484 int ret;
0d62e5e8 3485
0bfdf32f
GB
3486 saved_thread = current_thread;
3487 if (saved_thread != NULL)
3488 saved_tid = saved_thread->entry.id;
bd99dc85 3489 else
95954743 3490 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3491
d50171e4 3492 if (debug_threads)
fa96cb38 3493 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3494
fa96cb38
PA
3495 /* Passing NULL_PTID as filter indicates we want all events to be
3496 left pending. Eventually this returns when there are no
3497 unwaited-for children left. */
3498 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3499 &wstat, __WALL);
3500 gdb_assert (ret == -1);
0d62e5e8 3501
0bfdf32f
GB
3502 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3503 current_thread = saved_thread;
0d62e5e8
DJ
3504 else
3505 {
3506 if (debug_threads)
87ce2a04 3507 debug_printf ("Previously current thread died.\n");
0d62e5e8 3508
bd99dc85
PA
3509 if (non_stop)
3510 {
3511 /* We can't change the current inferior behind GDB's back,
3512 otherwise, a subsequent command may apply to the wrong
3513 process. */
0bfdf32f 3514 current_thread = NULL;
bd99dc85
PA
3515 }
3516 else
3517 {
3518 /* Set a valid thread as current. */
0bfdf32f 3519 set_desired_thread (0);
bd99dc85 3520 }
0d62e5e8
DJ
3521 }
3522}
3523
fa593d66
PA
3524/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3525 move it out, because we need to report the stop event to GDB. For
3526 example, if the user puts a breakpoint in the jump pad, it's
3527 because she wants to debug it. */
3528
3529static int
3530stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3531{
d86d4aaf
DE
3532 struct thread_info *thread = (struct thread_info *) entry;
3533 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3534
3535 gdb_assert (lwp->suspended == 0);
3536 gdb_assert (lwp->stopped);
3537
3538 /* Allow debugging the jump pad, gdb_collect, etc.. */
3539 return (supports_fast_tracepoints ()
58b4daa5 3540 && agent_loaded_p ()
fa593d66 3541 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3542 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3543 || thread->last_resume_kind == resume_step)
3544 && linux_fast_tracepoint_collecting (lwp, NULL));
3545}
3546
/* for_each_inferior callback: if ENTRY's LWP stopped inside a fast
   tracepoint jump pad for a reason GDB doesn't need to see, resume it
   so it can run out of the pad; otherwise leave it suspended.  Any
   pending signal is deferred (re-queued) before resuming.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  /* Callers guarantee all LWPs are stopped and not yet suspended.  */
  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Clear the pending flag before re-queueing, so the status
	     is delivered once via the deferred-signal mechanism.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      /* Let it run until it is out of the jump pad.  */
      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
3585
3586static int
3587lwp_running (struct inferior_list_entry *entry, void *data)
3588{
d86d4aaf
DE
3589 struct thread_info *thread = (struct thread_info *) entry;
3590 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3591
3592 if (lwp->dead)
3593 return 0;
3594 if (lwp->stopped)
3595 return 0;
3596 return 1;
3597}
3598
7984d532
PA
3599/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3600 If SUSPEND, then also increase the suspend count of every LWP,
3601 except EXCEPT. */
3602
0d62e5e8 3603static void
7984d532 3604stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3605{
bde24c0a
PA
3606 /* Should not be called recursively. */
3607 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3608
87ce2a04
DE
3609 if (debug_threads)
3610 {
3611 debug_enter ();
3612 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3613 suspend ? "stop-and-suspend" : "stop",
3614 except != NULL
d86d4aaf 3615 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3616 : "none");
3617 }
3618
bde24c0a
PA
3619 stopping_threads = (suspend
3620 ? STOPPING_AND_SUSPENDING_THREADS
3621 : STOPPING_THREADS);
7984d532
PA
3622
3623 if (suspend)
d86d4aaf 3624 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3625 else
d86d4aaf 3626 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3627 wait_for_sigstop ();
bde24c0a 3628 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3629
3630 if (debug_threads)
3631 {
3632 debug_printf ("stop_all_lwps done, setting stopping_threads "
3633 "back to !stopping\n");
3634 debug_exit ();
3635 }
0d62e5e8
DJ
3636}
3637
23f238d3
PA
3638/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3639 SIGNAL is nonzero, give it that signal. */
da6d8c04 3640
ce3a066d 3641static void
23f238d3
PA
3642linux_resume_one_lwp_throw (struct lwp_info *lwp,
3643 int step, int signal, siginfo_t *info)
da6d8c04 3644{
d86d4aaf 3645 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3646 struct thread_info *saved_thread;
fa593d66 3647 int fast_tp_collecting;
0d62e5e8 3648
54a0b537 3649 if (lwp->stopped == 0)
0d62e5e8
DJ
3650 return;
3651
fa593d66
PA
3652 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3653
3654 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3655
219f2f23
PA
3656 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3657 user used the "jump" command, or "set $pc = foo"). */
3658 if (lwp->stop_pc != get_pc (lwp))
3659 {
3660 /* Collecting 'while-stepping' actions doesn't make sense
3661 anymore. */
d86d4aaf 3662 release_while_stepping_state_list (thread);
219f2f23
PA
3663 }
3664
0d62e5e8
DJ
3665 /* If we have pending signals or status, and a new signal, enqueue the
3666 signal. Also enqueue the signal if we are waiting to reinsert a
3667 breakpoint; it will be picked up again below. */
3668 if (signal != 0
fa593d66
PA
3669 && (lwp->status_pending_p
3670 || lwp->pending_signals != NULL
3671 || lwp->bp_reinsert != 0
3672 || fast_tp_collecting))
0d62e5e8
DJ
3673 {
3674 struct pending_signals *p_sig;
bca929d3 3675 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3676 p_sig->prev = lwp->pending_signals;
0d62e5e8 3677 p_sig->signal = signal;
32ca6d61
DJ
3678 if (info == NULL)
3679 memset (&p_sig->info, 0, sizeof (siginfo_t));
3680 else
3681 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3682 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3683 }
3684
d50171e4
PA
3685 if (lwp->status_pending_p)
3686 {
3687 if (debug_threads)
87ce2a04
DE
3688 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3689 " has pending status\n",
d86d4aaf 3690 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3691 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3692 return;
3693 }
0d62e5e8 3694
0bfdf32f
GB
3695 saved_thread = current_thread;
3696 current_thread = thread;
0d62e5e8
DJ
3697
3698 if (debug_threads)
87ce2a04 3699 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3700 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3701 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3702
3703 /* This bit needs some thinking about. If we get a signal that
3704 we must report while a single-step reinsert is still pending,
3705 we often end up resuming the thread. It might be better to
3706 (ew) allow a stack of pending events; then we could be sure that
3707 the reinsert happened right away and not lose any signals.
3708
3709 Making this stack would also shrink the window in which breakpoints are
54a0b537 3710 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3711 complete correctness, so it won't solve that problem. It may be
3712 worthwhile just to solve this one, however. */
54a0b537 3713 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3714 {
3715 if (debug_threads)
87ce2a04
DE
3716 debug_printf (" pending reinsert at 0x%s\n",
3717 paddress (lwp->bp_reinsert));
d50171e4 3718
85e00e85 3719 if (can_hardware_single_step ())
d50171e4 3720 {
fa593d66
PA
3721 if (fast_tp_collecting == 0)
3722 {
3723 if (step == 0)
3724 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3725 if (lwp->suspended)
3726 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3727 lwp->suspended);
3728 }
d50171e4
PA
3729
3730 step = 1;
3731 }
0d62e5e8
DJ
3732
3733 /* Postpone any pending signal. It was enqueued above. */
3734 signal = 0;
3735 }
3736
fa593d66
PA
3737 if (fast_tp_collecting == 1)
3738 {
3739 if (debug_threads)
87ce2a04
DE
3740 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3741 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3742 lwpid_of (thread));
fa593d66
PA
3743
3744 /* Postpone any pending signal. It was enqueued above. */
3745 signal = 0;
3746 }
3747 else if (fast_tp_collecting == 2)
3748 {
3749 if (debug_threads)
87ce2a04
DE
3750 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3751 " single-stepping\n",
d86d4aaf 3752 lwpid_of (thread));
fa593d66
PA
3753
3754 if (can_hardware_single_step ())
3755 step = 1;
3756 else
38e08fca
GB
3757 {
3758 internal_error (__FILE__, __LINE__,
3759 "moving out of jump pad single-stepping"
3760 " not implemented on this target");
3761 }
fa593d66
PA
3762
3763 /* Postpone any pending signal. It was enqueued above. */
3764 signal = 0;
3765 }
3766
219f2f23
PA
3767 /* If we have while-stepping actions in this thread set it stepping.
3768 If we have a signal to deliver, it may or may not be set to
3769 SIG_IGN, we don't know. Assume so, and allow collecting
3770 while-stepping into a signal handler. A possible smart thing to
3771 do would be to set an internal breakpoint at the signal return
3772 address, continue, and carry on catching this while-stepping
3773 action only when that breakpoint is hit. A future
3774 enhancement. */
d86d4aaf 3775 if (thread->while_stepping != NULL
219f2f23
PA
3776 && can_hardware_single_step ())
3777 {
3778 if (debug_threads)
87ce2a04 3779 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3780 lwpid_of (thread));
219f2f23
PA
3781 step = 1;
3782 }
3783
582511be 3784 if (the_low_target.get_pc != NULL)
0d62e5e8 3785 {
0bfdf32f 3786 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
3787
3788 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3789
3790 if (debug_threads)
3791 {
3792 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3793 (long) lwp->stop_pc);
3794 }
0d62e5e8
DJ
3795 }
3796
fa593d66
PA
3797 /* If we have pending signals, consume one unless we are trying to
3798 reinsert a breakpoint or we're trying to finish a fast tracepoint
3799 collect. */
3800 if (lwp->pending_signals != NULL
3801 && lwp->bp_reinsert == 0
3802 && fast_tp_collecting == 0)
0d62e5e8
DJ
3803 {
3804 struct pending_signals **p_sig;
3805
54a0b537 3806 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3807 while ((*p_sig)->prev != NULL)
3808 p_sig = &(*p_sig)->prev;
3809
3810 signal = (*p_sig)->signal;
32ca6d61 3811 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3812 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3813 &(*p_sig)->info);
32ca6d61 3814
0d62e5e8
DJ
3815 free (*p_sig);
3816 *p_sig = NULL;
3817 }
3818
aa5ca48f
DE
3819 if (the_low_target.prepare_to_resume != NULL)
3820 the_low_target.prepare_to_resume (lwp);
3821
d86d4aaf 3822 regcache_invalidate_thread (thread);
da6d8c04 3823 errno = 0;
54a0b537 3824 lwp->stepping = step;
d86d4aaf 3825 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3826 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3827 /* Coerce to a uintptr_t first to avoid potential gcc warning
3828 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3829 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 3830
0bfdf32f 3831 current_thread = saved_thread;
da6d8c04 3832 if (errno)
23f238d3
PA
3833 perror_with_name ("resuming thread");
3834
3835 /* Successfully resumed. Clear state that no longer makes sense,
3836 and mark the LWP as running. Must not do this before resuming
3837 otherwise if that fails other code will be confused. E.g., we'd
3838 later try to stop the LWP and hang forever waiting for a stop
3839 status. Note that we must not throw after this is cleared,
3840 otherwise handle_zombie_lwp_error would get confused. */
3841 lwp->stopped = 0;
3842 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3843}
3844
3845/* Called when we try to resume a stopped LWP and that errors out. If
3846 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3847 or about to become), discard the error, clear any pending status
3848 the LWP may have, and return true (we'll collect the exit status
3849 soon enough). Otherwise, return false. */
3850
3851static int
3852check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3853{
3854 struct thread_info *thread = get_lwp_thread (lp);
3855
3856 /* If we get an error after resuming the LWP successfully, we'd
3857 confuse !T state for the LWP being gone. */
3858 gdb_assert (lp->stopped);
3859
3860 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3861 because even if ptrace failed with ESRCH, the tracee may be "not
3862 yet fully dead", but already refusing ptrace requests. In that
3863 case the tracee has 'R (Running)' state for a little bit
3864 (observed in Linux 3.18). See also the note on ESRCH in the
3865 ptrace(2) man page. Instead, check whether the LWP has any state
3866 other than ptrace-stopped. */
3867
3868 /* Don't assume anything if /proc/PID/status can't be read. */
3869 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3870 {
23f238d3
PA
3871 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3872 lp->status_pending_p = 0;
3873 return 1;
3874 }
3875 return 0;
3876}
3877
3878/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3879 disappears while we try to resume it. */
3221518c 3880
23f238d3
PA
3881static void
3882linux_resume_one_lwp (struct lwp_info *lwp,
3883 int step, int signal, siginfo_t *info)
3884{
3885 TRY
3886 {
3887 linux_resume_one_lwp_throw (lwp, step, signal, info);
3888 }
3889 CATCH (ex, RETURN_MASK_ERROR)
3890 {
3891 if (!check_ptrace_stopped_lwp_gone (lwp))
3892 throw_exception (ex);
3221518c 3893 }
23f238d3 3894 END_CATCH
da6d8c04
DJ
3895}
3896
/* Argument bundle passed through find_inferior to
   linux_set_resume_request.  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests from GDB.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 3902
ebcf782c
DE
3903/* This function is called once per thread via find_inferior.
3904 ARG is a pointer to a thread_resume_array struct.
3905 We look up the thread specified by ENTRY in ARG, and mark the thread
3906 with a pointer to the appropriate resume request.
5544ad89
DJ
3907
3908 This algorithm is O(threads * resume elements), but resume elements
3909 is small (and will remain small at least until GDB supports thread
3910 suspension). */
ebcf782c 3911
2bd7c093
PA
3912static int
3913linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3914{
d86d4aaf
DE
3915 struct thread_info *thread = (struct thread_info *) entry;
3916 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3917 int ndx;
2bd7c093 3918 struct thread_resume_array *r;
64386c31 3919
2bd7c093 3920 r = arg;
64386c31 3921
2bd7c093 3922 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3923 {
3924 ptid_t ptid = r->resume[ndx].thread;
3925 if (ptid_equal (ptid, minus_one_ptid)
3926 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
3927 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3928 of PID'. */
d86d4aaf 3929 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
3930 && (ptid_is_pid (ptid)
3931 || ptid_get_lwp (ptid) == -1)))
95954743 3932 {
d50171e4 3933 if (r->resume[ndx].kind == resume_stop
8336d594 3934 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3935 {
3936 if (debug_threads)
87ce2a04
DE
3937 debug_printf ("already %s LWP %ld at GDB's request\n",
3938 (thread->last_status.kind
3939 == TARGET_WAITKIND_STOPPED)
3940 ? "stopped"
3941 : "stopping",
d86d4aaf 3942 lwpid_of (thread));
d50171e4
PA
3943
3944 continue;
3945 }
3946
95954743 3947 lwp->resume = &r->resume[ndx];
8336d594 3948 thread->last_resume_kind = lwp->resume->kind;
fa593d66 3949
c2d6af84
PA
3950 lwp->step_range_start = lwp->resume->step_range_start;
3951 lwp->step_range_end = lwp->resume->step_range_end;
3952
fa593d66
PA
3953 /* If we had a deferred signal to report, dequeue one now.
3954 This can happen if LWP gets more than one signal while
3955 trying to get out of a jump pad. */
3956 if (lwp->stopped
3957 && !lwp->status_pending_p
3958 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3959 {
3960 lwp->status_pending_p = 1;
3961
3962 if (debug_threads)
87ce2a04
DE
3963 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3964 "leaving status pending.\n",
d86d4aaf
DE
3965 WSTOPSIG (lwp->status_pending),
3966 lwpid_of (thread));
fa593d66
PA
3967 }
3968
95954743
PA
3969 return 0;
3970 }
3971 }
2bd7c093
PA
3972
3973 /* No resume action for this thread. */
3974 lwp->resume = NULL;
64386c31 3975
2bd7c093 3976 return 0;
5544ad89
DJ
3977}
3978
20ad9378
DE
3979/* find_inferior callback for linux_resume.
3980 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 3981
bd99dc85
PA
3982static int
3983resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3984{
d86d4aaf
DE
3985 struct thread_info *thread = (struct thread_info *) entry;
3986 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3987
bd99dc85
PA
3988 /* LWPs which will not be resumed are not interesting, because
3989 we might not wait for them next time through linux_wait. */
2bd7c093 3990 if (lwp->resume == NULL)
bd99dc85 3991 return 0;
64386c31 3992
582511be 3993 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
3994 * (int *) flag_p = 1;
3995
3996 return 0;
3997}
3998
3999/* Return 1 if this lwp that GDB wants running is stopped at an
4000 internal breakpoint that we need to step over. It assumes that any
4001 required STOP_PC adjustment has already been propagated to the
4002 inferior's regcache. */
4003
4004static int
4005need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4006{
d86d4aaf
DE
4007 struct thread_info *thread = (struct thread_info *) entry;
4008 struct lwp_info *lwp = get_thread_lwp (thread);
0bfdf32f 4009 struct thread_info *saved_thread;
d50171e4
PA
4010 CORE_ADDR pc;
4011
4012 /* LWPs which will not be resumed are not interesting, because we
4013 might not wait for them next time through linux_wait. */
4014
4015 if (!lwp->stopped)
4016 {
4017 if (debug_threads)
87ce2a04 4018 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
d86d4aaf 4019 lwpid_of (thread));
d50171e4
PA
4020 return 0;
4021 }
4022
8336d594 4023 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
4024 {
4025 if (debug_threads)
87ce2a04
DE
4026 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4027 " stopped\n",
d86d4aaf 4028 lwpid_of (thread));
d50171e4
PA
4029 return 0;
4030 }
4031
7984d532
PA
4032 gdb_assert (lwp->suspended >= 0);
4033
4034 if (lwp->suspended)
4035 {
4036 if (debug_threads)
87ce2a04 4037 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
d86d4aaf 4038 lwpid_of (thread));
7984d532
PA
4039 return 0;
4040 }
4041
d50171e4
PA
4042 if (!lwp->need_step_over)
4043 {
4044 if (debug_threads)
d86d4aaf 4045 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
d50171e4 4046 }
5544ad89 4047
bd99dc85 4048 if (lwp->status_pending_p)
d50171e4
PA
4049 {
4050 if (debug_threads)
87ce2a04
DE
4051 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4052 " status.\n",
d86d4aaf 4053 lwpid_of (thread));
d50171e4
PA
4054 return 0;
4055 }
4056
4057 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4058 or we have. */
4059 pc = get_pc (lwp);
4060
4061 /* If the PC has changed since we stopped, then don't do anything,
4062 and let the breakpoint/tracepoint be hit. This happens if, for
4063 instance, GDB handled the decr_pc_after_break subtraction itself,
4064 GDB is OOL stepping this thread, or the user has issued a "jump"
4065 command, or poked thread's registers herself. */
4066 if (pc != lwp->stop_pc)
4067 {
4068 if (debug_threads)
87ce2a04
DE
4069 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4070 "Old stop_pc was 0x%s, PC is now 0x%s\n",
d86d4aaf
DE
4071 lwpid_of (thread),
4072 paddress (lwp->stop_pc), paddress (pc));
d50171e4
PA
4073
4074 lwp->need_step_over = 0;
4075 return 0;
4076 }
4077
0bfdf32f
GB
4078 saved_thread = current_thread;
4079 current_thread = thread;
d50171e4 4080
8b07ae33 4081 /* We can only step over breakpoints we know about. */
fa593d66 4082 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4083 {
8b07ae33 4084 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4085 though. If the condition is being evaluated on the target's side
4086 and it evaluate to false, step over this breakpoint as well. */
4087 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4088 && gdb_condition_true_at_breakpoint (pc)
4089 && gdb_no_commands_at_breakpoint (pc))
8b07ae33
PA
4090 {
4091 if (debug_threads)
87ce2a04
DE
4092 debug_printf ("Need step over [LWP %ld]? yes, but found"
4093 " GDB breakpoint at 0x%s; skipping step over\n",
d86d4aaf 4094 lwpid_of (thread), paddress (pc));
d50171e4 4095
0bfdf32f 4096 current_thread = saved_thread;
8b07ae33
PA
4097 return 0;
4098 }
4099 else
4100 {
4101 if (debug_threads)
87ce2a04
DE
4102 debug_printf ("Need step over [LWP %ld]? yes, "
4103 "found breakpoint at 0x%s\n",
d86d4aaf 4104 lwpid_of (thread), paddress (pc));
d50171e4 4105
8b07ae33
PA
4106 /* We've found an lwp that needs stepping over --- return 1 so
4107 that find_inferior stops looking. */
0bfdf32f 4108 current_thread = saved_thread;
8b07ae33
PA
4109
4110 /* If the step over is cancelled, this is set again. */
4111 lwp->need_step_over = 0;
4112 return 1;
4113 }
d50171e4
PA
4114 }
4115
0bfdf32f 4116 current_thread = saved_thread;
d50171e4
PA
4117
4118 if (debug_threads)
87ce2a04
DE
4119 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4120 " at 0x%s\n",
d86d4aaf 4121 lwpid_of (thread), paddress (pc));
c6ecbae5 4122
bd99dc85 4123 return 0;
5544ad89
DJ
4124}
4125
d50171e4
PA
4126/* Start a step-over operation on LWP. When LWP stopped at a
4127 breakpoint, to make progress, we need to remove the breakpoint out
4128 of the way. If we let other threads run while we do that, they may
4129 pass by the breakpoint location and miss hitting it. To avoid
4130 that, a step-over momentarily stops all threads while LWP is
4131 single-stepped while the breakpoint is temporarily uninserted from
4132 the inferior. When the single-step finishes, we reinsert the
4133 breakpoint, and let all threads that are supposed to be running,
4134 run again.
4135
4136 On targets that don't support hardware single-step, we don't
4137 currently support full software single-stepping. Instead, we only
4138 support stepping over the thread event breakpoint, by asking the
4139 low target where to place a reinsert breakpoint. Since this
4140 routine assumes the breakpoint being stepped over is a thread event
4141 breakpoint, it usually assumes the return address of the current
4142 function is a good enough place to set the reinsert breakpoint. */
4143
4144static int
4145start_step_over (struct lwp_info *lwp)
4146{
d86d4aaf 4147 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4148 struct thread_info *saved_thread;
d50171e4
PA
4149 CORE_ADDR pc;
4150 int step;
4151
4152 if (debug_threads)
87ce2a04 4153 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
d86d4aaf 4154 lwpid_of (thread));
d50171e4 4155
7984d532
PA
4156 stop_all_lwps (1, lwp);
4157 gdb_assert (lwp->suspended == 0);
d50171e4
PA
4158
4159 if (debug_threads)
87ce2a04 4160 debug_printf ("Done stopping all threads for step-over.\n");
d50171e4
PA
4161
4162 /* Note, we should always reach here with an already adjusted PC,
4163 either by GDB (if we're resuming due to GDB's request), or by our
4164 caller, if we just finished handling an internal breakpoint GDB
4165 shouldn't care about. */
4166 pc = get_pc (lwp);
4167
0bfdf32f
GB
4168 saved_thread = current_thread;
4169 current_thread = thread;
d50171e4
PA
4170
4171 lwp->bp_reinsert = pc;
4172 uninsert_breakpoints_at (pc);
fa593d66 4173 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
4174
4175 if (can_hardware_single_step ())
4176 {
4177 step = 1;
4178 }
4179 else
4180 {
4181 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4182 set_reinsert_breakpoint (raddr);
4183 step = 0;
4184 }
4185
0bfdf32f 4186 current_thread = saved_thread;
d50171e4
PA
4187
4188 linux_resume_one_lwp (lwp, step, 0, NULL);
4189
4190 /* Require next event from this LWP. */
d86d4aaf 4191 step_over_bkpt = thread->entry.id;
d50171e4
PA
4192 return 1;
4193}
4194
4195/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4196 start_step_over, if still there, and delete any reinsert
4197 breakpoints we've set, on non hardware single-step targets. */
4198
4199static int
4200finish_step_over (struct lwp_info *lwp)
4201{
4202 if (lwp->bp_reinsert != 0)
4203 {
4204 if (debug_threads)
87ce2a04 4205 debug_printf ("Finished step over.\n");
d50171e4
PA
4206
4207 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4208 may be no breakpoint to reinsert there by now. */
4209 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4210 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4211
4212 lwp->bp_reinsert = 0;
4213
4214 /* Delete any software-single-step reinsert breakpoints. No
4215 longer needed. We don't have to worry about other threads
4216 hitting this trap, and later not being able to explain it,
4217 because we were stepping over a breakpoint, and we hold all
4218 threads but LWP stopped while doing that. */
4219 if (!can_hardware_single_step ())
4220 delete_reinsert_breakpoints ();
4221
4222 step_over_bkpt = null_ptid;
4223 return 1;
4224 }
4225 else
4226 return 0;
4227}
4228
5544ad89
DJ
4229/* This function is called once per thread. We check the thread's resume
4230 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 4231 stopped; and what signal, if any, it should be sent.
5544ad89 4232
bd99dc85
PA
4233 For threads which we aren't explicitly told otherwise, we preserve
4234 the stepping flag; this is used for stepping over gdbserver-placed
4235 breakpoints.
4236
4237 If pending_flags was set in any thread, we queue any needed
4238 signals, since we won't actually resume. We already have a pending
4239 event to report, so we don't need to preserve any step requests;
4240 they should be re-issued if necessary. */
4241
4242static int
4243linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 4244{
d86d4aaf
DE
4245 struct thread_info *thread = (struct thread_info *) entry;
4246 struct lwp_info *lwp = get_thread_lwp (thread);
bd99dc85 4247 int step;
d50171e4
PA
4248 int leave_all_stopped = * (int *) arg;
4249 int leave_pending;
5544ad89 4250
2bd7c093 4251 if (lwp->resume == NULL)
bd99dc85 4252 return 0;
5544ad89 4253
bd99dc85 4254 if (lwp->resume->kind == resume_stop)
5544ad89 4255 {
bd99dc85 4256 if (debug_threads)
d86d4aaf 4257 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
bd99dc85
PA
4258
4259 if (!lwp->stopped)
4260 {
4261 if (debug_threads)
d86d4aaf 4262 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
bd99dc85 4263
d50171e4
PA
4264 /* Stop the thread, and wait for the event asynchronously,
4265 through the event loop. */
02fc4de7 4266 send_sigstop (lwp);
bd99dc85
PA
4267 }
4268 else
4269 {
4270 if (debug_threads)
87ce2a04 4271 debug_printf ("already stopped LWP %ld\n",
d86d4aaf 4272 lwpid_of (thread));
d50171e4
PA
4273
4274 /* The LWP may have been stopped in an internal event that
4275 was not meant to be notified back to GDB (e.g., gdbserver
4276 breakpoint), so we should be reporting a stop event in
4277 this case too. */
4278
4279 /* If the thread already has a pending SIGSTOP, this is a
4280 no-op. Otherwise, something later will presumably resume
4281 the thread and this will cause it to cancel any pending
4282 operation, due to last_resume_kind == resume_stop. If
4283 the thread already has a pending status to report, we
4284 will still report it the next time we wait - see
4285 status_pending_p_callback. */
1a981360
PA
4286
4287 /* If we already have a pending signal to report, then
4288 there's no need to queue a SIGSTOP, as this means we're
4289 midway through moving the LWP out of the jumppad, and we
4290 will report the pending signal as soon as that is
4291 finished. */
4292 if (lwp->pending_signals_to_report == NULL)
4293 send_sigstop (lwp);
bd99dc85 4294 }
32ca6d61 4295
bd99dc85
PA
4296 /* For stop requests, we're done. */
4297 lwp->resume = NULL;
fc7238bb 4298 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4299 return 0;
5544ad89
DJ
4300 }
4301
bd99dc85
PA
4302 /* If this thread which is about to be resumed has a pending status,
4303 then don't resume any threads - we can just report the pending
4304 status. Make sure to queue any signals that would otherwise be
4305 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
4306 thread has a pending status. If there's a thread that needs the
4307 step-over-breakpoint dance, then don't resume any other thread
4308 but that particular one. */
4309 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 4310
d50171e4 4311 if (!leave_pending)
bd99dc85
PA
4312 {
4313 if (debug_threads)
d86d4aaf 4314 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5544ad89 4315
d50171e4 4316 step = (lwp->resume->kind == resume_step);
2acc282a 4317 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
4318 }
4319 else
4320 {
4321 if (debug_threads)
d86d4aaf 4322 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5544ad89 4323
bd99dc85
PA
4324 /* If we have a new signal, enqueue the signal. */
4325 if (lwp->resume->sig != 0)
4326 {
4327 struct pending_signals *p_sig;
4328 p_sig = xmalloc (sizeof (*p_sig));
4329 p_sig->prev = lwp->pending_signals;
4330 p_sig->signal = lwp->resume->sig;
4331 memset (&p_sig->info, 0, sizeof (siginfo_t));
4332
4333 /* If this is the same signal we were previously stopped by,
4334 make sure to queue its siginfo. We can ignore the return
4335 value of ptrace; if it fails, we'll skip
4336 PTRACE_SETSIGINFO. */
4337 if (WIFSTOPPED (lwp->last_status)
4338 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
d86d4aaf 4339 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4340 &p_sig->info);
bd99dc85
PA
4341
4342 lwp->pending_signals = p_sig;
4343 }
4344 }
5544ad89 4345
fc7238bb 4346 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4347 lwp->resume = NULL;
5544ad89 4348 return 0;
0d62e5e8
DJ
4349}
4350
4351static void
2bd7c093 4352linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 4353{
2bd7c093 4354 struct thread_resume_array array = { resume_info, n };
d86d4aaf 4355 struct thread_info *need_step_over = NULL;
d50171e4
PA
4356 int any_pending;
4357 int leave_all_stopped;
c6ecbae5 4358
87ce2a04
DE
4359 if (debug_threads)
4360 {
4361 debug_enter ();
4362 debug_printf ("linux_resume:\n");
4363 }
4364
2bd7c093 4365 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 4366
d50171e4
PA
4367 /* If there is a thread which would otherwise be resumed, which has
4368 a pending status, then don't resume any threads - we can just
4369 report the pending status. Make sure to queue any signals that
4370 would otherwise be sent. In non-stop mode, we'll apply this
4371 logic to each thread individually. We consume all pending events
4372 before considering to start a step-over (in all-stop). */
4373 any_pending = 0;
bd99dc85 4374 if (!non_stop)
d86d4aaf 4375 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
d50171e4
PA
4376
4377 /* If there is a thread which would otherwise be resumed, which is
4378 stopped at a breakpoint that needs stepping over, then don't
4379 resume any threads - have it step over the breakpoint with all
4380 other threads stopped, then resume all threads again. Make sure
4381 to queue any signals that would otherwise be delivered or
4382 queued. */
4383 if (!any_pending && supports_breakpoints ())
4384 need_step_over
d86d4aaf
DE
4385 = (struct thread_info *) find_inferior (&all_threads,
4386 need_step_over_p, NULL);
d50171e4
PA
4387
4388 leave_all_stopped = (need_step_over != NULL || any_pending);
4389
4390 if (debug_threads)
4391 {
4392 if (need_step_over != NULL)
87ce2a04 4393 debug_printf ("Not resuming all, need step over\n");
d50171e4 4394 else if (any_pending)
87ce2a04
DE
4395 debug_printf ("Not resuming, all-stop and found "
4396 "an LWP with pending status\n");
d50171e4 4397 else
87ce2a04 4398 debug_printf ("Resuming, no pending status or step over needed\n");
d50171e4
PA
4399 }
4400
4401 /* Even if we're leaving threads stopped, queue all signals we'd
4402 otherwise deliver. */
4403 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4404
4405 if (need_step_over)
d86d4aaf 4406 start_step_over (get_thread_lwp (need_step_over));
87ce2a04
DE
4407
4408 if (debug_threads)
4409 {
4410 debug_printf ("linux_resume done\n");
4411 debug_exit ();
4412 }
d50171e4
PA
4413}
4414
4415/* This function is called once per thread. We check the thread's
4416 last resume request, which will tell us whether to resume, step, or
4417 leave the thread stopped. Any signal the client requested to be
4418 delivered has already been enqueued at this point.
4419
4420 If any thread that GDB wants running is stopped at an internal
4421 breakpoint that needs stepping over, we start a step-over operation
4422 on that particular thread, and leave all others stopped. */
4423
7984d532
PA
4424static int
4425proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 4426{
d86d4aaf
DE
4427 struct thread_info *thread = (struct thread_info *) entry;
4428 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4429 int step;
4430
7984d532
PA
4431 if (lwp == except)
4432 return 0;
d50171e4
PA
4433
4434 if (debug_threads)
d86d4aaf 4435 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4436
4437 if (!lwp->stopped)
4438 {
4439 if (debug_threads)
d86d4aaf 4440 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
7984d532 4441 return 0;
d50171e4
PA
4442 }
4443
02fc4de7
PA
4444 if (thread->last_resume_kind == resume_stop
4445 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4446 {
4447 if (debug_threads)
87ce2a04 4448 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4449 lwpid_of (thread));
7984d532 4450 return 0;
d50171e4
PA
4451 }
4452
4453 if (lwp->status_pending_p)
4454 {
4455 if (debug_threads)
87ce2a04 4456 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4457 lwpid_of (thread));
7984d532 4458 return 0;
d50171e4
PA
4459 }
4460
7984d532
PA
4461 gdb_assert (lwp->suspended >= 0);
4462
d50171e4
PA
4463 if (lwp->suspended)
4464 {
4465 if (debug_threads)
d86d4aaf 4466 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
7984d532 4467 return 0;
d50171e4
PA
4468 }
4469
1a981360
PA
4470 if (thread->last_resume_kind == resume_stop
4471 && lwp->pending_signals_to_report == NULL
4472 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
4473 {
4474 /* We haven't reported this LWP as stopped yet (otherwise, the
4475 last_status.kind check above would catch it, and we wouldn't
4476 reach here. This LWP may have been momentarily paused by a
4477 stop_all_lwps call while handling for example, another LWP's
4478 step-over. In that case, the pending expected SIGSTOP signal
4479 that was queued at vCont;t handling time will have already
4480 been consumed by wait_for_sigstop, and so we need to requeue
4481 another one here. Note that if the LWP already has a SIGSTOP
4482 pending, this is a no-op. */
4483
4484 if (debug_threads)
87ce2a04
DE
4485 debug_printf ("Client wants LWP %ld to stop. "
4486 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 4487 lwpid_of (thread));
02fc4de7
PA
4488
4489 send_sigstop (lwp);
4490 }
4491
8336d594 4492 step = thread->last_resume_kind == resume_step;
d50171e4 4493 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4494 return 0;
4495}
4496
4497static int
4498unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4499{
d86d4aaf
DE
4500 struct thread_info *thread = (struct thread_info *) entry;
4501 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4502
4503 if (lwp == except)
4504 return 0;
4505
4506 lwp->suspended--;
4507 gdb_assert (lwp->suspended >= 0);
4508
4509 return proceed_one_lwp (entry, except);
d50171e4
PA
4510}
4511
4512/* When we finish a step-over, set threads running again. If there's
4513 another thread that may need a step-over, now's the time to start
4514 it. Eventually, we'll move all threads past their breakpoints. */
4515
4516static void
4517proceed_all_lwps (void)
4518{
d86d4aaf 4519 struct thread_info *need_step_over;
d50171e4
PA
4520
4521 /* If there is a thread which would otherwise be resumed, which is
4522 stopped at a breakpoint that needs stepping over, then don't
4523 resume any threads - have it step over the breakpoint with all
4524 other threads stopped, then resume all threads again. */
4525
4526 if (supports_breakpoints ())
4527 {
4528 need_step_over
d86d4aaf
DE
4529 = (struct thread_info *) find_inferior (&all_threads,
4530 need_step_over_p, NULL);
d50171e4
PA
4531
4532 if (need_step_over != NULL)
4533 {
4534 if (debug_threads)
87ce2a04
DE
4535 debug_printf ("proceed_all_lwps: found "
4536 "thread %ld needing a step-over\n",
4537 lwpid_of (need_step_over));
d50171e4 4538
d86d4aaf 4539 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4540 return;
4541 }
4542 }
5544ad89 4543
d50171e4 4544 if (debug_threads)
87ce2a04 4545 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4546
d86d4aaf 4547 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4548}
4549
4550/* Stopped LWPs that the client wanted to be running, that don't have
4551 pending statuses, are set to run again, except for EXCEPT, if not
4552 NULL. This undoes a stop_all_lwps call. */
4553
4554static void
7984d532 4555unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4556{
5544ad89
DJ
4557 if (debug_threads)
4558 {
87ce2a04 4559 debug_enter ();
d50171e4 4560 if (except)
87ce2a04 4561 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4562 lwpid_of (get_lwp_thread (except)));
5544ad89 4563 else
87ce2a04 4564 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4565 }
4566
7984d532 4567 if (unsuspend)
d86d4aaf 4568 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4569 else
d86d4aaf 4570 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4571
4572 if (debug_threads)
4573 {
4574 debug_printf ("unstop_all_lwps done\n");
4575 debug_exit ();
4576 }
0d62e5e8
DJ
4577}
4578
58caa3dc
DJ
4579
4580#ifdef HAVE_LINUX_REGSETS
4581
1faeff08
MR
4582#define use_linux_regsets 1
4583
030031ee
PA
4584/* Returns true if REGSET has been disabled. */
4585
4586static int
4587regset_disabled (struct regsets_info *info, struct regset_info *regset)
4588{
4589 return (info->disabled_regsets != NULL
4590 && info->disabled_regsets[regset - info->regsets]);
4591}
4592
4593/* Disable REGSET. */
4594
4595static void
4596disable_regset (struct regsets_info *info, struct regset_info *regset)
4597{
4598 int dr_offset;
4599
4600 dr_offset = regset - info->regsets;
4601 if (info->disabled_regsets == NULL)
4602 info->disabled_regsets = xcalloc (1, info->num_regsets);
4603 info->disabled_regsets[dr_offset] = 1;
4604}
4605
/* Fetch the inferior's registers into REGCACHE, one regset at a time,
   using each regset's ptrace "get" request.  Returns 0 if the general
   registers were among the regsets fetched, 1 otherwise (so the
   caller knows it must fetch them through another mechanism).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regset table is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty regsets and regsets previously disabled for this
	 process mode.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* A note type means the PTRACE_GETREGSET interface: the data
	     argument is an iovec, and the note type goes in arg 3.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes the data/address arguments in the
	 opposite order.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Copy the kernel's data into the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4677
/* Write the contents of REGCACHE back to the inferior, one regset at
   a time, using each regset's ptrace "set" request.  Returns 0 if the
   general registers were among the regsets written (or if the process
   has disappeared), 1 otherwise so the caller knows to store them
   some other way.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regset table is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty or disabled regsets, and regsets with no
	 fill_function (i.e. read-only regsets).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* PTRACE_GETREGSET/SETREGSET style: pass an iovec, with the
	     note type in arg 3.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace argument order differs.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4765
1faeff08 4766#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4767
1faeff08 4768#define use_linux_regsets 0
3aee8918
PA
4769#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4770#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4771
58caa3dc 4772#endif
1faeff08
MR
4773
4774/* Return 1 if register REGNO is supported by one of the regset ptrace
4775 calls or 0 if it has to be transferred individually. */
4776
4777static int
3aee8918 4778linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4779{
4780 unsigned char mask = 1 << (regno % 8);
4781 size_t index = regno / 8;
4782
4783 return (use_linux_regsets
3aee8918
PA
4784 && (regs_info->regset_bitmap == NULL
4785 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4786}
4787
58caa3dc 4788#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4789
4790int
3aee8918 4791register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4792{
4793 int addr;
4794
3aee8918 4795 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4796 error ("Invalid register number %d.", regnum);
4797
3aee8918 4798 addr = usrregs->regmap[regnum];
1faeff08
MR
4799
4800 return addr;
4801}
4802
/* Fetch one register (REGNO) from the inferior's ptrace user area
   into REGCACHE.  Silently returns for registers that are out of
   range, that the low target says cannot be fetched, or that have no
   user-area address.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words, since PTRACE_PEEKUSER reads one word at a time.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* ptrace peek returns the data in its result, so errno must be
	 cleared before each call to detect failure.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to (e.g. for
     registers stored in a different layout in the user area).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4846
/* Store one register (REGNO) from REGCACHE into the inferior's ptrace
   user area.  Silently returns for registers that are out of range,
   that the low target says cannot be stored, or that have no
   user-area address.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; the trailing padding is zeroed below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target produce the raw bytes if it has a custom
     layout for this register.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  /* Only complain for registers the low target claims are
	     storable.  */
	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4901
4902/* Fetch all registers, or just one, from the child process.
4903 If REGNO is -1, do this for all registers, skipping any that are
4904 assumed to have been retrieved by regsets_fetch_inferior_registers,
4905 unless ALL is non-zero.
4906 Otherwise, REGNO specifies which register (so we can save time). */
4907static void
3aee8918
PA
4908usr_fetch_inferior_registers (const struct regs_info *regs_info,
4909 struct regcache *regcache, int regno, int all)
1faeff08 4910{
3aee8918
PA
4911 struct usrregs_info *usr = regs_info->usrregs;
4912
1faeff08
MR
4913 if (regno == -1)
4914 {
3aee8918
PA
4915 for (regno = 0; regno < usr->num_regs; regno++)
4916 if (all || !linux_register_in_regsets (regs_info, regno))
4917 fetch_register (usr, regcache, regno);
1faeff08
MR
4918 }
4919 else
3aee8918 4920 fetch_register (usr, regcache, regno);
1faeff08
MR
4921}
4922
4923/* Store our register values back into the inferior.
4924 If REGNO is -1, do this for all registers, skipping any that are
4925 assumed to have been saved by regsets_store_inferior_registers,
4926 unless ALL is non-zero.
4927 Otherwise, REGNO specifies which register (so we can save time). */
4928static void
3aee8918
PA
4929usr_store_inferior_registers (const struct regs_info *regs_info,
4930 struct regcache *regcache, int regno, int all)
1faeff08 4931{
3aee8918
PA
4932 struct usrregs_info *usr = regs_info->usrregs;
4933
1faeff08
MR
4934 if (regno == -1)
4935 {
3aee8918
PA
4936 for (regno = 0; regno < usr->num_regs; regno++)
4937 if (all || !linux_register_in_regsets (regs_info, regno))
4938 store_register (usr, regcache, regno);
1faeff08
MR
4939 }
4940 else
3aee8918 4941 store_register (usr, regcache, regno);
1faeff08
MR
4942}
4943
4944#else /* !HAVE_LINUX_USRREGS */
4945
3aee8918
PA
4946#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4947#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4948
58caa3dc 4949#endif
1faeff08
MR
4950
4951
4952void
4953linux_fetch_registers (struct regcache *regcache, int regno)
4954{
4955 int use_regsets;
4956 int all = 0;
3aee8918 4957 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
4958
4959 if (regno == -1)
4960 {
3aee8918
PA
4961 if (the_low_target.fetch_register != NULL
4962 && regs_info->usrregs != NULL)
4963 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
c14dfd32
PA
4964 (*the_low_target.fetch_register) (regcache, regno);
4965
3aee8918
PA
4966 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4967 if (regs_info->usrregs != NULL)
4968 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
4969 }
4970 else
4971 {
c14dfd32
PA
4972 if (the_low_target.fetch_register != NULL
4973 && (*the_low_target.fetch_register) (regcache, regno))
4974 return;
4975
3aee8918 4976 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 4977 if (use_regsets)
3aee8918
PA
4978 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4979 regcache);
4980 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4981 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 4982 }
58caa3dc
DJ
4983}
4984
4985void
442ea881 4986linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 4987{
1faeff08
MR
4988 int use_regsets;
4989 int all = 0;
3aee8918 4990 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
4991
4992 if (regno == -1)
4993 {
3aee8918
PA
4994 all = regsets_store_inferior_registers (regs_info->regsets_info,
4995 regcache);
4996 if (regs_info->usrregs != NULL)
4997 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
4998 }
4999 else
5000 {
3aee8918 5001 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5002 if (use_regsets)
3aee8918
PA
5003 all = regsets_store_inferior_registers (regs_info->regsets_info,
5004 regcache);
5005 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5006 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5007 }
58caa3dc
DJ
5008}
5009
da6d8c04 5010
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   an errno value on failure.  Tries /proc/PID/mem first (one syscall
   for the whole transfer), falling back to word-at-a-time ptrace
   peeks.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  Only the words read
     before the first failure (i of them) are valid, so copy at most
     that much, minus the leading alignment slop.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5098
93ae6fdc
PA
5099/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5100 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5101 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5102
ce3a066d 5103static int
f450004a 5104linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5105{
5106 register int i;
5107 /* Round starting address down to longword boundary. */
5108 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5109 /* Round ending address up; get number of longwords that makes. */
5110 register int count
493e2a69
MS
5111 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5112 / sizeof (PTRACE_XFER_TYPE);
5113
da6d8c04 5114 /* Allocate buffer of that many longwords. */
493e2a69
MS
5115 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5116 alloca (count * sizeof (PTRACE_XFER_TYPE));
5117
0bfdf32f 5118 int pid = lwpid_of (current_thread);
da6d8c04 5119
f0ae6fc3
PA
5120 if (len == 0)
5121 {
5122 /* Zero length write always succeeds. */
5123 return 0;
5124 }
5125
0d62e5e8
DJ
5126 if (debug_threads)
5127 {
58d6951d
DJ
5128 /* Dump up to four bytes. */
5129 unsigned int val = * (unsigned int *) myaddr;
5130 if (len == 1)
5131 val = val & 0xff;
5132 else if (len == 2)
5133 val = val & 0xffff;
5134 else if (len == 3)
5135 val = val & 0xffffff;
de0d863e
DB
5136 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5137 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
0d62e5e8
DJ
5138 }
5139
da6d8c04
DJ
5140 /* Fill start and end extra bytes of buffer with existing memory data. */
5141
93ae6fdc 5142 errno = 0;
14ce3065
DE
5143 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5144 about coercing an 8 byte integer to a 4 byte pointer. */
5145 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5146 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5147 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5148 if (errno)
5149 return errno;
da6d8c04
DJ
5150
5151 if (count > 1)
5152 {
93ae6fdc 5153 errno = 0;
da6d8c04 5154 buffer[count - 1]
95954743 5155 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5156 /* Coerce to a uintptr_t first to avoid potential gcc warning
5157 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5158 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5159 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5160 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5161 if (errno)
5162 return errno;
da6d8c04
DJ
5163 }
5164
93ae6fdc 5165 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5166
493e2a69
MS
5167 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5168 myaddr, len);
da6d8c04
DJ
5169
5170 /* Write the entire buffer. */
5171
5172 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5173 {
5174 errno = 0;
14ce3065
DE
5175 ptrace (PTRACE_POKETEXT, pid,
5176 /* Coerce to a uintptr_t first to avoid potential gcc warning
5177 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5178 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5179 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5180 if (errno)
5181 return errno;
5182 }
5183
5184 return 0;
5185}
2f2893d9
DJ
5186
/* Target hook called when symbol lookup becomes possible; used here
   to initialize libthread_db-based thread discovery, once per
   process.  No-op when gdbserver was built without thread_db.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process?  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5202
/* Target hook to interrupt the inferior, equivalent to the user
   typing ^C on its controlling terminal.  */
static void
linux_request_interrupt (void)
{
  /* signal_pid is the pid of the main inferior, defined elsewhere in
     gdbserver.  */
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5212
aa691b87
RM
5213/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5214 to debugger memory starting at MYADDR. */
5215
5216static int
f450004a 5217linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5218{
5219 char filename[PATH_MAX];
5220 int fd, n;
0bfdf32f 5221 int pid = lwpid_of (current_thread);
aa691b87 5222
6cebaf6e 5223 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5224
5225 fd = open (filename, O_RDONLY);
5226 if (fd < 0)
5227 return -1;
5228
5229 if (offset != (CORE_ADDR) 0
5230 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5231 n = -1;
5232 else
5233 n = read (fd, myaddr, len);
5234
5235 close (fd);
5236
5237 return n;
5238}
5239
d993e290
PA
5240/* These breakpoint and watchpoint related wrapper functions simply
5241 pass on the function call if the target has registered a
5242 corresponding function. */
e013ee27
OF
5243
5244static int
802e8e6d
PA
5245linux_supports_z_point_type (char z_type)
5246{
5247 return (the_low_target.supports_z_point_type != NULL
5248 && the_low_target.supports_z_point_type (z_type));
5249}
5250
5251static int
5252linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5253 int size, struct raw_breakpoint *bp)
e013ee27 5254{
c8f4bfdd
YQ
5255 if (type == raw_bkpt_type_sw)
5256 return insert_memory_breakpoint (bp);
5257 else if (the_low_target.insert_point != NULL)
802e8e6d 5258 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5259 else
5260 /* Unsupported (see target.h). */
5261 return 1;
5262}
5263
5264static int
802e8e6d
PA
5265linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5266 int size, struct raw_breakpoint *bp)
e013ee27 5267{
c8f4bfdd
YQ
5268 if (type == raw_bkpt_type_sw)
5269 return remove_memory_breakpoint (bp);
5270 else if (the_low_target.remove_point != NULL)
802e8e6d 5271 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5272 else
5273 /* Unsupported (see target.h). */
5274 return 1;
5275}
5276
3e572f71
PA
5277/* Implement the to_stopped_by_sw_breakpoint target_ops
5278 method. */
5279
5280static int
5281linux_stopped_by_sw_breakpoint (void)
5282{
5283 struct lwp_info *lwp = get_thread_lwp (current_thread);
5284
5285 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5286}
5287
5288/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5289 method. */
5290
/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  Non-zero iff siginfo-based SIGTRAP reason detection is
   available on this build.  */
static int
linux_supports_stopped_by_sw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5296
5297/* Implement the to_stopped_by_hw_breakpoint target_ops
5298 method. */
5299
5300static int
5301linux_stopped_by_hw_breakpoint (void)
5302{
5303 struct lwp_info *lwp = get_thread_lwp (current_thread);
5304
5305 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5306}
5307
5308/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5309 method. */
5310
/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  Non-zero iff siginfo-based SIGTRAP reason detection is
   available on this build.  */
static int
linux_supports_stopped_by_hw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5316
45614f15
YQ
5317/* Implement the supports_conditional_breakpoints target_ops
5318 method. */
5319
/* Implement the supports_conditional_breakpoints target_ops
   method.  */

static int
linux_supports_conditional_breakpoints (void)
{
  /* GDBserver needs to step over the breakpoint if the condition is
     false.  GDBserver software single step is too simple, so disable
     conditional breakpoints if the target doesn't have hardware single
     step.  */
  return can_hardware_single_step ();
}
5329
e013ee27
OF
5330static int
5331linux_stopped_by_watchpoint (void)
5332{
0bfdf32f 5333 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5334
15c66dd6 5335 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5336}
5337
5338static CORE_ADDR
5339linux_stopped_data_address (void)
5340{
0bfdf32f 5341 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5342
5343 return lwp->stopped_data_address;
e013ee27
OF
5344}
5345
db0dfaa0
LM
5346#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5347 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5348 && defined(PT_TEXT_END_ADDR)
5349
5350/* This is only used for targets that define PT_TEXT_ADDR,
5351 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5352 the target has different ways of acquiring this information, like
5353 loadmaps. */
52fb6437
NS
5354
5355/* Under uClinux, programs are loaded at non-zero offsets, which we need
5356 to tell gdb about. */
5357
5358static int
5359linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5360{
52fb6437 5361 unsigned long text, text_end, data;
62828379 5362 int pid = lwpid_of (current_thread);
52fb6437
NS
5363
5364 errno = 0;
5365
b8e1b30e
LM
5366 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5367 (PTRACE_TYPE_ARG4) 0);
5368 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5369 (PTRACE_TYPE_ARG4) 0);
5370 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5371 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5372
5373 if (errno == 0)
5374 {
5375 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5376 used by gdb) are relative to the beginning of the program,
5377 with the data segment immediately following the text segment.
5378 However, the actual runtime layout in memory may put the data
5379 somewhere else, so when we send gdb a data base-address, we
5380 use the real data base address and subtract the compile-time
5381 data base-address from it (which is just the length of the
5382 text segment). BSS immediately follows data in both
5383 cases. */
52fb6437
NS
5384 *text_p = text;
5385 *data_p = data - (text_end - text);
1b3f6016 5386
52fb6437
NS
5387 return 1;
5388 }
52fb6437
NS
5389 return 0;
5390}
5391#endif
5392
07e059b5
VP
5393static int
5394linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5395 unsigned char *readbuf, unsigned const char *writebuf,
5396 CORE_ADDR offset, int len)
07e059b5 5397{
d26e3629 5398 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5399}
5400
d0722149
DE
5401/* Convert a native/host siginfo object, into/from the siginfo in the
5402 layout of the inferiors' architecture. */
5403
5404static void
a5362b9a 5405siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5406{
5407 int done = 0;
5408
5409 if (the_low_target.siginfo_fixup != NULL)
5410 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5411
5412 /* If there was no callback, or the callback didn't do anything,
5413 then just do a straight memcpy. */
5414 if (!done)
5415 {
5416 if (direction == 1)
a5362b9a 5417 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5418 else
a5362b9a 5419 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5420 }
5421}
5422
4aa995e1
PA
5423static int
5424linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5425 unsigned const char *writebuf, CORE_ADDR offset, int len)
5426{
d0722149 5427 int pid;
a5362b9a
TS
5428 siginfo_t siginfo;
5429 char inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5430
0bfdf32f 5431 if (current_thread == NULL)
4aa995e1
PA
5432 return -1;
5433
0bfdf32f 5434 pid = lwpid_of (current_thread);
4aa995e1
PA
5435
5436 if (debug_threads)
87ce2a04
DE
5437 debug_printf ("%s siginfo for lwp %d.\n",
5438 readbuf != NULL ? "Reading" : "Writing",
5439 pid);
4aa995e1 5440
0adea5f7 5441 if (offset >= sizeof (siginfo))
4aa995e1
PA
5442 return -1;
5443
b8e1b30e 5444 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5445 return -1;
5446
d0722149
DE
5447 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5448 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5449 inferior with a 64-bit GDBSERVER should look the same as debugging it
5450 with a 32-bit GDBSERVER, we need to convert it. */
5451 siginfo_fixup (&siginfo, inf_siginfo, 0);
5452
4aa995e1
PA
5453 if (offset + len > sizeof (siginfo))
5454 len = sizeof (siginfo) - offset;
5455
5456 if (readbuf != NULL)
d0722149 5457 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5458 else
5459 {
d0722149
DE
5460 memcpy (inf_siginfo + offset, writebuf, len);
5461
5462 /* Convert back to ptrace layout before flushing it out. */
5463 siginfo_fixup (&siginfo, inf_siginfo, 1);
5464
b8e1b30e 5465 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5466 return -1;
5467 }
5468
5469 return len;
5470}
5471
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno for the interrupted code; library calls below may
     clobber it.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  /* In async mode, poke the event pipe so the event loop calls
     linux_wait.  */
  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
5498
/* Target hook: the Linux target always supports non-stop mode.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
5504
/* Enable (ENABLE non-zero) or disable async (non-stop) mode: create
   or tear down the self-pipe used to wake the event loop from the
   SIGCHLD handler, and (un)register its read end with the event loop.
   Returns the previous async state.  SIGCHLD is blocked around the
   transition so a signal cannot observe a half-set-up pipe.  */
static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Creation failed: restore a consistent "disabled"
		 state before warning and bailing out.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Both ends non-blocking: the handler's write and the event
	     loop's drain must never stall.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5559
/* Switch non-stop mode on or off per NONSTOP.  Returns 0 on success,
   -1 if the async state could not be changed accordingly.  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  return (target_is_async_p () == (nonstop != 0)) ? 0 : -1;
}
5571
cf8fd78b
PA
5572static int
5573linux_supports_multi_process (void)
5574{
5575 return 1;
5576}
5577
89245bc0
DB
/* Check if fork events are supported.  They are iff the kernel's
   PTRACE_O_TRACEFORK support was detected.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
5585
/* Check if vfork events are supported.  Uses the same kernel
   capability check as fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
5593
de0d863e
DB
/* Callback for 'find_inferior'.  Set the (possibly changed) ptrace
   options for the specified lwp.  ARGS is unused.  Always returns 0
   so find_inferior visits every thread.  */

static int
reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
				   void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (!lwp->stopped)
    {
      /* Stop the lwp so we can modify its ptrace options.  The flag
	 makes the stop handler apply the options once it stops.  */
      lwp->must_set_ptrace_flags = 1;
      linux_stop_lwp (lwp);
    }
  else
    {
      /* Already stopped; go ahead and set the ptrace options.  */
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (thread), options);
      lwp->must_set_ptrace_flags = 0;
    }

  return 0;
}
5622
5623/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5624 ptrace flags for all inferiors. This is in case the new GDB connection
5625 doesn't support the same set of events that the previous one did. */
5626
5627static void
5628linux_handle_new_gdb_connection (void)
5629{
5630 pid_t pid;
5631
5632 /* Request that all the lwps reset their ptrace options. */
5633 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
5634}
5635
03583c20
UW
/* Report whether address-space randomization can be disabled for new
   inferiors; possible only when personality(2) support was detected
   at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 5645
d1feda86
YQ
/* The in-process agent is always usable on GNU/Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5651
c2d6af84
PA
5652static int
5653linux_supports_range_stepping (void)
5654{
5655 if (*the_low_target.supports_range_stepping == NULL)
5656 return 0;
5657
5658 return (*the_low_target.supports_range_stepping) ();
5659}
5660
efcbbd14
UW
5661/* Enumerate spufs IDs for process PID. */
5662static int
5663spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5664{
5665 int pos = 0;
5666 int written = 0;
5667 char path[128];
5668 DIR *dir;
5669 struct dirent *entry;
5670
5671 sprintf (path, "/proc/%ld/fd", pid);
5672 dir = opendir (path);
5673 if (!dir)
5674 return -1;
5675
5676 rewinddir (dir);
5677 while ((entry = readdir (dir)) != NULL)
5678 {
5679 struct stat st;
5680 struct statfs stfs;
5681 int fd;
5682
5683 fd = atoi (entry->d_name);
5684 if (!fd)
5685 continue;
5686
5687 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5688 if (stat (path, &st) != 0)
5689 continue;
5690 if (!S_ISDIR (st.st_mode))
5691 continue;
5692
5693 if (statfs (path, &stfs) != 0)
5694 continue;
5695 if (stfs.f_type != SPUFS_MAGIC)
5696 continue;
5697
5698 if (pos >= offset && pos + 4 <= offset + len)
5699 {
5700 *(unsigned int *)(buf + pos - offset) = fd;
5701 written += 4;
5702 }
5703 pos += 4;
5704 }
5705
5706 closedir (dir);
5707 return written;
5708}
5709
5710/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5711 object type, using the /proc file system. */
5712static int
5713linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5714 unsigned const char *writebuf,
5715 CORE_ADDR offset, int len)
5716{
0bfdf32f 5717 long pid = lwpid_of (current_thread);
efcbbd14
UW
5718 char buf[128];
5719 int fd = 0;
5720 int ret = 0;
5721
5722 if (!writebuf && !readbuf)
5723 return -1;
5724
5725 if (!*annex)
5726 {
5727 if (!readbuf)
5728 return -1;
5729 else
5730 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5731 }
5732
5733 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5734 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5735 if (fd <= 0)
5736 return -1;
5737
5738 if (offset != 0
5739 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5740 {
5741 close (fd);
5742 return 0;
5743 }
5744
5745 if (writebuf)
5746 ret = write (fd, writebuf, (size_t) len);
5747 else
5748 ret = read (fd, readbuf, (size_t) len);
5749
5750 close (fd);
5751 return ret;
5752}
5753
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a no-MMU target load map.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Fetch the no-MMU load map for the "exec" or "interp" ANNEX via
   ptrace and copy up to LEN bytes starting at OFFSET into MYADDR.
   Returns the number of bytes copied, or -1 on error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5831
1570b33e
L
5832static void
5833linux_process_qsupported (const char *query)
5834{
5835 if (the_low_target.process_qsupported != NULL)
5836 the_low_target.process_qsupported (query);
5837}
5838
219f2f23
PA
5839static int
5840linux_supports_tracepoints (void)
5841{
5842 if (*the_low_target.supports_tracepoints == NULL)
5843 return 0;
5844
5845 return (*the_low_target.supports_tracepoints) ();
5846}
5847
5848static CORE_ADDR
5849linux_read_pc (struct regcache *regcache)
5850{
5851 if (the_low_target.get_pc == NULL)
5852 return 0;
5853
5854 return (*the_low_target.get_pc) (regcache);
5855}
5856
5857static void
5858linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5859{
5860 gdb_assert (the_low_target.set_pc != NULL);
5861
5862 (*the_low_target.set_pc) (regcache, pc);
5863}
5864
8336d594
PA
5865static int
5866linux_thread_stopped (struct thread_info *thread)
5867{
5868 return get_thread_lwp (thread)->stopped;
5869}
5870
5871/* This exposes stop-all-threads functionality to other modules. */
5872
5873static void
7984d532 5874linux_pause_all (int freeze)
8336d594 5875{
7984d532
PA
5876 stop_all_lwps (freeze, NULL);
5877}
5878
5879/* This exposes unstop-all-threads functionality to other gdbserver
5880 modules. */
5881
5882static void
5883linux_unpause_all (int unfreeze)
5884{
5885 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5886}
5887
90d74c30
PA
5888static int
5889linux_prepare_to_access_memory (void)
5890{
5891 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5892 running LWP. */
5893 if (non_stop)
5894 linux_pause_all (1);
5895 return 0;
5896}
5897
5898static void
0146f85b 5899linux_done_accessing_memory (void)
90d74c30
PA
5900{
5901 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5902 running LWP. */
5903 if (non_stop)
5904 linux_unpause_all (1);
5905}
5906
fa593d66
PA
5907static int
5908linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5909 CORE_ADDR collector,
5910 CORE_ADDR lockaddr,
5911 ULONGEST orig_size,
5912 CORE_ADDR *jump_entry,
405f8e94
SS
5913 CORE_ADDR *trampoline,
5914 ULONGEST *trampoline_size,
fa593d66
PA
5915 unsigned char *jjump_pad_insn,
5916 ULONGEST *jjump_pad_insn_size,
5917 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
5918 CORE_ADDR *adjusted_insn_addr_end,
5919 char *err)
fa593d66
PA
5920{
5921 return (*the_low_target.install_fast_tracepoint_jump_pad)
5922 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
5923 jump_entry, trampoline, trampoline_size,
5924 jjump_pad_insn, jjump_pad_insn_size,
5925 adjusted_insn_addr, adjusted_insn_addr_end,
5926 err);
fa593d66
PA
5927}
5928
6a271cae
PA
5929static struct emit_ops *
5930linux_emit_ops (void)
5931{
5932 if (the_low_target.emit_ops != NULL)
5933 return (*the_low_target.emit_ops) ();
5934 else
5935 return NULL;
5936}
5937
405f8e94
SS
5938static int
5939linux_get_min_fast_tracepoint_insn_len (void)
5940{
5941 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5942}
5943
2268b414
JK
5944/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5945
5946static int
5947get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5948 CORE_ADDR *phdr_memaddr, int *num_phdr)
5949{
5950 char filename[PATH_MAX];
5951 int fd;
5952 const int auxv_size = is_elf64
5953 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5954 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5955
5956 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5957
5958 fd = open (filename, O_RDONLY);
5959 if (fd < 0)
5960 return 1;
5961
5962 *phdr_memaddr = 0;
5963 *num_phdr = 0;
5964 while (read (fd, buf, auxv_size) == auxv_size
5965 && (*phdr_memaddr == 0 || *num_phdr == 0))
5966 {
5967 if (is_elf64)
5968 {
5969 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5970
5971 switch (aux->a_type)
5972 {
5973 case AT_PHDR:
5974 *phdr_memaddr = aux->a_un.a_val;
5975 break;
5976 case AT_PHNUM:
5977 *num_phdr = aux->a_un.a_val;
5978 break;
5979 }
5980 }
5981 else
5982 {
5983 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5984
5985 switch (aux->a_type)
5986 {
5987 case AT_PHDR:
5988 *phdr_memaddr = aux->a_un.a_val;
5989 break;
5990 case AT_PHNUM:
5991 *num_phdr = aux->a_un.a_val;
5992 break;
5993 }
5994 }
5995 }
5996
5997 close (fd);
5998
5999 if (*phdr_memaddr == 0 || *num_phdr == 0)
6000 {
6001 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6002 "phdr_memaddr = %ld, phdr_num = %d",
6003 (long) *phdr_memaddr, *num_phdr);
6004 return 2;
6005 }
6006
6007 return 0;
6008}
6009
6010/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6011
6012static CORE_ADDR
6013get_dynamic (const int pid, const int is_elf64)
6014{
6015 CORE_ADDR phdr_memaddr, relocation;
6016 int num_phdr, i;
6017 unsigned char *phdr_buf;
6018 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6019
6020 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6021 return 0;
6022
6023 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6024 phdr_buf = alloca (num_phdr * phdr_size);
6025
6026 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6027 return 0;
6028
6029 /* Compute relocation: it is expected to be 0 for "regular" executables,
6030 non-zero for PIE ones. */
6031 relocation = -1;
6032 for (i = 0; relocation == -1 && i < num_phdr; i++)
6033 if (is_elf64)
6034 {
6035 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6036
6037 if (p->p_type == PT_PHDR)
6038 relocation = phdr_memaddr - p->p_vaddr;
6039 }
6040 else
6041 {
6042 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6043
6044 if (p->p_type == PT_PHDR)
6045 relocation = phdr_memaddr - p->p_vaddr;
6046 }
6047
6048 if (relocation == -1)
6049 {
e237a7e2
JK
6050 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6051 any real world executables, including PIE executables, have always
6052 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6053 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6054 or present DT_DEBUG anyway (fpc binaries are statically linked).
6055
6056 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6057
6058 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6059
2268b414
JK
6060 return 0;
6061 }
6062
6063 for (i = 0; i < num_phdr; i++)
6064 {
6065 if (is_elf64)
6066 {
6067 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6068
6069 if (p->p_type == PT_DYNAMIC)
6070 return p->p_vaddr + relocation;
6071 }
6072 else
6073 {
6074 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6075
6076 if (p->p_type == PT_DYNAMIC)
6077 return p->p_vaddr + relocation;
6078 }
6079 }
6080
6081 return 0;
6082}
6083
6084/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6085 can be 0 if the inferior does not yet have the library list initialized.
6086 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6087 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6088
6089static CORE_ADDR
6090get_r_debug (const int pid, const int is_elf64)
6091{
6092 CORE_ADDR dynamic_memaddr;
6093 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6094 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6095 CORE_ADDR map = -1;
2268b414
JK
6096
6097 dynamic_memaddr = get_dynamic (pid, is_elf64);
6098 if (dynamic_memaddr == 0)
367ba2c2 6099 return map;
2268b414
JK
6100
6101 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6102 {
6103 if (is_elf64)
6104 {
6105 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
75f62ce7 6106#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6107 union
6108 {
6109 Elf64_Xword map;
6110 unsigned char buf[sizeof (Elf64_Xword)];
6111 }
6112 rld_map;
6113
6114 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6115 {
6116 if (linux_read_memory (dyn->d_un.d_val,
6117 rld_map.buf, sizeof (rld_map.buf)) == 0)
6118 return rld_map.map;
6119 else
6120 break;
6121 }
75f62ce7 6122#endif /* DT_MIPS_RLD_MAP */
2268b414 6123
367ba2c2
MR
6124 if (dyn->d_tag == DT_DEBUG && map == -1)
6125 map = dyn->d_un.d_val;
2268b414
JK
6126
6127 if (dyn->d_tag == DT_NULL)
6128 break;
6129 }
6130 else
6131 {
6132 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
75f62ce7 6133#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6134 union
6135 {
6136 Elf32_Word map;
6137 unsigned char buf[sizeof (Elf32_Word)];
6138 }
6139 rld_map;
6140
6141 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6142 {
6143 if (linux_read_memory (dyn->d_un.d_val,
6144 rld_map.buf, sizeof (rld_map.buf)) == 0)
6145 return rld_map.map;
6146 else
6147 break;
6148 }
75f62ce7 6149#endif /* DT_MIPS_RLD_MAP */
2268b414 6150
367ba2c2
MR
6151 if (dyn->d_tag == DT_DEBUG && map == -1)
6152 map = dyn->d_un.d_val;
2268b414
JK
6153
6154 if (dyn->d_tag == DT_NULL)
6155 break;
6156 }
6157
6158 dynamic_memaddr += dyn_size;
6159 }
6160
367ba2c2 6161 return map;
2268b414
JK
6162}
6163
6164/* Read one pointer from MEMADDR in the inferior. */
6165
6166static int
6167read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6168{
485f1ee4
PA
6169 int ret;
6170
6171 /* Go through a union so this works on either big or little endian
6172 hosts, when the inferior's pointer size is smaller than the size
6173 of CORE_ADDR. It is assumed the inferior's endianness is the
6174 same of the superior's. */
6175 union
6176 {
6177 CORE_ADDR core_addr;
6178 unsigned int ui;
6179 unsigned char uc;
6180 } addr;
6181
6182 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6183 if (ret == 0)
6184 {
6185 if (ptr_size == sizeof (CORE_ADDR))
6186 *ptr = addr.core_addr;
6187 else if (ptr_size == sizeof (unsigned int))
6188 *ptr = addr.ui;
6189 else
6190 gdb_assert_not_reached ("unhandled pointer size");
6191 }
6192 return ret;
2268b414
JK
6193}
6194
/* Byte offsets of the fields of the inferior's `struct r_debug' and
   `struct link_map' that linux_qxfer_libraries_svr4 needs; one table
   exists per pointer width.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6218
fb723180 6219/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
6220
6221static int
6222linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6223 unsigned const char *writebuf,
6224 CORE_ADDR offset, int len)
6225{
6226 char *document;
6227 unsigned document_len;
fe978cb0 6228 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6229 char filename[PATH_MAX];
6230 int pid, is_elf64;
6231
6232 static const struct link_map_offsets lmo_32bit_offsets =
6233 {
6234 0, /* r_version offset. */
6235 4, /* r_debug.r_map offset. */
6236 0, /* l_addr offset in link_map. */
6237 4, /* l_name offset in link_map. */
6238 8, /* l_ld offset in link_map. */
6239 12, /* l_next offset in link_map. */
6240 16 /* l_prev offset in link_map. */
6241 };
6242
6243 static const struct link_map_offsets lmo_64bit_offsets =
6244 {
6245 0, /* r_version offset. */
6246 8, /* r_debug.r_map offset. */
6247 0, /* l_addr offset in link_map. */
6248 8, /* l_name offset in link_map. */
6249 16, /* l_ld offset in link_map. */
6250 24, /* l_next offset in link_map. */
6251 32 /* l_prev offset in link_map. */
6252 };
6253 const struct link_map_offsets *lmo;
214d508e 6254 unsigned int machine;
b1fbec62
GB
6255 int ptr_size;
6256 CORE_ADDR lm_addr = 0, lm_prev = 0;
6257 int allocated = 1024;
6258 char *p;
6259 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6260 int header_done = 0;
2268b414
JK
6261
6262 if (writebuf != NULL)
6263 return -2;
6264 if (readbuf == NULL)
6265 return -1;
6266
0bfdf32f 6267 pid = lwpid_of (current_thread);
2268b414 6268 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6269 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 6270 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 6271 ptr_size = is_elf64 ? 8 : 4;
2268b414 6272
b1fbec62
GB
6273 while (annex[0] != '\0')
6274 {
6275 const char *sep;
6276 CORE_ADDR *addrp;
6277 int len;
2268b414 6278
b1fbec62
GB
6279 sep = strchr (annex, '=');
6280 if (sep == NULL)
6281 break;
0c5bf5a9 6282
b1fbec62 6283 len = sep - annex;
61012eef 6284 if (len == 5 && startswith (annex, "start"))
b1fbec62 6285 addrp = &lm_addr;
61012eef 6286 else if (len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6287 addrp = &lm_prev;
6288 else
6289 {
6290 annex = strchr (sep, ';');
6291 if (annex == NULL)
6292 break;
6293 annex++;
6294 continue;
6295 }
6296
6297 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6298 }
b1fbec62
GB
6299
6300 if (lm_addr == 0)
2268b414 6301 {
b1fbec62
GB
6302 int r_version = 0;
6303
6304 if (priv->r_debug == 0)
6305 priv->r_debug = get_r_debug (pid, is_elf64);
6306
6307 /* We failed to find DT_DEBUG. Such situation will not change
6308 for this inferior - do not retry it. Report it to GDB as
6309 E01, see for the reasons at the GDB solib-svr4.c side. */
6310 if (priv->r_debug == (CORE_ADDR) -1)
6311 return -1;
6312
6313 if (priv->r_debug != 0)
2268b414 6314 {
b1fbec62
GB
6315 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6316 (unsigned char *) &r_version,
6317 sizeof (r_version)) != 0
6318 || r_version != 1)
6319 {
6320 warning ("unexpected r_debug version %d", r_version);
6321 }
6322 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6323 &lm_addr, ptr_size) != 0)
6324 {
6325 warning ("unable to read r_map from 0x%lx",
6326 (long) priv->r_debug + lmo->r_map_offset);
6327 }
2268b414 6328 }
b1fbec62 6329 }
2268b414 6330
b1fbec62
GB
6331 document = xmalloc (allocated);
6332 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6333 p = document + strlen (document);
6334
6335 while (lm_addr
6336 && read_one_ptr (lm_addr + lmo->l_name_offset,
6337 &l_name, ptr_size) == 0
6338 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6339 &l_addr, ptr_size) == 0
6340 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6341 &l_ld, ptr_size) == 0
6342 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6343 &l_prev, ptr_size) == 0
6344 && read_one_ptr (lm_addr + lmo->l_next_offset,
6345 &l_next, ptr_size) == 0)
6346 {
6347 unsigned char libname[PATH_MAX];
6348
6349 if (lm_prev != l_prev)
2268b414 6350 {
b1fbec62
GB
6351 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6352 (long) lm_prev, (long) l_prev);
6353 break;
2268b414
JK
6354 }
6355
d878444c
JK
6356 /* Ignore the first entry even if it has valid name as the first entry
6357 corresponds to the main executable. The first entry should not be
6358 skipped if the dynamic loader was loaded late by a static executable
6359 (see solib-svr4.c parameter ignore_first). But in such case the main
6360 executable does not have PT_DYNAMIC present and this function already
6361 exited above due to failed get_r_debug. */
6362 if (lm_prev == 0)
2268b414 6363 {
d878444c
JK
6364 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6365 p = p + strlen (p);
6366 }
6367 else
6368 {
6369 /* Not checking for error because reading may stop before
6370 we've got PATH_MAX worth of characters. */
6371 libname[0] = '\0';
6372 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6373 libname[sizeof (libname) - 1] = '\0';
6374 if (libname[0] != '\0')
2268b414 6375 {
d878444c
JK
6376 /* 6x the size for xml_escape_text below. */
6377 size_t len = 6 * strlen ((char *) libname);
6378 char *name;
2268b414 6379
d878444c
JK
6380 if (!header_done)
6381 {
6382 /* Terminate `<library-list-svr4'. */
6383 *p++ = '>';
6384 header_done = 1;
6385 }
2268b414 6386
d878444c
JK
6387 while (allocated < p - document + len + 200)
6388 {
6389 /* Expand to guarantee sufficient storage. */
6390 uintptr_t document_len = p - document;
2268b414 6391
d878444c
JK
6392 document = xrealloc (document, 2 * allocated);
6393 allocated *= 2;
6394 p = document + document_len;
6395 }
6396
6397 name = xml_escape_text ((char *) libname);
6398 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6399 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6400 name, (unsigned long) lm_addr,
6401 (unsigned long) l_addr, (unsigned long) l_ld);
6402 free (name);
6403 }
0afae3cf 6404 }
b1fbec62
GB
6405
6406 lm_prev = lm_addr;
6407 lm_addr = l_next;
2268b414
JK
6408 }
6409
b1fbec62
GB
6410 if (!header_done)
6411 {
6412 /* Empty list; terminate `<library-list-svr4'. */
6413 strcpy (p, "/>");
6414 }
6415 else
6416 strcpy (p, "</library-list-svr4>");
6417
2268b414
JK
6418 document_len = strlen (document);
6419 if (offset < document_len)
6420 document_len -= offset;
6421 else
6422 document_len = 0;
6423 if (len > document_len)
6424 len = document_len;
6425
6426 memcpy (readbuf, document + offset, len);
6427 xfree (document);
6428
6429 return len;
6430}
6431
9accd112
MM
6432#ifdef HAVE_LINUX_BTRACE
6433
969c39fb 6434/* See to_enable_btrace target method. */
9accd112
MM
6435
6436static struct btrace_target_info *
f4abbc16 6437linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
9accd112
MM
6438{
6439 struct btrace_target_info *tinfo;
6440
f4abbc16 6441 tinfo = linux_enable_btrace (ptid, conf);
3aee8918 6442
d68e53f4 6443 if (tinfo != NULL && tinfo->ptr_bits == 0)
3aee8918
PA
6444 {
6445 struct thread_info *thread = find_thread_ptid (ptid);
6446 struct regcache *regcache = get_thread_regcache (thread, 0);
6447
6448 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6449 }
9accd112
MM
6450
6451 return tinfo;
6452}
6453
969c39fb 6454/* See to_disable_btrace target method. */
9accd112 6455
969c39fb
MM
6456static int
6457linux_low_disable_btrace (struct btrace_target_info *tinfo)
6458{
6459 enum btrace_error err;
6460
6461 err = linux_disable_btrace (tinfo);
6462 return (err == BTRACE_ERR_NONE ? 0 : -1);
6463}
6464
b20a6524
MM
6465/* Encode an Intel(R) Processor Trace configuration. */
6466
6467static void
6468linux_low_encode_pt_config (struct buffer *buffer,
6469 const struct btrace_data_pt_config *config)
6470{
6471 buffer_grow_str (buffer, "<pt-config>\n");
6472
6473 switch (config->cpu.vendor)
6474 {
6475 case CV_INTEL:
6476 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6477 "model=\"%u\" stepping=\"%u\"/>\n",
6478 config->cpu.family, config->cpu.model,
6479 config->cpu.stepping);
6480 break;
6481
6482 default:
6483 break;
6484 }
6485
6486 buffer_grow_str (buffer, "</pt-config>\n");
6487}
6488
6489/* Encode a raw buffer. */
6490
6491static void
6492linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6493 unsigned int size)
6494{
6495 if (size == 0)
6496 return;
6497
6498 /* We use hex encoding - see common/rsp-low.h. */
6499 buffer_grow_str (buffer, "<raw>\n");
6500
6501 while (size-- > 0)
6502 {
6503 char elem[2];
6504
6505 elem[0] = tohex ((*data >> 4) & 0xf);
6506 elem[1] = tohex (*data++ & 0xf);
6507
6508 buffer_grow (buffer, elem, 2);
6509 }
6510
6511 buffer_grow_str (buffer, "</raw>\n");
6512}
6513
969c39fb
MM
6514/* See to_read_btrace target method. */
6515
6516static int
9accd112
MM
6517linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6518 int type)
6519{
734b0e4b 6520 struct btrace_data btrace;
9accd112 6521 struct btrace_block *block;
969c39fb 6522 enum btrace_error err;
9accd112
MM
6523 int i;
6524
734b0e4b
MM
6525 btrace_data_init (&btrace);
6526
969c39fb
MM
6527 err = linux_read_btrace (&btrace, tinfo, type);
6528 if (err != BTRACE_ERR_NONE)
6529 {
6530 if (err == BTRACE_ERR_OVERFLOW)
6531 buffer_grow_str0 (buffer, "E.Overflow.");
6532 else
6533 buffer_grow_str0 (buffer, "E.Generic Error.");
6534
b20a6524 6535 goto err;
969c39fb 6536 }
9accd112 6537
734b0e4b
MM
6538 switch (btrace.format)
6539 {
6540 case BTRACE_FORMAT_NONE:
6541 buffer_grow_str0 (buffer, "E.No Trace.");
b20a6524 6542 goto err;
734b0e4b
MM
6543
6544 case BTRACE_FORMAT_BTS:
6545 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6546 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 6547
734b0e4b
MM
6548 for (i = 0;
6549 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6550 i++)
6551 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6552 paddress (block->begin), paddress (block->end));
9accd112 6553
734b0e4b
MM
6554 buffer_grow_str0 (buffer, "</btrace>\n");
6555 break;
6556
b20a6524
MM
6557 case BTRACE_FORMAT_PT:
6558 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6559 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6560 buffer_grow_str (buffer, "<pt>\n");
6561
6562 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6563
b20a6524
MM
6564 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6565 btrace.variant.pt.size);
6566
6567 buffer_grow_str (buffer, "</pt>\n");
6568 buffer_grow_str0 (buffer, "</btrace>\n");
6569 break;
6570
6571 default:
6572 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6573 goto err;
734b0e4b 6574 }
969c39fb 6575
734b0e4b 6576 btrace_data_fini (&btrace);
969c39fb 6577 return 0;
b20a6524
MM
6578
6579err:
6580 btrace_data_fini (&btrace);
6581 return -1;
9accd112 6582}
f4abbc16
MM
6583
6584/* See to_btrace_conf target method. */
6585
6586static int
6587linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6588 struct buffer *buffer)
6589{
6590 const struct btrace_config *conf;
6591
6592 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6593 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6594
6595 conf = linux_btrace_conf (tinfo);
6596 if (conf != NULL)
6597 {
6598 switch (conf->format)
6599 {
6600 case BTRACE_FORMAT_NONE:
6601 break;
6602
6603 case BTRACE_FORMAT_BTS:
d33501a5
MM
6604 buffer_xml_printf (buffer, "<bts");
6605 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6606 buffer_xml_printf (buffer, " />\n");
f4abbc16 6607 break;
b20a6524
MM
6608
6609 case BTRACE_FORMAT_PT:
6610 buffer_xml_printf (buffer, "<pt");
6611 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6612 buffer_xml_printf (buffer, "/>\n");
6613 break;
f4abbc16
MM
6614 }
6615 }
6616
6617 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6618 return 0;
6619}
9accd112
MM
6620#endif /* HAVE_LINUX_BTRACE */
6621
7b669087
GB
6622/* See nat/linux-nat.h. */
6623
6624ptid_t
6625current_lwp_ptid (void)
6626{
6627 return ptid_of (current_thread);
6628}
6629
ce3a066d
DJ
6630static struct target_ops linux_target_ops = {
6631 linux_create_inferior,
6632 linux_attach,
6633 linux_kill,
6ad8ae5c 6634 linux_detach,
8336d594 6635 linux_mourn,
444d6139 6636 linux_join,
ce3a066d
DJ
6637 linux_thread_alive,
6638 linux_resume,
6639 linux_wait,
6640 linux_fetch_registers,
6641 linux_store_registers,
90d74c30 6642 linux_prepare_to_access_memory,
0146f85b 6643 linux_done_accessing_memory,
ce3a066d
DJ
6644 linux_read_memory,
6645 linux_write_memory,
2f2893d9 6646 linux_look_up_symbols,
ef57601b 6647 linux_request_interrupt,
aa691b87 6648 linux_read_auxv,
802e8e6d 6649 linux_supports_z_point_type,
d993e290
PA
6650 linux_insert_point,
6651 linux_remove_point,
3e572f71
PA
6652 linux_stopped_by_sw_breakpoint,
6653 linux_supports_stopped_by_sw_breakpoint,
6654 linux_stopped_by_hw_breakpoint,
6655 linux_supports_stopped_by_hw_breakpoint,
45614f15 6656 linux_supports_conditional_breakpoints,
e013ee27
OF
6657 linux_stopped_by_watchpoint,
6658 linux_stopped_data_address,
db0dfaa0
LM
6659#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6660 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6661 && defined(PT_TEXT_END_ADDR)
52fb6437 6662 linux_read_offsets,
dae5f5cf
DJ
6663#else
6664 NULL,
6665#endif
6666#ifdef USE_THREAD_DB
6667 thread_db_get_tls_address,
6668#else
6669 NULL,
52fb6437 6670#endif
efcbbd14 6671 linux_qxfer_spu,
59a016f0 6672 hostio_last_error_from_errno,
07e059b5 6673 linux_qxfer_osdata,
4aa995e1 6674 linux_xfer_siginfo,
bd99dc85
PA
6675 linux_supports_non_stop,
6676 linux_async,
6677 linux_start_non_stop,
cdbfd419 6678 linux_supports_multi_process,
89245bc0
DB
6679 linux_supports_fork_events,
6680 linux_supports_vfork_events,
de0d863e 6681 linux_handle_new_gdb_connection,
cdbfd419 6682#ifdef USE_THREAD_DB
dc146f7c 6683 thread_db_handle_monitor_command,
cdbfd419 6684#else
dc146f7c 6685 NULL,
cdbfd419 6686#endif
d26e3629 6687 linux_common_core_of_thread,
78d85199 6688 linux_read_loadmap,
219f2f23
PA
6689 linux_process_qsupported,
6690 linux_supports_tracepoints,
6691 linux_read_pc,
8336d594
PA
6692 linux_write_pc,
6693 linux_thread_stopped,
7984d532 6694 NULL,
711e434b 6695 linux_pause_all,
7984d532 6696 linux_unpause_all,
fa593d66 6697 linux_stabilize_threads,
6a271cae 6698 linux_install_fast_tracepoint_jump_pad,
03583c20
UW
6699 linux_emit_ops,
6700 linux_supports_disable_randomization,
405f8e94 6701 linux_get_min_fast_tracepoint_insn_len,
2268b414 6702 linux_qxfer_libraries_svr4,
d1feda86 6703 linux_supports_agent,
9accd112
MM
6704#ifdef HAVE_LINUX_BTRACE
6705 linux_supports_btrace,
6706 linux_low_enable_btrace,
969c39fb 6707 linux_low_disable_btrace,
9accd112 6708 linux_low_read_btrace,
f4abbc16 6709 linux_low_btrace_conf,
9accd112
MM
6710#else
6711 NULL,
6712 NULL,
6713 NULL,
6714 NULL,
f4abbc16 6715 NULL,
9accd112 6716#endif
c2d6af84 6717 linux_supports_range_stepping,
e57f1de3 6718 linux_proc_pid_to_exec_file,
14d2069a
GB
6719 linux_mntns_open_cloexec,
6720 linux_mntns_unlink,
6721 linux_mntns_readlink,
ce3a066d
DJ
6722};
6723
0d62e5e8
DJ
/* One-time signal setup for gdbserver itself.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6733
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's table; the array is terminated by an
   entry with a negative size.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;
  info->num_regsets = count;
}
#endif
6744
da6d8c04
DJ
6745void
6746initialize_low (void)
6747{
bd99dc85
PA
6748 struct sigaction sigchld_action;
6749 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 6750 set_target_ops (&linux_target_ops);
611cb4a5
DJ
6751 set_breakpoint_data (the_low_target.breakpoint,
6752 the_low_target.breakpoint_len);
0d62e5e8 6753 linux_init_signals ();
aa7c7447 6754 linux_ptrace_init_warnings ();
bd99dc85
PA
6755
6756 sigchld_action.sa_handler = sigchld_handler;
6757 sigemptyset (&sigchld_action.sa_mask);
6758 sigchld_action.sa_flags = SA_RESTART;
6759 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
6760
6761 initialize_low_arch ();
89245bc0
DB
6762
6763 linux_check_ptrace_features ();
da6d8c04 6764}