]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Linux gdbserver confused when event randomization picks process exit event
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
957f3f49
DE
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
14d2069a 56#include "nat/linux-namespaces.h"
efcbbd14
UW
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
da6d8c04 61
03583c20
UW
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
fd462a61
DJ
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
ec8ebe72
DE
73#ifndef W_STOPCODE
74#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75#endif
76
1a981360
PA
77/* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79#ifndef __SIGRTMIN
80#define __SIGRTMIN 32
81#endif
82
db0dfaa0
LM
83/* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86#if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89#if defined(__mcoldfire__)
90/* These are still undefined in 3.10 kernels. */
91#define PT_TEXT_ADDR 49*4
92#define PT_DATA_ADDR 50*4
93#define PT_TEXT_END_ADDR 51*4
94/* BFIN already defines these since at least 2.6.32 kernels. */
95#elif defined(BFIN)
96#define PT_TEXT_ADDR 220
97#define PT_TEXT_END_ADDR 224
98#define PT_DATA_ADDR 228
99/* These are still undefined in 3.10 kernels. */
100#elif defined(__TMS320C6X__)
101#define PT_TEXT_ADDR (0x10000*4)
102#define PT_DATA_ADDR (0x10004*4)
103#define PT_TEXT_END_ADDR (0x10008*4)
104#endif
105#endif
106
9accd112 107#ifdef HAVE_LINUX_BTRACE
125f8a3d 108# include "nat/linux-btrace.h"
734b0e4b 109# include "btrace-common.h"
9accd112
MM
110#endif
111
8365dcf5
TJB
112#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition of the 32-bit auxv
   entry for hosts whose <elf.h> lacks Elf32_auxv_t.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
125#endif
126
127#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition of the 64-bit auxv
   entry for hosts whose <elf.h> lacks Elf64_auxv_t.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
140#endif
141
ded48a5e
YQ
142/* Does the current host support PTRACE_GETREGSET? */
143int have_ptrace_getregset = -1;
144
cff068da
GB
145/* LWP accessors. */
146
147/* See nat/linux-nat.h. */
148
149ptid_t
150ptid_of_lwp (struct lwp_info *lwp)
151{
152 return ptid_of (get_lwp_thread (lwp));
153}
154
155/* See nat/linux-nat.h. */
156
4b134ca1
GB
157void
158lwp_set_arch_private_info (struct lwp_info *lwp,
159 struct arch_lwp_info *info)
160{
161 lwp->arch_private = info;
162}
163
164/* See nat/linux-nat.h. */
165
166struct arch_lwp_info *
167lwp_arch_private_info (struct lwp_info *lwp)
168{
169 return lwp->arch_private;
170}
171
172/* See nat/linux-nat.h. */
173
cff068da
GB
174int
175lwp_is_stopped (struct lwp_info *lwp)
176{
177 return lwp->stopped;
178}
179
180/* See nat/linux-nat.h. */
181
182enum target_stop_reason
183lwp_stop_reason (struct lwp_info *lwp)
184{
185 return lwp->stop_reason;
186}
187
05044653
PA
188/* A list of all unknown processes which receive stop signals. Some
189 other process will presumably claim each of these as forked
190 children momentarily. */
24a09b5f 191
05044653
PA
192struct simple_pid_list
193{
194 /* The process ID. */
195 int pid;
196
197 /* The status as reported by waitpid. */
198 int status;
199
200 /* Next in chain. */
201 struct simple_pid_list *next;
202};
203struct simple_pid_list *stopped_pids;
204
205/* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
207
208static void
209add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
210{
211 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
212
213 new_pid->pid = pid;
214 new_pid->status = status;
215 new_pid->next = *listp;
216 *listp = new_pid;
217}
218
219static int
220pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
221{
222 struct simple_pid_list **p;
223
224 for (p = listp; *p != NULL; p = &(*p)->next)
225 if ((*p)->pid == pid)
226 {
227 struct simple_pid_list *next = (*p)->next;
228
229 *statusp = (*p)->status;
230 xfree (*p);
231 *p = next;
232 return 1;
233 }
234 return 0;
235}
24a09b5f 236
bde24c0a
PA
237enum stopping_threads_kind
238 {
239 /* Not stopping threads presently. */
240 NOT_STOPPING_THREADS,
241
242 /* Stopping threads. */
243 STOPPING_THREADS,
244
245 /* Stopping and suspending threads. */
246 STOPPING_AND_SUSPENDING_THREADS
247 };
248
249/* This is set while stop_all_lwps is in effect. */
250enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
251
252/* FIXME make into a target method? */
24a09b5f 253int using_threads = 1;
24a09b5f 254
fa593d66
PA
255/* True if we're presently stabilizing threads (moving them out of
256 jump pads). */
257static int stabilizing_threads;
258
2acc282a 259static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 260 int step, int signal, siginfo_t *info);
2bd7c093 261static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
262static void stop_all_lwps (int suspend, struct lwp_info *except);
263static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
264static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
265 int *wstat, int options);
95954743 266static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 267static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 268static int linux_stopped_by_watchpoint (void);
95954743 269static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 270static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 271static void proceed_all_lwps (void);
d50171e4 272static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
273static int kill_lwp (unsigned long lwpid, int signo);
274
582511be
PA
275/* When the event-loop is doing a step-over, this points at the thread
276 being stepped. */
277ptid_t step_over_bkpt;
278
d50171e4
PA
279/* True if the low target can hardware single-step. Such targets
280 don't need a BREAKPOINT_REINSERT_ADDR callback. */
281
282static int
283can_hardware_single_step (void)
284{
285 return (the_low_target.breakpoint_reinsert_addr == NULL);
286}
287
288/* True if the low target supports memory breakpoints. If so, we'll
289 have a GET_PC implementation. */
290
291static int
292supports_breakpoints (void)
293{
294 return (the_low_target.get_pc != NULL);
295}
0d62e5e8 296
fa593d66
PA
297/* Returns true if this target can support fast tracepoints. This
298 does not mean that the in-process agent has been loaded in the
299 inferior. */
300
301static int
302supports_fast_tracepoints (void)
303{
304 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
305}
306
c2d6af84
PA
307/* True if LWP is stopped in its stepping range. */
308
309static int
310lwp_in_step_range (struct lwp_info *lwp)
311{
312 CORE_ADDR pc = lwp->stop_pc;
313
314 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
315}
316
0d62e5e8
DJ
317struct pending_signals
318{
319 int signal;
32ca6d61 320 siginfo_t info;
0d62e5e8
DJ
321 struct pending_signals *prev;
322};
611cb4a5 323
bd99dc85
PA
324/* The read/write ends of the pipe registered as waitable file in the
325 event loop. */
326static int linux_event_pipe[2] = { -1, -1 };
327
328/* True if we're currently in async mode. */
329#define target_is_async_p() (linux_event_pipe[0] != -1)
330
02fc4de7 331static void send_sigstop (struct lwp_info *lwp);
fa96cb38 332static void wait_for_sigstop (void);
bd99dc85 333
d0722149
DE
/* Classify HEADER: return 1 if it is a 64-bit ELF header, 0 if it is
   a 32-bit ELF header, and -1 if it is not an ELF header at all.
   *MACHINE receives e_machine for ELF headers, EM_NONE otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF file at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
351
/* Return non-zero if FILE is a 64-bit ELF file, zero if the file is
   not a 64-bit ELF file, and -1 if the file is not accessible or
   doesn't exist.  *MACHINE receives the ELF e_machine field.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  ssize_t nread = read (fd, &header, sizeof (header));
  close (fd);

  /* A short read means the file is too small to hold an ELF header,
     so it cannot be a 64-bit ELF file.  */
  if (nread != (ssize_t) sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
375
be07f1a2
PA
376/* Accepts an integer PID; Returns true if the executable PID is
377 running is a 64-bit ELF file.. */
378
379int
214d508e 380linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 381{
d8d2a3ee 382 char file[PATH_MAX];
be07f1a2
PA
383
384 sprintf (file, "/proc/%d/exe", pid);
214d508e 385 return elf_64_file_p (file, machine);
be07f1a2
PA
386}
387
bd99dc85
PA
388static void
389delete_lwp (struct lwp_info *lwp)
390{
fa96cb38
PA
391 struct thread_info *thr = get_lwp_thread (lwp);
392
393 if (debug_threads)
394 debug_printf ("deleting %ld\n", lwpid_of (thr));
395
396 remove_thread (thr);
aa5ca48f 397 free (lwp->arch_private);
bd99dc85
PA
398 free (lwp);
399}
400
95954743
PA
401/* Add a process to the common process list, and set its private
402 data. */
403
404static struct process_info *
405linux_add_process (int pid, int attached)
406{
407 struct process_info *proc;
408
95954743 409 proc = add_process (pid, attached);
fe978cb0 410 proc->priv = xcalloc (1, sizeof (*proc->priv));
95954743 411
aa5ca48f 412 if (the_low_target.new_process != NULL)
fe978cb0 413 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 414
95954743
PA
415 return proc;
416}
417
582511be
PA
418static CORE_ADDR get_pc (struct lwp_info *lwp);
419
bd99dc85 420/* Handle a GNU/Linux extended wait response. If we see a clone
de0d863e
DB
421 event, we need to add the new LWP to our list (and return 0 so as
422 not to report the trap to higher layers). */
0d62e5e8 423
de0d863e
DB
static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  /* Return value convention: 0 means "report this event to the
     higher layers", 1 means "swallow it" (see the comment above this
     function).  */
  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  /* A fork child is a new process, so its ptid uses NEW_PID
	     for both the pid and lwp components.  */
	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      /* From here down we are handling PTRACE_EVENT_CLONE: a new
	 thread in the same process.  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  /* Any other extended event value is unexpected here.  */
  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
566
d50171e4
PA
567/* Return the PC as read from the regcache of LWP, without any
568 adjustment. */
569
570static CORE_ADDR
571get_pc (struct lwp_info *lwp)
572{
0bfdf32f 573 struct thread_info *saved_thread;
d50171e4
PA
574 struct regcache *regcache;
575 CORE_ADDR pc;
576
577 if (the_low_target.get_pc == NULL)
578 return 0;
579
0bfdf32f
GB
580 saved_thread = current_thread;
581 current_thread = get_lwp_thread (lwp);
d50171e4 582
0bfdf32f 583 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
584 pc = (*the_low_target.get_pc) (regcache);
585
586 if (debug_threads)
87ce2a04 587 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 588
0bfdf32f 589 current_thread = saved_thread;
d50171e4
PA
590 return pc;
591}
592
593/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
594 The SIGTRAP could mean several things.
595
596 On i386, where decr_pc_after_break is non-zero:
582511be
PA
597
598 If we were single-stepping this process using PTRACE_SINGLESTEP, we
599 will get only the one SIGTRAP. The value of $eip will be the next
600 instruction. If the instruction we stepped over was a breakpoint,
601 we need to decrement the PC.
602
0d62e5e8
DJ
603 If we continue the process using PTRACE_CONT, we will get a
604 SIGTRAP when we hit a breakpoint. The value of $eip will be
605 the instruction after the breakpoint (i.e. needs to be
606 decremented). If we report the SIGTRAP to GDB, we must also
582511be 607 report the undecremented PC. If the breakpoint is removed, we
0d62e5e8
DJ
608 must resume at the decremented PC.
609
582511be
PA
610 On a non-decr_pc_after_break machine with hardware or kernel
611 single-step:
612
613 If we either single-step a breakpoint instruction, or continue and
614 hit a breakpoint instruction, our PC will point at the breakpoint
0d62e5e8
DJ
615 instruction. */
616
582511be
PA
/* Check whether LWP's SIGTRAP stop was caused by a software or
   hardware breakpoint.  If so, record the (possibly adjusted) stop PC
   and stop reason in LWP and return 1; otherwise return 0.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  /* Where a software breakpoint trap would leave the PC after the
     architecture's decrement adjustment.  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: ask the kernel why the SIGTRAP happened.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware traps don't move the PC; no adjustment.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* Fallback path: infer the cause by inspecting what is inserted at
     the stop address.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 744
b3312d80 745static struct lwp_info *
95954743 746add_lwp (ptid_t ptid)
611cb4a5 747{
54a0b537 748 struct lwp_info *lwp;
0d62e5e8 749
00db26fa
PA
750 lwp = (struct lwp_info *) xcalloc (1, sizeof (*lwp));
751
752 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 753
aa5ca48f 754 if (the_low_target.new_thread != NULL)
34c703da 755 the_low_target.new_thread (lwp);
aa5ca48f 756
f7667f0d 757 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 758
54a0b537 759 return lwp;
0d62e5e8 760}
611cb4a5 761
da6d8c04
DJ
762/* Start an inferior process and returns its pid.
763 ALLARGS is a vector of program-name and args. */
764
ce3a066d
DJ
static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; restored via cleanup
     after the fork.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; vfork shares the address space until
     exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: set ourselves up to be traced, then exec PROGRAM.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try PROGRAM as given; fall back to a PATH search only when
	 the plain exec failed with ENOENT.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: restore our own personality and register the child.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* PTRACE_SETOPTIONS must wait until the child's first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
828
c06cbd92
YQ
829/* Implement the arch_setup target_ops method. */
830
831static void
832linux_arch_setup (void)
833{
834 the_low_target.arch_setup ();
835}
836
8784d563
PA
837/* Attach to an inferior process. Returns 0 on success, ERRNO on
838 error. */
da6d8c04 839
7ae1a6a6
PA
/* Attach to the LWP specified by PTID.  Returns 0 on success, or an
   errno value if the PTRACE_ATTACH failed.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
921
8784d563
PA
922/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
923 already attached. Returns true if a new LWP is found, false
924 otherwise. */
925
926static int
927attach_proc_task_lwp_callback (ptid_t ptid)
928{
929 /* Is this a new thread? */
930 if (find_thread_ptid (ptid) == NULL)
931 {
932 int lwpid = ptid_get_lwp (ptid);
933 int err;
934
935 if (debug_threads)
936 debug_printf ("Found new lwp %d\n", lwpid);
937
938 err = linux_attach_lwp (ptid);
939
940 /* Be quiet if we simply raced with the thread exiting. EPERM
941 is returned if the thread's task still exists, and is marked
942 as exited or zombie, as well as other conditions, so in that
943 case, confirm the status in /proc/PID/status. */
944 if (err == ESRCH
945 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
946 {
947 if (debug_threads)
948 {
949 debug_printf ("Cannot attach to lwp %d: "
950 "thread is gone (%d: %s)\n",
951 lwpid, err, strerror (err));
952 }
953 }
954 else if (err != 0)
955 {
956 warning (_("Cannot attach to lwp %d: %s"),
957 lwpid,
958 linux_ptrace_attach_fail_reason_string (ptid, err));
959 }
960
961 return 1;
962 }
963 return 0;
964}
965
e3deef73
LM
966/* Attach to PID. If PID is the tgid, attach to it and all
967 of its threads. */
968
c52daf70 969static int
a1928bad 970linux_attach (unsigned long pid)
0d62e5e8 971{
7ae1a6a6
PA
972 ptid_t ptid = ptid_build (pid, pid, 0);
973 int err;
974
e3deef73
LM
975 /* Attach to PID. We will check for other threads
976 soon. */
7ae1a6a6
PA
977 err = linux_attach_lwp (ptid);
978 if (err != 0)
979 error ("Cannot attach to process %ld: %s",
8784d563 980 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
7ae1a6a6 981
55d7b841 982 linux_add_process (pid, 1);
0d62e5e8 983
bd99dc85
PA
984 if (!non_stop)
985 {
8336d594
PA
986 struct thread_info *thread;
987
988 /* Don't ignore the initial SIGSTOP if we just attached to this
989 process. It will be collected by wait shortly. */
990 thread = find_thread_ptid (ptid_build (pid, pid, 0));
991 thread->last_resume_kind = resume_stop;
bd99dc85 992 }
0d62e5e8 993
8784d563
PA
994 /* We must attach to every LWP. If /proc is mounted, use that to
995 find them now. On the one hand, the inferior may be using raw
996 clone instead of using pthreads. On the other hand, even if it
997 is using pthreads, GDB may not be connected yet (thread_db needs
998 to do symbol lookups, through qSymbol). Also, thread_db walks
999 structures in the inferior's address space to find the list of
1000 threads/LWPs, and those structures may well be corrupted. Note
1001 that once thread_db is loaded, we'll still use it to list threads
1002 and associate pthread info with each LWP. */
1003 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
95954743
PA
1004 return 0;
1005}
/* Accumulator used by second_thread_of_pid_p: counts how many threads
   belonging to PID have been seen so far.  */

struct counter
{
  int pid;	/* The process whose threads are being counted.  */
  int count;	/* Number of matching threads seen so far.  */
};
1012
1013static int
1014second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1015{
1016 struct counter *counter = args;
1017
1018 if (ptid_get_pid (entry->id) == counter->pid)
1019 {
1020 if (++counter->count > 1)
1021 return 1;
1022 }
d61ddec4 1023
da6d8c04
DJ
1024 return 0;
1025}
1026
95954743 1027static int
fa96cb38 1028last_thread_of_process_p (int pid)
95954743 1029{
95954743 1030 struct counter counter = { pid , 0 };
da6d8c04 1031
95954743
PA
1032 return (find_inferior (&all_threads,
1033 second_thread_of_pid_p, &counter) == NULL);
1034}
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL; see the comment
   below for why both are used.  Does not wait for the LWP to die.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Capture errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      /* Same errno-preservation dance for the second attempt.  */
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
/* Kill LWP and wait for it to die.  Loops killing and reaping until
   the kernel reports something other than a stop, so that no wait
   status is left pending on the kernel side.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1123
da84f473
PA
1124/* Callback for `find_inferior'. Kills an lwp of a given process,
1125 except the leader. */
95954743
PA
1126
1127static int
da84f473 1128kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 1129{
0d62e5e8 1130 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1131 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1132 int pid = * (int *) args;
1133
1134 if (ptid_get_pid (entry->id) != pid)
1135 return 0;
0d62e5e8 1136
fd500816
DJ
1137 /* We avoid killing the first thread here, because of a Linux kernel (at
1138 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1139 the children get a chance to be reaped, it will remain a zombie
1140 forever. */
95954743 1141
d86d4aaf 1142 if (lwpid_of (thread) == pid)
95954743
PA
1143 {
1144 if (debug_threads)
87ce2a04
DE
1145 debug_printf ("lkop: is last of process %s\n",
1146 target_pid_to_str (entry->id));
95954743
PA
1147 return 0;
1148 }
fd500816 1149
e76126e8 1150 kill_wait_lwp (lwp);
95954743 1151 return 0;
da6d8c04
DJ
1152}
/* Kill the inferior process PID and all of its LWPs.  Returns 0 on
   success, -1 if PID is not a known process.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill all LWPs of PID except the leader (see the kernel-bug
     comment in kill_one_lwp_callback).  */
  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal that should be passed on.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal settings when known.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      /* Return the host signal number, suitable for PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
1270
95954743
PA
1271static int
1272linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
1273{
1274 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1275 struct lwp_info *lwp = get_thread_lwp (thread);
95954743 1276 int pid = * (int *) args;
9b224c5e 1277 int sig;
95954743
PA
1278
1279 if (ptid_get_pid (entry->id) != pid)
1280 return 0;
6ad8ae5c 1281
9b224c5e 1282 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1283 if (lwp->stop_expected)
ae13219e 1284 {
9b224c5e 1285 if (debug_threads)
87ce2a04 1286 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1287 target_pid_to_str (ptid_of (thread)));
9b224c5e 1288
d86d4aaf 1289 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1290 lwp->stop_expected = 0;
ae13219e
DJ
1291 }
1292
1293 /* Flush any pending changes to the process's registers. */
d86d4aaf 1294 regcache_invalidate_thread (thread);
ae13219e 1295
9b224c5e
PA
1296 /* Pass on any pending signal for this thread. */
1297 sig = get_detach_signal (thread);
1298
ae13219e 1299 /* Finally, let it resume. */
82bfbe7e
PA
1300 if (the_low_target.prepare_to_resume != NULL)
1301 the_low_target.prepare_to_resume (lwp);
d86d4aaf 1302 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1303 (PTRACE_TYPE_ARG4) (long) sig) < 0)
9b224c5e 1304 error (_("Can't detach %s: %s"),
d86d4aaf 1305 target_pid_to_str (ptid_of (thread)),
9b224c5e 1306 strerror (errno));
bd99dc85
PA
1307
1308 delete_lwp (lwp);
95954743 1309 return 0;
6ad8ae5c
DJ
1310}
1311
95954743
PA
1312static int
1313linux_detach (int pid)
1314{
1315 struct process_info *process;
1316
1317 process = find_process_pid (pid);
1318 if (process == NULL)
1319 return -1;
1320
f9e39928
PA
1321 /* Stop all threads before detaching. First, ptrace requires that
1322 the thread is stopped to sucessfully detach. Second, thread_db
1323 may need to uninstall thread event breakpoints from memory, which
1324 only works with a stopped process anyway. */
7984d532 1325 stop_all_lwps (0, NULL);
f9e39928 1326
ca5c370d 1327#ifdef USE_THREAD_DB
8336d594 1328 thread_db_detach (process);
ca5c370d
PA
1329#endif
1330
fa593d66
PA
1331 /* Stabilize threads (move out of jump pads). */
1332 stabilize_threads ();
1333
95954743 1334 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1335
1336 the_target->mourn (process);
f9e39928
PA
1337
1338 /* Since we presently can only stop all lwps of all processes, we
1339 need to unstop lwps of other processes. */
7984d532 1340 unstop_all_lwps (0, NULL);
f9e39928
PA
1341 return 0;
1342}
1343
1344/* Remove all LWPs that belong to process PROC from the lwp list. */
1345
1346static int
1347delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1348{
d86d4aaf
DE
1349 struct thread_info *thread = (struct thread_info *) entry;
1350 struct lwp_info *lwp = get_thread_lwp (thread);
f9e39928
PA
1351 struct process_info *process = proc;
1352
d86d4aaf 1353 if (pid_of (thread) == pid_of (process))
f9e39928
PA
1354 delete_lwp (lwp);
1355
dd6953e1 1356 return 0;
6ad8ae5c
DJ
1357}
1358
8336d594
PA
1359static void
1360linux_mourn (struct process_info *process)
1361{
1362 struct process_info_private *priv;
1363
1364#ifdef USE_THREAD_DB
1365 thread_db_mourn (process);
1366#endif
1367
d86d4aaf 1368 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1369
8336d594 1370 /* Freeing all private data. */
fe978cb0 1371 priv = process->priv;
8336d594
PA
1372 free (priv->arch_private);
1373 free (priv);
fe978cb0 1374 process->priv = NULL;
505106cd
PA
1375
1376 remove_process (process);
8336d594
PA
1377}
1378
444d6139 1379static void
95954743 1380linux_join (int pid)
444d6139 1381{
444d6139
PA
1382 int status, ret;
1383
1384 do {
95954743 1385 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1386 if (WIFEXITED (status) || WIFSIGNALED (status))
1387 break;
1388 } while (ret != -1 || errno != ECHILD);
1389}
1390
6ad8ae5c 1391/* Return nonzero if the given thread is still alive. */
0d62e5e8 1392static int
95954743 1393linux_thread_alive (ptid_t ptid)
0d62e5e8 1394{
95954743
PA
1395 struct lwp_info *lwp = find_lwp_pid (ptid);
1396
1397 /* We assume we always know if a thread exits. If a whole process
1398 exited but we still haven't been able to report it to GDB, we'll
1399 hold on to the last lwp of the dead process. */
1400 if (lwp != NULL)
00db26fa 1401 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1402 else
1403 return 0;
1404}
1405
582511be
PA
1406/* Return 1 if this lwp still has an interesting status pending. If
1407 not (e.g., it had stopped for a breakpoint that is gone), return
1408 false. */
1409
1410static int
1411thread_still_has_status_pending_p (struct thread_info *thread)
1412{
1413 struct lwp_info *lp = get_thread_lwp (thread);
1414
1415 if (!lp->status_pending_p)
1416 return 0;
1417
1418 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1419 report any status pending the LWP may have. */
1420 if (thread->last_resume_kind == resume_stop
1421 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1422 return 0;
1423
1424 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1425 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1426 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1427 {
1428 struct thread_info *saved_thread;
1429 CORE_ADDR pc;
1430 int discard = 0;
1431
1432 gdb_assert (lp->last_status != 0);
1433
1434 pc = get_pc (lp);
1435
1436 saved_thread = current_thread;
1437 current_thread = thread;
1438
1439 if (pc != lp->stop_pc)
1440 {
1441 if (debug_threads)
1442 debug_printf ("PC of %ld changed\n",
1443 lwpid_of (thread));
1444 discard = 1;
1445 }
3e572f71
PA
1446
1447#if !USE_SIGTRAP_SIGINFO
15c66dd6 1448 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
582511be
PA
1449 && !(*the_low_target.breakpoint_at) (pc))
1450 {
1451 if (debug_threads)
1452 debug_printf ("previous SW breakpoint of %ld gone\n",
1453 lwpid_of (thread));
1454 discard = 1;
1455 }
15c66dd6 1456 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1457 && !hardware_breakpoint_inserted_here (pc))
1458 {
1459 if (debug_threads)
1460 debug_printf ("previous HW breakpoint of %ld gone\n",
1461 lwpid_of (thread));
1462 discard = 1;
1463 }
3e572f71 1464#endif
582511be
PA
1465
1466 current_thread = saved_thread;
1467
1468 if (discard)
1469 {
1470 if (debug_threads)
1471 debug_printf ("discarding pending breakpoint status\n");
1472 lp->status_pending_p = 0;
1473 return 0;
1474 }
1475 }
1476
1477 return 1;
1478}
1479
6bf5e0ba 1480/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1481static int
d50171e4 1482status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1483{
d86d4aaf 1484 struct thread_info *thread = (struct thread_info *) entry;
582511be 1485 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1486 ptid_t ptid = * (ptid_t *) arg;
1487
1488 /* Check if we're only interested in events from a specific process
afa8d396
PA
1489 or a specific LWP. */
1490 if (!ptid_match (ptid_of (thread), ptid))
95954743 1491 return 0;
0d62e5e8 1492
582511be
PA
1493 if (lp->status_pending_p
1494 && !thread_still_has_status_pending_p (thread))
1495 {
1496 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1497 return 0;
1498 }
0d62e5e8 1499
582511be 1500 return lp->status_pending_p;
0d62e5e8
DJ
1501}
1502
95954743
PA
1503static int
1504same_lwp (struct inferior_list_entry *entry, void *data)
1505{
1506 ptid_t ptid = *(ptid_t *) data;
1507 int lwp;
1508
1509 if (ptid_get_lwp (ptid) != 0)
1510 lwp = ptid_get_lwp (ptid);
1511 else
1512 lwp = ptid_get_pid (ptid);
1513
1514 if (ptid_get_lwp (entry->id) == lwp)
1515 return 1;
1516
1517 return 0;
1518}
1519
1520struct lwp_info *
1521find_lwp_pid (ptid_t ptid)
1522{
d86d4aaf
DE
1523 struct inferior_list_entry *thread
1524 = find_inferior (&all_threads, same_lwp, &ptid);
1525
1526 if (thread == NULL)
1527 return NULL;
1528
1529 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1530}
1531
fa96cb38 1532/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1533
fa96cb38
PA
1534static int
1535num_lwps (int pid)
1536{
1537 struct inferior_list_entry *inf, *tmp;
1538 int count = 0;
0d62e5e8 1539
fa96cb38 1540 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1541 {
fa96cb38
PA
1542 if (ptid_get_pid (inf->id) == pid)
1543 count++;
24a09b5f 1544 }
3aee8918 1545
fa96cb38
PA
1546 return count;
1547}
d61ddec4 1548
6d4ee8c6
GB
1549/* The arguments passed to iterate_over_lwps. */
1550
1551struct iterate_over_lwps_args
1552{
1553 /* The FILTER argument passed to iterate_over_lwps. */
1554 ptid_t filter;
1555
1556 /* The CALLBACK argument passed to iterate_over_lwps. */
1557 iterate_over_lwps_ftype *callback;
1558
1559 /* The DATA argument passed to iterate_over_lwps. */
1560 void *data;
1561};
1562
1563/* Callback for find_inferior used by iterate_over_lwps to filter
1564 calls to the callback supplied to that function. Returning a
1565 nonzero value causes find_inferiors to stop iterating and return
1566 the current inferior_list_entry. Returning zero indicates that
1567 find_inferiors should continue iterating. */
1568
1569static int
1570iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1571{
1572 struct iterate_over_lwps_args *args
1573 = (struct iterate_over_lwps_args *) args_p;
1574
1575 if (ptid_match (entry->id, args->filter))
1576 {
1577 struct thread_info *thr = (struct thread_info *) entry;
1578 struct lwp_info *lwp = get_thread_lwp (thr);
1579
1580 return (*args->callback) (lwp, args->data);
1581 }
1582
1583 return 0;
1584}
1585
1586/* See nat/linux-nat.h. */
1587
1588struct lwp_info *
1589iterate_over_lwps (ptid_t filter,
1590 iterate_over_lwps_ftype callback,
1591 void *data)
1592{
1593 struct iterate_over_lwps_args args = {filter, callback, data};
1594 struct inferior_list_entry *entry;
1595
1596 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1597 if (entry == NULL)
1598 return NULL;
1599
1600 return get_thread_lwp ((struct thread_info *) entry);
1601}
1602
fa96cb38
PA
1603/* Detect zombie thread group leaders, and "exit" them. We can't reap
1604 their exits until all other threads in the group have exited. */
c3adc08c 1605
fa96cb38
PA
1606static void
1607check_zombie_leaders (void)
1608{
1609 struct process_info *proc, *tmp;
c3adc08c 1610
fa96cb38 1611 ALL_PROCESSES (proc, tmp)
c3adc08c 1612 {
fa96cb38
PA
1613 pid_t leader_pid = pid_of (proc);
1614 struct lwp_info *leader_lp;
c3adc08c 1615
fa96cb38 1616 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1617
fa96cb38
PA
1618 if (debug_threads)
1619 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1620 "num_lwps=%d, zombie=%d\n",
1621 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1622 linux_proc_pid_is_zombie (leader_pid));
1623
1624 if (leader_lp != NULL
1625 /* Check if there are other threads in the group, as we may
1626 have raced with the inferior simply exiting. */
1627 && !last_thread_of_process_p (leader_pid)
1628 && linux_proc_pid_is_zombie (leader_pid))
1629 {
1630 /* A leader zombie can mean one of two things:
1631
1632 - It exited, and there's an exit status pending
1633 available, or only the leader exited (not the whole
1634 program). In the latter case, we can't waitpid the
1635 leader's exit status until all other threads are gone.
1636
1637 - There are 3 or more threads in the group, and a thread
1638 other than the leader exec'd. On an exec, the Linux
1639 kernel destroys all other threads (except the execing
1640 one) in the thread group, and resets the execing thread's
1641 tid to the tgid. No exit notification is sent for the
1642 execing thread -- from the ptracer's perspective, it
1643 appears as though the execing thread just vanishes.
1644 Until we reap all other threads except the leader and the
1645 execing thread, the leader will be zombie, and the
1646 execing thread will be in `D (disc sleep)'. As soon as
1647 all other threads are reaped, the execing thread changes
1648 it's tid to the tgid, and the previous (zombie) leader
1649 vanishes, giving place to the "new" leader. We could try
1650 distinguishing the exit and exec cases, by waiting once
1651 more, and seeing if something comes out, but it doesn't
1652 sound useful. The previous leader _does_ go away, and
1653 we'll re-add the new one once we see the exec event
1654 (which is just the same as what would happen if the
1655 previous leader did exit voluntarily before some other
1656 thread execs). */
c3adc08c 1657
fa96cb38
PA
1658 if (debug_threads)
1659 fprintf (stderr,
1660 "CZL: Thread group leader %d zombie "
1661 "(it exited, or another thread execd).\n",
1662 leader_pid);
c3adc08c 1663
fa96cb38 1664 delete_lwp (leader_lp);
c3adc08c
PA
1665 }
1666 }
fa96cb38 1667}
c3adc08c 1668
fa96cb38
PA
1669/* Callback for `find_inferior'. Returns the first LWP that is not
1670 stopped. ARG is a PTID filter. */
d50171e4 1671
fa96cb38
PA
1672static int
1673not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1674{
1675 struct thread_info *thr = (struct thread_info *) entry;
1676 struct lwp_info *lwp;
1677 ptid_t filter = *(ptid_t *) arg;
47c0c975 1678
fa96cb38
PA
1679 if (!ptid_match (ptid_of (thr), filter))
1680 return 0;
bd99dc85 1681
fa96cb38
PA
1682 lwp = get_thread_lwp (thr);
1683 if (!lwp->stopped)
1684 return 1;
1685
1686 return 0;
0d62e5e8 1687}
611cb4a5 1688
219f2f23
PA
1689/* This function should only be called if the LWP got a SIGTRAP.
1690
1691 Handle any tracepoint steps or hits. Return true if a tracepoint
1692 event was handled, 0 otherwise. */
1693
1694static int
1695handle_tracepoints (struct lwp_info *lwp)
1696{
1697 struct thread_info *tinfo = get_lwp_thread (lwp);
1698 int tpoint_related_event = 0;
1699
582511be
PA
1700 gdb_assert (lwp->suspended == 0);
1701
7984d532
PA
1702 /* If this tracepoint hit causes a tracing stop, we'll immediately
1703 uninsert tracepoints. To do this, we temporarily pause all
1704 threads, unpatch away, and then unpause threads. We need to make
1705 sure the unpausing doesn't resume LWP too. */
1706 lwp->suspended++;
1707
219f2f23
PA
1708 /* And we need to be sure that any all-threads-stopping doesn't try
1709 to move threads out of the jump pads, as it could deadlock the
1710 inferior (LWP could be in the jump pad, maybe even holding the
1711 lock.) */
1712
1713 /* Do any necessary step collect actions. */
1714 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1715
fa593d66
PA
1716 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1717
219f2f23
PA
1718 /* See if we just hit a tracepoint and do its main collect
1719 actions. */
1720 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1721
7984d532
PA
1722 lwp->suspended--;
1723
1724 gdb_assert (lwp->suspended == 0);
fa593d66 1725 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1726
219f2f23
PA
1727 if (tpoint_related_event)
1728 {
1729 if (debug_threads)
87ce2a04 1730 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1731 return 1;
1732 }
1733
1734 return 0;
1735}
1736
fa593d66
PA
1737/* Convenience wrapper. Returns true if LWP is presently collecting a
1738 fast tracepoint. */
1739
1740static int
1741linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1742 struct fast_tpoint_collect_status *status)
1743{
1744 CORE_ADDR thread_area;
d86d4aaf 1745 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1746
1747 if (the_low_target.get_thread_area == NULL)
1748 return 0;
1749
1750 /* Get the thread area address. This is used to recognize which
1751 thread is which when tracing with the in-process agent library.
1752 We don't read anything from the address, and treat it as opaque;
1753 it's the address itself that we assume is unique per-thread. */
d86d4aaf 1754 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
1755 return 0;
1756
1757 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1758}
1759
1760/* The reason we resume in the caller, is because we want to be able
1761 to pass lwp->status_pending as WSTAT, and we need to clear
1762 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1763 refuses to resume. */
1764
1765static int
1766maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1767{
0bfdf32f 1768 struct thread_info *saved_thread;
fa593d66 1769
0bfdf32f
GB
1770 saved_thread = current_thread;
1771 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1772
1773 if ((wstat == NULL
1774 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1775 && supports_fast_tracepoints ()
58b4daa5 1776 && agent_loaded_p ())
fa593d66
PA
1777 {
1778 struct fast_tpoint_collect_status status;
1779 int r;
1780
1781 if (debug_threads)
87ce2a04
DE
1782 debug_printf ("Checking whether LWP %ld needs to move out of the "
1783 "jump pad.\n",
0bfdf32f 1784 lwpid_of (current_thread));
fa593d66
PA
1785
1786 r = linux_fast_tracepoint_collecting (lwp, &status);
1787
1788 if (wstat == NULL
1789 || (WSTOPSIG (*wstat) != SIGILL
1790 && WSTOPSIG (*wstat) != SIGFPE
1791 && WSTOPSIG (*wstat) != SIGSEGV
1792 && WSTOPSIG (*wstat) != SIGBUS))
1793 {
1794 lwp->collecting_fast_tracepoint = r;
1795
1796 if (r != 0)
1797 {
1798 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1799 {
1800 /* Haven't executed the original instruction yet.
1801 Set breakpoint there, and wait till it's hit,
1802 then single-step until exiting the jump pad. */
1803 lwp->exit_jump_pad_bkpt
1804 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1805 }
1806
1807 if (debug_threads)
87ce2a04
DE
1808 debug_printf ("Checking whether LWP %ld needs to move out of "
1809 "the jump pad...it does\n",
0bfdf32f
GB
1810 lwpid_of (current_thread));
1811 current_thread = saved_thread;
fa593d66
PA
1812
1813 return 1;
1814 }
1815 }
1816 else
1817 {
1818 /* If we get a synchronous signal while collecting, *and*
1819 while executing the (relocated) original instruction,
1820 reset the PC to point at the tpoint address, before
1821 reporting to GDB. Otherwise, it's an IPA lib bug: just
1822 report the signal to GDB, and pray for the best. */
1823
1824 lwp->collecting_fast_tracepoint = 0;
1825
1826 if (r != 0
1827 && (status.adjusted_insn_addr <= lwp->stop_pc
1828 && lwp->stop_pc < status.adjusted_insn_addr_end))
1829 {
1830 siginfo_t info;
1831 struct regcache *regcache;
1832
1833 /* The si_addr on a few signals references the address
1834 of the faulting instruction. Adjust that as
1835 well. */
1836 if ((WSTOPSIG (*wstat) == SIGILL
1837 || WSTOPSIG (*wstat) == SIGFPE
1838 || WSTOPSIG (*wstat) == SIGBUS
1839 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 1840 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1841 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
1842 /* Final check just to make sure we don't clobber
1843 the siginfo of non-kernel-sent signals. */
1844 && (uintptr_t) info.si_addr == lwp->stop_pc)
1845 {
1846 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 1847 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1848 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
1849 }
1850
0bfdf32f 1851 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
1852 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1853 lwp->stop_pc = status.tpoint_addr;
1854
1855 /* Cancel any fast tracepoint lock this thread was
1856 holding. */
1857 force_unlock_trace_buffer ();
1858 }
1859
1860 if (lwp->exit_jump_pad_bkpt != NULL)
1861 {
1862 if (debug_threads)
87ce2a04
DE
1863 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1864 "stopping all threads momentarily.\n");
fa593d66
PA
1865
1866 stop_all_lwps (1, lwp);
fa593d66
PA
1867
1868 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1869 lwp->exit_jump_pad_bkpt = NULL;
1870
1871 unstop_all_lwps (1, lwp);
1872
1873 gdb_assert (lwp->suspended >= 0);
1874 }
1875 }
1876 }
1877
1878 if (debug_threads)
87ce2a04
DE
1879 debug_printf ("Checking whether LWP %ld needs to move out of the "
1880 "jump pad...no\n",
0bfdf32f 1881 lwpid_of (current_thread));
0cccb683 1882
0bfdf32f 1883 current_thread = saved_thread;
fa593d66
PA
1884 return 0;
1885}
1886
1887/* Enqueue one signal in the "signals to report later when out of the
1888 jump pad" list. */
1889
1890static void
1891enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1892{
1893 struct pending_signals *p_sig;
d86d4aaf 1894 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1895
1896 if (debug_threads)
87ce2a04 1897 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 1898 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1899
1900 if (debug_threads)
1901 {
1902 struct pending_signals *sig;
1903
1904 for (sig = lwp->pending_signals_to_report;
1905 sig != NULL;
1906 sig = sig->prev)
87ce2a04
DE
1907 debug_printf (" Already queued %d\n",
1908 sig->signal);
fa593d66 1909
87ce2a04 1910 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
1911 }
1912
1a981360
PA
1913 /* Don't enqueue non-RT signals if they are already in the deferred
1914 queue. (SIGSTOP being the easiest signal to see ending up here
1915 twice) */
1916 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1917 {
1918 struct pending_signals *sig;
1919
1920 for (sig = lwp->pending_signals_to_report;
1921 sig != NULL;
1922 sig = sig->prev)
1923 {
1924 if (sig->signal == WSTOPSIG (*wstat))
1925 {
1926 if (debug_threads)
87ce2a04
DE
1927 debug_printf ("Not requeuing already queued non-RT signal %d"
1928 " for LWP %ld\n",
1929 sig->signal,
d86d4aaf 1930 lwpid_of (thread));
1a981360
PA
1931 return;
1932 }
1933 }
1934 }
1935
fa593d66
PA
1936 p_sig = xmalloc (sizeof (*p_sig));
1937 p_sig->prev = lwp->pending_signals_to_report;
1938 p_sig->signal = WSTOPSIG (*wstat);
1939 memset (&p_sig->info, 0, sizeof (siginfo_t));
d86d4aaf 1940 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1941 &p_sig->info);
fa593d66
PA
1942
1943 lwp->pending_signals_to_report = p_sig;
1944}
1945
1946/* Dequeue one signal from the "signals to report later when out of
1947 the jump pad" list. */
1948
1949static int
1950dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1951{
d86d4aaf
DE
1952 struct thread_info *thread = get_lwp_thread (lwp);
1953
fa593d66
PA
1954 if (lwp->pending_signals_to_report != NULL)
1955 {
1956 struct pending_signals **p_sig;
1957
1958 p_sig = &lwp->pending_signals_to_report;
1959 while ((*p_sig)->prev != NULL)
1960 p_sig = &(*p_sig)->prev;
1961
1962 *wstat = W_STOPCODE ((*p_sig)->signal);
1963 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 1964 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1965 &(*p_sig)->info);
fa593d66
PA
1966 free (*p_sig);
1967 *p_sig = NULL;
1968
1969 if (debug_threads)
87ce2a04 1970 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 1971 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1972
1973 if (debug_threads)
1974 {
1975 struct pending_signals *sig;
1976
1977 for (sig = lwp->pending_signals_to_report;
1978 sig != NULL;
1979 sig = sig->prev)
87ce2a04
DE
1980 debug_printf (" Still queued %d\n",
1981 sig->signal);
fa593d66 1982
87ce2a04 1983 debug_printf (" (no more queued signals)\n");
fa593d66
PA
1984 }
1985
1986 return 1;
1987 }
1988
1989 return 0;
1990}
1991
582511be
PA
1992/* Fetch the possibly triggered data watchpoint info and store it in
1993 CHILD.
d50171e4 1994
582511be
PA
1995 On some archs, like x86, that use debug registers to set
1996 watchpoints, it's possible that the way to know which watched
1997 address trapped, is to check the register that is used to select
1998 which address to watch. Problem is, between setting the watchpoint
1999 and reading back which data address trapped, the user may change
2000 the set of watchpoints, and, as a consequence, GDB changes the
2001 debug registers in the inferior. To avoid reading back a stale
2002 stopped-data-address when that happens, we cache in LP the fact
2003 that a watchpoint trapped, and the corresponding data address, as
2004 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2005 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2006
582511be
PA
2007static int
2008check_stopped_by_watchpoint (struct lwp_info *child)
2009{
2010 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2011 {
582511be 2012 struct thread_info *saved_thread;
d50171e4 2013
582511be
PA
2014 saved_thread = current_thread;
2015 current_thread = get_lwp_thread (child);
2016
2017 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2018 {
15c66dd6 2019 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2020
2021 if (the_low_target.stopped_data_address != NULL)
2022 child->stopped_data_address
2023 = the_low_target.stopped_data_address ();
2024 else
2025 child->stopped_data_address = 0;
d50171e4
PA
2026 }
2027
0bfdf32f 2028 current_thread = saved_thread;
d50171e4
PA
2029 }
2030
15c66dd6 2031 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2032}
2033
de0d863e
DB
2034/* Return the ptrace options that we want to try to enable. */
2035
2036static int
2037linux_low_ptrace_options (int attached)
2038{
2039 int options = 0;
2040
2041 if (!attached)
2042 options |= PTRACE_O_EXITKILL;
2043
2044 if (report_fork_events)
2045 options |= PTRACE_O_TRACEFORK;
2046
c269dbdb
DB
2047 if (report_vfork_events)
2048 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2049
de0d863e
DB
2050 return options;
2051}
2052
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.

   LWPID is the LWP that waitpid reported the event for; WSTAT is the
   raw wait status.  Side effects: may delete the LWP, mark it dead,
   resume it, or leave the status pending on it.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{

	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  /* From here on the event must be a stop; exits were handled above.
     NOTE(review): the WIFSTOPPED tests below are therefore always
     true after this assert — kept as-is for safety.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      struct thread_info *saved_thread;

	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      saved_thread = current_thread;
	      current_thread = thread;

	      the_low_target.arch_setup ();

	      current_thread = saved_thread;
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  /* First stop after creation/attach: enable the ptrace event
     reporting options (fork/vfork tracing etc.) on the new LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	  || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    check_stopped_by_watchpoint (child);

  /* check_stopped_by_breakpoint may already have adjusted and cached
     the PC; only read it here if it didn't.  */
  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* Leave the event pending on the LWP; the caller picks an event to
     report from all LWPs with one pending.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2233
20ba1ce6
PA
2234/* Resume LWPs that are currently stopped without any pending status
2235 to report, but are resumed from the core's perspective. */
2236
2237static void
2238resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2239{
2240 struct thread_info *thread = (struct thread_info *) entry;
2241 struct lwp_info *lp = get_thread_lwp (thread);
2242
2243 if (lp->stopped
2244 && !lp->status_pending_p
2245 && thread->last_resume_kind != resume_stop
2246 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2247 {
2248 int step = thread->last_resume_kind == resume_step;
2249
2250 if (debug_threads)
2251 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2252 target_pid_to_str (ptid_of (thread)),
2253 paddress (lp->stop_pc),
2254 step);
2255
2256 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2257 }
2258}
2259
fa96cb38
PA
2260/* Wait for an event from child(ren) WAIT_PTID, and return any that
2261 match FILTER_PTID (leaving others pending). The PTIDs can be:
2262 minus_one_ptid, to specify any child; a pid PTID, specifying all
2263 lwps of a thread group; or a PTID representing a single lwp. Store
2264 the stop status through the status pointer WSTAT. OPTIONS is
2265 passed to the waitpid call. Return 0 if no event was found and
2266 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2267 was found. Return the PID of the stopped child otherwise. */
bd99dc85 2268
0d62e5e8 2269static int
fa96cb38
PA
2270linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2271 int *wstatp, int options)
0d62e5e8 2272{
d86d4aaf 2273 struct thread_info *event_thread;
d50171e4 2274 struct lwp_info *event_child, *requested_child;
fa96cb38 2275 sigset_t block_mask, prev_mask;
d50171e4 2276
fa96cb38 2277 retry:
d86d4aaf
DE
2278 /* N.B. event_thread points to the thread_info struct that contains
2279 event_child. Keep them in sync. */
2280 event_thread = NULL;
d50171e4
PA
2281 event_child = NULL;
2282 requested_child = NULL;
0d62e5e8 2283
95954743 2284 /* Check for a lwp with a pending status. */
bd99dc85 2285
fa96cb38 2286 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
0d62e5e8 2287 {
d86d4aaf 2288 event_thread = (struct thread_info *)
fa96cb38 2289 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
d86d4aaf
DE
2290 if (event_thread != NULL)
2291 event_child = get_thread_lwp (event_thread);
2292 if (debug_threads && event_thread)
2293 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2294 }
fa96cb38 2295 else if (!ptid_equal (filter_ptid, null_ptid))
0d62e5e8 2296 {
fa96cb38 2297 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2298
bde24c0a 2299 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66
PA
2300 && requested_child->status_pending_p
2301 && requested_child->collecting_fast_tracepoint)
2302 {
2303 enqueue_one_deferred_signal (requested_child,
2304 &requested_child->status_pending);
2305 requested_child->status_pending_p = 0;
2306 requested_child->status_pending = 0;
2307 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2308 }
2309
2310 if (requested_child->suspended
2311 && requested_child->status_pending_p)
38e08fca
GB
2312 {
2313 internal_error (__FILE__, __LINE__,
2314 "requesting an event out of a"
2315 " suspended child?");
2316 }
fa593d66 2317
d50171e4 2318 if (requested_child->status_pending_p)
d86d4aaf
DE
2319 {
2320 event_child = requested_child;
2321 event_thread = get_lwp_thread (event_child);
2322 }
0d62e5e8 2323 }
611cb4a5 2324
0d62e5e8
DJ
2325 if (event_child != NULL)
2326 {
bd99dc85 2327 if (debug_threads)
87ce2a04 2328 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2329 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2330 *wstatp = event_child->status_pending;
bd99dc85
PA
2331 event_child->status_pending_p = 0;
2332 event_child->status_pending = 0;
0bfdf32f 2333 current_thread = event_thread;
d86d4aaf 2334 return lwpid_of (event_thread);
0d62e5e8
DJ
2335 }
2336
fa96cb38
PA
2337 /* But if we don't find a pending event, we'll have to wait.
2338
2339 We only enter this loop if no process has a pending wait status.
2340 Thus any action taken in response to a wait status inside this
2341 loop is responding as soon as we detect the status, not after any
2342 pending events. */
d8301ad1 2343
fa96cb38
PA
2344 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2345 all signals while here. */
2346 sigfillset (&block_mask);
2347 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2348
582511be
PA
2349 /* Always pull all events out of the kernel. We'll randomly select
2350 an event LWP out of all that have events, to prevent
2351 starvation. */
fa96cb38 2352 while (event_child == NULL)
0d62e5e8 2353 {
fa96cb38 2354 pid_t ret = 0;
0d62e5e8 2355
fa96cb38
PA
2356 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2357 quirks:
0d62e5e8 2358
fa96cb38
PA
2359 - If the thread group leader exits while other threads in the
2360 thread group still exist, waitpid(TGID, ...) hangs. That
2361 waitpid won't return an exit status until the other threads
2362 in the group are reaped.
611cb4a5 2363
fa96cb38
PA
2364 - When a non-leader thread execs, that thread just vanishes
2365 without reporting an exit (so we'd hang if we waited for it
2366 explicitly in that case). The exec event is reported to
2367 the TGID pid (although we don't currently enable exec
2368 events). */
2369 errno = 0;
2370 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2371
fa96cb38
PA
2372 if (debug_threads)
2373 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2374 ret, errno ? strerror (errno) : "ERRNO-OK");
0d62e5e8 2375
fa96cb38 2376 if (ret > 0)
0d62e5e8 2377 {
89be2091 2378 if (debug_threads)
bd99dc85 2379 {
fa96cb38
PA
2380 debug_printf ("LLW: waitpid %ld received %s\n",
2381 (long) ret, status_to_str (*wstatp));
bd99dc85 2382 }
89be2091 2383
582511be
PA
2384 /* Filter all events. IOW, leave all events pending. We'll
2385 randomly select an event LWP out of all that have events
2386 below. */
2387 linux_low_filter_event (ret, *wstatp);
fa96cb38
PA
2388 /* Retry until nothing comes out of waitpid. A single
2389 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2390 continue;
2391 }
2392
20ba1ce6
PA
2393 /* Now that we've pulled all events out of the kernel, resume
2394 LWPs that don't have an interesting event to report. */
2395 if (stopping_threads == NOT_STOPPING_THREADS)
2396 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2397
2398 /* ... and find an LWP with a status to report to the core, if
2399 any. */
582511be
PA
2400 event_thread = (struct thread_info *)
2401 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2402 if (event_thread != NULL)
2403 {
2404 event_child = get_thread_lwp (event_thread);
2405 *wstatp = event_child->status_pending;
2406 event_child->status_pending_p = 0;
2407 event_child->status_pending = 0;
2408 break;
2409 }
2410
fa96cb38
PA
2411 /* Check for zombie thread group leaders. Those can't be reaped
2412 until all other threads in the thread group are. */
2413 check_zombie_leaders ();
2414
2415 /* If there are no resumed children left in the set of LWPs we
2416 want to wait for, bail. We can't just block in
2417 waitpid/sigsuspend, because lwps might have been left stopped
2418 in trace-stop state, and we'd be stuck forever waiting for
2419 their status to change (which would only happen if we resumed
2420 them). Even if WNOHANG is set, this return code is preferred
2421 over 0 (below), as it is more detailed. */
2422 if ((find_inferior (&all_threads,
2423 not_stopped_callback,
2424 &wait_ptid) == NULL))
a6dbe5df 2425 {
fa96cb38
PA
2426 if (debug_threads)
2427 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2428 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2429 return -1;
a6dbe5df
PA
2430 }
2431
fa96cb38
PA
2432 /* No interesting event to report to the caller. */
2433 if ((options & WNOHANG))
24a09b5f 2434 {
fa96cb38
PA
2435 if (debug_threads)
2436 debug_printf ("WNOHANG set, no event found\n");
2437
2438 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2439 return 0;
24a09b5f
DJ
2440 }
2441
fa96cb38
PA
2442 /* Block until we get an event reported with SIGCHLD. */
2443 if (debug_threads)
2444 debug_printf ("sigsuspend'ing\n");
d50171e4 2445
fa96cb38
PA
2446 sigsuspend (&prev_mask);
2447 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2448 goto retry;
2449 }
d50171e4 2450
fa96cb38 2451 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2452
0bfdf32f 2453 current_thread = event_thread;
d50171e4 2454
fa96cb38
PA
2455 /* Check for thread exit. */
2456 if (! WIFSTOPPED (*wstatp))
2457 {
2458 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2459
2460 if (debug_threads)
2461 debug_printf ("LWP %d is the last lwp of process. "
2462 "Process %ld exiting.\n",
2463 pid_of (event_thread), lwpid_of (event_thread));
d86d4aaf 2464 return lwpid_of (event_thread);
611cb4a5 2465 }
0d62e5e8 2466
fa96cb38
PA
2467 return lwpid_of (event_thread);
2468}
2469
2470/* Wait for an event from child(ren) PTID. PTIDs can be:
2471 minus_one_ptid, to specify any child; a pid PTID, specifying all
2472 lwps of a thread group; or a PTID representing a single lwp. Store
2473 the stop status through the status pointer WSTAT. OPTIONS is
2474 passed to the waitpid call. Return 0 if no event was found and
2475 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2476 was found. Return the PID of the stopped child otherwise. */
2477
2478static int
2479linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2480{
2481 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2482}
2483
6bf5e0ba
PA
2484/* Count the LWP's that have had events. */
2485
2486static int
2487count_events_callback (struct inferior_list_entry *entry, void *data)
2488{
d86d4aaf 2489 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2490 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2491 int *count = data;
2492
2493 gdb_assert (count != NULL);
2494
582511be 2495 /* Count only resumed LWPs that have an event pending. */
8336d594 2496 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2497 && lp->status_pending_p)
6bf5e0ba
PA
2498 (*count)++;
2499
2500 return 0;
2501}
2502
2503/* Select the LWP (if any) that is currently being single-stepped. */
2504
2505static int
2506select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2507{
d86d4aaf
DE
2508 struct thread_info *thread = (struct thread_info *) entry;
2509 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2510
8336d594
PA
2511 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2512 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2513 && lp->status_pending_p)
2514 return 1;
2515 else
2516 return 0;
2517}
2518
b90fc188 2519/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2520
2521static int
2522select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2523{
d86d4aaf 2524 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2525 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2526 int *selector = data;
2527
2528 gdb_assert (selector != NULL);
2529
582511be 2530 /* Select only resumed LWPs that have an event pending. */
91baf43f 2531 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2532 && lp->status_pending_p)
6bf5e0ba
PA
2533 if ((*selector)-- == 0)
2534 return 1;
2535
2536 return 0;
2537}
2538
6bf5e0ba
PA
2539/* Select one LWP out of those that have events pending. */
2540
2541static void
2542select_event_lwp (struct lwp_info **orig_lp)
2543{
2544 int num_events = 0;
2545 int random_selector;
582511be
PA
2546 struct thread_info *event_thread = NULL;
2547
2548 /* In all-stop, give preference to the LWP that is being
2549 single-stepped. There will be at most one, and it's the LWP that
2550 the core is most interested in. If we didn't do this, then we'd
2551 have to handle pending step SIGTRAPs somehow in case the core
2552 later continues the previously-stepped thread, otherwise we'd
2553 report the pending SIGTRAP, and the core, not having stepped the
2554 thread, wouldn't understand what the trap was for, and therefore
2555 would report it to the user as a random signal. */
2556 if (!non_stop)
6bf5e0ba 2557 {
582511be
PA
2558 event_thread
2559 = (struct thread_info *) find_inferior (&all_threads,
2560 select_singlestep_lwp_callback,
2561 NULL);
2562 if (event_thread != NULL)
2563 {
2564 if (debug_threads)
2565 debug_printf ("SEL: Select single-step %s\n",
2566 target_pid_to_str (ptid_of (event_thread)));
2567 }
6bf5e0ba 2568 }
582511be 2569 if (event_thread == NULL)
6bf5e0ba
PA
2570 {
2571 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2572 which have had events. */
6bf5e0ba 2573
b90fc188 2574 /* First see how many events we have. */
d86d4aaf 2575 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2576 gdb_assert (num_events > 0);
6bf5e0ba 2577
b90fc188
PA
2578 /* Now randomly pick a LWP out of those that have had
2579 events. */
6bf5e0ba
PA
2580 random_selector = (int)
2581 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2582
2583 if (debug_threads && num_events > 1)
87ce2a04
DE
2584 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2585 num_events, random_selector);
6bf5e0ba 2586
d86d4aaf
DE
2587 event_thread
2588 = (struct thread_info *) find_inferior (&all_threads,
2589 select_event_lwp_callback,
2590 &random_selector);
6bf5e0ba
PA
2591 }
2592
d86d4aaf 2593 if (event_thread != NULL)
6bf5e0ba 2594 {
d86d4aaf
DE
2595 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2596
6bf5e0ba
PA
2597 /* Switch the event LWP. */
2598 *orig_lp = event_lp;
2599 }
2600}
2601
7984d532
PA
2602/* Decrement the suspend count of an LWP. */
2603
2604static int
2605unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2606{
d86d4aaf
DE
2607 struct thread_info *thread = (struct thread_info *) entry;
2608 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2609
2610 /* Ignore EXCEPT. */
2611 if (lwp == except)
2612 return 0;
2613
2614 lwp->suspended--;
2615
2616 gdb_assert (lwp->suspended >= 0);
2617 return 0;
2618}
2619
2620/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2621 NULL. */
2622
2623static void
2624unsuspend_all_lwps (struct lwp_info *except)
2625{
d86d4aaf 2626 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2627}
2628
fa593d66
PA
2629static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2630static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2631 void *data);
2632static int lwp_running (struct inferior_list_entry *entry, void *data);
2633static ptid_t linux_wait_1 (ptid_t ptid,
2634 struct target_waitstatus *ourstatus,
2635 int target_options);
2636
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread is already wedged in a jump pad (can't be moved
     out), stabilization is impossible; bail out early.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Bump the suspend count so the LWP stays put
	     until all threads have stabilized; undone below.)  */
	  lwp->suspended++;

	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      /* Re-queue the signal so it is reported once the thread
		 is safely out of the jump pad.  */
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspend counts bumped above (NULL: no exceptions).  */
  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: stabilization should have left no thread stuck
	 in a jump pad.  */
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2737
582511be
PA
2738static void async_file_mark (void);
2739
2740/* Convenience function that is called when the kernel reports an
2741 event that is not passed out to GDB. */
2742
2743static ptid_t
2744ignore_event (struct target_waitstatus *ourstatus)
2745{
2746 /* If we got an event, there may still be others, as a single
2747 SIGCHLD can indicate more than one child stopped. This forces
2748 another target_wait call. */
2749 async_file_mark ();
2750
2751 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2752 return null_ptid;
2753}
2754
0d62e5e8 2755/* Wait for process, returns status. */
da6d8c04 2756
95954743
PA
2757static ptid_t
2758linux_wait_1 (ptid_t ptid,
2759 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2760{
e5f1222d 2761 int w;
fc7238bb 2762 struct lwp_info *event_child;
bd99dc85 2763 int options;
bd99dc85 2764 int pid;
6bf5e0ba
PA
2765 int step_over_finished;
2766 int bp_explains_trap;
2767 int maybe_internal_trap;
2768 int report_to_gdb;
219f2f23 2769 int trace_event;
c2d6af84 2770 int in_step_range;
bd99dc85 2771
87ce2a04
DE
2772 if (debug_threads)
2773 {
2774 debug_enter ();
2775 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2776 }
2777
bd99dc85
PA
2778 /* Translate generic target options into linux options. */
2779 options = __WALL;
2780 if (target_options & TARGET_WNOHANG)
2781 options |= WNOHANG;
0d62e5e8 2782
fa593d66
PA
2783 bp_explains_trap = 0;
2784 trace_event = 0;
c2d6af84 2785 in_step_range = 0;
bd99dc85
PA
2786 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2787
6bf5e0ba
PA
2788 if (ptid_equal (step_over_bkpt, null_ptid))
2789 pid = linux_wait_for_event (ptid, &w, options);
2790 else
2791 {
2792 if (debug_threads)
87ce2a04
DE
2793 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2794 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2795 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2796 }
2797
fa96cb38 2798 if (pid == 0)
87ce2a04 2799 {
fa96cb38
PA
2800 gdb_assert (target_options & TARGET_WNOHANG);
2801
87ce2a04
DE
2802 if (debug_threads)
2803 {
fa96cb38
PA
2804 debug_printf ("linux_wait_1 ret = null_ptid, "
2805 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2806 debug_exit ();
2807 }
fa96cb38
PA
2808
2809 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2810 return null_ptid;
2811 }
fa96cb38
PA
2812 else if (pid == -1)
2813 {
2814 if (debug_threads)
2815 {
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_NO_RESUMED\n");
2818 debug_exit ();
2819 }
bd99dc85 2820
fa96cb38
PA
2821 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2822 return null_ptid;
2823 }
0d62e5e8 2824
0bfdf32f 2825 event_child = get_thread_lwp (current_thread);
0d62e5e8 2826
fa96cb38
PA
2827 /* linux_wait_for_event only returns an exit status for the last
2828 child of a process. Report it. */
2829 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2830 {
fa96cb38 2831 if (WIFEXITED (w))
0d62e5e8 2832 {
fa96cb38
PA
2833 ourstatus->kind = TARGET_WAITKIND_EXITED;
2834 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2835
fa96cb38 2836 if (debug_threads)
bd99dc85 2837 {
fa96cb38
PA
2838 debug_printf ("linux_wait_1 ret = %s, exited with "
2839 "retcode %d\n",
0bfdf32f 2840 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2841 WEXITSTATUS (w));
2842 debug_exit ();
bd99dc85 2843 }
fa96cb38
PA
2844 }
2845 else
2846 {
2847 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2848 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2849
fa96cb38
PA
2850 if (debug_threads)
2851 {
2852 debug_printf ("linux_wait_1 ret = %s, terminated with "
2853 "signal %d\n",
0bfdf32f 2854 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2855 WTERMSIG (w));
2856 debug_exit ();
2857 }
0d62e5e8 2858 }
fa96cb38 2859
0bfdf32f 2860 return ptid_of (current_thread);
da6d8c04
DJ
2861 }
2862
8090aef2
PA
2863 /* If step-over executes a breakpoint instruction, it means a
2864 gdb/gdbserver breakpoint had been planted on top of a permanent
2865 breakpoint. The PC has been adjusted by
2866 check_stopped_by_breakpoint to point at the breakpoint address.
2867 Advance the PC manually past the breakpoint, otherwise the
2868 program would keep trapping the permanent breakpoint forever. */
2869 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2870 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2871 {
9beb7c4e 2872 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2873
2874 if (debug_threads)
2875 {
2876 debug_printf ("step-over for %s executed software breakpoint\n",
2877 target_pid_to_str (ptid_of (current_thread)));
2878 }
2879
2880 if (increment_pc != 0)
2881 {
2882 struct regcache *regcache
2883 = get_thread_regcache (current_thread, 1);
2884
2885 event_child->stop_pc += increment_pc;
2886 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2887
2888 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2889 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2890 }
2891 }
2892
6bf5e0ba
PA
2893 /* If this event was not handled before, and is not a SIGTRAP, we
2894 report it. SIGILL and SIGSEGV are also treated as traps in case
2895 a breakpoint is inserted at the current PC. If this target does
2896 not support internal breakpoints at all, we also report the
2897 SIGTRAP without further processing; it's of no concern to us. */
2898 maybe_internal_trap
2899 = (supports_breakpoints ()
2900 && (WSTOPSIG (w) == SIGTRAP
2901 || ((WSTOPSIG (w) == SIGILL
2902 || WSTOPSIG (w) == SIGSEGV)
2903 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2904
2905 if (maybe_internal_trap)
2906 {
2907 /* Handle anything that requires bookkeeping before deciding to
2908 report the event or continue waiting. */
2909
2910 /* First check if we can explain the SIGTRAP with an internal
2911 breakpoint, or if we should possibly report the event to GDB.
2912 Do this before anything that may remove or insert a
2913 breakpoint. */
2914 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2915
2916 /* We have a SIGTRAP, possibly a step-over dance has just
2917 finished. If so, tweak the state machine accordingly,
2918 reinsert breakpoints and delete any reinsert (software
2919 single-step) breakpoints. */
2920 step_over_finished = finish_step_over (event_child);
2921
2922 /* Now invoke the callbacks of any internal breakpoints there. */
2923 check_breakpoints (event_child->stop_pc);
2924
219f2f23
PA
2925 /* Handle tracepoint data collecting. This may overflow the
2926 trace buffer, and cause a tracing stop, removing
2927 breakpoints. */
2928 trace_event = handle_tracepoints (event_child);
2929
6bf5e0ba
PA
2930 if (bp_explains_trap)
2931 {
2932 /* If we stepped or ran into an internal breakpoint, we've
2933 already handled it. So next time we resume (from this
2934 PC), we should step over it. */
2935 if (debug_threads)
87ce2a04 2936 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2937
8b07ae33
PA
2938 if (breakpoint_here (event_child->stop_pc))
2939 event_child->need_step_over = 1;
6bf5e0ba
PA
2940 }
2941 }
2942 else
2943 {
2944 /* We have some other signal, possibly a step-over dance was in
2945 progress, and it should be cancelled too. */
2946 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2947 }
2948
2949 /* We have all the data we need. Either report the event to GDB, or
2950 resume threads and keep waiting for more. */
2951
2952 /* If we're collecting a fast tracepoint, finish the collection and
2953 move out of the jump pad before delivering a signal. See
2954 linux_stabilize_threads. */
2955
2956 if (WIFSTOPPED (w)
2957 && WSTOPSIG (w) != SIGTRAP
2958 && supports_fast_tracepoints ()
58b4daa5 2959 && agent_loaded_p ())
fa593d66
PA
2960 {
2961 if (debug_threads)
87ce2a04
DE
2962 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2963 "to defer or adjust it.\n",
0bfdf32f 2964 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2965
2966 /* Allow debugging the jump pad itself. */
0bfdf32f 2967 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2968 && maybe_move_out_of_jump_pad (event_child, &w))
2969 {
2970 enqueue_one_deferred_signal (event_child, &w);
2971
2972 if (debug_threads)
87ce2a04 2973 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2974 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2975
2976 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2977
2978 return ignore_event (ourstatus);
fa593d66
PA
2979 }
2980 }
219f2f23 2981
fa593d66
PA
2982 if (event_child->collecting_fast_tracepoint)
2983 {
2984 if (debug_threads)
87ce2a04
DE
2985 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2986 "Check if we're already there.\n",
0bfdf32f 2987 lwpid_of (current_thread),
87ce2a04 2988 event_child->collecting_fast_tracepoint);
fa593d66
PA
2989
2990 trace_event = 1;
2991
2992 event_child->collecting_fast_tracepoint
2993 = linux_fast_tracepoint_collecting (event_child, NULL);
2994
2995 if (event_child->collecting_fast_tracepoint != 1)
2996 {
2997 /* No longer need this breakpoint. */
2998 if (event_child->exit_jump_pad_bkpt != NULL)
2999 {
3000 if (debug_threads)
87ce2a04
DE
3001 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3002 "stopping all threads momentarily.\n");
fa593d66
PA
3003
3004 /* Other running threads could hit this breakpoint.
3005 We don't handle moribund locations like GDB does,
3006 instead we always pause all threads when removing
3007 breakpoints, so that any step-over or
3008 decr_pc_after_break adjustment is always taken
3009 care of while the breakpoint is still
3010 inserted. */
3011 stop_all_lwps (1, event_child);
fa593d66
PA
3012
3013 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3014 event_child->exit_jump_pad_bkpt = NULL;
3015
3016 unstop_all_lwps (1, event_child);
3017
3018 gdb_assert (event_child->suspended >= 0);
3019 }
3020 }
3021
3022 if (event_child->collecting_fast_tracepoint == 0)
3023 {
3024 if (debug_threads)
87ce2a04
DE
3025 debug_printf ("fast tracepoint finished "
3026 "collecting successfully.\n");
fa593d66
PA
3027
3028 /* We may have a deferred signal to report. */
3029 if (dequeue_one_deferred_signal (event_child, &w))
3030 {
3031 if (debug_threads)
87ce2a04 3032 debug_printf ("dequeued one signal.\n");
fa593d66 3033 }
3c11dd79 3034 else
fa593d66 3035 {
3c11dd79 3036 if (debug_threads)
87ce2a04 3037 debug_printf ("no deferred signals.\n");
fa593d66
PA
3038
3039 if (stabilizing_threads)
3040 {
3041 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3042 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3043
3044 if (debug_threads)
3045 {
3046 debug_printf ("linux_wait_1 ret = %s, stopped "
3047 "while stabilizing threads\n",
0bfdf32f 3048 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3049 debug_exit ();
3050 }
3051
0bfdf32f 3052 return ptid_of (current_thread);
fa593d66
PA
3053 }
3054 }
3055 }
6bf5e0ba
PA
3056 }
3057
e471f25b
PA
3058 /* Check whether GDB would be interested in this event. */
3059
3060 /* If GDB is not interested in this signal, don't stop other
3061 threads, and don't report it to GDB. Just resume the inferior
3062 right away. We do this for threading-related signals as well as
3063 any that GDB specifically requested we ignore. But never ignore
3064 SIGSTOP if we sent it ourselves, and do not ignore signals when
3065 stepping - they may require special handling to skip the signal
c9587f88
AT
3066 handler. Also never ignore signals that could be caused by a
3067 breakpoint. */
e471f25b
PA
3068 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3069 thread library? */
3070 if (WIFSTOPPED (w)
0bfdf32f 3071 && current_thread->last_resume_kind != resume_step
e471f25b 3072 && (
1a981360 3073#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3074 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3075 && (WSTOPSIG (w) == __SIGRTMIN
3076 || WSTOPSIG (w) == __SIGRTMIN + 1))
3077 ||
3078#endif
2ea28649 3079 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3080 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3081 && current_thread->last_resume_kind == resume_stop)
3082 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3083 {
3084 siginfo_t info, *info_p;
3085
3086 if (debug_threads)
87ce2a04 3087 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3088 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3089
0bfdf32f 3090 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3091 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3092 info_p = &info;
3093 else
3094 info_p = NULL;
3095 linux_resume_one_lwp (event_child, event_child->stepping,
3096 WSTOPSIG (w), info_p);
582511be 3097 return ignore_event (ourstatus);
e471f25b
PA
3098 }
3099
c2d6af84
PA
3100 /* Note that all addresses are always "out of the step range" when
3101 there's no range to begin with. */
3102 in_step_range = lwp_in_step_range (event_child);
3103
3104 /* If GDB wanted this thread to single step, and the thread is out
3105 of the step range, we always want to report the SIGTRAP, and let
3106 GDB handle it. Watchpoints should always be reported. So should
3107 signals we can't explain. A SIGTRAP we can't explain could be a
3108 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3109 do, we're be able to handle GDB breakpoints on top of internal
3110 breakpoints, by handling the internal breakpoint and still
3111 reporting the event to GDB. If we don't, we're out of luck, GDB
3112 won't see the breakpoint hit. */
6bf5e0ba 3113 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3114 || (current_thread->last_resume_kind == resume_step
c2d6af84 3115 && !in_step_range)
15c66dd6 3116 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 3117 || (!step_over_finished && !in_step_range
493e2a69 3118 && !bp_explains_trap && !trace_event)
9f3a5c85 3119 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3120 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3121 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3122 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3123
3124 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3125
3126 /* We found no reason GDB would want us to stop. We either hit one
3127 of our own breakpoints, or finished an internal step GDB
3128 shouldn't know about. */
3129 if (!report_to_gdb)
3130 {
3131 if (debug_threads)
3132 {
3133 if (bp_explains_trap)
87ce2a04 3134 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3135 if (step_over_finished)
87ce2a04 3136 debug_printf ("Step-over finished.\n");
219f2f23 3137 if (trace_event)
87ce2a04 3138 debug_printf ("Tracepoint event.\n");
c2d6af84 3139 if (lwp_in_step_range (event_child))
87ce2a04
DE
3140 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3141 paddress (event_child->stop_pc),
3142 paddress (event_child->step_range_start),
3143 paddress (event_child->step_range_end));
6bf5e0ba
PA
3144 }
3145
3146 /* We're not reporting this breakpoint to GDB, so apply the
3147 decr_pc_after_break adjustment to the inferior's regcache
3148 ourselves. */
3149
3150 if (the_low_target.set_pc != NULL)
3151 {
3152 struct regcache *regcache
0bfdf32f 3153 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3154 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3155 }
3156
7984d532
PA
3157 /* We may have finished stepping over a breakpoint. If so,
3158 we've stopped and suspended all LWPs momentarily except the
3159 stepping one. This is where we resume them all again. We're
3160 going to keep waiting, so use proceed, which handles stepping
3161 over the next breakpoint. */
6bf5e0ba 3162 if (debug_threads)
87ce2a04 3163 debug_printf ("proceeding all threads.\n");
7984d532
PA
3164
3165 if (step_over_finished)
3166 unsuspend_all_lwps (event_child);
3167
6bf5e0ba 3168 proceed_all_lwps ();
582511be 3169 return ignore_event (ourstatus);
6bf5e0ba
PA
3170 }
3171
3172 if (debug_threads)
3173 {
00db26fa 3174 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30
PA
3175 {
3176 char *str;
3177
3178 str = target_waitstatus_to_string (&event_child->waitstatus);
3179 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3180 lwpid_of (get_lwp_thread (event_child)), str);
3181 xfree (str);
3182 }
0bfdf32f 3183 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3184 {
3185 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3186 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3187 else if (!lwp_in_step_range (event_child))
87ce2a04 3188 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3189 }
15c66dd6 3190 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3191 debug_printf ("Stopped by watchpoint.\n");
582511be 3192 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3193 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3194 if (debug_threads)
87ce2a04 3195 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3196 }
3197
3198 /* Alright, we're going to report a stop. */
3199
582511be 3200 if (!stabilizing_threads)
6bf5e0ba
PA
3201 {
3202 /* In all-stop, stop all threads. */
582511be
PA
3203 if (!non_stop)
3204 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3205
3206 /* If we're not waiting for a specific LWP, choose an event LWP
3207 from among those that have had events. Giving equal priority
3208 to all LWPs that have had events helps prevent
3209 starvation. */
3210 if (ptid_equal (ptid, minus_one_ptid))
3211 {
3212 event_child->status_pending_p = 1;
3213 event_child->status_pending = w;
3214
3215 select_event_lwp (&event_child);
3216
0bfdf32f
GB
3217 /* current_thread and event_child must stay in sync. */
3218 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3219
6bf5e0ba
PA
3220 event_child->status_pending_p = 0;
3221 w = event_child->status_pending;
3222 }
3223
c03e6ccc 3224 if (step_over_finished)
582511be
PA
3225 {
3226 if (!non_stop)
3227 {
3228 /* If we were doing a step-over, all other threads but
3229 the stepping one had been paused in start_step_over,
3230 with their suspend counts incremented. We don't want
3231 to do a full unstop/unpause, because we're in
3232 all-stop mode (so we want threads stopped), but we
3233 still need to unsuspend the other threads, to
3234 decrement their `suspended' count back. */
3235 unsuspend_all_lwps (event_child);
3236 }
3237 else
3238 {
3239 /* If we just finished a step-over, then all threads had
3240 been momentarily paused. In all-stop, that's fine,
3241 we want threads stopped by now anyway. In non-stop,
3242 we need to re-resume threads that GDB wanted to be
3243 running. */
3244 unstop_all_lwps (1, event_child);
3245 }
3246 }
c03e6ccc 3247
fa593d66 3248 /* Stabilize threads (move out of jump pads). */
582511be
PA
3249 if (!non_stop)
3250 stabilize_threads ();
6bf5e0ba
PA
3251 }
3252 else
3253 {
3254 /* If we just finished a step-over, then all threads had been
3255 momentarily paused. In all-stop, that's fine, we want
3256 threads stopped by now anyway. In non-stop, we need to
3257 re-resume threads that GDB wanted to be running. */
3258 if (step_over_finished)
7984d532 3259 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3260 }
3261
00db26fa 3262 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3263 {
00db26fa
PA
3264 /* If the reported event is an exit, fork, vfork or exec, let
3265 GDB know. */
3266 *ourstatus = event_child->waitstatus;
de0d863e
DB
3267 /* Clear the event lwp's waitstatus since we handled it already. */
3268 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3269 }
3270 else
3271 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3272
582511be 3273 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3274 it was a software breakpoint, and the client doesn't know we can
3275 adjust the breakpoint ourselves. */
3276 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3277 && !swbreak_feature)
582511be
PA
3278 {
3279 int decr_pc = the_low_target.decr_pc_after_break;
3280
3281 if (decr_pc != 0)
3282 {
3283 struct regcache *regcache
3284 = get_thread_regcache (current_thread, 1);
3285 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3286 }
3287 }
3288
0bfdf32f 3289 if (current_thread->last_resume_kind == resume_stop
8336d594 3290 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3291 {
3292 /* A thread that has been requested to stop by GDB with vCont;t,
3293 and it stopped cleanly, so report as SIG0. The use of
3294 SIGSTOP is an implementation detail. */
a493e3e2 3295 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3296 }
0bfdf32f 3297 else if (current_thread->last_resume_kind == resume_stop
8336d594 3298 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3299 {
3300 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3301 but, it stopped for other reasons. */
2ea28649 3302 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3303 }
de0d863e 3304 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3305 {
2ea28649 3306 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3307 }
3308
d50171e4
PA
3309 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3310
bd99dc85 3311 if (debug_threads)
87ce2a04
DE
3312 {
3313 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3314 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3315 ourstatus->kind, ourstatus->value.sig);
3316 debug_exit ();
3317 }
bd99dc85 3318
0bfdf32f 3319 return ptid_of (current_thread);
bd99dc85
PA
3320}
3321
3322/* Get rid of any pending event in the pipe. */
3323static void
3324async_file_flush (void)
3325{
3326 int ret;
3327 char buf;
3328
3329 do
3330 ret = read (linux_event_pipe[0], &buf, 1);
3331 while (ret >= 0 || (ret == -1 && errno == EINTR));
3332}
3333
3334/* Put something in the pipe, so the event loop wakes up. */
3335static void
3336async_file_mark (void)
3337{
3338 int ret;
3339
3340 async_file_flush ();
3341
3342 do
3343 ret = write (linux_event_pipe[1], "+", 1);
3344 while (ret == 0 || (ret == -1 && errno == EINTR));
3345
3346 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3347 be awakened anyway. */
3348}
3349
95954743
PA
3350static ptid_t
3351linux_wait (ptid_t ptid,
3352 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3353{
95954743 3354 ptid_t event_ptid;
bd99dc85 3355
bd99dc85
PA
3356 /* Flush the async file first. */
3357 if (target_is_async_p ())
3358 async_file_flush ();
3359
582511be
PA
3360 do
3361 {
3362 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3363 }
3364 while ((target_options & TARGET_WNOHANG) == 0
3365 && ptid_equal (event_ptid, null_ptid)
3366 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3367
3368 /* If at least one stop was reported, there may be more. A single
3369 SIGCHLD can signal more than one child stop. */
3370 if (target_is_async_p ()
3371 && (target_options & TARGET_WNOHANG) != 0
95954743 3372 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3373 async_file_mark ();
3374
3375 return event_ptid;
da6d8c04
DJ
3376}
3377
/* Deliver signal SIGNO to the LWP with id LWPID.  Returns the result
   of the underlying tkill/kill call (0 on success, -1 with errno set
   on failure).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* NPTL threads must be signalled with tkill.  If tkill fails with
     ENOSYS, we are not using nptl threads and should fall back to
     kill; remember the failure so we only probe once.  */

#ifdef __NR_tkill
  {
    static int tkill_unavailable;

    if (!tkill_unavailable)
      {
	int res;

	errno = 0;
	res = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return res;
	tkill_unavailable = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3405
964e4306
PA
/* Stop LWP by queueing it a SIGSTOP.  Exported entry point for code
   outside this file.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3411
0d62e5e8 3412static void
02fc4de7 3413send_sigstop (struct lwp_info *lwp)
0d62e5e8 3414{
bd99dc85 3415 int pid;
0d62e5e8 3416
d86d4aaf 3417 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3418
0d62e5e8
DJ
3419 /* If we already have a pending stop signal for this process, don't
3420 send another. */
54a0b537 3421 if (lwp->stop_expected)
0d62e5e8 3422 {
ae13219e 3423 if (debug_threads)
87ce2a04 3424 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3425
0d62e5e8
DJ
3426 return;
3427 }
3428
3429 if (debug_threads)
87ce2a04 3430 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3431
d50171e4 3432 lwp->stop_expected = 1;
bd99dc85 3433 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3434}
3435
7984d532
PA
3436static int
3437send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3438{
d86d4aaf
DE
3439 struct thread_info *thread = (struct thread_info *) entry;
3440 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3441
7984d532
PA
3442 /* Ignore EXCEPT. */
3443 if (lwp == except)
3444 return 0;
3445
02fc4de7 3446 if (lwp->stopped)
7984d532 3447 return 0;
02fc4de7
PA
3448
3449 send_sigstop (lwp);
7984d532
PA
3450 return 0;
3451}
3452
3453/* Increment the suspend count of an LWP, and stop it, if not stopped
3454 yet. */
3455static int
3456suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3457 void *except)
3458{
d86d4aaf
DE
3459 struct thread_info *thread = (struct thread_info *) entry;
3460 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3461
3462 /* Ignore EXCEPT. */
3463 if (lwp == except)
3464 return 0;
3465
3466 lwp->suspended++;
3467
3468 return send_sigstop_callback (entry, except);
02fc4de7
PA
3469}
3470
95954743
PA
3471static void
3472mark_lwp_dead (struct lwp_info *lwp, int wstat)
3473{
95954743
PA
3474 /* Store the exit status for later. */
3475 lwp->status_pending_p = 1;
3476 lwp->status_pending = wstat;
3477
00db26fa
PA
3478 /* Store in waitstatus as well, as there's nothing else to process
3479 for this event. */
3480 if (WIFEXITED (wstat))
3481 {
3482 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3483 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3484 }
3485 else if (WIFSIGNALED (wstat))
3486 {
3487 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3488 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3489 }
3490
95954743
PA
3491 /* Prevent trying to stop it. */
3492 lwp->stopped = 1;
3493
3494 /* No further stops are expected from a dead lwp. */
3495 lwp->stop_expected = 0;
3496}
3497
00db26fa
PA
3498/* Return true if LWP has exited already, and has a pending exit event
3499 to report to GDB. */
3500
3501static int
3502lwp_is_marked_dead (struct lwp_info *lwp)
3503{
3504 return (lwp->status_pending_p
3505 && (WIFEXITED (lwp->status_pending)
3506 || WIFSIGNALED (lwp->status_pending)));
3507}
3508
fa96cb38
PA
3509/* Wait for all children to stop for the SIGSTOPs we just queued. */
3510
0d62e5e8 3511static void
fa96cb38 3512wait_for_sigstop (void)
0d62e5e8 3513{
0bfdf32f 3514 struct thread_info *saved_thread;
95954743 3515 ptid_t saved_tid;
fa96cb38
PA
3516 int wstat;
3517 int ret;
0d62e5e8 3518
0bfdf32f
GB
3519 saved_thread = current_thread;
3520 if (saved_thread != NULL)
3521 saved_tid = saved_thread->entry.id;
bd99dc85 3522 else
95954743 3523 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3524
d50171e4 3525 if (debug_threads)
fa96cb38 3526 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3527
fa96cb38
PA
3528 /* Passing NULL_PTID as filter indicates we want all events to be
3529 left pending. Eventually this returns when there are no
3530 unwaited-for children left. */
3531 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3532 &wstat, __WALL);
3533 gdb_assert (ret == -1);
0d62e5e8 3534
0bfdf32f
GB
3535 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3536 current_thread = saved_thread;
0d62e5e8
DJ
3537 else
3538 {
3539 if (debug_threads)
87ce2a04 3540 debug_printf ("Previously current thread died.\n");
0d62e5e8 3541
bd99dc85
PA
3542 if (non_stop)
3543 {
3544 /* We can't change the current inferior behind GDB's back,
3545 otherwise, a subsequent command may apply to the wrong
3546 process. */
0bfdf32f 3547 current_thread = NULL;
bd99dc85
PA
3548 }
3549 else
3550 {
3551 /* Set a valid thread as current. */
0bfdf32f 3552 set_desired_thread (0);
bd99dc85 3553 }
0d62e5e8
DJ
3554 }
3555}
3556
fa593d66
PA
3557/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3558 move it out, because we need to report the stop event to GDB. For
3559 example, if the user puts a breakpoint in the jump pad, it's
3560 because she wants to debug it. */
3561
3562static int
3563stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3564{
d86d4aaf
DE
3565 struct thread_info *thread = (struct thread_info *) entry;
3566 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3567
3568 gdb_assert (lwp->suspended == 0);
3569 gdb_assert (lwp->stopped);
3570
3571 /* Allow debugging the jump pad, gdb_collect, etc.. */
3572 return (supports_fast_tracepoints ()
58b4daa5 3573 && agent_loaded_p ()
fa593d66 3574 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3575 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3576 || thread->last_resume_kind == resume_step)
3577 && linux_fast_tracepoint_collecting (lwp, NULL));
3578}
3579
3580static void
3581move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3582{
d86d4aaf
DE
3583 struct thread_info *thread = (struct thread_info *) entry;
3584 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3585 int *wstat;
3586
3587 gdb_assert (lwp->suspended == 0);
3588 gdb_assert (lwp->stopped);
3589
3590 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3591
3592 /* Allow debugging the jump pad, gdb_collect, etc. */
3593 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3594 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3595 && thread->last_resume_kind != resume_step
3596 && maybe_move_out_of_jump_pad (lwp, wstat))
3597 {
3598 if (debug_threads)
87ce2a04 3599 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3600 lwpid_of (thread));
fa593d66
PA
3601
3602 if (wstat)
3603 {
3604 lwp->status_pending_p = 0;
3605 enqueue_one_deferred_signal (lwp, wstat);
3606
3607 if (debug_threads)
87ce2a04
DE
3608 debug_printf ("Signal %d for LWP %ld deferred "
3609 "(in jump pad)\n",
d86d4aaf 3610 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3611 }
3612
3613 linux_resume_one_lwp (lwp, 0, 0, NULL);
3614 }
3615 else
3616 lwp->suspended++;
3617}
3618
3619static int
3620lwp_running (struct inferior_list_entry *entry, void *data)
3621{
d86d4aaf
DE
3622 struct thread_info *thread = (struct thread_info *) entry;
3623 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3624
00db26fa 3625 if (lwp_is_marked_dead (lwp))
fa593d66
PA
3626 return 0;
3627 if (lwp->stopped)
3628 return 0;
3629 return 1;
3630}
3631
7984d532
PA
3632/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3633 If SUSPEND, then also increase the suspend count of every LWP,
3634 except EXCEPT. */
3635
0d62e5e8 3636static void
7984d532 3637stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3638{
bde24c0a
PA
3639 /* Should not be called recursively. */
3640 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3641
87ce2a04
DE
3642 if (debug_threads)
3643 {
3644 debug_enter ();
3645 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3646 suspend ? "stop-and-suspend" : "stop",
3647 except != NULL
d86d4aaf 3648 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3649 : "none");
3650 }
3651
bde24c0a
PA
3652 stopping_threads = (suspend
3653 ? STOPPING_AND_SUSPENDING_THREADS
3654 : STOPPING_THREADS);
7984d532
PA
3655
3656 if (suspend)
d86d4aaf 3657 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3658 else
d86d4aaf 3659 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3660 wait_for_sigstop ();
bde24c0a 3661 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3662
3663 if (debug_threads)
3664 {
3665 debug_printf ("stop_all_lwps done, setting stopping_threads "
3666 "back to !stopping\n");
3667 debug_exit ();
3668 }
0d62e5e8
DJ
3669}
3670
23f238d3
PA
3671/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3672 SIGNAL is nonzero, give it that signal. */
da6d8c04 3673
ce3a066d 3674static void
23f238d3
PA
3675linux_resume_one_lwp_throw (struct lwp_info *lwp,
3676 int step, int signal, siginfo_t *info)
da6d8c04 3677{
d86d4aaf 3678 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3679 struct thread_info *saved_thread;
fa593d66 3680 int fast_tp_collecting;
c06cbd92
YQ
3681 struct process_info *proc = get_thread_process (thread);
3682
3683 /* Note that target description may not be initialised
3684 (proc->tdesc == NULL) at this point because the program hasn't
3685 stopped at the first instruction yet. It means GDBserver skips
3686 the extra traps from the wrapper program (see option --wrapper).
3687 Code in this function that requires register access should be
3688 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 3689
54a0b537 3690 if (lwp->stopped == 0)
0d62e5e8
DJ
3691 return;
3692
fa593d66
PA
3693 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3694
3695 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3696
219f2f23
PA
3697 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3698 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 3699 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
3700 {
3701 /* Collecting 'while-stepping' actions doesn't make sense
3702 anymore. */
d86d4aaf 3703 release_while_stepping_state_list (thread);
219f2f23
PA
3704 }
3705
0d62e5e8
DJ
3706 /* If we have pending signals or status, and a new signal, enqueue the
3707 signal. Also enqueue the signal if we are waiting to reinsert a
3708 breakpoint; it will be picked up again below. */
3709 if (signal != 0
fa593d66
PA
3710 && (lwp->status_pending_p
3711 || lwp->pending_signals != NULL
3712 || lwp->bp_reinsert != 0
3713 || fast_tp_collecting))
0d62e5e8
DJ
3714 {
3715 struct pending_signals *p_sig;
bca929d3 3716 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3717 p_sig->prev = lwp->pending_signals;
0d62e5e8 3718 p_sig->signal = signal;
32ca6d61
DJ
3719 if (info == NULL)
3720 memset (&p_sig->info, 0, sizeof (siginfo_t));
3721 else
3722 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3723 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3724 }
3725
d50171e4
PA
3726 if (lwp->status_pending_p)
3727 {
3728 if (debug_threads)
87ce2a04
DE
3729 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3730 " has pending status\n",
d86d4aaf 3731 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3732 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3733 return;
3734 }
0d62e5e8 3735
0bfdf32f
GB
3736 saved_thread = current_thread;
3737 current_thread = thread;
0d62e5e8
DJ
3738
3739 if (debug_threads)
87ce2a04 3740 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3741 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3742 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3743
3744 /* This bit needs some thinking about. If we get a signal that
3745 we must report while a single-step reinsert is still pending,
3746 we often end up resuming the thread. It might be better to
3747 (ew) allow a stack of pending events; then we could be sure that
3748 the reinsert happened right away and not lose any signals.
3749
3750 Making this stack would also shrink the window in which breakpoints are
54a0b537 3751 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3752 complete correctness, so it won't solve that problem. It may be
3753 worthwhile just to solve this one, however. */
54a0b537 3754 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3755 {
3756 if (debug_threads)
87ce2a04
DE
3757 debug_printf (" pending reinsert at 0x%s\n",
3758 paddress (lwp->bp_reinsert));
d50171e4 3759
85e00e85 3760 if (can_hardware_single_step ())
d50171e4 3761 {
fa593d66
PA
3762 if (fast_tp_collecting == 0)
3763 {
3764 if (step == 0)
3765 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3766 if (lwp->suspended)
3767 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3768 lwp->suspended);
3769 }
d50171e4
PA
3770
3771 step = 1;
3772 }
0d62e5e8
DJ
3773
3774 /* Postpone any pending signal. It was enqueued above. */
3775 signal = 0;
3776 }
3777
fa593d66
PA
3778 if (fast_tp_collecting == 1)
3779 {
3780 if (debug_threads)
87ce2a04
DE
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3783 lwpid_of (thread));
fa593d66
PA
3784
3785 /* Postpone any pending signal. It was enqueued above. */
3786 signal = 0;
3787 }
3788 else if (fast_tp_collecting == 2)
3789 {
3790 if (debug_threads)
87ce2a04
DE
3791 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3792 " single-stepping\n",
d86d4aaf 3793 lwpid_of (thread));
fa593d66
PA
3794
3795 if (can_hardware_single_step ())
3796 step = 1;
3797 else
38e08fca
GB
3798 {
3799 internal_error (__FILE__, __LINE__,
3800 "moving out of jump pad single-stepping"
3801 " not implemented on this target");
3802 }
fa593d66
PA
3803
3804 /* Postpone any pending signal. It was enqueued above. */
3805 signal = 0;
3806 }
3807
219f2f23
PA
3808 /* If we have while-stepping actions in this thread set it stepping.
3809 If we have a signal to deliver, it may or may not be set to
3810 SIG_IGN, we don't know. Assume so, and allow collecting
3811 while-stepping into a signal handler. A possible smart thing to
3812 do would be to set an internal breakpoint at the signal return
3813 address, continue, and carry on catching this while-stepping
3814 action only when that breakpoint is hit. A future
3815 enhancement. */
d86d4aaf 3816 if (thread->while_stepping != NULL
219f2f23
PA
3817 && can_hardware_single_step ())
3818 {
3819 if (debug_threads)
87ce2a04 3820 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3821 lwpid_of (thread));
219f2f23
PA
3822 step = 1;
3823 }
3824
c06cbd92 3825 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 3826 {
0bfdf32f 3827 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
3828
3829 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3830
3831 if (debug_threads)
3832 {
3833 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3834 (long) lwp->stop_pc);
3835 }
0d62e5e8
DJ
3836 }
3837
fa593d66
PA
3838 /* If we have pending signals, consume one unless we are trying to
3839 reinsert a breakpoint or we're trying to finish a fast tracepoint
3840 collect. */
3841 if (lwp->pending_signals != NULL
3842 && lwp->bp_reinsert == 0
3843 && fast_tp_collecting == 0)
0d62e5e8
DJ
3844 {
3845 struct pending_signals **p_sig;
3846
54a0b537 3847 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3848 while ((*p_sig)->prev != NULL)
3849 p_sig = &(*p_sig)->prev;
3850
3851 signal = (*p_sig)->signal;
32ca6d61 3852 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3853 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3854 &(*p_sig)->info);
32ca6d61 3855
0d62e5e8
DJ
3856 free (*p_sig);
3857 *p_sig = NULL;
3858 }
3859
aa5ca48f
DE
3860 if (the_low_target.prepare_to_resume != NULL)
3861 the_low_target.prepare_to_resume (lwp);
3862
d86d4aaf 3863 regcache_invalidate_thread (thread);
da6d8c04 3864 errno = 0;
54a0b537 3865 lwp->stepping = step;
d86d4aaf 3866 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3867 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3868 /* Coerce to a uintptr_t first to avoid potential gcc warning
3869 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3870 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 3871
0bfdf32f 3872 current_thread = saved_thread;
da6d8c04 3873 if (errno)
23f238d3
PA
3874 perror_with_name ("resuming thread");
3875
3876 /* Successfully resumed. Clear state that no longer makes sense,
3877 and mark the LWP as running. Must not do this before resuming
3878 otherwise if that fails other code will be confused. E.g., we'd
3879 later try to stop the LWP and hang forever waiting for a stop
3880 status. Note that we must not throw after this is cleared,
3881 otherwise handle_zombie_lwp_error would get confused. */
3882 lwp->stopped = 0;
3883 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3884}
3885
3886/* Called when we try to resume a stopped LWP and that errors out. If
3887 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3888 or about to become), discard the error, clear any pending status
3889 the LWP may have, and return true (we'll collect the exit status
3890 soon enough). Otherwise, return false. */
3891
3892static int
3893check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3894{
3895 struct thread_info *thread = get_lwp_thread (lp);
3896
3897 /* If we get an error after resuming the LWP successfully, we'd
3898 confuse !T state for the LWP being gone. */
3899 gdb_assert (lp->stopped);
3900
3901 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3902 because even if ptrace failed with ESRCH, the tracee may be "not
3903 yet fully dead", but already refusing ptrace requests. In that
3904 case the tracee has 'R (Running)' state for a little bit
3905 (observed in Linux 3.18). See also the note on ESRCH in the
3906 ptrace(2) man page. Instead, check whether the LWP has any state
3907 other than ptrace-stopped. */
3908
3909 /* Don't assume anything if /proc/PID/status can't be read. */
3910 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3911 {
23f238d3
PA
3912 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3913 lp->status_pending_p = 0;
3914 return 1;
3915 }
3916 return 0;
3917}
3918
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* If the resume failed because the LWP is no longer
	 ptrace-stopped (zombie or exiting), swallow the error -- its
	 exit status will be collected soon.  Any other error is
	 re-raised to the caller.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
3937
/* A resume-request array bundled with its length, passed as the ARG
   of the find_inferior callback linux_set_resume_request.  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests from GDB.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 3943
ebcf782c
DE
3944/* This function is called once per thread via find_inferior.
3945 ARG is a pointer to a thread_resume_array struct.
3946 We look up the thread specified by ENTRY in ARG, and mark the thread
3947 with a pointer to the appropriate resume request.
5544ad89
DJ
3948
3949 This algorithm is O(threads * resume elements), but resume elements
3950 is small (and will remain small at least until GDB supports thread
3951 suspension). */
ebcf782c 3952
2bd7c093
PA
3953static int
3954linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3955{
d86d4aaf
DE
3956 struct thread_info *thread = (struct thread_info *) entry;
3957 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3958 int ndx;
2bd7c093 3959 struct thread_resume_array *r;
64386c31 3960
2bd7c093 3961 r = arg;
64386c31 3962
2bd7c093 3963 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3964 {
3965 ptid_t ptid = r->resume[ndx].thread;
3966 if (ptid_equal (ptid, minus_one_ptid)
3967 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
3968 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3969 of PID'. */
d86d4aaf 3970 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
3971 && (ptid_is_pid (ptid)
3972 || ptid_get_lwp (ptid) == -1)))
95954743 3973 {
d50171e4 3974 if (r->resume[ndx].kind == resume_stop
8336d594 3975 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3976 {
3977 if (debug_threads)
87ce2a04
DE
3978 debug_printf ("already %s LWP %ld at GDB's request\n",
3979 (thread->last_status.kind
3980 == TARGET_WAITKIND_STOPPED)
3981 ? "stopped"
3982 : "stopping",
d86d4aaf 3983 lwpid_of (thread));
d50171e4
PA
3984
3985 continue;
3986 }
3987
95954743 3988 lwp->resume = &r->resume[ndx];
8336d594 3989 thread->last_resume_kind = lwp->resume->kind;
fa593d66 3990
c2d6af84
PA
3991 lwp->step_range_start = lwp->resume->step_range_start;
3992 lwp->step_range_end = lwp->resume->step_range_end;
3993
fa593d66
PA
3994 /* If we had a deferred signal to report, dequeue one now.
3995 This can happen if LWP gets more than one signal while
3996 trying to get out of a jump pad. */
3997 if (lwp->stopped
3998 && !lwp->status_pending_p
3999 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4000 {
4001 lwp->status_pending_p = 1;
4002
4003 if (debug_threads)
87ce2a04
DE
4004 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4005 "leaving status pending.\n",
d86d4aaf
DE
4006 WSTOPSIG (lwp->status_pending),
4007 lwpid_of (thread));
fa593d66
PA
4008 }
4009
95954743
PA
4010 return 0;
4011 }
4012 }
2bd7c093
PA
4013
4014 /* No resume action for this thread. */
4015 lwp->resume = NULL;
64386c31 4016
2bd7c093 4017 return 0;
5544ad89
DJ
4018}
4019
20ad9378
DE
4020/* find_inferior callback for linux_resume.
4021 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4022
bd99dc85
PA
4023static int
4024resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4025{
d86d4aaf
DE
4026 struct thread_info *thread = (struct thread_info *) entry;
4027 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4028
bd99dc85
PA
4029 /* LWPs which will not be resumed are not interesting, because
4030 we might not wait for them next time through linux_wait. */
2bd7c093 4031 if (lwp->resume == NULL)
bd99dc85 4032 return 0;
64386c31 4033
582511be 4034 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4035 * (int *) flag_p = 1;
4036
4037 return 0;
4038}
4039
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return 0;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* GDB asked this thread to stay stopped; no step-over needed.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
      /* NOTE(review): there is no early return here, so even with
	 NEED_STEP_OVER clear we still fall through to the breakpoint
	 check at PC below -- confirm this is intentional.  */
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* Breakpoint queries below operate on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
4172
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Freeze every other LWP while the breakpoint is out.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint bookkeeping below operates on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to re-insert, then lift the breakpoint (and any
     fast tracepoint jump) out of the inferior's text.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4241
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was in fact pending, 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	delete_reinsert_breakpoints ();

      /* Stop requiring the next event to come from this LWP.  */
      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}
4275
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG is the leave_all_stopped flag computed by linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No request recorded for this thread (see linux_set_resume_request).  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4397
/* Top-level resume entry point: apply the N resume requests in
   RESUME_INFO to all threads, deferring actual resumption when a
   pending status can be reported instead, or when a step-over is
   needed first.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Record, per-thread, which resume request (if any) applies.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
4461
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT (if non-NULL) is the LWP the caller wants left alone.  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* Already reported stopped to the client; leave it that way.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  /* Preserve the stepping flag from the client's last request.  */
  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
4543
4544static int
4545unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4546{
d86d4aaf
DE
4547 struct thread_info *thread = (struct thread_info *) entry;
4548 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4549
4550 if (lwp == except)
4551 return 0;
4552
4553 lwp->suspended--;
4554 gdb_assert (lwp->suspended >= 0);
4555
4556 return proceed_one_lwp (entry, except);
d50171e4
PA
4557}
4558
4559/* When we finish a step-over, set threads running again. If there's
4560 another thread that may need a step-over, now's the time to start
4561 it. Eventually, we'll move all threads past their breakpoints. */
4562
4563static void
4564proceed_all_lwps (void)
4565{
d86d4aaf 4566 struct thread_info *need_step_over;
d50171e4
PA
4567
4568 /* If there is a thread which would otherwise be resumed, which is
4569 stopped at a breakpoint that needs stepping over, then don't
4570 resume any threads - have it step over the breakpoint with all
4571 other threads stopped, then resume all threads again. */
4572
4573 if (supports_breakpoints ())
4574 {
4575 need_step_over
d86d4aaf
DE
4576 = (struct thread_info *) find_inferior (&all_threads,
4577 need_step_over_p, NULL);
d50171e4
PA
4578
4579 if (need_step_over != NULL)
4580 {
4581 if (debug_threads)
87ce2a04
DE
4582 debug_printf ("proceed_all_lwps: found "
4583 "thread %ld needing a step-over\n",
4584 lwpid_of (need_step_over));
d50171e4 4585
d86d4aaf 4586 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4587 return;
4588 }
4589 }
5544ad89 4590
d50171e4 4591 if (debug_threads)
87ce2a04 4592 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4593
d86d4aaf 4594 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4595}
4596
4597/* Stopped LWPs that the client wanted to be running, that don't have
4598 pending statuses, are set to run again, except for EXCEPT, if not
4599 NULL. This undoes a stop_all_lwps call. */
4600
4601static void
7984d532 4602unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4603{
5544ad89
DJ
4604 if (debug_threads)
4605 {
87ce2a04 4606 debug_enter ();
d50171e4 4607 if (except)
87ce2a04 4608 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4609 lwpid_of (get_lwp_thread (except)));
5544ad89 4610 else
87ce2a04 4611 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4612 }
4613
7984d532 4614 if (unsuspend)
d86d4aaf 4615 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4616 else
d86d4aaf 4617 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4618
4619 if (debug_threads)
4620 {
4621 debug_printf ("unstop_all_lwps done\n");
4622 debug_exit ();
4623 }
0d62e5e8
DJ
4624}
4625
58caa3dc
DJ
4626
4627#ifdef HAVE_LINUX_REGSETS
4628
1faeff08
MR
4629#define use_linux_regsets 1
4630
030031ee
PA
4631/* Returns true if REGSET has been disabled. */
4632
4633static int
4634regset_disabled (struct regsets_info *info, struct regset_info *regset)
4635{
4636 return (info->disabled_regsets != NULL
4637 && info->disabled_regsets[regset - info->regsets]);
4638}
4639
4640/* Disable REGSET. */
4641
4642static void
4643disable_regset (struct regsets_info *info, struct regset_info *regset)
4644{
4645 int dr_offset;
4646
4647 dr_offset = regset - info->regsets;
4648 if (info->disabled_regsets == NULL)
4649 info->disabled_regsets = xcalloc (1, info->num_regsets);
4650 info->disabled_regsets[dr_offset] = 1;
4651}
4652
/* Fetch registers from the inferior via every usable regset in
   REGSETS_INFO, storing them into REGCACHE.  Returns 0 if the
   general-purpose regset was among those successfully read, 1
   otherwise (the caller can then fall back to another method).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip placeholder entries and regsets previously rejected by
	 the kernel.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* A nonzero NT_* note type means this is a PTRACE_GETREGSET
	 style request, which takes an iovec; otherwise the raw
	 buffer is passed directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the 3rd and 4th argument roles.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Decode the kernel buffer into the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4724
/* Write registers from REGCACHE back to the inferior via every usable
   regset in REGSETS_INFO.  Each regset is first read from the kernel,
   overlaid with the cached values, then written back, so kernel-side
   fields gdbserver does not track are preserved.  Returns 0 if the
   general-purpose regset was among those written (or the process is
   already gone), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip placeholders, disabled regsets, and read-only regsets
	 (no fill_function).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* PTRACE_GETREGSET/SETREGSET style: pass an iovec.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4812
1faeff08 4813#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4814
1faeff08 4815#define use_linux_regsets 0
3aee8918
PA
4816#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4817#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4818
58caa3dc 4819#endif
1faeff08
MR
4820
4821/* Return 1 if register REGNO is supported by one of the regset ptrace
4822 calls or 0 if it has to be transferred individually. */
4823
4824static int
3aee8918 4825linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4826{
4827 unsigned char mask = 1 << (regno % 8);
4828 size_t index = regno / 8;
4829
4830 return (use_linux_regsets
3aee8918
PA
4831 && (regs_info->regset_bitmap == NULL
4832 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4833}
4834
58caa3dc 4835#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4836
4837int
3aee8918 4838register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4839{
4840 int addr;
4841
3aee8918 4842 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4843 error ("Invalid register number %d.", regnum);
4844
3aee8918 4845 addr = usrregs->regmap[regnum];
1faeff08
MR
4846
4847 return addr;
4848}
4849
/* Fetch one register (REGNO) from the inferior's USER area via
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Silently returns for registers out of range, registers the low
   target says cannot be fetched, or registers with no USER-area
   slot (regmap entry of -1).  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PEEKUSER returns the data in its return value, so errno must
	 be cleared before the call to detect failure.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it has a hook;
     otherwise hand them straight to the regcache.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4893
/* Store one register (REGNO) from REGCACHE into the inferior's USER
   area via PTRACE_POKEUSER, one word at a time.  Silently returns for
   registers out of range, registers the low target says cannot be
   stored, or registers with no USER-area slot (regmap entry of -1).  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; zero the buffer so trailing pad bytes are deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target massage the bytes if it has a hook; otherwise
     take them straight from the regcache.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  /* Only complain for registers the low target claims are
	     storable.  */
	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4948
4949/* Fetch all registers, or just one, from the child process.
4950 If REGNO is -1, do this for all registers, skipping any that are
4951 assumed to have been retrieved by regsets_fetch_inferior_registers,
4952 unless ALL is non-zero.
4953 Otherwise, REGNO specifies which register (so we can save time). */
4954static void
3aee8918
PA
4955usr_fetch_inferior_registers (const struct regs_info *regs_info,
4956 struct regcache *regcache, int regno, int all)
1faeff08 4957{
3aee8918
PA
4958 struct usrregs_info *usr = regs_info->usrregs;
4959
1faeff08
MR
4960 if (regno == -1)
4961 {
3aee8918
PA
4962 for (regno = 0; regno < usr->num_regs; regno++)
4963 if (all || !linux_register_in_regsets (regs_info, regno))
4964 fetch_register (usr, regcache, regno);
1faeff08
MR
4965 }
4966 else
3aee8918 4967 fetch_register (usr, regcache, regno);
1faeff08
MR
4968}
4969
4970/* Store our register values back into the inferior.
4971 If REGNO is -1, do this for all registers, skipping any that are
4972 assumed to have been saved by regsets_store_inferior_registers,
4973 unless ALL is non-zero.
4974 Otherwise, REGNO specifies which register (so we can save time). */
4975static void
3aee8918
PA
4976usr_store_inferior_registers (const struct regs_info *regs_info,
4977 struct regcache *regcache, int regno, int all)
1faeff08 4978{
3aee8918
PA
4979 struct usrregs_info *usr = regs_info->usrregs;
4980
1faeff08
MR
4981 if (regno == -1)
4982 {
3aee8918
PA
4983 for (regno = 0; regno < usr->num_regs; regno++)
4984 if (all || !linux_register_in_regsets (regs_info, regno))
4985 store_register (usr, regcache, regno);
1faeff08
MR
4986 }
4987 else
3aee8918 4988 store_register (usr, regcache, regno);
1faeff08
MR
4989}
4990
4991#else /* !HAVE_LINUX_USRREGS */
4992
3aee8918
PA
4993#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4994#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4995
58caa3dc 4996#endif
1faeff08
MR
4997
4998
/* Fetch register REGNO (all registers if REGNO is -1) into REGCACHE,
   trying in order: the low target's per-register hook, the regset
   method, and finally the USER-area method for anything the regsets
   did not cover.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* First give the low target's per-register hook a pass over
	 every USER-area register, if both exist.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL becomes nonzero if the regsets missed the general
	 registers, telling the USER-area fallback to fetch
	 everything rather than just non-regset registers.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A nonzero return from the hook means it fully handled the
	 register.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5031
/* Store register REGNO (all registers if REGNO is -1) from REGCACHE
   into the inferior, preferring the regset method and falling back to
   the USER-area method for anything the regsets did not cover.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL becomes nonzero if the regsets missed the general
	 registers, telling the USER-area fallback to store
	 everything rather than just non-regset registers.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5056
da6d8c04 5057
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno of the failing ptrace call otherwise.  Tries
   /proc/PID/mem first (one syscall), then falls back to word-by-word
   PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  PEEKTEXT returns data in its return
     value, so errno must be cleared first and checked after each
     call.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer: even on failure, hand
     the caller whatever whole words were read before the error.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      /* Discard the leading pad bytes introduced by rounding ADDR
	 down to a word boundary.  */
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5145
93ae6fdc
PA
5146/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5147 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5148 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5149
ce3a066d 5150static int
f450004a 5151linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5152{
5153 register int i;
5154 /* Round starting address down to longword boundary. */
5155 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5156 /* Round ending address up; get number of longwords that makes. */
5157 register int count
493e2a69
MS
5158 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5159 / sizeof (PTRACE_XFER_TYPE);
5160
da6d8c04 5161 /* Allocate buffer of that many longwords. */
493e2a69
MS
5162 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5163 alloca (count * sizeof (PTRACE_XFER_TYPE));
5164
0bfdf32f 5165 int pid = lwpid_of (current_thread);
da6d8c04 5166
f0ae6fc3
PA
5167 if (len == 0)
5168 {
5169 /* Zero length write always succeeds. */
5170 return 0;
5171 }
5172
0d62e5e8
DJ
5173 if (debug_threads)
5174 {
58d6951d
DJ
5175 /* Dump up to four bytes. */
5176 unsigned int val = * (unsigned int *) myaddr;
5177 if (len == 1)
5178 val = val & 0xff;
5179 else if (len == 2)
5180 val = val & 0xffff;
5181 else if (len == 3)
5182 val = val & 0xffffff;
de0d863e
DB
5183 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5184 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
0d62e5e8
DJ
5185 }
5186
da6d8c04
DJ
5187 /* Fill start and end extra bytes of buffer with existing memory data. */
5188
93ae6fdc 5189 errno = 0;
14ce3065
DE
5190 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5191 about coercing an 8 byte integer to a 4 byte pointer. */
5192 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5193 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5194 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5195 if (errno)
5196 return errno;
da6d8c04
DJ
5197
5198 if (count > 1)
5199 {
93ae6fdc 5200 errno = 0;
da6d8c04 5201 buffer[count - 1]
95954743 5202 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5203 /* Coerce to a uintptr_t first to avoid potential gcc warning
5204 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5205 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5206 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5207 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5208 if (errno)
5209 return errno;
da6d8c04
DJ
5210 }
5211
93ae6fdc 5212 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5213
493e2a69
MS
5214 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5215 myaddr, len);
da6d8c04
DJ
5216
5217 /* Write the entire buffer. */
5218
5219 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5220 {
5221 errno = 0;
14ce3065
DE
5222 ptrace (PTRACE_POKETEXT, pid,
5223 /* Coerce to a uintptr_t first to avoid potential gcc warning
5224 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5225 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5226 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5227 if (errno)
5228 return errno;
5229 }
5230
5231 return 0;
5232}
2f2893d9
DJ
5233
/* Initialize libthread_db support for the current process, if not
   already initialized.  Compiles to a no-op unless USE_THREAD_DB.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already set up for this process -- nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5249
/* Interrupt the inferior, as if the user had typed ^C.  */
static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  (kill with a negative
     pid signals the whole process group.)  */
  kill (-signal_pid, SIGINT);
}
5259
aa691b87
RM
5260/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5261 to debugger memory starting at MYADDR. */
5262
5263static int
f450004a 5264linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5265{
5266 char filename[PATH_MAX];
5267 int fd, n;
0bfdf32f 5268 int pid = lwpid_of (current_thread);
aa691b87 5269
6cebaf6e 5270 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5271
5272 fd = open (filename, O_RDONLY);
5273 if (fd < 0)
5274 return -1;
5275
5276 if (offset != (CORE_ADDR) 0
5277 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5278 n = -1;
5279 else
5280 n = read (fd, myaddr, len);
5281
5282 close (fd);
5283
5284 return n;
5285}
5286
d993e290
PA
5287/* These breakpoint and watchpoint related wrapper functions simply
5288 pass on the function call if the target has registered a
5289 corresponding function. */
e013ee27
OF
5290
5291static int
802e8e6d
PA
5292linux_supports_z_point_type (char z_type)
5293{
5294 return (the_low_target.supports_z_point_type != NULL
5295 && the_low_target.supports_z_point_type (z_type));
5296}
5297
5298static int
5299linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5300 int size, struct raw_breakpoint *bp)
e013ee27 5301{
c8f4bfdd
YQ
5302 if (type == raw_bkpt_type_sw)
5303 return insert_memory_breakpoint (bp);
5304 else if (the_low_target.insert_point != NULL)
802e8e6d 5305 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5306 else
5307 /* Unsupported (see target.h). */
5308 return 1;
5309}
5310
5311static int
802e8e6d
PA
5312linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5313 int size, struct raw_breakpoint *bp)
e013ee27 5314{
c8f4bfdd
YQ
5315 if (type == raw_bkpt_type_sw)
5316 return remove_memory_breakpoint (bp);
5317 else if (the_low_target.remove_point != NULL)
802e8e6d 5318 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5319 else
5320 /* Unsupported (see target.h). */
5321 return 1;
5322}
5323
3e572f71
PA
5324/* Implement the to_stopped_by_sw_breakpoint target_ops
5325 method. */
5326
5327static int
5328linux_stopped_by_sw_breakpoint (void)
5329{
5330 struct lwp_info *lwp = get_thread_lwp (current_thread);
5331
5332 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5333}
5334
/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  True exactly when USE_SIGTRAP_SIGINFO is nonzero --
   presumably when stop reasons can be derived from SIGTRAP siginfo
   (see linux_stopped_by_sw_breakpoint).  */

static int
linux_supports_stopped_by_sw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5343
5344/* Implement the to_stopped_by_hw_breakpoint target_ops
5345 method. */
5346
5347static int
5348linux_stopped_by_hw_breakpoint (void)
5349{
5350 struct lwp_info *lwp = get_thread_lwp (current_thread);
5351
5352 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5353}
5354
/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  True exactly when USE_SIGTRAP_SIGINFO is nonzero --
   presumably when stop reasons can be derived from SIGTRAP siginfo
   (see linux_stopped_by_hw_breakpoint).  */

static int
linux_supports_stopped_by_hw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5363
45614f15
YQ
/* Implement the supports_conditional_breakpoints target_ops
   method.  */

static int
linux_supports_conditional_breakpoints (void)
{
  /* GDBserver needs to step over the breakpoint if the condition is
     false.  GDBserver software single step is too simple, so disable
     conditional breakpoints if the target doesn't have hardware single
     step.  */
  return can_hardware_single_step ();
}
5376
e013ee27
OF
5377static int
5378linux_stopped_by_watchpoint (void)
5379{
0bfdf32f 5380 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5381
15c66dd6 5382 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5383}
5384
5385static CORE_ADDR
5386linux_stopped_data_address (void)
5387{
0bfdf32f 5388 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5389
5390 return lwp->stopped_data_address;
e013ee27
OF
5391}
5392
db0dfaa0
LM
5393#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5394 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5395 && defined(PT_TEXT_END_ADDR)
5396
5397/* This is only used for targets that define PT_TEXT_ADDR,
5398 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5399 the target has different ways of acquiring this information, like
5400 loadmaps. */
52fb6437
NS
5401
5402/* Under uClinux, programs are loaded at non-zero offsets, which we need
5403 to tell gdb about. */
5404
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text and data base addresses
   from the USER area and returns them via TEXT_P and DATA_P.  Returns
   1 on success, 0 if any of the ptrace reads failed.  */
static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PEEKUSER returns data in its return value, so clear errno once
     and check it after all three reads.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
5438#endif
5439
07e059b5
VP
5440static int
5441linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5442 unsigned char *readbuf, unsigned const char *writebuf,
5443 CORE_ADDR offset, int len)
07e059b5 5444{
d26e3629 5445 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5446}
5447
d0722149
DE
5448/* Convert a native/host siginfo object, into/from the siginfo in the
5449 layout of the inferiors' architecture. */
5450
5451static void
a5362b9a 5452siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5453{
5454 int done = 0;
5455
5456 if (the_low_target.siginfo_fixup != NULL)
5457 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5458
5459 /* If there was no callback, or the callback didn't do anything,
5460 then just do a straight memcpy. */
5461 if (!done)
5462 {
5463 if (direction == 1)
a5362b9a 5464 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5465 else
a5362b9a 5466 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5467 }
5468}
5469
4aa995e1
PA
/* Transfer (read or write) up to LEN bytes of the current thread's
   siginfo starting at OFFSET.  Exactly one of READBUF/WRITEBUF is
   non-NULL and selects the direction.  Returns the number of bytes
   transferred, or -1 on error (no current thread, offset out of
   range, or a failing ptrace call).  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer so it stays within the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      /* Writes are read-modify-write: patch the inferior-layout copy,
	 convert, then push the whole object back.  */
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5518
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: this handler may interrupt library calls that
     rely on it.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  /* In async mode, poke the event pipe so the event loop wakes up
     and calls linux_wait.  */
  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
5545
/* The Linux target supports non-stop mode unconditionally.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5551
/* Enable or disable async (non-stop) event reporting, creating or
   tearing down the SIGCHLD event pipe and its event-loop handler.
   Returns the previous async state.  SIGCHLD is blocked around the
   transition so the handler never sees a half-set-up pipe.  */
static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Pipe creation failed: stay in sync mode, restore the
		 signal mask, and report the (unchanged) state.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Both ends non-blocking so the signal handler's write and
	     the event loop's read can never stall.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5606
/* Enter or leave non-stop mode by toggling async mode.  Returns 0 on
   success, -1 if the async state did not end up as requested (e.g.
   the event pipe could not be created).  */

static int
linux_start_non_stop (int nonstop)
{
  int want_async = (nonstop != 0);

  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != want_async)
    return -1;

  return 0;
}
5618
cf8fd78b
PA
/* Multi-process debugging is always available on Linux.  */

static int
linux_supports_multi_process (void)
{
  const int supported = 1;
  return supported;
}
5624
89245bc0
DB
5625/* Check if fork events are supported. */
5626
static int
linux_supports_fork_events (void)
{
  /* Fork event reporting is tied to PTRACE_O_TRACEFORK support.  */
  int tracefork = linux_supports_tracefork ();

  return tracefork;
}
5632
5633/* Check if vfork events are supported. */
5634
static int
linux_supports_vfork_events (void)
{
  /* vfork event reporting is tied to the same ptrace capability as
     fork events.  */
  int tracefork = linux_supports_tracefork ();

  return tracefork;
}
5640
de0d863e
DB
5641/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5642 options for the specified lwp. */
5643
static int
reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
				   void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (!lwp->stopped)
    {
      /* Stop the lwp so we can modify its ptrace options.  The flag
	 makes the eventual stop handler apply the new options.  */
      lwp->must_set_ptrace_flags = 1;
      linux_stop_lwp (lwp);
    }
  else
    {
      /* Already stopped; go ahead and set the ptrace options.  The
	 option set depends on whether the process was attached or
	 spawned by us.  */
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (thread), options);
      lwp->must_set_ptrace_flags = 0;
    }

  /* Return 0 so find_inferior keeps iterating over all LWPs.  */
  return 0;
}
5669
5670/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5671 ptrace flags for all inferiors. This is in case the new GDB connection
5672 doesn't support the same set of events that the previous one did. */
5673
5674static void
5675linux_handle_new_gdb_connection (void)
5676{
5677 pid_t pid;
5678
5679 /* Request that all the lwps reset their ptrace options. */
5680 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
5681}
5682
03583c20
UW
static int
linux_supports_disable_randomization (void)
{
  /* ASLR can only be disabled when personality(2) support was
     detected at configure time.  */
#ifdef HAVE_PERSONALITY
  const int have_personality = 1;
#else
  const int have_personality = 0;
#endif
  return have_personality;
}
efcbbd14 5692
d1feda86
YQ
/* The in-process agent is always supported by this target.  */

static int
linux_supports_agent (void)
{
  const int supported = 1;
  return supported;
}
5698
c2d6af84
PA
5699static int
5700linux_supports_range_stepping (void)
5701{
5702 if (*the_low_target.supports_range_stepping == NULL)
5703 return 0;
5704
5705 return (*the_low_target.supports_range_stepping) ();
5706}
5707
efcbbd14
UW
5708/* Enumerate spufs IDs for process PID. */
5709static int
5710spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5711{
5712 int pos = 0;
5713 int written = 0;
5714 char path[128];
5715 DIR *dir;
5716 struct dirent *entry;
5717
5718 sprintf (path, "/proc/%ld/fd", pid);
5719 dir = opendir (path);
5720 if (!dir)
5721 return -1;
5722
5723 rewinddir (dir);
5724 while ((entry = readdir (dir)) != NULL)
5725 {
5726 struct stat st;
5727 struct statfs stfs;
5728 int fd;
5729
5730 fd = atoi (entry->d_name);
5731 if (!fd)
5732 continue;
5733
5734 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5735 if (stat (path, &st) != 0)
5736 continue;
5737 if (!S_ISDIR (st.st_mode))
5738 continue;
5739
5740 if (statfs (path, &stfs) != 0)
5741 continue;
5742 if (stfs.f_type != SPUFS_MAGIC)
5743 continue;
5744
5745 if (pos >= offset && pos + 4 <= offset + len)
5746 {
5747 *(unsigned int *)(buf + pos - offset) = fd;
5748 written += 4;
5749 }
5750 pos += 4;
5751 }
5752
5753 closedir (dir);
5754 return written;
5755}
5756
5757/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5758 object type, using the /proc file system. */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_thread);
  char buf[128];
  int fd = 0;
  int ret = 0;

  /* At least one direction must be requested.  */
  if (!writebuf && !readbuf)
    return -1;

  /* An empty annex means "list the spufs context IDs"; only reads
     make sense for that.  */
  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  /* Otherwise ANNEX is "<fd>/<file>" relative to /proc/PID/fd.  */
  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  /* Returns the byte count transferred, or the read/write error.  */
  return ret;
}
5800
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a no-MMU load map.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PT_GETDSBT
# define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

/* Read LEN bytes at OFFSET of the load map for either the executable
   (ANNEX "exec") or the dynamic loader (ANNEX "interp") via ptrace.
   Returns the number of bytes copied into MYADDR, or -1 on error.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel stores a pointer to its loadmap through DATA.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): if CORE_ADDR is an unsigned type, "offset < 0" can
     never be true and only the upper-bound check is effective --
     confirm against the CORE_ADDR typedef.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5878
1570b33e
L
5879static void
5880linux_process_qsupported (const char *query)
5881{
5882 if (the_low_target.process_qsupported != NULL)
5883 the_low_target.process_qsupported (query);
5884}
5885
219f2f23
PA
5886static int
5887linux_supports_tracepoints (void)
5888{
5889 if (*the_low_target.supports_tracepoints == NULL)
5890 return 0;
5891
5892 return (*the_low_target.supports_tracepoints) ();
5893}
5894
5895static CORE_ADDR
5896linux_read_pc (struct regcache *regcache)
5897{
5898 if (the_low_target.get_pc == NULL)
5899 return 0;
5900
5901 return (*the_low_target.get_pc) (regcache);
5902}
5903
5904static void
5905linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5906{
5907 gdb_assert (the_low_target.set_pc != NULL);
5908
5909 (*the_low_target.set_pc) (regcache, pc);
5910}
5911
8336d594
PA
5912static int
5913linux_thread_stopped (struct thread_info *thread)
5914{
5915 return get_thread_lwp (thread)->stopped;
5916}
5917
5918/* This exposes stop-all-threads functionality to other modules. */
5919
5920static void
7984d532 5921linux_pause_all (int freeze)
8336d594 5922{
7984d532
PA
5923 stop_all_lwps (freeze, NULL);
5924}
5925
5926/* This exposes unstop-all-threads functionality to other gdbserver
5927 modules. */
5928
5929static void
5930linux_unpause_all (int unfreeze)
5931{
5932 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5933}
5934
90d74c30
PA
5935static int
5936linux_prepare_to_access_memory (void)
5937{
5938 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5939 running LWP. */
5940 if (non_stop)
5941 linux_pause_all (1);
5942 return 0;
5943}
5944
5945static void
0146f85b 5946linux_done_accessing_memory (void)
90d74c30
PA
5947{
5948 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5949 running LWP. */
5950 if (non_stop)
5951 linux_unpause_all (1);
5952}
5953
fa593d66
PA
/* Thin pass-through to the arch backend's jump-pad builder for fast
   tracepoints; all parameters and the return value are forwarded
   unchanged.  */
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
5975
6a271cae
PA
5976static struct emit_ops *
5977linux_emit_ops (void)
5978{
5979 if (the_low_target.emit_ops != NULL)
5980 return (*the_low_target.emit_ops) ();
5981 else
5982 return NULL;
5983}
5984
405f8e94
SS
5985static int
5986linux_get_min_fast_tracepoint_insn_len (void)
5987{
5988 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5989}
5990
2268b414
JK
5991/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5992
static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  /* Auxv entries are Elf64_auxv_t or Elf32_auxv_t depending on the
     inferior's word size.  */
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Scan entries until both AT_PHDR and AT_PHNUM have been seen.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6056
6057/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6058
static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);	/* Basic sanity check.  */
  /* Stack allocation is bounded by the assert above.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  /* Find PT_PHDR first; its runtime address vs recorded p_vaddr gives
     the load bias.  */
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Now locate PT_DYNAMIC and relocate its recorded address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6130
6131/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6132 can be 0 if the inferior does not yet have the library list initialized.
6133 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6134 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6135
static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the PT_DYNAMIC entries one Elf*_Dyn at a time, until the
     DT_NULL terminator or a read failure.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  /* MIPS: DT_MIPS_RLD_MAP holds the address of a pointer to
	     the map; prefer it, as the comment above says.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  /* Remember the first DT_DEBUG value but keep scanning, in
	     case DT_MIPS_RLD_MAP appears later.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6210
6211/* Read one pointer from MEMADDR in the inferior. */
6212
/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  /* Returns 0 on success, otherwise the linux_read_memory error.  */
  return ret;
}
6241
/* Offsets of the interesting members of r_debug and struct link_map,
   parameterized on the inferior's word size (see the 32- and 64-bit
   instances in linux_qxfer_libraries_svr4).  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6265
fb723180 6266/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
6267
/* Build the qXfer:libraries-svr4:read reply by walking the inferior's
   r_debug/link_map chain.  ANNEX may carry "start=" and "prev="
   addresses to resume a partial walk; OFFSET/LEN select a window of
   the generated XML.  Returns the number of bytes copied to READBUF,
   -1 on error, or -2 for an (unsupported) write request.  */
static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit offsets from the ELF class of the inferior's
     executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=hexaddr;" pairs out of ANNEX, recognizing "start" and
     "prev"; unknown keys are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link_map chain; stop on the first entry whose fields
     cannot all be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Consistency check: the entry's back pointer must match where
	 we came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Clip the requested window to the generated document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
6478
9accd112
MM
6479#ifdef HAVE_LINUX_BTRACE
6480
969c39fb 6481/* See to_enable_btrace target method. */
9accd112
MM
6482
static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid, conf);

  /* If the common code did not fill in ptr_bits, derive it from the
     size of the thread's register 0.  */
  if (tinfo != NULL && tinfo->ptr_bits == 0)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}
6500
969c39fb 6501/* See to_disable_btrace target method. */
9accd112 6502
969c39fb
MM
6503static int
6504linux_low_disable_btrace (struct btrace_target_info *tinfo)
6505{
6506 enum btrace_error err;
6507
6508 err = linux_disable_btrace (tinfo);
6509 return (err == BTRACE_ERR_NONE ? 0 : -1);
6510}
6511
b20a6524
MM
6512/* Encode an Intel(R) Processor Trace configuration. */
6513
6514static void
6515linux_low_encode_pt_config (struct buffer *buffer,
6516 const struct btrace_data_pt_config *config)
6517{
6518 buffer_grow_str (buffer, "<pt-config>\n");
6519
6520 switch (config->cpu.vendor)
6521 {
6522 case CV_INTEL:
6523 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6524 "model=\"%u\" stepping=\"%u\"/>\n",
6525 config->cpu.family, config->cpu.model,
6526 config->cpu.stepping);
6527 break;
6528
6529 default:
6530 break;
6531 }
6532
6533 buffer_grow_str (buffer, "</pt-config>\n");
6534}
6535
6536/* Encode a raw buffer. */
6537
6538static void
6539linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6540 unsigned int size)
6541{
6542 if (size == 0)
6543 return;
6544
6545 /* We use hex encoding - see common/rsp-low.h. */
6546 buffer_grow_str (buffer, "<raw>\n");
6547
6548 while (size-- > 0)
6549 {
6550 char elem[2];
6551
6552 elem[0] = tohex ((*data >> 4) & 0xf);
6553 elem[1] = tohex (*data++ & 0xf);
6554
6555 buffer_grow (buffer, elem, 2);
6556 }
6557
6558 buffer_grow_str (buffer, "</raw>\n");
6559}
6560
969c39fb
MM
6561/* See to_read_btrace target method. */
6562
static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Error strings are sent to GDB in the reply buffer.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      goto err;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      goto err;

    case BTRACE_FORMAT_BTS:
      /* BTS: one <block> element per recorded branch block.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* PT: the CPU configuration plus the raw trace bytes.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      goto err;
    }

  /* Success and failure paths both release the trace data.  */
  btrace_data_fini (&btrace);
  return 0;

err:
  btrace_data_fini (&btrace);
  return -1;
}
f4abbc16
MM
6630
6631/* See to_btrace_conf target method. */
6632
/* See to_btrace_conf target method.  */

static int
linux_low_btrace_conf (const struct btrace_target_info *tinfo,
		       struct buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  /* Nothing to describe.  */
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  /* Always succeeds.  */
  return 0;
}
9accd112
MM
6667#endif /* HAVE_LINUX_BTRACE */
6668
7b669087
GB
6669/* See nat/linux-nat.h. */
6670
6671ptid_t
6672current_lwp_ptid (void)
6673{
6674 return ptid_of (current_thread);
6675}
6676
ce3a066d
DJ
/* The Linux target operations vector.  NOTE(review): this is a
   positional initializer, so entry order must match the member order
   of struct target_ops -- verify against target.h before changing.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_arch_setup,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_conditional_breakpoints,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	 /* Unimplemented hook; NULL in the original initializer.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
};
6771
0d62e5e8
DJ
/* One-time signal setup for gdbserver itself: ignore the signal that
   glibc's thread library uses internally for cancellation, so that a
   stray delivery does not kill gdbserver.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  /* __SIGRTMIN is the first real-time signal reserved by glibc;
     __SIGRTMIN + 1 is (historically) the LinuxThreads cancel signal.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
6781
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Fill in INFO->num_regsets by counting the entries of INFO's regset
   table.  The table is terminated by a sentinel entry whose size is
   negative.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
6792
da6d8c04
DJ
6793void
6794initialize_low (void)
6795{
bd99dc85
PA
6796 struct sigaction sigchld_action;
6797 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 6798 set_target_ops (&linux_target_ops);
611cb4a5
DJ
6799 set_breakpoint_data (the_low_target.breakpoint,
6800 the_low_target.breakpoint_len);
0d62e5e8 6801 linux_init_signals ();
aa7c7447 6802 linux_ptrace_init_warnings ();
bd99dc85
PA
6803
6804 sigchld_action.sa_handler = sigchld_handler;
6805 sigemptyset (&sigchld_action.sa_mask);
6806 sigchld_action.sa_flags = SA_RESTART;
6807 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
6808
6809 initialize_low_arch ();
89245bc0
DB
6810
6811 linux_check_ptrace_features ();
da6d8c04 6812}