]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Revert ALIGN changes
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
957f3f49
DE
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
14d2069a 56#include "nat/linux-namespaces.h"
efcbbd14
UW
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
da6d8c04 61
03583c20
UW
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
fd462a61
DJ
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
ec8ebe72
DE
73#ifndef W_STOPCODE
74#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75#endif
76
1a981360
PA
77/* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79#ifndef __SIGRTMIN
80#define __SIGRTMIN 32
81#endif
82
db0dfaa0
LM
83/* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86#if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89#if defined(__mcoldfire__)
90/* These are still undefined in 3.10 kernels. */
91#define PT_TEXT_ADDR 49*4
92#define PT_DATA_ADDR 50*4
93#define PT_TEXT_END_ADDR 51*4
94/* BFIN already defines these since at least 2.6.32 kernels. */
95#elif defined(BFIN)
96#define PT_TEXT_ADDR 220
97#define PT_TEXT_END_ADDR 224
98#define PT_DATA_ADDR 228
99/* These are still undefined in 3.10 kernels. */
100#elif defined(__TMS320C6X__)
101#define PT_TEXT_ADDR (0x10000*4)
102#define PT_DATA_ADDR (0x10004*4)
103#define PT_TEXT_END_ADDR (0x10008*4)
104#endif
105#endif
106
9accd112 107#ifdef HAVE_LINUX_BTRACE
125f8a3d 108# include "nat/linux-btrace.h"
734b0e4b 109# include "btrace-common.h"
9accd112
MM
110#endif
111
8365dcf5
TJB
112#ifndef HAVE_ELF32_AUXV_T
113/* Copied from glibc's elf.h. */
114typedef struct
115{
116 uint32_t a_type; /* Entry type */
117 union
118 {
119 uint32_t a_val; /* Integer value */
120 /* We use to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
123 } a_un;
124} Elf32_auxv_t;
125#endif
126
127#ifndef HAVE_ELF64_AUXV_T
128/* Copied from glibc's elf.h. */
129typedef struct
130{
131 uint64_t a_type; /* Entry type */
132 union
133 {
134 uint64_t a_val; /* Integer value */
135 /* We use to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
138 } a_un;
139} Elf64_auxv_t;
140#endif
141
ded48a5e
YQ
142/* Does the current host support PTRACE_GETREGSET? */
143int have_ptrace_getregset = -1;
144
cff068da
GB
145/* LWP accessors. */
146
147/* See nat/linux-nat.h. */
148
149ptid_t
150ptid_of_lwp (struct lwp_info *lwp)
151{
152 return ptid_of (get_lwp_thread (lwp));
153}
154
155/* See nat/linux-nat.h. */
156
4b134ca1
GB
157void
158lwp_set_arch_private_info (struct lwp_info *lwp,
159 struct arch_lwp_info *info)
160{
161 lwp->arch_private = info;
162}
163
164/* See nat/linux-nat.h. */
165
166struct arch_lwp_info *
167lwp_arch_private_info (struct lwp_info *lwp)
168{
169 return lwp->arch_private;
170}
171
172/* See nat/linux-nat.h. */
173
cff068da
GB
174int
175lwp_is_stopped (struct lwp_info *lwp)
176{
177 return lwp->stopped;
178}
179
180/* See nat/linux-nat.h. */
181
182enum target_stop_reason
183lwp_stop_reason (struct lwp_info *lwp)
184{
185 return lwp->stop_reason;
186}
187
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the chain of not-yet-claimed stopped children.  */
struct simple_pid_list *stopped_pids;
204
205/* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
207
208static void
209add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
210{
211 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
212
213 new_pid->pid = pid;
214 new_pid->status = status;
215 new_pid->next = *listp;
216 *listp = new_pid;
217}
218
219static int
220pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
221{
222 struct simple_pid_list **p;
223
224 for (p = listp; *p != NULL; p = &(*p)->next)
225 if ((*p)->pid == pid)
226 {
227 struct simple_pid_list *next = (*p)->next;
228
229 *statusp = (*p)->status;
230 xfree (*p);
231 *p = next;
232 return 1;
233 }
234 return 0;
235}
24a09b5f 236
bde24c0a
PA
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
251
252/* FIXME make into a target method? */
24a09b5f 253int using_threads = 1;
24a09b5f 254
fa593d66
PA
255/* True if we're presently stabilizing threads (moving them out of
256 jump pads). */
257static int stabilizing_threads;
258
2acc282a 259static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 260 int step, int signal, siginfo_t *info);
2bd7c093 261static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
262static void stop_all_lwps (int suspend, struct lwp_info *except);
263static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
264static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
265 int *wstat, int options);
95954743 266static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 267static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 268static int linux_stopped_by_watchpoint (void);
95954743 269static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 270static void proceed_all_lwps (void);
d50171e4 271static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
272static int kill_lwp (unsigned long lwpid, int signo);
273
582511be
PA
274/* When the event-loop is doing a step-over, this points at the thread
275 being stepped. */
276ptid_t step_over_bkpt;
277
d50171e4
PA
278/* True if the low target can hardware single-step. Such targets
279 don't need a BREAKPOINT_REINSERT_ADDR callback. */
280
281static int
282can_hardware_single_step (void)
283{
284 return (the_low_target.breakpoint_reinsert_addr == NULL);
285}
286
287/* True if the low target supports memory breakpoints. If so, we'll
288 have a GET_PC implementation. */
289
290static int
291supports_breakpoints (void)
292{
293 return (the_low_target.get_pc != NULL);
294}
0d62e5e8 295
fa593d66
PA
296/* Returns true if this target can support fast tracepoints. This
297 does not mean that the in-process agent has been loaded in the
298 inferior. */
299
300static int
301supports_fast_tracepoints (void)
302{
303 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
304}
305
c2d6af84
PA
306/* True if LWP is stopped in its stepping range. */
307
308static int
309lwp_in_step_range (struct lwp_info *lwp)
310{
311 CORE_ADDR pc = lwp->stop_pc;
312
313 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
314}
315
0d62e5e8
DJ
316struct pending_signals
317{
318 int signal;
32ca6d61 319 siginfo_t info;
0d62e5e8
DJ
320 struct pending_signals *prev;
321};
611cb4a5 322
bd99dc85
PA
323/* The read/write ends of the pipe registered as waitable file in the
324 event loop. */
325static int linux_event_pipe[2] = { -1, -1 };
326
327/* True if we're currently in async mode. */
328#define target_is_async_p() (linux_event_pipe[0] != -1)
329
02fc4de7 330static void send_sigstop (struct lwp_info *lwp);
fa96cb38 331static void wait_for_sigstop (void);
bd99dc85 332
d0722149
DE
/* Return 1 if HEADER is a 64-bit ELF header, 0 if it is a 32-bit ELF
   header, and -1 if the ELF magic bytes don't match at all.  On a
   magic match *MACHINE is set to the header's e_machine value;
   otherwise it is set to EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF file at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
350
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      /* Too short to hold an ELF header: treat as not 64-bit ELF.  */
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
374
be07f1a2
PA
375/* Accepts an integer PID; Returns true if the executable PID is
376 running is a 64-bit ELF file.. */
377
378int
214d508e 379linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 380{
d8d2a3ee 381 char file[PATH_MAX];
be07f1a2
PA
382
383 sprintf (file, "/proc/%d/exe", pid);
214d508e 384 return elf_64_file_p (file, machine);
be07f1a2
PA
385}
386
bd99dc85
PA
387static void
388delete_lwp (struct lwp_info *lwp)
389{
fa96cb38
PA
390 struct thread_info *thr = get_lwp_thread (lwp);
391
392 if (debug_threads)
393 debug_printf ("deleting %ld\n", lwpid_of (thr));
394
395 remove_thread (thr);
aa5ca48f 396 free (lwp->arch_private);
bd99dc85
PA
397 free (lwp);
398}
399
95954743
PA
400/* Add a process to the common process list, and set its private
401 data. */
402
403static struct process_info *
404linux_add_process (int pid, int attached)
405{
406 struct process_info *proc;
407
95954743 408 proc = add_process (pid, attached);
fe978cb0 409 proc->priv = xcalloc (1, sizeof (*proc->priv));
95954743 410
aa5ca48f 411 if (the_low_target.new_process != NULL)
fe978cb0 412 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 413
95954743
PA
414 return proc;
415}
416
582511be
PA
417static CORE_ADDR get_pc (struct lwp_info *lwp);
418
bd99dc85 419/* Handle a GNU/Linux extended wait response. If we see a clone
de0d863e
DB
420 event, we need to add the new LWP to our list (and return 0 so as
421 not to report the trap to higher layers). */
0d62e5e8 422
de0d863e
DB
423static int
424handle_extended_wait (struct lwp_info *event_lwp, int wstat)
24a09b5f 425{
89a5711c 426 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 427 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 428 struct lwp_info *new_lwp;
24a09b5f 429
c269dbdb
DB
430 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
431 || (event == PTRACE_EVENT_CLONE))
24a09b5f 432 {
95954743 433 ptid_t ptid;
24a09b5f 434 unsigned long new_pid;
05044653 435 int ret, status;
24a09b5f 436
de0d863e 437 /* Get the pid of the new lwp. */
d86d4aaf 438 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 439 &new_pid);
24a09b5f
DJ
440
441 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 442 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
443 {
444 /* The new child has a pending SIGSTOP. We can't affect it until it
445 hits the SIGSTOP, but we're already attached. */
446
97438e3f 447 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
448
449 if (ret == -1)
450 perror_with_name ("waiting for new child");
451 else if (ret != new_pid)
452 warning ("wait returned unexpected PID %d", ret);
da5898ce 453 else if (!WIFSTOPPED (status))
24a09b5f
DJ
454 warning ("wait returned unexpected status 0x%x", status);
455 }
456
c269dbdb 457 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
458 {
459 struct process_info *parent_proc;
460 struct process_info *child_proc;
461 struct lwp_info *child_lwp;
bfacd19d 462 struct thread_info *child_thr;
de0d863e
DB
463 struct target_desc *tdesc;
464
465 ptid = ptid_build (new_pid, new_pid, 0);
466
467 if (debug_threads)
468 {
469 debug_printf ("HEW: Got fork event from LWP %ld, "
470 "new child is %d\n",
471 ptid_get_lwp (ptid_of (event_thr)),
472 ptid_get_pid (ptid));
473 }
474
475 /* Add the new process to the tables and clone the breakpoint
476 lists of the parent. We need to do this even if the new process
477 will be detached, since we will need the process object and the
478 breakpoints to remove any breakpoints from memory when we
479 detach, and the client side will access registers. */
480 child_proc = linux_add_process (new_pid, 0);
481 gdb_assert (child_proc != NULL);
482 child_lwp = add_lwp (ptid);
483 gdb_assert (child_lwp != NULL);
484 child_lwp->stopped = 1;
bfacd19d
DB
485 child_lwp->must_set_ptrace_flags = 1;
486 child_lwp->status_pending_p = 0;
487 child_thr = get_lwp_thread (child_lwp);
488 child_thr->last_resume_kind = resume_stop;
998d452a
PA
489 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
490
de0d863e
DB
491 parent_proc = get_thread_process (event_thr);
492 child_proc->attached = parent_proc->attached;
493 clone_all_breakpoints (&child_proc->breakpoints,
494 &child_proc->raw_breakpoints,
495 parent_proc->breakpoints);
496
497 tdesc = xmalloc (sizeof (struct target_desc));
498 copy_target_description (tdesc, parent_proc->tdesc);
499 child_proc->tdesc = tdesc;
de0d863e 500
3a8a0396
DB
501 /* Clone arch-specific process data. */
502 if (the_low_target.new_fork != NULL)
503 the_low_target.new_fork (parent_proc, child_proc);
504
de0d863e 505 /* Save fork info in the parent thread. */
c269dbdb
DB
506 if (event == PTRACE_EVENT_FORK)
507 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
508 else if (event == PTRACE_EVENT_VFORK)
509 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
510
de0d863e 511 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 512
de0d863e
DB
513 /* The status_pending field contains bits denoting the
514 extended event, so when the pending event is handled,
515 the handler will look at lwp->waitstatus. */
516 event_lwp->status_pending_p = 1;
517 event_lwp->status_pending = wstat;
518
519 /* Report the event. */
520 return 0;
521 }
522
fa96cb38
PA
523 if (debug_threads)
524 debug_printf ("HEW: Got clone event "
525 "from LWP %ld, new child is LWP %ld\n",
526 lwpid_of (event_thr), new_pid);
527
d86d4aaf 528 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
b3312d80 529 new_lwp = add_lwp (ptid);
24a09b5f 530
e27d73f6
DE
531 /* Either we're going to immediately resume the new thread
532 or leave it stopped. linux_resume_one_lwp is a nop if it
533 thinks the thread is currently running, so set this first
534 before calling linux_resume_one_lwp. */
535 new_lwp->stopped = 1;
536
bde24c0a
PA
537 /* If we're suspending all threads, leave this one suspended
538 too. */
539 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
540 new_lwp->suspended = 1;
541
da5898ce
DJ
542 /* Normally we will get the pending SIGSTOP. But in some cases
543 we might get another signal delivered to the group first.
f21cc1a2 544 If we do get another signal, be sure not to lose it. */
20ba1ce6 545 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 546 {
54a0b537 547 new_lwp->stop_expected = 1;
20ba1ce6
PA
548 new_lwp->status_pending_p = 1;
549 new_lwp->status_pending = status;
da5898ce 550 }
de0d863e
DB
551
552 /* Don't report the event. */
553 return 1;
24a09b5f 554 }
c269dbdb
DB
555 else if (event == PTRACE_EVENT_VFORK_DONE)
556 {
557 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
558
559 /* Report the event. */
560 return 0;
561 }
de0d863e
DB
562
563 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
564}
565
d50171e4
PA
566/* Return the PC as read from the regcache of LWP, without any
567 adjustment. */
568
569static CORE_ADDR
570get_pc (struct lwp_info *lwp)
571{
0bfdf32f 572 struct thread_info *saved_thread;
d50171e4
PA
573 struct regcache *regcache;
574 CORE_ADDR pc;
575
576 if (the_low_target.get_pc == NULL)
577 return 0;
578
0bfdf32f
GB
579 saved_thread = current_thread;
580 current_thread = get_lwp_thread (lwp);
d50171e4 581
0bfdf32f 582 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
583 pc = (*the_low_target.get_pc) (regcache);
584
585 if (debug_threads)
87ce2a04 586 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 587
0bfdf32f 588 current_thread = saved_thread;
d50171e4
PA
589 return pc;
590}
591
592/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
593 The SIGTRAP could mean several things.
594
595 On i386, where decr_pc_after_break is non-zero:
582511be
PA
596
597 If we were single-stepping this process using PTRACE_SINGLESTEP, we
598 will get only the one SIGTRAP. The value of $eip will be the next
599 instruction. If the instruction we stepped over was a breakpoint,
600 we need to decrement the PC.
601
0d62e5e8
DJ
602 If we continue the process using PTRACE_CONT, we will get a
603 SIGTRAP when we hit a breakpoint. The value of $eip will be
604 the instruction after the breakpoint (i.e. needs to be
605 decremented). If we report the SIGTRAP to GDB, we must also
582511be 606 report the undecremented PC. If the breakpoint is removed, we
0d62e5e8
DJ
607 must resume at the decremented PC.
608
582511be
PA
609 On a non-decr_pc_after_break machine with hardware or kernel
610 single-step:
611
612 If we either single-step a breakpoint instruction, or continue and
613 hit a breakpoint instruction, our PC will point at the breakpoint
0d62e5e8
DJ
614 instruction. */
615
582511be
PA
616static int
617check_stopped_by_breakpoint (struct lwp_info *lwp)
0d62e5e8 618{
582511be
PA
619 CORE_ADDR pc;
620 CORE_ADDR sw_breakpoint_pc;
621 struct thread_info *saved_thread;
3e572f71
PA
622#if USE_SIGTRAP_SIGINFO
623 siginfo_t siginfo;
624#endif
d50171e4
PA
625
626 if (the_low_target.get_pc == NULL)
627 return 0;
0d62e5e8 628
582511be
PA
629 pc = get_pc (lwp);
630 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
d50171e4 631
582511be
PA
632 /* breakpoint_at reads from the current thread. */
633 saved_thread = current_thread;
634 current_thread = get_lwp_thread (lwp);
47c0c975 635
3e572f71
PA
636#if USE_SIGTRAP_SIGINFO
637 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
638 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
639 {
640 if (siginfo.si_signo == SIGTRAP)
641 {
642 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
643 {
644 if (debug_threads)
645 {
646 struct thread_info *thr = get_lwp_thread (lwp);
647
2bf6fb9d 648 debug_printf ("CSBB: %s stopped by software breakpoint\n",
3e572f71
PA
649 target_pid_to_str (ptid_of (thr)));
650 }
651
652 /* Back up the PC if necessary. */
653 if (pc != sw_breakpoint_pc)
654 {
655 struct regcache *regcache
656 = get_thread_regcache (current_thread, 1);
657 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
658 }
659
660 lwp->stop_pc = sw_breakpoint_pc;
661 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
662 current_thread = saved_thread;
663 return 1;
664 }
665 else if (siginfo.si_code == TRAP_HWBKPT)
666 {
667 if (debug_threads)
668 {
669 struct thread_info *thr = get_lwp_thread (lwp);
670
2bf6fb9d
PA
671 debug_printf ("CSBB: %s stopped by hardware "
672 "breakpoint/watchpoint\n",
3e572f71
PA
673 target_pid_to_str (ptid_of (thr)));
674 }
675
676 lwp->stop_pc = pc;
677 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
678 current_thread = saved_thread;
679 return 1;
680 }
2bf6fb9d
PA
681 else if (siginfo.si_code == TRAP_TRACE)
682 {
683 if (debug_threads)
684 {
685 struct thread_info *thr = get_lwp_thread (lwp);
686
687 debug_printf ("CSBB: %s stopped by trace\n",
688 target_pid_to_str (ptid_of (thr)));
689 }
690 }
3e572f71
PA
691 }
692 }
693#else
582511be
PA
694 /* We may have just stepped a breakpoint instruction. E.g., in
695 non-stop mode, GDB first tells the thread A to step a range, and
696 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
697 case we need to report the breakpoint PC. */
698 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
582511be
PA
699 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
700 {
701 if (debug_threads)
702 {
703 struct thread_info *thr = get_lwp_thread (lwp);
704
705 debug_printf ("CSBB: %s stopped by software breakpoint\n",
706 target_pid_to_str (ptid_of (thr)));
707 }
708
709 /* Back up the PC if necessary. */
710 if (pc != sw_breakpoint_pc)
711 {
712 struct regcache *regcache
713 = get_thread_regcache (current_thread, 1);
714 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
715 }
716
717 lwp->stop_pc = sw_breakpoint_pc;
15c66dd6 718 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
582511be
PA
719 current_thread = saved_thread;
720 return 1;
721 }
722
723 if (hardware_breakpoint_inserted_here (pc))
724 {
725 if (debug_threads)
726 {
727 struct thread_info *thr = get_lwp_thread (lwp);
728
729 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
730 target_pid_to_str (ptid_of (thr)));
731 }
47c0c975 732
582511be 733 lwp->stop_pc = pc;
15c66dd6 734 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
582511be
PA
735 current_thread = saved_thread;
736 return 1;
737 }
3e572f71 738#endif
582511be
PA
739
740 current_thread = saved_thread;
741 return 0;
0d62e5e8 742}
ce3a066d 743
b3312d80 744static struct lwp_info *
95954743 745add_lwp (ptid_t ptid)
611cb4a5 746{
54a0b537 747 struct lwp_info *lwp;
0d62e5e8 748
54a0b537
PA
749 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
750 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 751
aa5ca48f 752 if (the_low_target.new_thread != NULL)
34c703da 753 the_low_target.new_thread (lwp);
aa5ca48f 754
f7667f0d 755 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 756
54a0b537 757 return lwp;
0d62e5e8 758}
611cb4a5 759
da6d8c04
DJ
760/* Start an inferior process and returns its pid.
761 ALLARGS is a vector of program-name and args. */
762
ce3a066d
DJ
763static int
764linux_create_inferior (char *program, char **allargs)
da6d8c04 765{
a6dbe5df 766 struct lwp_info *new_lwp;
da6d8c04 767 int pid;
95954743 768 ptid_t ptid;
8cc73a39
SDJ
769 struct cleanup *restore_personality
770 = maybe_disable_address_space_randomization (disable_randomization);
03583c20 771
42c81e2a 772#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
773 pid = vfork ();
774#else
da6d8c04 775 pid = fork ();
52fb6437 776#endif
da6d8c04
DJ
777 if (pid < 0)
778 perror_with_name ("fork");
779
780 if (pid == 0)
781 {
602e3198 782 close_most_fds ();
b8e1b30e 783 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da6d8c04 784
1a981360 785#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 786 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 787#endif
0d62e5e8 788
a9fa9f7d
DJ
789 setpgid (0, 0);
790
e0f9f062
DE
791 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
792 stdout to stderr so that inferior i/o doesn't corrupt the connection.
793 Also, redirect stdin to /dev/null. */
794 if (remote_connection_is_stdio ())
795 {
796 close (0);
797 open ("/dev/null", O_RDONLY);
798 dup2 (2, 1);
3e52c33d
JK
799 if (write (2, "stdin/stdout redirected\n",
800 sizeof ("stdin/stdout redirected\n") - 1) < 0)
8c29b58e
YQ
801 {
802 /* Errors ignored. */;
803 }
e0f9f062
DE
804 }
805
2b876972
DJ
806 execv (program, allargs);
807 if (errno == ENOENT)
808 execvp (program, allargs);
da6d8c04
DJ
809
810 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 811 strerror (errno));
da6d8c04
DJ
812 fflush (stderr);
813 _exit (0177);
814 }
815
8cc73a39 816 do_cleanups (restore_personality);
03583c20 817
55d7b841 818 linux_add_process (pid, 0);
95954743
PA
819
820 ptid = ptid_build (pid, pid, 0);
821 new_lwp = add_lwp (ptid);
a6dbe5df 822 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 823
a9fa9f7d 824 return pid;
da6d8c04
DJ
825}
826
c06cbd92
YQ
827/* Implement the arch_setup target_ops method. */
828
829static void
830linux_arch_setup (void)
831{
832 the_low_target.arch_setup ();
833}
834
8784d563
PA
835/* Attach to an inferior process. Returns 0 on success, ERRNO on
836 error. */
da6d8c04 837
7ae1a6a6
PA
838int
839linux_attach_lwp (ptid_t ptid)
da6d8c04 840{
54a0b537 841 struct lwp_info *new_lwp;
7ae1a6a6 842 int lwpid = ptid_get_lwp (ptid);
611cb4a5 843
b8e1b30e 844 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 845 != 0)
7ae1a6a6 846 return errno;
24a09b5f 847
b3312d80 848 new_lwp = add_lwp (ptid);
0d62e5e8 849
a6dbe5df
PA
850 /* We need to wait for SIGSTOP before being able to make the next
851 ptrace call on this LWP. */
852 new_lwp->must_set_ptrace_flags = 1;
853
644cebc9 854 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2
PA
855 {
856 if (debug_threads)
87ce2a04 857 debug_printf ("Attached to a stopped process\n");
c14d7ab2
PA
858
859 /* The process is definitely stopped. It is in a job control
860 stop, unless the kernel predates the TASK_STOPPED /
861 TASK_TRACED distinction, in which case it might be in a
862 ptrace stop. Make sure it is in a ptrace stop; from there we
863 can kill it, signal it, et cetera.
864
865 First make sure there is a pending SIGSTOP. Since we are
866 already attached, the process can not transition from stopped
867 to running without a PTRACE_CONT; so we know this signal will
868 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
869 probably already in the queue (unless this kernel is old
870 enough to use TASK_STOPPED for ptrace stops); but since
871 SIGSTOP is not an RT signal, it can only be queued once. */
872 kill_lwp (lwpid, SIGSTOP);
873
874 /* Finally, resume the stopped process. This will deliver the
875 SIGSTOP (or a higher priority signal, just like normal
876 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 877 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
878 }
879
0d62e5e8 880 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
881 brings it to a halt.
882
883 There are several cases to consider here:
884
885 1) gdbserver has already attached to the process and is being notified
1b3f6016 886 of a new thread that is being created.
d50171e4
PA
887 In this case we should ignore that SIGSTOP and resume the
888 process. This is handled below by setting stop_expected = 1,
8336d594 889 and the fact that add_thread sets last_resume_kind ==
d50171e4 890 resume_continue.
0e21c1ec
DE
891
892 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
893 to it via attach_inferior.
894 In this case we want the process thread to stop.
d50171e4
PA
895 This is handled by having linux_attach set last_resume_kind ==
896 resume_stop after we return.
e3deef73
LM
897
898 If the pid we are attaching to is also the tgid, we attach to and
899 stop all the existing threads. Otherwise, we attach to pid and
900 ignore any other threads in the same group as this pid.
0e21c1ec
DE
901
902 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
903 existing threads.
904 In this case we want the thread to stop.
905 FIXME: This case is currently not properly handled.
906 We should wait for the SIGSTOP but don't. Things work apparently
907 because enough time passes between when we ptrace (ATTACH) and when
908 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
909
910 On the other hand, if we are currently trying to stop all threads, we
911 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 912 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
913 end of the list, and so the new thread has not yet reached
914 wait_for_sigstop (but will). */
d50171e4 915 new_lwp->stop_expected = 1;
0d62e5e8 916
7ae1a6a6 917 return 0;
95954743
PA
918}
919
8784d563
PA
920/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
921 already attached. Returns true if a new LWP is found, false
922 otherwise. */
923
924static int
925attach_proc_task_lwp_callback (ptid_t ptid)
926{
927 /* Is this a new thread? */
928 if (find_thread_ptid (ptid) == NULL)
929 {
930 int lwpid = ptid_get_lwp (ptid);
931 int err;
932
933 if (debug_threads)
934 debug_printf ("Found new lwp %d\n", lwpid);
935
936 err = linux_attach_lwp (ptid);
937
938 /* Be quiet if we simply raced with the thread exiting. EPERM
939 is returned if the thread's task still exists, and is marked
940 as exited or zombie, as well as other conditions, so in that
941 case, confirm the status in /proc/PID/status. */
942 if (err == ESRCH
943 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
944 {
945 if (debug_threads)
946 {
947 debug_printf ("Cannot attach to lwp %d: "
948 "thread is gone (%d: %s)\n",
949 lwpid, err, strerror (err));
950 }
951 }
952 else if (err != 0)
953 {
954 warning (_("Cannot attach to lwp %d: %s"),
955 lwpid,
956 linux_ptrace_attach_fail_reason_string (ptid, err));
957 }
958
959 return 1;
960 }
961 return 0;
962}
963
e3deef73
LM
964/* Attach to PID. If PID is the tgid, attach to it and all
965 of its threads. */
966
c52daf70 967static int
a1928bad 968linux_attach (unsigned long pid)
0d62e5e8 969{
7ae1a6a6
PA
970 ptid_t ptid = ptid_build (pid, pid, 0);
971 int err;
972
e3deef73
LM
973 /* Attach to PID. We will check for other threads
974 soon. */
7ae1a6a6
PA
975 err = linux_attach_lwp (ptid);
976 if (err != 0)
977 error ("Cannot attach to process %ld: %s",
8784d563 978 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
7ae1a6a6 979
55d7b841 980 linux_add_process (pid, 1);
0d62e5e8 981
bd99dc85
PA
982 if (!non_stop)
983 {
8336d594
PA
984 struct thread_info *thread;
985
986 /* Don't ignore the initial SIGSTOP if we just attached to this
987 process. It will be collected by wait shortly. */
988 thread = find_thread_ptid (ptid_build (pid, pid, 0));
989 thread->last_resume_kind = resume_stop;
bd99dc85 990 }
0d62e5e8 991
8784d563
PA
992 /* We must attach to every LWP. If /proc is mounted, use that to
993 find them now. On the one hand, the inferior may be using raw
994 clone instead of using pthreads. On the other hand, even if it
995 is using pthreads, GDB may not be connected yet (thread_db needs
996 to do symbol lookups, through qSymbol). Also, thread_db walks
997 structures in the inferior's address space to find the list of
998 threads/LWPs, and those structures may well be corrupted. Note
999 that once thread_db is loaded, we'll still use it to list threads
1000 and associate pthread info with each LWP. */
1001 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
95954743
PA
1002 return 0;
1003}
1004
1005struct counter
1006{
1007 int pid;
1008 int count;
1009};
1010
1011static int
1012second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1013{
1014 struct counter *counter = args;
1015
1016 if (ptid_get_pid (entry->id) == counter->pid)
1017 {
1018 if (++counter->count > 1)
1019 return 1;
1020 }
d61ddec4 1021
da6d8c04
DJ
1022 return 0;
1023}
1024
95954743 1025static int
fa96cb38 1026last_thread_of_process_p (int pid)
95954743 1027{
95954743 1028 struct counter counter = { pid , 0 };
da6d8c04 1029
95954743
PA
1030 return (find_inferior (&all_threads,
1031 second_thread_of_pid_p, &counter) == NULL);
1032}
1033
da84f473
PA
1034/* Kill LWP. */
1035
1036static void
1037linux_kill_one_lwp (struct lwp_info *lwp)
1038{
d86d4aaf
DE
1039 struct thread_info *thr = get_lwp_thread (lwp);
1040 int pid = lwpid_of (thr);
da84f473
PA
1041
1042 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1043 there is no signal context, and ptrace(PTRACE_KILL) (or
1044 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1045 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1046 alternative is to kill with SIGKILL. We only need one SIGKILL
1047 per process, not one for each thread. But since we still support
1048 linuxthreads, and we also support debugging programs using raw
1049 clone without CLONE_THREAD, we send one for each thread. For
1050 years, we used PTRACE_KILL only, so we're being a bit paranoid
1051 about some old kernels where PTRACE_KILL might work better
1052 (dubious if there are any such, but that's why it's paranoia), so
1053 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1054 everywhere. */
1055
1056 errno = 0;
69ff6be5 1057 kill_lwp (pid, SIGKILL);
da84f473 1058 if (debug_threads)
ce9e3fe7
PA
1059 {
1060 int save_errno = errno;
1061
1062 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1063 target_pid_to_str (ptid_of (thr)),
1064 save_errno ? strerror (save_errno) : "OK");
1065 }
da84f473
PA
1066
1067 errno = 0;
b8e1b30e 1068 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1069 if (debug_threads)
ce9e3fe7
PA
1070 {
1071 int save_errno = errno;
1072
1073 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1074 target_pid_to_str (ptid_of (thr)),
1075 save_errno ? strerror (save_errno) : "OK");
1076 }
da84f473
PA
1077}
1078
e76126e8
PA
1079/* Kill LWP and wait for it to die. */
1080
1081static void
1082kill_wait_lwp (struct lwp_info *lwp)
1083{
1084 struct thread_info *thr = get_lwp_thread (lwp);
1085 int pid = ptid_get_pid (ptid_of (thr));
1086 int lwpid = ptid_get_lwp (ptid_of (thr));
1087 int wstat;
1088 int res;
1089
1090 if (debug_threads)
1091 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1092
1093 do
1094 {
1095 linux_kill_one_lwp (lwp);
1096
1097 /* Make sure it died. Notes:
1098
1099 - The loop is most likely unnecessary.
1100
1101 - We don't use linux_wait_for_event as that could delete lwps
1102 while we're iterating over them. We're not interested in
1103 any pending status at this point, only in making sure all
1104 wait status on the kernel side are collected until the
1105 process is reaped.
1106
1107 - We don't use __WALL here as the __WALL emulation relies on
1108 SIGCHLD, and killing a stopped process doesn't generate
1109 one, nor an exit status.
1110 */
1111 res = my_waitpid (lwpid, &wstat, 0);
1112 if (res == -1 && errno == ECHILD)
1113 res = my_waitpid (lwpid, &wstat, __WCLONE);
1114 } while (res > 0 && WIFSTOPPED (wstat));
1115
586b02a9
PA
1116 /* Even if it was stopped, the child may have already disappeared.
1117 E.g., if it was killed by SIGKILL. */
1118 if (res < 0 && errno != ECHILD)
1119 perror_with_name ("kill_wait_lwp");
e76126e8
PA
1120}
1121
da84f473
PA
1122/* Callback for `find_inferior'. Kills an lwp of a given process,
1123 except the leader. */
95954743
PA
1124
1125static int
da84f473 1126kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 1127{
0d62e5e8 1128 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1129 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1130 int pid = * (int *) args;
1131
1132 if (ptid_get_pid (entry->id) != pid)
1133 return 0;
0d62e5e8 1134
fd500816
DJ
1135 /* We avoid killing the first thread here, because of a Linux kernel (at
1136 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1137 the children get a chance to be reaped, it will remain a zombie
1138 forever. */
95954743 1139
d86d4aaf 1140 if (lwpid_of (thread) == pid)
95954743
PA
1141 {
1142 if (debug_threads)
87ce2a04
DE
1143 debug_printf ("lkop: is last of process %s\n",
1144 target_pid_to_str (entry->id));
95954743
PA
1145 return 0;
1146 }
fd500816 1147
e76126e8 1148 kill_wait_lwp (lwp);
95954743 1149 return 0;
da6d8c04
DJ
1150}
1151
95954743
PA
1152static int
1153linux_kill (int pid)
0d62e5e8 1154{
95954743 1155 struct process_info *process;
54a0b537 1156 struct lwp_info *lwp;
fd500816 1157
95954743
PA
1158 process = find_process_pid (pid);
1159 if (process == NULL)
1160 return -1;
9d606399 1161
f9e39928
PA
1162 /* If we're killing a running inferior, make sure it is stopped
1163 first, as PTRACE_KILL will not work otherwise. */
7984d532 1164 stop_all_lwps (0, NULL);
f9e39928 1165
da84f473 1166 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 1167
54a0b537 1168 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1169 thread in the list, so do so now. */
95954743 1170 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 1171
784867a5 1172 if (lwp == NULL)
fd500816 1173 {
784867a5 1174 if (debug_threads)
d86d4aaf
DE
1175 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1176 pid);
784867a5
JK
1177 }
1178 else
e76126e8 1179 kill_wait_lwp (lwp);
2d717e4f 1180
8336d594 1181 the_target->mourn (process);
f9e39928
PA
1182
1183 /* Since we presently can only stop all lwps of all processes, we
1184 need to unstop lwps of other processes. */
7984d532 1185 unstop_all_lwps (0, NULL);
95954743 1186 return 0;
0d62e5e8
DJ
1187}
1188
9b224c5e
PA
1189/* Get pending signal of THREAD, for detaching purposes. This is the
1190 signal the thread last stopped for, which we need to deliver to the
1191 thread when detaching, otherwise, it'd be suppressed/lost. */
1192
1193static int
1194get_detach_signal (struct thread_info *thread)
1195{
a493e3e2 1196 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1197 int status;
1198 struct lwp_info *lp = get_thread_lwp (thread);
1199
1200 if (lp->status_pending_p)
1201 status = lp->status_pending;
1202 else
1203 {
1204 /* If the thread had been suspended by gdbserver, and it stopped
1205 cleanly, then it'll have stopped with SIGSTOP. But we don't
1206 want to deliver that SIGSTOP. */
1207 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
a493e3e2 1208 || thread->last_status.value.sig == GDB_SIGNAL_0)
9b224c5e
PA
1209 return 0;
1210
1211 /* Otherwise, we may need to deliver the signal we
1212 intercepted. */
1213 status = lp->last_status;
1214 }
1215
1216 if (!WIFSTOPPED (status))
1217 {
1218 if (debug_threads)
87ce2a04 1219 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1220 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1221 return 0;
1222 }
1223
1224 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1225 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1226 {
1227 if (debug_threads)
87ce2a04
DE
1228 debug_printf ("GPS: lwp %s had stopped with extended "
1229 "status: no pending signal\n",
d86d4aaf 1230 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1231 return 0;
1232 }
1233
2ea28649 1234 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e
PA
1235
1236 if (program_signals_p && !program_signals[signo])
1237 {
1238 if (debug_threads)
87ce2a04 1239 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1240 target_pid_to_str (ptid_of (thread)),
87ce2a04 1241 gdb_signal_to_string (signo));
9b224c5e
PA
1242 return 0;
1243 }
1244 else if (!program_signals_p
1245 /* If we have no way to know which signals GDB does not
1246 want to have passed to the program, assume
1247 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1248 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1249 {
1250 if (debug_threads)
87ce2a04
DE
1251 debug_printf ("GPS: lwp %s had signal %s, "
1252 "but we don't know if we should pass it. "
1253 "Default to not.\n",
d86d4aaf 1254 target_pid_to_str (ptid_of (thread)),
87ce2a04 1255 gdb_signal_to_string (signo));
9b224c5e
PA
1256 return 0;
1257 }
1258 else
1259 {
1260 if (debug_threads)
87ce2a04 1261 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1262 target_pid_to_str (ptid_of (thread)),
87ce2a04 1263 gdb_signal_to_string (signo));
9b224c5e
PA
1264
1265 return WSTOPSIG (status);
1266 }
1267}
1268
95954743
PA
1269static int
1270linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
1271{
1272 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1273 struct lwp_info *lwp = get_thread_lwp (thread);
95954743 1274 int pid = * (int *) args;
9b224c5e 1275 int sig;
95954743
PA
1276
1277 if (ptid_get_pid (entry->id) != pid)
1278 return 0;
6ad8ae5c 1279
9b224c5e 1280 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1281 if (lwp->stop_expected)
ae13219e 1282 {
9b224c5e 1283 if (debug_threads)
87ce2a04 1284 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1285 target_pid_to_str (ptid_of (thread)));
9b224c5e 1286
d86d4aaf 1287 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1288 lwp->stop_expected = 0;
ae13219e
DJ
1289 }
1290
1291 /* Flush any pending changes to the process's registers. */
d86d4aaf 1292 regcache_invalidate_thread (thread);
ae13219e 1293
9b224c5e
PA
1294 /* Pass on any pending signal for this thread. */
1295 sig = get_detach_signal (thread);
1296
ae13219e 1297 /* Finally, let it resume. */
82bfbe7e
PA
1298 if (the_low_target.prepare_to_resume != NULL)
1299 the_low_target.prepare_to_resume (lwp);
d86d4aaf 1300 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1301 (PTRACE_TYPE_ARG4) (long) sig) < 0)
9b224c5e 1302 error (_("Can't detach %s: %s"),
d86d4aaf 1303 target_pid_to_str (ptid_of (thread)),
9b224c5e 1304 strerror (errno));
bd99dc85
PA
1305
1306 delete_lwp (lwp);
95954743 1307 return 0;
6ad8ae5c
DJ
1308}
1309
95954743
PA
1310static int
1311linux_detach (int pid)
1312{
1313 struct process_info *process;
1314
1315 process = find_process_pid (pid);
1316 if (process == NULL)
1317 return -1;
1318
f9e39928
PA
1319 /* Stop all threads before detaching. First, ptrace requires that
1320 the thread is stopped to sucessfully detach. Second, thread_db
1321 may need to uninstall thread event breakpoints from memory, which
1322 only works with a stopped process anyway. */
7984d532 1323 stop_all_lwps (0, NULL);
f9e39928 1324
ca5c370d 1325#ifdef USE_THREAD_DB
8336d594 1326 thread_db_detach (process);
ca5c370d
PA
1327#endif
1328
fa593d66
PA
1329 /* Stabilize threads (move out of jump pads). */
1330 stabilize_threads ();
1331
95954743 1332 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1333
1334 the_target->mourn (process);
f9e39928
PA
1335
1336 /* Since we presently can only stop all lwps of all processes, we
1337 need to unstop lwps of other processes. */
7984d532 1338 unstop_all_lwps (0, NULL);
f9e39928
PA
1339 return 0;
1340}
1341
1342/* Remove all LWPs that belong to process PROC from the lwp list. */
1343
1344static int
1345delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1346{
d86d4aaf
DE
1347 struct thread_info *thread = (struct thread_info *) entry;
1348 struct lwp_info *lwp = get_thread_lwp (thread);
f9e39928
PA
1349 struct process_info *process = proc;
1350
d86d4aaf 1351 if (pid_of (thread) == pid_of (process))
f9e39928
PA
1352 delete_lwp (lwp);
1353
dd6953e1 1354 return 0;
6ad8ae5c
DJ
1355}
1356
8336d594
PA
1357static void
1358linux_mourn (struct process_info *process)
1359{
1360 struct process_info_private *priv;
1361
1362#ifdef USE_THREAD_DB
1363 thread_db_mourn (process);
1364#endif
1365
d86d4aaf 1366 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1367
8336d594 1368 /* Freeing all private data. */
fe978cb0 1369 priv = process->priv;
8336d594
PA
1370 free (priv->arch_private);
1371 free (priv);
fe978cb0 1372 process->priv = NULL;
505106cd
PA
1373
1374 remove_process (process);
8336d594
PA
1375}
1376
444d6139 1377static void
95954743 1378linux_join (int pid)
444d6139 1379{
444d6139
PA
1380 int status, ret;
1381
1382 do {
95954743 1383 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1384 if (WIFEXITED (status) || WIFSIGNALED (status))
1385 break;
1386 } while (ret != -1 || errno != ECHILD);
1387}
1388
6ad8ae5c 1389/* Return nonzero if the given thread is still alive. */
0d62e5e8 1390static int
95954743 1391linux_thread_alive (ptid_t ptid)
0d62e5e8 1392{
95954743
PA
1393 struct lwp_info *lwp = find_lwp_pid (ptid);
1394
1395 /* We assume we always know if a thread exits. If a whole process
1396 exited but we still haven't been able to report it to GDB, we'll
1397 hold on to the last lwp of the dead process. */
1398 if (lwp != NULL)
1399 return !lwp->dead;
0d62e5e8
DJ
1400 else
1401 return 0;
1402}
1403
582511be
PA
1404/* Return 1 if this lwp still has an interesting status pending. If
1405 not (e.g., it had stopped for a breakpoint that is gone), return
1406 false. */
1407
1408static int
1409thread_still_has_status_pending_p (struct thread_info *thread)
1410{
1411 struct lwp_info *lp = get_thread_lwp (thread);
1412
1413 if (!lp->status_pending_p)
1414 return 0;
1415
1416 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1417 report any status pending the LWP may have. */
1418 if (thread->last_resume_kind == resume_stop
1419 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1420 return 0;
1421
1422 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1423 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1424 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1425 {
1426 struct thread_info *saved_thread;
1427 CORE_ADDR pc;
1428 int discard = 0;
1429
1430 gdb_assert (lp->last_status != 0);
1431
1432 pc = get_pc (lp);
1433
1434 saved_thread = current_thread;
1435 current_thread = thread;
1436
1437 if (pc != lp->stop_pc)
1438 {
1439 if (debug_threads)
1440 debug_printf ("PC of %ld changed\n",
1441 lwpid_of (thread));
1442 discard = 1;
1443 }
3e572f71
PA
1444
1445#if !USE_SIGTRAP_SIGINFO
15c66dd6 1446 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
582511be
PA
1447 && !(*the_low_target.breakpoint_at) (pc))
1448 {
1449 if (debug_threads)
1450 debug_printf ("previous SW breakpoint of %ld gone\n",
1451 lwpid_of (thread));
1452 discard = 1;
1453 }
15c66dd6 1454 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1455 && !hardware_breakpoint_inserted_here (pc))
1456 {
1457 if (debug_threads)
1458 debug_printf ("previous HW breakpoint of %ld gone\n",
1459 lwpid_of (thread));
1460 discard = 1;
1461 }
3e572f71 1462#endif
582511be
PA
1463
1464 current_thread = saved_thread;
1465
1466 if (discard)
1467 {
1468 if (debug_threads)
1469 debug_printf ("discarding pending breakpoint status\n");
1470 lp->status_pending_p = 0;
1471 return 0;
1472 }
1473 }
1474
1475 return 1;
1476}
1477
6bf5e0ba 1478/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1479static int
d50171e4 1480status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1481{
d86d4aaf 1482 struct thread_info *thread = (struct thread_info *) entry;
582511be 1483 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1484 ptid_t ptid = * (ptid_t *) arg;
1485
1486 /* Check if we're only interested in events from a specific process
afa8d396
PA
1487 or a specific LWP. */
1488 if (!ptid_match (ptid_of (thread), ptid))
95954743 1489 return 0;
0d62e5e8 1490
582511be
PA
1491 if (lp->status_pending_p
1492 && !thread_still_has_status_pending_p (thread))
1493 {
1494 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1495 return 0;
1496 }
0d62e5e8 1497
582511be 1498 return lp->status_pending_p;
0d62e5e8
DJ
1499}
1500
95954743
PA
1501static int
1502same_lwp (struct inferior_list_entry *entry, void *data)
1503{
1504 ptid_t ptid = *(ptid_t *) data;
1505 int lwp;
1506
1507 if (ptid_get_lwp (ptid) != 0)
1508 lwp = ptid_get_lwp (ptid);
1509 else
1510 lwp = ptid_get_pid (ptid);
1511
1512 if (ptid_get_lwp (entry->id) == lwp)
1513 return 1;
1514
1515 return 0;
1516}
1517
1518struct lwp_info *
1519find_lwp_pid (ptid_t ptid)
1520{
d86d4aaf
DE
1521 struct inferior_list_entry *thread
1522 = find_inferior (&all_threads, same_lwp, &ptid);
1523
1524 if (thread == NULL)
1525 return NULL;
1526
1527 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1528}
1529
fa96cb38 1530/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1531
fa96cb38
PA
1532static int
1533num_lwps (int pid)
1534{
1535 struct inferior_list_entry *inf, *tmp;
1536 int count = 0;
0d62e5e8 1537
fa96cb38 1538 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1539 {
fa96cb38
PA
1540 if (ptid_get_pid (inf->id) == pid)
1541 count++;
24a09b5f 1542 }
3aee8918 1543
fa96cb38
PA
1544 return count;
1545}
d61ddec4 1546
6d4ee8c6
GB
1547/* The arguments passed to iterate_over_lwps. */
1548
1549struct iterate_over_lwps_args
1550{
1551 /* The FILTER argument passed to iterate_over_lwps. */
1552 ptid_t filter;
1553
1554 /* The CALLBACK argument passed to iterate_over_lwps. */
1555 iterate_over_lwps_ftype *callback;
1556
1557 /* The DATA argument passed to iterate_over_lwps. */
1558 void *data;
1559};
1560
1561/* Callback for find_inferior used by iterate_over_lwps to filter
1562 calls to the callback supplied to that function. Returning a
1563 nonzero value causes find_inferiors to stop iterating and return
1564 the current inferior_list_entry. Returning zero indicates that
1565 find_inferiors should continue iterating. */
1566
1567static int
1568iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1569{
1570 struct iterate_over_lwps_args *args
1571 = (struct iterate_over_lwps_args *) args_p;
1572
1573 if (ptid_match (entry->id, args->filter))
1574 {
1575 struct thread_info *thr = (struct thread_info *) entry;
1576 struct lwp_info *lwp = get_thread_lwp (thr);
1577
1578 return (*args->callback) (lwp, args->data);
1579 }
1580
1581 return 0;
1582}
1583
1584/* See nat/linux-nat.h. */
1585
1586struct lwp_info *
1587iterate_over_lwps (ptid_t filter,
1588 iterate_over_lwps_ftype callback,
1589 void *data)
1590{
1591 struct iterate_over_lwps_args args = {filter, callback, data};
1592 struct inferior_list_entry *entry;
1593
1594 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1595 if (entry == NULL)
1596 return NULL;
1597
1598 return get_thread_lwp ((struct thread_info *) entry);
1599}
1600
fa96cb38
PA
1601/* Detect zombie thread group leaders, and "exit" them. We can't reap
1602 their exits until all other threads in the group have exited. */
c3adc08c 1603
fa96cb38
PA
1604static void
1605check_zombie_leaders (void)
1606{
1607 struct process_info *proc, *tmp;
c3adc08c 1608
fa96cb38 1609 ALL_PROCESSES (proc, tmp)
c3adc08c 1610 {
fa96cb38
PA
1611 pid_t leader_pid = pid_of (proc);
1612 struct lwp_info *leader_lp;
c3adc08c 1613
fa96cb38 1614 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1615
fa96cb38
PA
1616 if (debug_threads)
1617 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1618 "num_lwps=%d, zombie=%d\n",
1619 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1620 linux_proc_pid_is_zombie (leader_pid));
1621
1622 if (leader_lp != NULL
1623 /* Check if there are other threads in the group, as we may
1624 have raced with the inferior simply exiting. */
1625 && !last_thread_of_process_p (leader_pid)
1626 && linux_proc_pid_is_zombie (leader_pid))
1627 {
1628 /* A leader zombie can mean one of two things:
1629
1630 - It exited, and there's an exit status pending
1631 available, or only the leader exited (not the whole
1632 program). In the latter case, we can't waitpid the
1633 leader's exit status until all other threads are gone.
1634
1635 - There are 3 or more threads in the group, and a thread
1636 other than the leader exec'd. On an exec, the Linux
1637 kernel destroys all other threads (except the execing
1638 one) in the thread group, and resets the execing thread's
1639 tid to the tgid. No exit notification is sent for the
1640 execing thread -- from the ptracer's perspective, it
1641 appears as though the execing thread just vanishes.
1642 Until we reap all other threads except the leader and the
1643 execing thread, the leader will be zombie, and the
1644 execing thread will be in `D (disc sleep)'. As soon as
1645 all other threads are reaped, the execing thread changes
1646 it's tid to the tgid, and the previous (zombie) leader
1647 vanishes, giving place to the "new" leader. We could try
1648 distinguishing the exit and exec cases, by waiting once
1649 more, and seeing if something comes out, but it doesn't
1650 sound useful. The previous leader _does_ go away, and
1651 we'll re-add the new one once we see the exec event
1652 (which is just the same as what would happen if the
1653 previous leader did exit voluntarily before some other
1654 thread execs). */
c3adc08c 1655
fa96cb38
PA
1656 if (debug_threads)
1657 fprintf (stderr,
1658 "CZL: Thread group leader %d zombie "
1659 "(it exited, or another thread execd).\n",
1660 leader_pid);
c3adc08c 1661
fa96cb38 1662 delete_lwp (leader_lp);
c3adc08c
PA
1663 }
1664 }
fa96cb38 1665}
c3adc08c 1666
fa96cb38
PA
1667/* Callback for `find_inferior'. Returns the first LWP that is not
1668 stopped. ARG is a PTID filter. */
d50171e4 1669
fa96cb38
PA
1670static int
1671not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1672{
1673 struct thread_info *thr = (struct thread_info *) entry;
1674 struct lwp_info *lwp;
1675 ptid_t filter = *(ptid_t *) arg;
47c0c975 1676
fa96cb38
PA
1677 if (!ptid_match (ptid_of (thr), filter))
1678 return 0;
bd99dc85 1679
fa96cb38
PA
1680 lwp = get_thread_lwp (thr);
1681 if (!lwp->stopped)
1682 return 1;
1683
1684 return 0;
0d62e5e8 1685}
611cb4a5 1686
219f2f23
PA
1687/* This function should only be called if the LWP got a SIGTRAP.
1688
1689 Handle any tracepoint steps or hits. Return true if a tracepoint
1690 event was handled, 0 otherwise. */
1691
1692static int
1693handle_tracepoints (struct lwp_info *lwp)
1694{
1695 struct thread_info *tinfo = get_lwp_thread (lwp);
1696 int tpoint_related_event = 0;
1697
582511be
PA
1698 gdb_assert (lwp->suspended == 0);
1699
7984d532
PA
1700 /* If this tracepoint hit causes a tracing stop, we'll immediately
1701 uninsert tracepoints. To do this, we temporarily pause all
1702 threads, unpatch away, and then unpause threads. We need to make
1703 sure the unpausing doesn't resume LWP too. */
1704 lwp->suspended++;
1705
219f2f23
PA
1706 /* And we need to be sure that any all-threads-stopping doesn't try
1707 to move threads out of the jump pads, as it could deadlock the
1708 inferior (LWP could be in the jump pad, maybe even holding the
1709 lock.) */
1710
1711 /* Do any necessary step collect actions. */
1712 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1713
fa593d66
PA
1714 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1715
219f2f23
PA
1716 /* See if we just hit a tracepoint and do its main collect
1717 actions. */
1718 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1719
7984d532
PA
1720 lwp->suspended--;
1721
1722 gdb_assert (lwp->suspended == 0);
fa593d66 1723 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1724
219f2f23
PA
1725 if (tpoint_related_event)
1726 {
1727 if (debug_threads)
87ce2a04 1728 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1729 return 1;
1730 }
1731
1732 return 0;
1733}
1734
fa593d66
PA
1735/* Convenience wrapper. Returns true if LWP is presently collecting a
1736 fast tracepoint. */
1737
1738static int
1739linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1740 struct fast_tpoint_collect_status *status)
1741{
1742 CORE_ADDR thread_area;
d86d4aaf 1743 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1744
1745 if (the_low_target.get_thread_area == NULL)
1746 return 0;
1747
1748 /* Get the thread area address. This is used to recognize which
1749 thread is which when tracing with the in-process agent library.
1750 We don't read anything from the address, and treat it as opaque;
1751 it's the address itself that we assume is unique per-thread. */
d86d4aaf 1752 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
1753 return 0;
1754
1755 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1756}
1757
1758/* The reason we resume in the caller, is because we want to be able
1759 to pass lwp->status_pending as WSTAT, and we need to clear
1760 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1761 refuses to resume. */
1762
1763static int
1764maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1765{
0bfdf32f 1766 struct thread_info *saved_thread;
fa593d66 1767
0bfdf32f
GB
1768 saved_thread = current_thread;
1769 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1770
1771 if ((wstat == NULL
1772 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1773 && supports_fast_tracepoints ()
58b4daa5 1774 && agent_loaded_p ())
fa593d66
PA
1775 {
1776 struct fast_tpoint_collect_status status;
1777 int r;
1778
1779 if (debug_threads)
87ce2a04
DE
1780 debug_printf ("Checking whether LWP %ld needs to move out of the "
1781 "jump pad.\n",
0bfdf32f 1782 lwpid_of (current_thread));
fa593d66
PA
1783
1784 r = linux_fast_tracepoint_collecting (lwp, &status);
1785
1786 if (wstat == NULL
1787 || (WSTOPSIG (*wstat) != SIGILL
1788 && WSTOPSIG (*wstat) != SIGFPE
1789 && WSTOPSIG (*wstat) != SIGSEGV
1790 && WSTOPSIG (*wstat) != SIGBUS))
1791 {
1792 lwp->collecting_fast_tracepoint = r;
1793
1794 if (r != 0)
1795 {
1796 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1797 {
1798 /* Haven't executed the original instruction yet.
1799 Set breakpoint there, and wait till it's hit,
1800 then single-step until exiting the jump pad. */
1801 lwp->exit_jump_pad_bkpt
1802 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1803 }
1804
1805 if (debug_threads)
87ce2a04
DE
1806 debug_printf ("Checking whether LWP %ld needs to move out of "
1807 "the jump pad...it does\n",
0bfdf32f
GB
1808 lwpid_of (current_thread));
1809 current_thread = saved_thread;
fa593d66
PA
1810
1811 return 1;
1812 }
1813 }
1814 else
1815 {
1816 /* If we get a synchronous signal while collecting, *and*
1817 while executing the (relocated) original instruction,
1818 reset the PC to point at the tpoint address, before
1819 reporting to GDB. Otherwise, it's an IPA lib bug: just
1820 report the signal to GDB, and pray for the best. */
1821
1822 lwp->collecting_fast_tracepoint = 0;
1823
1824 if (r != 0
1825 && (status.adjusted_insn_addr <= lwp->stop_pc
1826 && lwp->stop_pc < status.adjusted_insn_addr_end))
1827 {
1828 siginfo_t info;
1829 struct regcache *regcache;
1830
1831 /* The si_addr on a few signals references the address
1832 of the faulting instruction. Adjust that as
1833 well. */
1834 if ((WSTOPSIG (*wstat) == SIGILL
1835 || WSTOPSIG (*wstat) == SIGFPE
1836 || WSTOPSIG (*wstat) == SIGBUS
1837 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 1838 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1839 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
1840 /* Final check just to make sure we don't clobber
1841 the siginfo of non-kernel-sent signals. */
1842 && (uintptr_t) info.si_addr == lwp->stop_pc)
1843 {
1844 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 1845 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1846 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
1847 }
1848
0bfdf32f 1849 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
1850 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1851 lwp->stop_pc = status.tpoint_addr;
1852
1853 /* Cancel any fast tracepoint lock this thread was
1854 holding. */
1855 force_unlock_trace_buffer ();
1856 }
1857
1858 if (lwp->exit_jump_pad_bkpt != NULL)
1859 {
1860 if (debug_threads)
87ce2a04
DE
1861 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1862 "stopping all threads momentarily.\n");
fa593d66
PA
1863
1864 stop_all_lwps (1, lwp);
fa593d66
PA
1865
1866 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1867 lwp->exit_jump_pad_bkpt = NULL;
1868
1869 unstop_all_lwps (1, lwp);
1870
1871 gdb_assert (lwp->suspended >= 0);
1872 }
1873 }
1874 }
1875
1876 if (debug_threads)
87ce2a04
DE
1877 debug_printf ("Checking whether LWP %ld needs to move out of the "
1878 "jump pad...no\n",
0bfdf32f 1879 lwpid_of (current_thread));
0cccb683 1880
0bfdf32f 1881 current_thread = saved_thread;
fa593d66
PA
1882 return 0;
1883}
1884
1885/* Enqueue one signal in the "signals to report later when out of the
1886 jump pad" list. */
1887
1888static void
1889enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1890{
1891 struct pending_signals *p_sig;
d86d4aaf 1892 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1893
1894 if (debug_threads)
87ce2a04 1895 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 1896 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1897
1898 if (debug_threads)
1899 {
1900 struct pending_signals *sig;
1901
1902 for (sig = lwp->pending_signals_to_report;
1903 sig != NULL;
1904 sig = sig->prev)
87ce2a04
DE
1905 debug_printf (" Already queued %d\n",
1906 sig->signal);
fa593d66 1907
87ce2a04 1908 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
1909 }
1910
1a981360
PA
1911 /* Don't enqueue non-RT signals if they are already in the deferred
1912 queue. (SIGSTOP being the easiest signal to see ending up here
1913 twice) */
1914 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1915 {
1916 struct pending_signals *sig;
1917
1918 for (sig = lwp->pending_signals_to_report;
1919 sig != NULL;
1920 sig = sig->prev)
1921 {
1922 if (sig->signal == WSTOPSIG (*wstat))
1923 {
1924 if (debug_threads)
87ce2a04
DE
1925 debug_printf ("Not requeuing already queued non-RT signal %d"
1926 " for LWP %ld\n",
1927 sig->signal,
d86d4aaf 1928 lwpid_of (thread));
1a981360
PA
1929 return;
1930 }
1931 }
1932 }
1933
fa593d66
PA
1934 p_sig = xmalloc (sizeof (*p_sig));
1935 p_sig->prev = lwp->pending_signals_to_report;
1936 p_sig->signal = WSTOPSIG (*wstat);
1937 memset (&p_sig->info, 0, sizeof (siginfo_t));
d86d4aaf 1938 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1939 &p_sig->info);
fa593d66
PA
1940
1941 lwp->pending_signals_to_report = p_sig;
1942}
1943
1944/* Dequeue one signal from the "signals to report later when out of
1945 the jump pad" list. */
1946
1947static int
1948dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1949{
d86d4aaf
DE
1950 struct thread_info *thread = get_lwp_thread (lwp);
1951
fa593d66
PA
1952 if (lwp->pending_signals_to_report != NULL)
1953 {
1954 struct pending_signals **p_sig;
1955
1956 p_sig = &lwp->pending_signals_to_report;
1957 while ((*p_sig)->prev != NULL)
1958 p_sig = &(*p_sig)->prev;
1959
1960 *wstat = W_STOPCODE ((*p_sig)->signal);
1961 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 1962 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1963 &(*p_sig)->info);
fa593d66
PA
1964 free (*p_sig);
1965 *p_sig = NULL;
1966
1967 if (debug_threads)
87ce2a04 1968 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 1969 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1970
1971 if (debug_threads)
1972 {
1973 struct pending_signals *sig;
1974
1975 for (sig = lwp->pending_signals_to_report;
1976 sig != NULL;
1977 sig = sig->prev)
87ce2a04
DE
1978 debug_printf (" Still queued %d\n",
1979 sig->signal);
fa593d66 1980
87ce2a04 1981 debug_printf (" (no more queued signals)\n");
fa593d66
PA
1982 }
1983
1984 return 1;
1985 }
1986
1987 return 0;
1988}
1989
582511be
PA
1990/* Fetch the possibly triggered data watchpoint info and store it in
1991 CHILD.
d50171e4 1992
582511be
PA
1993 On some archs, like x86, that use debug registers to set
1994 watchpoints, it's possible that the way to know which watched
1995 address trapped, is to check the register that is used to select
1996 which address to watch. Problem is, between setting the watchpoint
1997 and reading back which data address trapped, the user may change
1998 the set of watchpoints, and, as a consequence, GDB changes the
1999 debug registers in the inferior. To avoid reading back a stale
2000 stopped-data-address when that happens, we cache in LP the fact
2001 that a watchpoint trapped, and the corresponding data address, as
2002 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2003 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2004
582511be
PA
2005static int
2006check_stopped_by_watchpoint (struct lwp_info *child)
2007{
2008 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2009 {
582511be 2010 struct thread_info *saved_thread;
d50171e4 2011
582511be
PA
2012 saved_thread = current_thread;
2013 current_thread = get_lwp_thread (child);
2014
2015 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2016 {
15c66dd6 2017 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2018
2019 if (the_low_target.stopped_data_address != NULL)
2020 child->stopped_data_address
2021 = the_low_target.stopped_data_address ();
2022 else
2023 child->stopped_data_address = 0;
d50171e4
PA
2024 }
2025
0bfdf32f 2026 current_thread = saved_thread;
d50171e4
PA
2027 }
2028
15c66dd6 2029 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2030}
2031
de0d863e
DB
2032/* Return the ptrace options that we want to try to enable. */
2033
2034static int
2035linux_low_ptrace_options (int attached)
2036{
2037 int options = 0;
2038
2039 if (!attached)
2040 options |= PTRACE_O_EXITKILL;
2041
2042 if (report_fork_events)
2043 options |= PTRACE_O_TRACEFORK;
2044
c269dbdb
DB
2045 if (report_vfork_events)
2046 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2047
de0d863e
DB
2048 return options;
2049}
2050
fa96cb38
PA
2051/* Do low-level handling of the event, and check if we should go on
2052 and pass it to caller code. Return the affected lwp if we are, or
2053 NULL otherwise. */
2054
2055static struct lwp_info *
582511be 2056linux_low_filter_event (int lwpid, int wstat)
fa96cb38
PA
2057{
2058 struct lwp_info *child;
2059 struct thread_info *thread;
582511be 2060 int have_stop_pc = 0;
fa96cb38
PA
2061
2062 child = find_lwp_pid (pid_to_ptid (lwpid));
2063
2064 /* If we didn't find a process, one of two things presumably happened:
2065 - A process we started and then detached from has exited. Ignore it.
2066 - A process we are controlling has forked and the new child's stop
2067 was reported to us by the kernel. Save its PID. */
2068 if (child == NULL && WIFSTOPPED (wstat))
2069 {
2070 add_to_pid_list (&stopped_pids, lwpid, wstat);
2071 return NULL;
2072 }
2073 else if (child == NULL)
2074 return NULL;
2075
2076 thread = get_lwp_thread (child);
2077
2078 child->stopped = 1;
2079
2080 child->last_status = wstat;
2081
582511be
PA
2082 /* Check if the thread has exited. */
2083 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2084 {
2085 if (debug_threads)
2086 debug_printf ("LLFE: %d exited.\n", lwpid);
2087 if (num_lwps (pid_of (thread)) > 1)
2088 {
2089
2090 /* If there is at least one more LWP, then the exit signal was
2091 not the end of the debugged application and should be
2092 ignored. */
2093 delete_lwp (child);
2094 return NULL;
2095 }
2096 else
2097 {
2098 /* This was the last lwp in the process. Since events are
2099 serialized to GDB core, and we can't report this one
2100 right now, but GDB core and the other target layers will
2101 want to be notified about the exit code/signal, leave the
2102 status pending for the next time we're able to report
2103 it. */
2104 mark_lwp_dead (child, wstat);
2105 return child;
2106 }
2107 }
2108
2109 gdb_assert (WIFSTOPPED (wstat));
2110
fa96cb38
PA
2111 if (WIFSTOPPED (wstat))
2112 {
2113 struct process_info *proc;
2114
c06cbd92 2115 /* Architecture-specific setup after inferior is running. */
fa96cb38 2116 proc = find_process_pid (pid_of (thread));
c06cbd92 2117 if (proc->tdesc == NULL)
fa96cb38 2118 {
c06cbd92
YQ
2119 if (proc->attached)
2120 {
2121 struct thread_info *saved_thread;
fa96cb38 2122
c06cbd92
YQ
2123 /* This needs to happen after we have attached to the
2124 inferior and it is stopped for the first time, but
2125 before we access any inferior registers. */
2126 saved_thread = current_thread;
2127 current_thread = thread;
fa96cb38 2128
c06cbd92 2129 the_low_target.arch_setup ();
fa96cb38 2130
c06cbd92 2131 current_thread = saved_thread;
c06cbd92
YQ
2132 }
2133 else
2134 {
2135 /* The process is started, but GDBserver will do
2136 architecture-specific setup after the program stops at
2137 the first instruction. */
2138 child->status_pending_p = 1;
2139 child->status_pending = wstat;
2140 return child;
2141 }
fa96cb38
PA
2142 }
2143 }
2144
fa96cb38
PA
2145 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2146 {
beed38b8 2147 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2148 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2149
de0d863e 2150 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2151 child->must_set_ptrace_flags = 0;
2152 }
2153
582511be
PA
2154 /* Be careful to not overwrite stop_pc until
2155 check_stopped_by_breakpoint is called. */
fa96cb38 2156 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2157 && linux_is_extended_waitstatus (wstat))
fa96cb38 2158 {
582511be 2159 child->stop_pc = get_pc (child);
de0d863e
DB
2160 if (handle_extended_wait (child, wstat))
2161 {
2162 /* The event has been handled, so just return without
2163 reporting it. */
2164 return NULL;
2165 }
fa96cb38
PA
2166 }
2167
3e572f71
PA
2168 /* Check first whether this was a SW/HW breakpoint before checking
2169 watchpoints, because at least s390 can't tell the data address of
2170 hardware watchpoint hits, and returns stopped-by-watchpoint as
2171 long as there's a watchpoint set. */
2172 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
582511be
PA
2173 {
2174 if (check_stopped_by_breakpoint (child))
2175 have_stop_pc = 1;
2176 }
2177
3e572f71
PA
2178 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2179 or hardware watchpoint. Check which is which if we got
2180 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2181 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2182 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2183 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2184 check_stopped_by_watchpoint (child);
2185
582511be
PA
2186 if (!have_stop_pc)
2187 child->stop_pc = get_pc (child);
2188
fa96cb38
PA
2189 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2190 && child->stop_expected)
2191 {
2192 if (debug_threads)
2193 debug_printf ("Expected stop.\n");
2194 child->stop_expected = 0;
2195
2196 if (thread->last_resume_kind == resume_stop)
2197 {
2198 /* We want to report the stop to the core. Treat the
2199 SIGSTOP as a normal event. */
2bf6fb9d
PA
2200 if (debug_threads)
2201 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2202 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2203 }
2204 else if (stopping_threads != NOT_STOPPING_THREADS)
2205 {
2206 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2207 pending. */
2bf6fb9d
PA
2208 if (debug_threads)
2209 debug_printf ("LLW: SIGSTOP caught for %s "
2210 "while stopping threads.\n",
2211 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2212 return NULL;
2213 }
2214 else
2215 {
2bf6fb9d
PA
2216 /* This is a delayed SIGSTOP. Filter out the event. */
2217 if (debug_threads)
2218 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2219 child->stepping ? "step" : "continue",
2220 target_pid_to_str (ptid_of (thread)));
2221
fa96cb38
PA
2222 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2223 return NULL;
2224 }
2225 }
2226
582511be
PA
2227 child->status_pending_p = 1;
2228 child->status_pending = wstat;
fa96cb38
PA
2229 return child;
2230}
2231
20ba1ce6
PA
2232/* Resume LWPs that are currently stopped without any pending status
2233 to report, but are resumed from the core's perspective. */
2234
2235static void
2236resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2237{
2238 struct thread_info *thread = (struct thread_info *) entry;
2239 struct lwp_info *lp = get_thread_lwp (thread);
2240
2241 if (lp->stopped
2242 && !lp->status_pending_p
2243 && thread->last_resume_kind != resume_stop
2244 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2245 {
2246 int step = thread->last_resume_kind == resume_step;
2247
2248 if (debug_threads)
2249 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2250 target_pid_to_str (ptid_of (thread)),
2251 paddress (lp->stop_pc),
2252 step);
2253
2254 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2255 }
2256}
2257
fa96cb38
PA
2258/* Wait for an event from child(ren) WAIT_PTID, and return any that
2259 match FILTER_PTID (leaving others pending). The PTIDs can be:
2260 minus_one_ptid, to specify any child; a pid PTID, specifying all
2261 lwps of a thread group; or a PTID representing a single lwp. Store
2262 the stop status through the status pointer WSTAT. OPTIONS is
2263 passed to the waitpid call. Return 0 if no event was found and
2264 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2265 was found. Return the PID of the stopped child otherwise. */
bd99dc85 2266
0d62e5e8 2267static int
fa96cb38
PA
2268linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2269 int *wstatp, int options)
0d62e5e8 2270{
d86d4aaf 2271 struct thread_info *event_thread;
d50171e4 2272 struct lwp_info *event_child, *requested_child;
fa96cb38 2273 sigset_t block_mask, prev_mask;
d50171e4 2274
fa96cb38 2275 retry:
d86d4aaf
DE
2276 /* N.B. event_thread points to the thread_info struct that contains
2277 event_child. Keep them in sync. */
2278 event_thread = NULL;
d50171e4
PA
2279 event_child = NULL;
2280 requested_child = NULL;
0d62e5e8 2281
95954743 2282 /* Check for a lwp with a pending status. */
bd99dc85 2283
fa96cb38 2284 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
0d62e5e8 2285 {
d86d4aaf 2286 event_thread = (struct thread_info *)
fa96cb38 2287 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
d86d4aaf
DE
2288 if (event_thread != NULL)
2289 event_child = get_thread_lwp (event_thread);
2290 if (debug_threads && event_thread)
2291 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2292 }
fa96cb38 2293 else if (!ptid_equal (filter_ptid, null_ptid))
0d62e5e8 2294 {
fa96cb38 2295 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2296
bde24c0a 2297 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66
PA
2298 && requested_child->status_pending_p
2299 && requested_child->collecting_fast_tracepoint)
2300 {
2301 enqueue_one_deferred_signal (requested_child,
2302 &requested_child->status_pending);
2303 requested_child->status_pending_p = 0;
2304 requested_child->status_pending = 0;
2305 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2306 }
2307
2308 if (requested_child->suspended
2309 && requested_child->status_pending_p)
38e08fca
GB
2310 {
2311 internal_error (__FILE__, __LINE__,
2312 "requesting an event out of a"
2313 " suspended child?");
2314 }
fa593d66 2315
d50171e4 2316 if (requested_child->status_pending_p)
d86d4aaf
DE
2317 {
2318 event_child = requested_child;
2319 event_thread = get_lwp_thread (event_child);
2320 }
0d62e5e8 2321 }
611cb4a5 2322
0d62e5e8
DJ
2323 if (event_child != NULL)
2324 {
bd99dc85 2325 if (debug_threads)
87ce2a04 2326 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2327 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2328 *wstatp = event_child->status_pending;
bd99dc85
PA
2329 event_child->status_pending_p = 0;
2330 event_child->status_pending = 0;
0bfdf32f 2331 current_thread = event_thread;
d86d4aaf 2332 return lwpid_of (event_thread);
0d62e5e8
DJ
2333 }
2334
fa96cb38
PA
2335 /* But if we don't find a pending event, we'll have to wait.
2336
2337 We only enter this loop if no process has a pending wait status.
2338 Thus any action taken in response to a wait status inside this
2339 loop is responding as soon as we detect the status, not after any
2340 pending events. */
d8301ad1 2341
fa96cb38
PA
2342 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2343 all signals while here. */
2344 sigfillset (&block_mask);
2345 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2346
582511be
PA
2347 /* Always pull all events out of the kernel. We'll randomly select
2348 an event LWP out of all that have events, to prevent
2349 starvation. */
fa96cb38 2350 while (event_child == NULL)
0d62e5e8 2351 {
fa96cb38 2352 pid_t ret = 0;
0d62e5e8 2353
fa96cb38
PA
2354 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2355 quirks:
0d62e5e8 2356
fa96cb38
PA
2357 - If the thread group leader exits while other threads in the
2358 thread group still exist, waitpid(TGID, ...) hangs. That
2359 waitpid won't return an exit status until the other threads
2360 in the group are reaped.
611cb4a5 2361
fa96cb38
PA
2362 - When a non-leader thread execs, that thread just vanishes
2363 without reporting an exit (so we'd hang if we waited for it
2364 explicitly in that case). The exec event is reported to
2365 the TGID pid (although we don't currently enable exec
2366 events). */
2367 errno = 0;
2368 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2369
fa96cb38
PA
2370 if (debug_threads)
2371 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2372 ret, errno ? strerror (errno) : "ERRNO-OK");
0d62e5e8 2373
fa96cb38 2374 if (ret > 0)
0d62e5e8 2375 {
89be2091 2376 if (debug_threads)
bd99dc85 2377 {
fa96cb38
PA
2378 debug_printf ("LLW: waitpid %ld received %s\n",
2379 (long) ret, status_to_str (*wstatp));
bd99dc85 2380 }
89be2091 2381
582511be
PA
2382 /* Filter all events. IOW, leave all events pending. We'll
2383 randomly select an event LWP out of all that have events
2384 below. */
2385 linux_low_filter_event (ret, *wstatp);
fa96cb38
PA
2386 /* Retry until nothing comes out of waitpid. A single
2387 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2388 continue;
2389 }
2390
20ba1ce6
PA
2391 /* Now that we've pulled all events out of the kernel, resume
2392 LWPs that don't have an interesting event to report. */
2393 if (stopping_threads == NOT_STOPPING_THREADS)
2394 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2395
2396 /* ... and find an LWP with a status to report to the core, if
2397 any. */
582511be
PA
2398 event_thread = (struct thread_info *)
2399 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2400 if (event_thread != NULL)
2401 {
2402 event_child = get_thread_lwp (event_thread);
2403 *wstatp = event_child->status_pending;
2404 event_child->status_pending_p = 0;
2405 event_child->status_pending = 0;
2406 break;
2407 }
2408
fa96cb38
PA
2409 /* Check for zombie thread group leaders. Those can't be reaped
2410 until all other threads in the thread group are. */
2411 check_zombie_leaders ();
2412
2413 /* If there are no resumed children left in the set of LWPs we
2414 want to wait for, bail. We can't just block in
2415 waitpid/sigsuspend, because lwps might have been left stopped
2416 in trace-stop state, and we'd be stuck forever waiting for
2417 their status to change (which would only happen if we resumed
2418 them). Even if WNOHANG is set, this return code is preferred
2419 over 0 (below), as it is more detailed. */
2420 if ((find_inferior (&all_threads,
2421 not_stopped_callback,
2422 &wait_ptid) == NULL))
a6dbe5df 2423 {
fa96cb38
PA
2424 if (debug_threads)
2425 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2426 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2427 return -1;
a6dbe5df
PA
2428 }
2429
fa96cb38
PA
2430 /* No interesting event to report to the caller. */
2431 if ((options & WNOHANG))
24a09b5f 2432 {
fa96cb38
PA
2433 if (debug_threads)
2434 debug_printf ("WNOHANG set, no event found\n");
2435
2436 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2437 return 0;
24a09b5f
DJ
2438 }
2439
fa96cb38
PA
2440 /* Block until we get an event reported with SIGCHLD. */
2441 if (debug_threads)
2442 debug_printf ("sigsuspend'ing\n");
d50171e4 2443
fa96cb38
PA
2444 sigsuspend (&prev_mask);
2445 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2446 goto retry;
2447 }
d50171e4 2448
fa96cb38 2449 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2450
0bfdf32f 2451 current_thread = event_thread;
d50171e4 2452
fa96cb38
PA
2453 /* Check for thread exit. */
2454 if (! WIFSTOPPED (*wstatp))
2455 {
2456 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2457
2458 if (debug_threads)
2459 debug_printf ("LWP %d is the last lwp of process. "
2460 "Process %ld exiting.\n",
2461 pid_of (event_thread), lwpid_of (event_thread));
d86d4aaf 2462 return lwpid_of (event_thread);
611cb4a5 2463 }
0d62e5e8 2464
fa96cb38
PA
2465 return lwpid_of (event_thread);
2466}
2467
2468/* Wait for an event from child(ren) PTID. PTIDs can be:
2469 minus_one_ptid, to specify any child; a pid PTID, specifying all
2470 lwps of a thread group; or a PTID representing a single lwp. Store
2471 the stop status through the status pointer WSTAT. OPTIONS is
2472 passed to the waitpid call. Return 0 if no event was found and
2473 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2474 was found. Return the PID of the stopped child otherwise. */
2475
2476static int
2477linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2478{
2479 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2480}
2481
6bf5e0ba
PA
2482/* Count the LWP's that have had events. */
2483
2484static int
2485count_events_callback (struct inferior_list_entry *entry, void *data)
2486{
d86d4aaf 2487 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2488 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2489 int *count = data;
2490
2491 gdb_assert (count != NULL);
2492
582511be 2493 /* Count only resumed LWPs that have an event pending. */
8336d594 2494 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2495 && lp->status_pending_p)
6bf5e0ba
PA
2496 (*count)++;
2497
2498 return 0;
2499}
2500
2501/* Select the LWP (if any) that is currently being single-stepped. */
2502
2503static int
2504select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2505{
d86d4aaf
DE
2506 struct thread_info *thread = (struct thread_info *) entry;
2507 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2508
8336d594
PA
2509 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2510 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2511 && lp->status_pending_p)
2512 return 1;
2513 else
2514 return 0;
2515}
2516
b90fc188 2517/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2518
2519static int
2520select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2521{
d86d4aaf 2522 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2523 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2524 int *selector = data;
2525
2526 gdb_assert (selector != NULL);
2527
582511be 2528 /* Select only resumed LWPs that have an event pending. */
91baf43f 2529 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2530 && lp->status_pending_p)
6bf5e0ba
PA
2531 if ((*selector)-- == 0)
2532 return 1;
2533
2534 return 0;
2535}
2536
6bf5e0ba
PA
2537/* Select one LWP out of those that have events pending. */
2538
2539static void
2540select_event_lwp (struct lwp_info **orig_lp)
2541{
2542 int num_events = 0;
2543 int random_selector;
582511be
PA
2544 struct thread_info *event_thread = NULL;
2545
2546 /* In all-stop, give preference to the LWP that is being
2547 single-stepped. There will be at most one, and it's the LWP that
2548 the core is most interested in. If we didn't do this, then we'd
2549 have to handle pending step SIGTRAPs somehow in case the core
2550 later continues the previously-stepped thread, otherwise we'd
2551 report the pending SIGTRAP, and the core, not having stepped the
2552 thread, wouldn't understand what the trap was for, and therefore
2553 would report it to the user as a random signal. */
2554 if (!non_stop)
6bf5e0ba 2555 {
582511be
PA
2556 event_thread
2557 = (struct thread_info *) find_inferior (&all_threads,
2558 select_singlestep_lwp_callback,
2559 NULL);
2560 if (event_thread != NULL)
2561 {
2562 if (debug_threads)
2563 debug_printf ("SEL: Select single-step %s\n",
2564 target_pid_to_str (ptid_of (event_thread)));
2565 }
6bf5e0ba 2566 }
582511be 2567 if (event_thread == NULL)
6bf5e0ba
PA
2568 {
2569 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2570 which have had events. */
6bf5e0ba 2571
b90fc188 2572 /* First see how many events we have. */
d86d4aaf 2573 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2574 gdb_assert (num_events > 0);
6bf5e0ba 2575
b90fc188
PA
2576 /* Now randomly pick a LWP out of those that have had
2577 events. */
6bf5e0ba
PA
2578 random_selector = (int)
2579 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2580
2581 if (debug_threads && num_events > 1)
87ce2a04
DE
2582 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2583 num_events, random_selector);
6bf5e0ba 2584
d86d4aaf
DE
2585 event_thread
2586 = (struct thread_info *) find_inferior (&all_threads,
2587 select_event_lwp_callback,
2588 &random_selector);
6bf5e0ba
PA
2589 }
2590
d86d4aaf 2591 if (event_thread != NULL)
6bf5e0ba 2592 {
d86d4aaf
DE
2593 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2594
6bf5e0ba
PA
2595 /* Switch the event LWP. */
2596 *orig_lp = event_lp;
2597 }
2598}
2599
7984d532
PA
2600/* Decrement the suspend count of an LWP. */
2601
2602static int
2603unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2604{
d86d4aaf
DE
2605 struct thread_info *thread = (struct thread_info *) entry;
2606 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2607
2608 /* Ignore EXCEPT. */
2609 if (lwp == except)
2610 return 0;
2611
2612 lwp->suspended--;
2613
2614 gdb_assert (lwp->suspended >= 0);
2615 return 0;
2616}
2617
2618/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2619 NULL. */
2620
2621static void
2622unsuspend_all_lwps (struct lwp_info *except)
2623{
d86d4aaf 2624 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2625}
2626
fa593d66
PA
2627static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2628static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2629 void *data);
2630static int lwp_running (struct inferior_list_entry *entry, void *data);
2631static ptid_t linux_wait_1 (ptid_t ptid,
2632 struct target_waitstatus *ourstatus,
2633 int target_options);
2634
2635/* Stabilize threads (move out of jump pads).
2636
2637 If a thread is midway collecting a fast tracepoint, we need to
2638 finish the collection and move it out of the jump pad before
2639 reporting the signal.
2640
2641 This avoids recursion while collecting (when a signal arrives
2642 midway, and the signal handler itself collects), which would trash
2643 the trace buffer. In case the user set a breakpoint in a signal
2644 handler, this avoids the backtrace showing the jump pad, etc..
2645 Most importantly, there are certain things we can't do safely if
2646 threads are stopped in a jump pad (or in its callee's). For
2647 example:
2648
2649 - starting a new trace run. A thread still collecting the
2650 previous run, could trash the trace buffer when resumed. The trace
2651 buffer control structures would have been reset but the thread had
2652 no way to tell. The thread could even midway memcpy'ing to the
2653 buffer, which would mean that when resumed, it would clobber the
2654 trace buffer that had been set for a new run.
2655
2656 - we can't rewrite/reuse the jump pads for new tracepoints
2657 safely. Say you do tstart while a thread is stopped midway while
2658 collecting. When the thread is later resumed, it finishes the
2659 collection, and returns to the jump pad, to execute the original
2660 instruction that was under the tracepoint jump at the time the
2661 older run had been started. If the jump pad had been rewritten
2662 since for something else in the new run, the thread would now
2663 execute the wrong / random instructions. */
2664
2665static void
2666linux_stabilize_threads (void)
2667{
0bfdf32f 2668 struct thread_info *saved_thread;
d86d4aaf 2669 struct thread_info *thread_stuck;
fa593d66 2670
d86d4aaf
DE
2671 thread_stuck
2672 = (struct thread_info *) find_inferior (&all_threads,
2673 stuck_in_jump_pad_callback,
2674 NULL);
2675 if (thread_stuck != NULL)
fa593d66 2676 {
b4d51a55 2677 if (debug_threads)
87ce2a04 2678 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2679 lwpid_of (thread_stuck));
fa593d66
PA
2680 return;
2681 }
2682
0bfdf32f 2683 saved_thread = current_thread;
fa593d66
PA
2684
2685 stabilizing_threads = 1;
2686
2687 /* Kick 'em all. */
d86d4aaf 2688 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
fa593d66
PA
2689
2690 /* Loop until all are stopped out of the jump pads. */
d86d4aaf 2691 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
fa593d66
PA
2692 {
2693 struct target_waitstatus ourstatus;
2694 struct lwp_info *lwp;
fa593d66
PA
2695 int wstat;
2696
2697 /* Note that we go through the full wait even loop. While
2698 moving threads out of jump pad, we need to be able to step
2699 over internal breakpoints and such. */
32fcada3 2700 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2701
2702 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2703 {
0bfdf32f 2704 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2705
2706 /* Lock it. */
2707 lwp->suspended++;
2708
a493e3e2 2709 if (ourstatus.value.sig != GDB_SIGNAL_0
0bfdf32f 2710 || current_thread->last_resume_kind == resume_stop)
fa593d66 2711 {
2ea28649 2712 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
fa593d66
PA
2713 enqueue_one_deferred_signal (lwp, &wstat);
2714 }
2715 }
2716 }
2717
d86d4aaf 2718 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
fa593d66
PA
2719
2720 stabilizing_threads = 0;
2721
0bfdf32f 2722 current_thread = saved_thread;
fa593d66 2723
b4d51a55 2724 if (debug_threads)
fa593d66 2725 {
d86d4aaf
DE
2726 thread_stuck
2727 = (struct thread_info *) find_inferior (&all_threads,
2728 stuck_in_jump_pad_callback,
2729 NULL);
2730 if (thread_stuck != NULL)
87ce2a04 2731 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2732 lwpid_of (thread_stuck));
fa593d66
PA
2733 }
2734}
2735
582511be
PA
2736static void async_file_mark (void);
2737
2738/* Convenience function that is called when the kernel reports an
2739 event that is not passed out to GDB. */
2740
2741static ptid_t
2742ignore_event (struct target_waitstatus *ourstatus)
2743{
2744 /* If we got an event, there may still be others, as a single
2745 SIGCHLD can indicate more than one child stopped. This forces
2746 another target_wait call. */
2747 async_file_mark ();
2748
2749 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2750 return null_ptid;
2751}
2752
de0d863e
DB
2753/* Return non-zero if WAITSTATUS reflects an extended linux
2754 event. Otherwise, return zero. */
2755
2756static int
2757extended_event_reported (const struct target_waitstatus *waitstatus)
2758{
2759 if (waitstatus == NULL)
2760 return 0;
2761
c269dbdb
DB
2762 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2763 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2764 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
de0d863e
DB
2765}
2766
0d62e5e8 2767/* Wait for process, returns status. */
da6d8c04 2768
95954743
PA
2769static ptid_t
2770linux_wait_1 (ptid_t ptid,
2771 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2772{
e5f1222d 2773 int w;
fc7238bb 2774 struct lwp_info *event_child;
bd99dc85 2775 int options;
bd99dc85 2776 int pid;
6bf5e0ba
PA
2777 int step_over_finished;
2778 int bp_explains_trap;
2779 int maybe_internal_trap;
2780 int report_to_gdb;
219f2f23 2781 int trace_event;
c2d6af84 2782 int in_step_range;
bd99dc85 2783
87ce2a04
DE
2784 if (debug_threads)
2785 {
2786 debug_enter ();
2787 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2788 }
2789
bd99dc85
PA
2790 /* Translate generic target options into linux options. */
2791 options = __WALL;
2792 if (target_options & TARGET_WNOHANG)
2793 options |= WNOHANG;
0d62e5e8 2794
fa593d66
PA
2795 bp_explains_trap = 0;
2796 trace_event = 0;
c2d6af84 2797 in_step_range = 0;
bd99dc85
PA
2798 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2799
6bf5e0ba
PA
2800 if (ptid_equal (step_over_bkpt, null_ptid))
2801 pid = linux_wait_for_event (ptid, &w, options);
2802 else
2803 {
2804 if (debug_threads)
87ce2a04
DE
2805 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2806 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2807 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2808 }
2809
fa96cb38 2810 if (pid == 0)
87ce2a04 2811 {
fa96cb38
PA
2812 gdb_assert (target_options & TARGET_WNOHANG);
2813
87ce2a04
DE
2814 if (debug_threads)
2815 {
fa96cb38
PA
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2818 debug_exit ();
2819 }
fa96cb38
PA
2820
2821 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2822 return null_ptid;
2823 }
fa96cb38
PA
2824 else if (pid == -1)
2825 {
2826 if (debug_threads)
2827 {
2828 debug_printf ("linux_wait_1 ret = null_ptid, "
2829 "TARGET_WAITKIND_NO_RESUMED\n");
2830 debug_exit ();
2831 }
bd99dc85 2832
fa96cb38
PA
2833 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2834 return null_ptid;
2835 }
0d62e5e8 2836
0bfdf32f 2837 event_child = get_thread_lwp (current_thread);
0d62e5e8 2838
fa96cb38
PA
2839 /* linux_wait_for_event only returns an exit status for the last
2840 child of a process. Report it. */
2841 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2842 {
fa96cb38 2843 if (WIFEXITED (w))
0d62e5e8 2844 {
fa96cb38
PA
2845 ourstatus->kind = TARGET_WAITKIND_EXITED;
2846 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2847
fa96cb38 2848 if (debug_threads)
bd99dc85 2849 {
fa96cb38
PA
2850 debug_printf ("linux_wait_1 ret = %s, exited with "
2851 "retcode %d\n",
0bfdf32f 2852 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2853 WEXITSTATUS (w));
2854 debug_exit ();
bd99dc85 2855 }
fa96cb38
PA
2856 }
2857 else
2858 {
2859 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2860 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2861
fa96cb38
PA
2862 if (debug_threads)
2863 {
2864 debug_printf ("linux_wait_1 ret = %s, terminated with "
2865 "signal %d\n",
0bfdf32f 2866 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2867 WTERMSIG (w));
2868 debug_exit ();
2869 }
0d62e5e8 2870 }
fa96cb38 2871
0bfdf32f 2872 return ptid_of (current_thread);
da6d8c04
DJ
2873 }
2874
8090aef2
PA
2875 /* If step-over executes a breakpoint instruction, it means a
2876 gdb/gdbserver breakpoint had been planted on top of a permanent
2877 breakpoint. The PC has been adjusted by
2878 check_stopped_by_breakpoint to point at the breakpoint address.
2879 Advance the PC manually past the breakpoint, otherwise the
2880 program would keep trapping the permanent breakpoint forever. */
2881 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2882 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2883 {
9beb7c4e 2884 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2885
2886 if (debug_threads)
2887 {
2888 debug_printf ("step-over for %s executed software breakpoint\n",
2889 target_pid_to_str (ptid_of (current_thread)));
2890 }
2891
2892 if (increment_pc != 0)
2893 {
2894 struct regcache *regcache
2895 = get_thread_regcache (current_thread, 1);
2896
2897 event_child->stop_pc += increment_pc;
2898 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2899
2900 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2901 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2902 }
2903 }
2904
6bf5e0ba
PA
2905 /* If this event was not handled before, and is not a SIGTRAP, we
2906 report it. SIGILL and SIGSEGV are also treated as traps in case
2907 a breakpoint is inserted at the current PC. If this target does
2908 not support internal breakpoints at all, we also report the
2909 SIGTRAP without further processing; it's of no concern to us. */
2910 maybe_internal_trap
2911 = (supports_breakpoints ()
2912 && (WSTOPSIG (w) == SIGTRAP
2913 || ((WSTOPSIG (w) == SIGILL
2914 || WSTOPSIG (w) == SIGSEGV)
2915 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2916
2917 if (maybe_internal_trap)
2918 {
2919 /* Handle anything that requires bookkeeping before deciding to
2920 report the event or continue waiting. */
2921
2922 /* First check if we can explain the SIGTRAP with an internal
2923 breakpoint, or if we should possibly report the event to GDB.
2924 Do this before anything that may remove or insert a
2925 breakpoint. */
2926 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2927
2928 /* We have a SIGTRAP, possibly a step-over dance has just
2929 finished. If so, tweak the state machine accordingly,
2930 reinsert breakpoints and delete any reinsert (software
2931 single-step) breakpoints. */
2932 step_over_finished = finish_step_over (event_child);
2933
2934 /* Now invoke the callbacks of any internal breakpoints there. */
2935 check_breakpoints (event_child->stop_pc);
2936
219f2f23
PA
2937 /* Handle tracepoint data collecting. This may overflow the
2938 trace buffer, and cause a tracing stop, removing
2939 breakpoints. */
2940 trace_event = handle_tracepoints (event_child);
2941
6bf5e0ba
PA
2942 if (bp_explains_trap)
2943 {
2944 /* If we stepped or ran into an internal breakpoint, we've
2945 already handled it. So next time we resume (from this
2946 PC), we should step over it. */
2947 if (debug_threads)
87ce2a04 2948 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2949
8b07ae33
PA
2950 if (breakpoint_here (event_child->stop_pc))
2951 event_child->need_step_over = 1;
6bf5e0ba
PA
2952 }
2953 }
2954 else
2955 {
2956 /* We have some other signal, possibly a step-over dance was in
2957 progress, and it should be cancelled too. */
2958 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2959 }
2960
2961 /* We have all the data we need. Either report the event to GDB, or
2962 resume threads and keep waiting for more. */
2963
2964 /* If we're collecting a fast tracepoint, finish the collection and
2965 move out of the jump pad before delivering a signal. See
2966 linux_stabilize_threads. */
2967
2968 if (WIFSTOPPED (w)
2969 && WSTOPSIG (w) != SIGTRAP
2970 && supports_fast_tracepoints ()
58b4daa5 2971 && agent_loaded_p ())
fa593d66
PA
2972 {
2973 if (debug_threads)
87ce2a04
DE
2974 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2975 "to defer or adjust it.\n",
0bfdf32f 2976 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2977
2978 /* Allow debugging the jump pad itself. */
0bfdf32f 2979 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2980 && maybe_move_out_of_jump_pad (event_child, &w))
2981 {
2982 enqueue_one_deferred_signal (event_child, &w);
2983
2984 if (debug_threads)
87ce2a04 2985 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2986 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2987
2988 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2989
2990 return ignore_event (ourstatus);
fa593d66
PA
2991 }
2992 }
219f2f23 2993
fa593d66
PA
2994 if (event_child->collecting_fast_tracepoint)
2995 {
2996 if (debug_threads)
87ce2a04
DE
2997 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2998 "Check if we're already there.\n",
0bfdf32f 2999 lwpid_of (current_thread),
87ce2a04 3000 event_child->collecting_fast_tracepoint);
fa593d66
PA
3001
3002 trace_event = 1;
3003
3004 event_child->collecting_fast_tracepoint
3005 = linux_fast_tracepoint_collecting (event_child, NULL);
3006
3007 if (event_child->collecting_fast_tracepoint != 1)
3008 {
3009 /* No longer need this breakpoint. */
3010 if (event_child->exit_jump_pad_bkpt != NULL)
3011 {
3012 if (debug_threads)
87ce2a04
DE
3013 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3014 "stopping all threads momentarily.\n");
fa593d66
PA
3015
3016 /* Other running threads could hit this breakpoint.
3017 We don't handle moribund locations like GDB does,
3018 instead we always pause all threads when removing
3019 breakpoints, so that any step-over or
3020 decr_pc_after_break adjustment is always taken
3021 care of while the breakpoint is still
3022 inserted. */
3023 stop_all_lwps (1, event_child);
fa593d66
PA
3024
3025 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3026 event_child->exit_jump_pad_bkpt = NULL;
3027
3028 unstop_all_lwps (1, event_child);
3029
3030 gdb_assert (event_child->suspended >= 0);
3031 }
3032 }
3033
3034 if (event_child->collecting_fast_tracepoint == 0)
3035 {
3036 if (debug_threads)
87ce2a04
DE
3037 debug_printf ("fast tracepoint finished "
3038 "collecting successfully.\n");
fa593d66
PA
3039
3040 /* We may have a deferred signal to report. */
3041 if (dequeue_one_deferred_signal (event_child, &w))
3042 {
3043 if (debug_threads)
87ce2a04 3044 debug_printf ("dequeued one signal.\n");
fa593d66 3045 }
3c11dd79 3046 else
fa593d66 3047 {
3c11dd79 3048 if (debug_threads)
87ce2a04 3049 debug_printf ("no deferred signals.\n");
fa593d66
PA
3050
3051 if (stabilizing_threads)
3052 {
3053 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3054 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3055
3056 if (debug_threads)
3057 {
3058 debug_printf ("linux_wait_1 ret = %s, stopped "
3059 "while stabilizing threads\n",
0bfdf32f 3060 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3061 debug_exit ();
3062 }
3063
0bfdf32f 3064 return ptid_of (current_thread);
fa593d66
PA
3065 }
3066 }
3067 }
6bf5e0ba
PA
3068 }
3069
e471f25b
PA
3070 /* Check whether GDB would be interested in this event. */
3071
3072 /* If GDB is not interested in this signal, don't stop other
3073 threads, and don't report it to GDB. Just resume the inferior
3074 right away. We do this for threading-related signals as well as
3075 any that GDB specifically requested we ignore. But never ignore
3076 SIGSTOP if we sent it ourselves, and do not ignore signals when
3077 stepping - they may require special handling to skip the signal
c9587f88
AT
3078 handler. Also never ignore signals that could be caused by a
3079 breakpoint. */
e471f25b
PA
3080 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3081 thread library? */
3082 if (WIFSTOPPED (w)
0bfdf32f 3083 && current_thread->last_resume_kind != resume_step
e471f25b 3084 && (
1a981360 3085#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3086 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3087 && (WSTOPSIG (w) == __SIGRTMIN
3088 || WSTOPSIG (w) == __SIGRTMIN + 1))
3089 ||
3090#endif
2ea28649 3091 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3092 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3093 && current_thread->last_resume_kind == resume_stop)
3094 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3095 {
3096 siginfo_t info, *info_p;
3097
3098 if (debug_threads)
87ce2a04 3099 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3100 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3101
0bfdf32f 3102 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3103 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3104 info_p = &info;
3105 else
3106 info_p = NULL;
3107 linux_resume_one_lwp (event_child, event_child->stepping,
3108 WSTOPSIG (w), info_p);
582511be 3109 return ignore_event (ourstatus);
e471f25b
PA
3110 }
3111
c2d6af84
PA
3112 /* Note that all addresses are always "out of the step range" when
3113 there's no range to begin with. */
3114 in_step_range = lwp_in_step_range (event_child);
3115
3116 /* If GDB wanted this thread to single step, and the thread is out
3117 of the step range, we always want to report the SIGTRAP, and let
3118 GDB handle it. Watchpoints should always be reported. So should
3119 signals we can't explain. A SIGTRAP we can't explain could be a
3120 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3121 do, we're be able to handle GDB breakpoints on top of internal
3122 breakpoints, by handling the internal breakpoint and still
3123 reporting the event to GDB. If we don't, we're out of luck, GDB
3124 won't see the breakpoint hit. */
6bf5e0ba 3125 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3126 || (current_thread->last_resume_kind == resume_step
c2d6af84 3127 && !in_step_range)
15c66dd6 3128 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 3129 || (!step_over_finished && !in_step_range
493e2a69 3130 && !bp_explains_trap && !trace_event)
9f3a5c85 3131 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3132 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e
DB
3133 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3134 || extended_event_reported (&event_child->waitstatus));
d3ce09f5
SS
3135
3136 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3137
3138 /* We found no reason GDB would want us to stop. We either hit one
3139 of our own breakpoints, or finished an internal step GDB
3140 shouldn't know about. */
3141 if (!report_to_gdb)
3142 {
3143 if (debug_threads)
3144 {
3145 if (bp_explains_trap)
87ce2a04 3146 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3147 if (step_over_finished)
87ce2a04 3148 debug_printf ("Step-over finished.\n");
219f2f23 3149 if (trace_event)
87ce2a04 3150 debug_printf ("Tracepoint event.\n");
c2d6af84 3151 if (lwp_in_step_range (event_child))
87ce2a04
DE
3152 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3153 paddress (event_child->stop_pc),
3154 paddress (event_child->step_range_start),
3155 paddress (event_child->step_range_end));
de0d863e
DB
3156 if (extended_event_reported (&event_child->waitstatus))
3157 {
3158 char *str = target_waitstatus_to_string (ourstatus);
3159 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3160 lwpid_of (get_lwp_thread (event_child)), str);
3161 xfree (str);
3162 }
6bf5e0ba
PA
3163 }
3164
3165 /* We're not reporting this breakpoint to GDB, so apply the
3166 decr_pc_after_break adjustment to the inferior's regcache
3167 ourselves. */
3168
3169 if (the_low_target.set_pc != NULL)
3170 {
3171 struct regcache *regcache
0bfdf32f 3172 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3173 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3174 }
3175
7984d532
PA
3176 /* We may have finished stepping over a breakpoint. If so,
3177 we've stopped and suspended all LWPs momentarily except the
3178 stepping one. This is where we resume them all again. We're
3179 going to keep waiting, so use proceed, which handles stepping
3180 over the next breakpoint. */
6bf5e0ba 3181 if (debug_threads)
87ce2a04 3182 debug_printf ("proceeding all threads.\n");
7984d532
PA
3183
3184 if (step_over_finished)
3185 unsuspend_all_lwps (event_child);
3186
6bf5e0ba 3187 proceed_all_lwps ();
582511be 3188 return ignore_event (ourstatus);
6bf5e0ba
PA
3189 }
3190
3191 if (debug_threads)
3192 {
0bfdf32f 3193 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3194 {
3195 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3196 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3197 else if (!lwp_in_step_range (event_child))
87ce2a04 3198 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3199 }
15c66dd6 3200 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3201 debug_printf ("Stopped by watchpoint.\n");
582511be 3202 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3203 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3204 if (debug_threads)
87ce2a04 3205 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3206 }
3207
3208 /* Alright, we're going to report a stop. */
3209
582511be 3210 if (!stabilizing_threads)
6bf5e0ba
PA
3211 {
3212 /* In all-stop, stop all threads. */
582511be
PA
3213 if (!non_stop)
3214 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3215
3216 /* If we're not waiting for a specific LWP, choose an event LWP
3217 from among those that have had events. Giving equal priority
3218 to all LWPs that have had events helps prevent
3219 starvation. */
3220 if (ptid_equal (ptid, minus_one_ptid))
3221 {
3222 event_child->status_pending_p = 1;
3223 event_child->status_pending = w;
3224
3225 select_event_lwp (&event_child);
3226
0bfdf32f
GB
3227 /* current_thread and event_child must stay in sync. */
3228 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3229
6bf5e0ba
PA
3230 event_child->status_pending_p = 0;
3231 w = event_child->status_pending;
3232 }
3233
c03e6ccc 3234 if (step_over_finished)
582511be
PA
3235 {
3236 if (!non_stop)
3237 {
3238 /* If we were doing a step-over, all other threads but
3239 the stepping one had been paused in start_step_over,
3240 with their suspend counts incremented. We don't want
3241 to do a full unstop/unpause, because we're in
3242 all-stop mode (so we want threads stopped), but we
3243 still need to unsuspend the other threads, to
3244 decrement their `suspended' count back. */
3245 unsuspend_all_lwps (event_child);
3246 }
3247 else
3248 {
3249 /* If we just finished a step-over, then all threads had
3250 been momentarily paused. In all-stop, that's fine,
3251 we want threads stopped by now anyway. In non-stop,
3252 we need to re-resume threads that GDB wanted to be
3253 running. */
3254 unstop_all_lwps (1, event_child);
3255 }
3256 }
c03e6ccc 3257
fa593d66 3258 /* Stabilize threads (move out of jump pads). */
582511be
PA
3259 if (!non_stop)
3260 stabilize_threads ();
6bf5e0ba
PA
3261 }
3262 else
3263 {
3264 /* If we just finished a step-over, then all threads had been
3265 momentarily paused. In all-stop, that's fine, we want
3266 threads stopped by now anyway. In non-stop, we need to
3267 re-resume threads that GDB wanted to be running. */
3268 if (step_over_finished)
7984d532 3269 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3270 }
3271
de0d863e
DB
3272 if (extended_event_reported (&event_child->waitstatus))
3273 {
3274 /* If the reported event is a fork, vfork or exec, let GDB know. */
3275 ourstatus->kind = event_child->waitstatus.kind;
3276 ourstatus->value = event_child->waitstatus.value;
3277
3278 /* Clear the event lwp's waitstatus since we handled it already. */
3279 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3280 }
3281 else
3282 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3283
582511be 3284 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3285 it was a software breakpoint, and the client doesn't know we can
3286 adjust the breakpoint ourselves. */
3287 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3288 && !swbreak_feature)
582511be
PA
3289 {
3290 int decr_pc = the_low_target.decr_pc_after_break;
3291
3292 if (decr_pc != 0)
3293 {
3294 struct regcache *regcache
3295 = get_thread_regcache (current_thread, 1);
3296 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3297 }
3298 }
3299
0bfdf32f 3300 if (current_thread->last_resume_kind == resume_stop
8336d594 3301 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3302 {
3303 /* A thread that has been requested to stop by GDB with vCont;t,
3304 and it stopped cleanly, so report as SIG0. The use of
3305 SIGSTOP is an implementation detail. */
a493e3e2 3306 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3307 }
0bfdf32f 3308 else if (current_thread->last_resume_kind == resume_stop
8336d594 3309 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3310 {
3311 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3312 but, it stopped for other reasons. */
2ea28649 3313 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3314 }
de0d863e 3315 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3316 {
2ea28649 3317 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3318 }
3319
d50171e4
PA
3320 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3321
bd99dc85 3322 if (debug_threads)
87ce2a04
DE
3323 {
3324 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3325 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3326 ourstatus->kind, ourstatus->value.sig);
3327 debug_exit ();
3328 }
bd99dc85 3329
0bfdf32f 3330 return ptid_of (current_thread);
bd99dc85
PA
3331}
3332
3333/* Get rid of any pending event in the pipe. */
3334static void
3335async_file_flush (void)
3336{
3337 int ret;
3338 char buf;
3339
3340 do
3341 ret = read (linux_event_pipe[0], &buf, 1);
3342 while (ret >= 0 || (ret == -1 && errno == EINTR));
3343}
3344
3345/* Put something in the pipe, so the event loop wakes up. */
3346static void
3347async_file_mark (void)
3348{
3349 int ret;
3350
3351 async_file_flush ();
3352
3353 do
3354 ret = write (linux_event_pipe[1], "+", 1);
3355 while (ret == 0 || (ret == -1 && errno == EINTR));
3356
3357 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3358 be awakened anyway. */
3359}
3360
95954743
PA
3361static ptid_t
3362linux_wait (ptid_t ptid,
3363 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3364{
95954743 3365 ptid_t event_ptid;
bd99dc85 3366
bd99dc85
PA
3367 /* Flush the async file first. */
3368 if (target_is_async_p ())
3369 async_file_flush ();
3370
582511be
PA
3371 do
3372 {
3373 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3374 }
3375 while ((target_options & TARGET_WNOHANG) == 0
3376 && ptid_equal (event_ptid, null_ptid)
3377 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3378
3379 /* If at least one stop was reported, there may be more. A single
3380 SIGCHLD can signal more than one child stop. */
3381 if (target_is_async_p ()
3382 && (target_options & TARGET_WNOHANG) != 0
95954743 3383 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3384 async_file_mark ();
3385
3386 return event_ptid;
da6d8c04
DJ
3387}
3388
/* Send signal SIGNO to the LWP with id LWPID.  Prefer the tkill
   syscall, which targets a single thread; if the kernel reports it as
   unimplemented (ENOSYS), fall back to plain kill and remember the
   failure so later calls skip the probe.  Returns the syscall's
   result (0 on success, -1 with errno set on failure).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;
	/* tkill unavailable on this kernel; use kill from now on.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3416
/* Exported entry point to stop LWP: queue a SIGSTOP for it.  Thin
   wrapper so code outside this file need not know about
   send_sigstop's pending-stop bookkeeping.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3422
0d62e5e8 3423static void
02fc4de7 3424send_sigstop (struct lwp_info *lwp)
0d62e5e8 3425{
bd99dc85 3426 int pid;
0d62e5e8 3427
d86d4aaf 3428 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3429
0d62e5e8
DJ
3430 /* If we already have a pending stop signal for this process, don't
3431 send another. */
54a0b537 3432 if (lwp->stop_expected)
0d62e5e8 3433 {
ae13219e 3434 if (debug_threads)
87ce2a04 3435 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3436
0d62e5e8
DJ
3437 return;
3438 }
3439
3440 if (debug_threads)
87ce2a04 3441 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3442
d50171e4 3443 lwp->stop_expected = 1;
bd99dc85 3444 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3445}
3446
7984d532
PA
3447static int
3448send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3449{
d86d4aaf
DE
3450 struct thread_info *thread = (struct thread_info *) entry;
3451 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3452
7984d532
PA
3453 /* Ignore EXCEPT. */
3454 if (lwp == except)
3455 return 0;
3456
02fc4de7 3457 if (lwp->stopped)
7984d532 3458 return 0;
02fc4de7
PA
3459
3460 send_sigstop (lwp);
7984d532
PA
3461 return 0;
3462}
3463
3464/* Increment the suspend count of an LWP, and stop it, if not stopped
3465 yet. */
3466static int
3467suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3468 void *except)
3469{
d86d4aaf
DE
3470 struct thread_info *thread = (struct thread_info *) entry;
3471 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3472
3473 /* Ignore EXCEPT. */
3474 if (lwp == except)
3475 return 0;
3476
3477 lwp->suspended++;
3478
3479 return send_sigstop_callback (entry, except);
02fc4de7
PA
3480}
3481
95954743
PA
3482static void
3483mark_lwp_dead (struct lwp_info *lwp, int wstat)
3484{
3485 /* It's dead, really. */
3486 lwp->dead = 1;
3487
3488 /* Store the exit status for later. */
3489 lwp->status_pending_p = 1;
3490 lwp->status_pending = wstat;
3491
95954743
PA
3492 /* Prevent trying to stop it. */
3493 lwp->stopped = 1;
3494
3495 /* No further stops are expected from a dead lwp. */
3496 lwp->stop_expected = 0;
3497}
3498
/* Wait for all children to stop for the SIGSTOPs we just queued.  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember the selected thread; linux_wait_for_event_filtered may
     switch current_thread while pulling events, and the thread may
     even die while we wait.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->entry.id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  /* Restore the previously selected thread, but only if it is still
     alive; otherwise pick a replacement (or none, in non-stop).  */
  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_thread = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_thread (0);
	}
    }
}
3546
fa593d66
PA
3547/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3548 move it out, because we need to report the stop event to GDB. For
3549 example, if the user puts a breakpoint in the jump pad, it's
3550 because she wants to debug it. */
3551
3552static int
3553stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3554{
d86d4aaf
DE
3555 struct thread_info *thread = (struct thread_info *) entry;
3556 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3557
3558 gdb_assert (lwp->suspended == 0);
3559 gdb_assert (lwp->stopped);
3560
3561 /* Allow debugging the jump pad, gdb_collect, etc.. */
3562 return (supports_fast_tracepoints ()
58b4daa5 3563 && agent_loaded_p ()
fa593d66 3564 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3565 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3566 || thread->last_resume_kind == resume_step)
3567 && linux_fast_tracepoint_collecting (lwp, NULL));
3568}
3569
/* for_each_inferior callback: get ENTRY's LWP moving out of the fast
   tracepoint jump pad so it can be stabilized, unless GDB explicitly
   wants to see the stop there (breakpoint in the pad, watchpoint hit,
   or single-stepping).  LWPs that stay put get their suspend count
   bumped instead; resumed ones will be collected again later.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  /* Pass the pending status (if any) to maybe_move_out_of_jump_pad,
     which may adjust or consume it.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* The stop was a signal; defer it so it is redelivered once
	     the LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      /* Set it running so it can reach the exit-jump-pad point.  */
      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
3608
3609static int
3610lwp_running (struct inferior_list_entry *entry, void *data)
3611{
d86d4aaf
DE
3612 struct thread_info *thread = (struct thread_info *) entry;
3613 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3614
3615 if (lwp->dead)
3616 return 0;
3617 if (lwp->stopped)
3618 return 0;
3619 return 1;
3620}
3621
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  Blocks until every targeted LWP has reported its
   stop.  Sets the stopping_threads state machine for the duration so
   event-handling code can tell these stops are ours.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* Record which flavor of stop is in progress before signalling.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
  else
    find_inferior (&all_threads, send_sigstop_callback, except);
  /* Block until all the queued SIGSTOPs have been reported.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
3660
23f238d3
PA
3661/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3662 SIGNAL is nonzero, give it that signal. */
da6d8c04 3663
ce3a066d 3664static void
23f238d3
PA
3665linux_resume_one_lwp_throw (struct lwp_info *lwp,
3666 int step, int signal, siginfo_t *info)
da6d8c04 3667{
d86d4aaf 3668 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3669 struct thread_info *saved_thread;
fa593d66 3670 int fast_tp_collecting;
c06cbd92
YQ
3671 struct process_info *proc = get_thread_process (thread);
3672
3673 /* Note that target description may not be initialised
3674 (proc->tdesc == NULL) at this point because the program hasn't
3675 stopped at the first instruction yet. It means GDBserver skips
3676 the extra traps from the wrapper program (see option --wrapper).
3677 Code in this function that requires register access should be
3678 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 3679
54a0b537 3680 if (lwp->stopped == 0)
0d62e5e8
DJ
3681 return;
3682
fa593d66
PA
3683 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3684
3685 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3686
219f2f23
PA
3687 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3688 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 3689 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
3690 {
3691 /* Collecting 'while-stepping' actions doesn't make sense
3692 anymore. */
d86d4aaf 3693 release_while_stepping_state_list (thread);
219f2f23
PA
3694 }
3695
0d62e5e8
DJ
3696 /* If we have pending signals or status, and a new signal, enqueue the
3697 signal. Also enqueue the signal if we are waiting to reinsert a
3698 breakpoint; it will be picked up again below. */
3699 if (signal != 0
fa593d66
PA
3700 && (lwp->status_pending_p
3701 || lwp->pending_signals != NULL
3702 || lwp->bp_reinsert != 0
3703 || fast_tp_collecting))
0d62e5e8
DJ
3704 {
3705 struct pending_signals *p_sig;
bca929d3 3706 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3707 p_sig->prev = lwp->pending_signals;
0d62e5e8 3708 p_sig->signal = signal;
32ca6d61
DJ
3709 if (info == NULL)
3710 memset (&p_sig->info, 0, sizeof (siginfo_t));
3711 else
3712 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3713 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3714 }
3715
d50171e4
PA
3716 if (lwp->status_pending_p)
3717 {
3718 if (debug_threads)
87ce2a04
DE
3719 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3720 " has pending status\n",
d86d4aaf 3721 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3722 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3723 return;
3724 }
0d62e5e8 3725
0bfdf32f
GB
3726 saved_thread = current_thread;
3727 current_thread = thread;
0d62e5e8
DJ
3728
3729 if (debug_threads)
87ce2a04 3730 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3731 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3732 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3733
3734 /* This bit needs some thinking about. If we get a signal that
3735 we must report while a single-step reinsert is still pending,
3736 we often end up resuming the thread. It might be better to
3737 (ew) allow a stack of pending events; then we could be sure that
3738 the reinsert happened right away and not lose any signals.
3739
3740 Making this stack would also shrink the window in which breakpoints are
54a0b537 3741 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3742 complete correctness, so it won't solve that problem. It may be
3743 worthwhile just to solve this one, however. */
54a0b537 3744 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3745 {
3746 if (debug_threads)
87ce2a04
DE
3747 debug_printf (" pending reinsert at 0x%s\n",
3748 paddress (lwp->bp_reinsert));
d50171e4 3749
85e00e85 3750 if (can_hardware_single_step ())
d50171e4 3751 {
fa593d66
PA
3752 if (fast_tp_collecting == 0)
3753 {
3754 if (step == 0)
3755 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3756 if (lwp->suspended)
3757 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3758 lwp->suspended);
3759 }
d50171e4
PA
3760
3761 step = 1;
3762 }
0d62e5e8
DJ
3763
3764 /* Postpone any pending signal. It was enqueued above. */
3765 signal = 0;
3766 }
3767
fa593d66
PA
3768 if (fast_tp_collecting == 1)
3769 {
3770 if (debug_threads)
87ce2a04
DE
3771 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3772 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3773 lwpid_of (thread));
fa593d66
PA
3774
3775 /* Postpone any pending signal. It was enqueued above. */
3776 signal = 0;
3777 }
3778 else if (fast_tp_collecting == 2)
3779 {
3780 if (debug_threads)
87ce2a04
DE
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " single-stepping\n",
d86d4aaf 3783 lwpid_of (thread));
fa593d66
PA
3784
3785 if (can_hardware_single_step ())
3786 step = 1;
3787 else
38e08fca
GB
3788 {
3789 internal_error (__FILE__, __LINE__,
3790 "moving out of jump pad single-stepping"
3791 " not implemented on this target");
3792 }
fa593d66
PA
3793
3794 /* Postpone any pending signal. It was enqueued above. */
3795 signal = 0;
3796 }
3797
219f2f23
PA
3798 /* If we have while-stepping actions in this thread set it stepping.
3799 If we have a signal to deliver, it may or may not be set to
3800 SIG_IGN, we don't know. Assume so, and allow collecting
3801 while-stepping into a signal handler. A possible smart thing to
3802 do would be to set an internal breakpoint at the signal return
3803 address, continue, and carry on catching this while-stepping
3804 action only when that breakpoint is hit. A future
3805 enhancement. */
d86d4aaf 3806 if (thread->while_stepping != NULL
219f2f23
PA
3807 && can_hardware_single_step ())
3808 {
3809 if (debug_threads)
87ce2a04 3810 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3811 lwpid_of (thread));
219f2f23
PA
3812 step = 1;
3813 }
3814
c06cbd92 3815 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 3816 {
0bfdf32f 3817 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
3818
3819 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3820
3821 if (debug_threads)
3822 {
3823 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3824 (long) lwp->stop_pc);
3825 }
0d62e5e8
DJ
3826 }
3827
fa593d66
PA
3828 /* If we have pending signals, consume one unless we are trying to
3829 reinsert a breakpoint or we're trying to finish a fast tracepoint
3830 collect. */
3831 if (lwp->pending_signals != NULL
3832 && lwp->bp_reinsert == 0
3833 && fast_tp_collecting == 0)
0d62e5e8
DJ
3834 {
3835 struct pending_signals **p_sig;
3836
54a0b537 3837 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3838 while ((*p_sig)->prev != NULL)
3839 p_sig = &(*p_sig)->prev;
3840
3841 signal = (*p_sig)->signal;
32ca6d61 3842 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3843 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3844 &(*p_sig)->info);
32ca6d61 3845
0d62e5e8
DJ
3846 free (*p_sig);
3847 *p_sig = NULL;
3848 }
3849
aa5ca48f
DE
3850 if (the_low_target.prepare_to_resume != NULL)
3851 the_low_target.prepare_to_resume (lwp);
3852
d86d4aaf 3853 regcache_invalidate_thread (thread);
da6d8c04 3854 errno = 0;
54a0b537 3855 lwp->stepping = step;
d86d4aaf 3856 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3857 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3858 /* Coerce to a uintptr_t first to avoid potential gcc warning
3859 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3860 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 3861
0bfdf32f 3862 current_thread = saved_thread;
da6d8c04 3863 if (errno)
23f238d3
PA
3864 perror_with_name ("resuming thread");
3865
3866 /* Successfully resumed. Clear state that no longer makes sense,
3867 and mark the LWP as running. Must not do this before resuming
3868 otherwise if that fails other code will be confused. E.g., we'd
3869 later try to stop the LWP and hang forever waiting for a stop
3870 status. Note that we must not throw after this is cleared,
3871 otherwise handle_zombie_lwp_error would get confused. */
3872 lwp->stopped = 0;
3873 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3874}
3875
3876/* Called when we try to resume a stopped LWP and that errors out. If
3877 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3878 or about to become), discard the error, clear any pending status
3879 the LWP may have, and return true (we'll collect the exit status
3880 soon enough). Otherwise, return false. */
3881
3882static int
3883check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3884{
3885 struct thread_info *thread = get_lwp_thread (lp);
3886
3887 /* If we get an error after resuming the LWP successfully, we'd
3888 confuse !T state for the LWP being gone. */
3889 gdb_assert (lp->stopped);
3890
3891 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3892 because even if ptrace failed with ESRCH, the tracee may be "not
3893 yet fully dead", but already refusing ptrace requests. In that
3894 case the tracee has 'R (Running)' state for a little bit
3895 (observed in Linux 3.18). See also the note on ESRCH in the
3896 ptrace(2) man page. Instead, check whether the LWP has any state
3897 other than ptrace-stopped. */
3898
3899 /* Don't assume anything if /proc/PID/status can't be read. */
3900 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3901 {
23f238d3
PA
3902 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3903 lp->status_pending_p = 0;
3904 return 1;
3905 }
3906 return 0;
3907}
3908
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* A resume error is harmless if the LWP is already gone (zombie
	 or about to become one); its pending status was cleared by
	 check_ptrace_stopped_lwp_gone and the exit will be collected
	 later.  Any other error is re-thrown to the caller.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
3927
2bd7c093
PA
/* Argument bundle passed through find_inferior to
   linux_set_resume_request: the set of resume requests received from
   GDB (see linux_resume).  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 3933
ebcf782c
DE
/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  /* Asking to stop a thread that is already stopped (or
	     stopping) is a no-op; keep the earlier request and look
	     at the next resume element.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  /* Propagate the range-stepping bounds from the request.  */
	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  /* First matching resume element wins; stop scanning.  */
	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
4009
20ad9378
DE
4010/* find_inferior callback for linux_resume.
4011 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4012
bd99dc85
PA
4013static int
4014resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4015{
d86d4aaf
DE
4016 struct thread_info *thread = (struct thread_info *) entry;
4017 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4018
bd99dc85
PA
4019 /* LWPs which will not be resumed are not interesting, because
4020 we might not wait for them next time through linux_wait. */
2bd7c093 4021 if (lwp->resume == NULL)
bd99dc85 4022 return 0;
64386c31 4023
582511be 4024 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4025 * (int *) flag_p = 1;
4026
4027 return 0;
4028}
4029
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return 0;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* A thread GDB wants stopped should stay where it is.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      /* NOTE(review): there is no early return in this branch, so even
	 with NEED_STEP_OVER clear we fall through to the pending-status
	 and breakpoint checks below -- confirm this is intentional.  */
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* Breakpoint queries below need this thread to be the current one.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
4162
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Hold every other thread still while LWP steps.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Record where the breakpoint was lifted so finish_step_over can
     put it back, then lift it (and any fast tracepoint jump).  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests, and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4231
4232/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4233 start_step_over, if still there, and delete any reinsert
4234 breakpoints we've set, on non hardware single-step targets. */
4235
4236static int
4237finish_step_over (struct lwp_info *lwp)
4238{
4239 if (lwp->bp_reinsert != 0)
4240 {
4241 if (debug_threads)
87ce2a04 4242 debug_printf ("Finished step over.\n");
d50171e4
PA
4243
4244 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4245 may be no breakpoint to reinsert there by now. */
4246 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4247 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4248
4249 lwp->bp_reinsert = 0;
4250
4251 /* Delete any software-single-step reinsert breakpoints. No
4252 longer needed. We don't have to worry about other threads
4253 hitting this trap, and later not being able to explain it,
4254 because we were stepping over a breakpoint, and we hold all
4255 threads but LWP stopped while doing that. */
4256 if (!can_hardware_single_step ())
4257 delete_reinsert_breakpoints ();
4258
4259 step_over_bkpt = null_ptid;
4260 return 1;
4261 }
4262 else
4263 return 0;
4264}
4265
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG is the leave_all_stopped flag computed by linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No resume request for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4387
/* Apply the N resume requests in RESUME_INFO received from GDB:
   record each thread's request, then either report an already-pending
   event, start a step-over, or resume the threads.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each thread's matching resume request (lwp->resume).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
4451
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT (the LWP doing a step-over, if any) stays put.  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* Leave alone a thread GDB asked to stop and whose stop was already
     reported (last_status.kind recorded).  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  /* Preserve the stepping flag from the last resume request.  */
  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
4533
4534static int
4535unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4536{
d86d4aaf
DE
4537 struct thread_info *thread = (struct thread_info *) entry;
4538 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4539
4540 if (lwp == except)
4541 return 0;
4542
4543 lwp->suspended--;
4544 gdb_assert (lwp->suspended >= 0);
4545
4546 return proceed_one_lwp (entry, except);
d50171e4
PA
4547}
4548
4549/* When we finish a step-over, set threads running again. If there's
4550 another thread that may need a step-over, now's the time to start
4551 it. Eventually, we'll move all threads past their breakpoints. */
4552
4553static void
4554proceed_all_lwps (void)
4555{
d86d4aaf 4556 struct thread_info *need_step_over;
d50171e4
PA
4557
4558 /* If there is a thread which would otherwise be resumed, which is
4559 stopped at a breakpoint that needs stepping over, then don't
4560 resume any threads - have it step over the breakpoint with all
4561 other threads stopped, then resume all threads again. */
4562
4563 if (supports_breakpoints ())
4564 {
4565 need_step_over
d86d4aaf
DE
4566 = (struct thread_info *) find_inferior (&all_threads,
4567 need_step_over_p, NULL);
d50171e4
PA
4568
4569 if (need_step_over != NULL)
4570 {
4571 if (debug_threads)
87ce2a04
DE
4572 debug_printf ("proceed_all_lwps: found "
4573 "thread %ld needing a step-over\n",
4574 lwpid_of (need_step_over));
d50171e4 4575
d86d4aaf 4576 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4577 return;
4578 }
4579 }
5544ad89 4580
d50171e4 4581 if (debug_threads)
87ce2a04 4582 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4583
d86d4aaf 4584 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4585}
4586
4587/* Stopped LWPs that the client wanted to be running, that don't have
4588 pending statuses, are set to run again, except for EXCEPT, if not
4589 NULL. This undoes a stop_all_lwps call. */
4590
4591static void
7984d532 4592unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4593{
5544ad89
DJ
4594 if (debug_threads)
4595 {
87ce2a04 4596 debug_enter ();
d50171e4 4597 if (except)
87ce2a04 4598 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4599 lwpid_of (get_lwp_thread (except)));
5544ad89 4600 else
87ce2a04 4601 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4602 }
4603
7984d532 4604 if (unsuspend)
d86d4aaf 4605 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4606 else
d86d4aaf 4607 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4608
4609 if (debug_threads)
4610 {
4611 debug_printf ("unstop_all_lwps done\n");
4612 debug_exit ();
4613 }
0d62e5e8
DJ
4614}
4615
58caa3dc
DJ
4616
4617#ifdef HAVE_LINUX_REGSETS
4618
1faeff08
MR
4619#define use_linux_regsets 1
4620
030031ee
PA
4621/* Returns true if REGSET has been disabled. */
4622
4623static int
4624regset_disabled (struct regsets_info *info, struct regset_info *regset)
4625{
4626 return (info->disabled_regsets != NULL
4627 && info->disabled_regsets[regset - info->regsets]);
4628}
4629
4630/* Disable REGSET. */
4631
4632static void
4633disable_regset (struct regsets_info *info, struct regset_info *regset)
4634{
4635 int dr_offset;
4636
4637 dr_offset = regset - info->regsets;
4638 if (info->disabled_regsets == NULL)
4639 info->disabled_regsets = xcalloc (1, info->num_regsets);
4640 info->disabled_regsets[dr_offset] = 1;
4641}
4642
58caa3dc 4643static int
3aee8918
PA
4644regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4645 struct regcache *regcache)
58caa3dc
DJ
4646{
4647 struct regset_info *regset;
e9d25b98 4648 int saw_general_regs = 0;
95954743 4649 int pid;
1570b33e 4650 struct iovec iov;
58caa3dc 4651
0bfdf32f 4652 pid = lwpid_of (current_thread);
28eef672 4653 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4654 {
1570b33e
L
4655 void *buf, *data;
4656 int nt_type, res;
58caa3dc 4657
030031ee 4658 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 4659 continue;
58caa3dc 4660
bca929d3 4661 buf = xmalloc (regset->size);
1570b33e
L
4662
4663 nt_type = regset->nt_type;
4664 if (nt_type)
4665 {
4666 iov.iov_base = buf;
4667 iov.iov_len = regset->size;
4668 data = (void *) &iov;
4669 }
4670 else
4671 data = buf;
4672
dfb64f85 4673#ifndef __sparc__
f15f9948 4674 res = ptrace (regset->get_request, pid,
b8e1b30e 4675 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4676#else
1570b33e 4677 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4678#endif
58caa3dc
DJ
4679 if (res < 0)
4680 {
4681 if (errno == EIO)
4682 {
52fa2412 4683 /* If we get EIO on a regset, do not try it again for
3aee8918 4684 this process mode. */
030031ee 4685 disable_regset (regsets_info, regset);
58caa3dc 4686 }
e5a9158d
AA
4687 else if (errno == ENODATA)
4688 {
4689 /* ENODATA may be returned if the regset is currently
4690 not "active". This can happen in normal operation,
4691 so suppress the warning in this case. */
4692 }
58caa3dc
DJ
4693 else
4694 {
0d62e5e8 4695 char s[256];
95954743
PA
4696 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4697 pid);
0d62e5e8 4698 perror (s);
58caa3dc
DJ
4699 }
4700 }
098dbe61
AA
4701 else
4702 {
4703 if (regset->type == GENERAL_REGS)
4704 saw_general_regs = 1;
4705 regset->store_function (regcache, buf);
4706 }
fdeb2a12 4707 free (buf);
58caa3dc 4708 }
e9d25b98
DJ
4709 if (saw_general_regs)
4710 return 0;
4711 else
4712 return 1;
58caa3dc
DJ
4713}
4714
4715static int
3aee8918
PA
4716regsets_store_inferior_registers (struct regsets_info *regsets_info,
4717 struct regcache *regcache)
58caa3dc
DJ
4718{
4719 struct regset_info *regset;
e9d25b98 4720 int saw_general_regs = 0;
95954743 4721 int pid;
1570b33e 4722 struct iovec iov;
58caa3dc 4723
0bfdf32f 4724 pid = lwpid_of (current_thread);
28eef672 4725 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 4726 {
1570b33e
L
4727 void *buf, *data;
4728 int nt_type, res;
58caa3dc 4729
feea5f36
AA
4730 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4731 || regset->fill_function == NULL)
28eef672 4732 continue;
58caa3dc 4733
bca929d3 4734 buf = xmalloc (regset->size);
545587ee
DJ
4735
4736 /* First fill the buffer with the current register set contents,
4737 in case there are any items in the kernel's regset that are
4738 not in gdbserver's regcache. */
1570b33e
L
4739
4740 nt_type = regset->nt_type;
4741 if (nt_type)
4742 {
4743 iov.iov_base = buf;
4744 iov.iov_len = regset->size;
4745 data = (void *) &iov;
4746 }
4747 else
4748 data = buf;
4749
dfb64f85 4750#ifndef __sparc__
f15f9948 4751 res = ptrace (regset->get_request, pid,
b8e1b30e 4752 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4753#else
689cc2ae 4754 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4755#endif
545587ee
DJ
4756
4757 if (res == 0)
4758 {
4759 /* Then overlay our cached registers on that. */
442ea881 4760 regset->fill_function (regcache, buf);
545587ee
DJ
4761
4762 /* Only now do we write the register set. */
dfb64f85 4763#ifndef __sparc__
f15f9948 4764 res = ptrace (regset->set_request, pid,
b8e1b30e 4765 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 4766#else
1570b33e 4767 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 4768#endif
545587ee
DJ
4769 }
4770
58caa3dc
DJ
4771 if (res < 0)
4772 {
4773 if (errno == EIO)
4774 {
52fa2412 4775 /* If we get EIO on a regset, do not try it again for
3aee8918 4776 this process mode. */
030031ee 4777 disable_regset (regsets_info, regset);
58caa3dc 4778 }
3221518c
UW
4779 else if (errno == ESRCH)
4780 {
1b3f6016
PA
4781 /* At this point, ESRCH should mean the process is
4782 already gone, in which case we simply ignore attempts
4783 to change its registers. See also the related
4784 comment in linux_resume_one_lwp. */
fdeb2a12 4785 free (buf);
3221518c
UW
4786 return 0;
4787 }
58caa3dc
DJ
4788 else
4789 {
ce3a066d 4790 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
4791 }
4792 }
e9d25b98
DJ
4793 else if (regset->type == GENERAL_REGS)
4794 saw_general_regs = 1;
09ec9b38 4795 free (buf);
58caa3dc 4796 }
e9d25b98
DJ
4797 if (saw_general_regs)
4798 return 0;
4799 else
4800 return 1;
58caa3dc
DJ
4801}
4802
1faeff08 4803#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4804
1faeff08 4805#define use_linux_regsets 0
3aee8918
PA
4806#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4807#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4808
58caa3dc 4809#endif
1faeff08
MR
4810
4811/* Return 1 if register REGNO is supported by one of the regset ptrace
4812 calls or 0 if it has to be transferred individually. */
4813
4814static int
3aee8918 4815linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4816{
4817 unsigned char mask = 1 << (regno % 8);
4818 size_t index = regno / 8;
4819
4820 return (use_linux_regsets
3aee8918
PA
4821 && (regs_info->regset_bitmap == NULL
4822 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4823}
4824
58caa3dc 4825#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4826
4827int
3aee8918 4828register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4829{
4830 int addr;
4831
3aee8918 4832 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4833 error ("Invalid register number %d.", regnum);
4834
3aee8918 4835 addr = usrregs->regmap[regnum];
1faeff08
MR
4836
4837 return addr;
4838}
4839
4840/* Fetch one register. */
4841static void
3aee8918
PA
4842fetch_register (const struct usrregs_info *usrregs,
4843 struct regcache *regcache, int regno)
1faeff08
MR
4844{
4845 CORE_ADDR regaddr;
4846 int i, size;
4847 char *buf;
4848 int pid;
4849
3aee8918 4850 if (regno >= usrregs->num_regs)
1faeff08
MR
4851 return;
4852 if ((*the_low_target.cannot_fetch_register) (regno))
4853 return;
4854
3aee8918 4855 regaddr = register_addr (usrregs, regno);
1faeff08
MR
4856 if (regaddr == -1)
4857 return;
4858
3aee8918
PA
4859 size = ((register_size (regcache->tdesc, regno)
4860 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08
MR
4861 & -sizeof (PTRACE_XFER_TYPE));
4862 buf = alloca (size);
4863
0bfdf32f 4864 pid = lwpid_of (current_thread);
1faeff08
MR
4865 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4866 {
4867 errno = 0;
4868 *(PTRACE_XFER_TYPE *) (buf + i) =
4869 ptrace (PTRACE_PEEKUSER, pid,
4870 /* Coerce to a uintptr_t first to avoid potential gcc warning
4871 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4872 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
4873 regaddr += sizeof (PTRACE_XFER_TYPE);
4874 if (errno != 0)
4875 error ("reading register %d: %s", regno, strerror (errno));
4876 }
4877
4878 if (the_low_target.supply_ptrace_register)
4879 the_low_target.supply_ptrace_register (regcache, regno, buf);
4880 else
4881 supply_register (regcache, regno, buf);
4882}
4883
4884/* Store one register. */
4885static void
3aee8918
PA
4886store_register (const struct usrregs_info *usrregs,
4887 struct regcache *regcache, int regno)
1faeff08
MR
4888{
4889 CORE_ADDR regaddr;
4890 int i, size;
4891 char *buf;
4892 int pid;
4893
3aee8918 4894 if (regno >= usrregs->num_regs)
1faeff08
MR
4895 return;
4896 if ((*the_low_target.cannot_store_register) (regno))
4897 return;
4898
3aee8918 4899 regaddr = register_addr (usrregs, regno);
1faeff08
MR
4900 if (regaddr == -1)
4901 return;
4902
3aee8918
PA
4903 size = ((register_size (regcache->tdesc, regno)
4904 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08
MR
4905 & -sizeof (PTRACE_XFER_TYPE));
4906 buf = alloca (size);
4907 memset (buf, 0, size);
4908
4909 if (the_low_target.collect_ptrace_register)
4910 the_low_target.collect_ptrace_register (regcache, regno, buf);
4911 else
4912 collect_register (regcache, regno, buf);
4913
0bfdf32f 4914 pid = lwpid_of (current_thread);
1faeff08
MR
4915 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4916 {
4917 errno = 0;
4918 ptrace (PTRACE_POKEUSER, pid,
4919 /* Coerce to a uintptr_t first to avoid potential gcc warning
4920 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
4921 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4922 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
4923 if (errno != 0)
4924 {
4925 /* At this point, ESRCH should mean the process is
4926 already gone, in which case we simply ignore attempts
4927 to change its registers. See also the related
4928 comment in linux_resume_one_lwp. */
4929 if (errno == ESRCH)
4930 return;
4931
4932 if ((*the_low_target.cannot_store_register) (regno) == 0)
4933 error ("writing register %d: %s", regno, strerror (errno));
4934 }
4935 regaddr += sizeof (PTRACE_XFER_TYPE);
4936 }
4937}
4938
4939/* Fetch all registers, or just one, from the child process.
4940 If REGNO is -1, do this for all registers, skipping any that are
4941 assumed to have been retrieved by regsets_fetch_inferior_registers,
4942 unless ALL is non-zero.
4943 Otherwise, REGNO specifies which register (so we can save time). */
4944static void
3aee8918
PA
4945usr_fetch_inferior_registers (const struct regs_info *regs_info,
4946 struct regcache *regcache, int regno, int all)
1faeff08 4947{
3aee8918
PA
4948 struct usrregs_info *usr = regs_info->usrregs;
4949
1faeff08
MR
4950 if (regno == -1)
4951 {
3aee8918
PA
4952 for (regno = 0; regno < usr->num_regs; regno++)
4953 if (all || !linux_register_in_regsets (regs_info, regno))
4954 fetch_register (usr, regcache, regno);
1faeff08
MR
4955 }
4956 else
3aee8918 4957 fetch_register (usr, regcache, regno);
1faeff08
MR
4958}
4959
4960/* Store our register values back into the inferior.
4961 If REGNO is -1, do this for all registers, skipping any that are
4962 assumed to have been saved by regsets_store_inferior_registers,
4963 unless ALL is non-zero.
4964 Otherwise, REGNO specifies which register (so we can save time). */
4965static void
3aee8918
PA
4966usr_store_inferior_registers (const struct regs_info *regs_info,
4967 struct regcache *regcache, int regno, int all)
1faeff08 4968{
3aee8918
PA
4969 struct usrregs_info *usr = regs_info->usrregs;
4970
1faeff08
MR
4971 if (regno == -1)
4972 {
3aee8918
PA
4973 for (regno = 0; regno < usr->num_regs; regno++)
4974 if (all || !linux_register_in_regsets (regs_info, regno))
4975 store_register (usr, regcache, regno);
1faeff08
MR
4976 }
4977 else
3aee8918 4978 store_register (usr, regcache, regno);
1faeff08
MR
4979}
4980
4981#else /* !HAVE_LINUX_USRREGS */
4982
3aee8918
PA
4983#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4984#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4985
58caa3dc 4986#endif
1faeff08
MR
4987
4988
4989void
4990linux_fetch_registers (struct regcache *regcache, int regno)
4991{
4992 int use_regsets;
4993 int all = 0;
3aee8918 4994 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
4995
4996 if (regno == -1)
4997 {
3aee8918
PA
4998 if (the_low_target.fetch_register != NULL
4999 && regs_info->usrregs != NULL)
5000 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
c14dfd32
PA
5001 (*the_low_target.fetch_register) (regcache, regno);
5002
3aee8918
PA
5003 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5004 if (regs_info->usrregs != NULL)
5005 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5006 }
5007 else
5008 {
c14dfd32
PA
5009 if (the_low_target.fetch_register != NULL
5010 && (*the_low_target.fetch_register) (regcache, regno))
5011 return;
5012
3aee8918 5013 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5014 if (use_regsets)
3aee8918
PA
5015 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5016 regcache);
5017 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5018 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5019 }
58caa3dc
DJ
5020}
5021
5022void
442ea881 5023linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 5024{
1faeff08
MR
5025 int use_regsets;
5026 int all = 0;
3aee8918 5027 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5028
5029 if (regno == -1)
5030 {
3aee8918
PA
5031 all = regsets_store_inferior_registers (regs_info->regsets_info,
5032 regcache);
5033 if (regs_info->usrregs != NULL)
5034 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5035 }
5036 else
5037 {
3aee8918 5038 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5039 if (use_regsets)
3aee8918
PA
5040 all = regsets_store_inferior_registers (regs_info->regsets_info,
5041 regcache);
5042 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5043 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5044 }
58caa3dc
DJ
5045}
5046
da6d8c04 5047
da6d8c04
DJ
5048/* Copy LEN bytes from inferior's memory starting at MEMADDR
5049 to debugger memory starting at MYADDR. */
5050
c3e735a6 5051static int
f450004a 5052linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04 5053{
0bfdf32f 5054 int pid = lwpid_of (current_thread);
4934b29e
MR
5055 register PTRACE_XFER_TYPE *buffer;
5056 register CORE_ADDR addr;
5057 register int count;
5058 char filename[64];
da6d8c04 5059 register int i;
4934b29e 5060 int ret;
fd462a61 5061 int fd;
fd462a61
DJ
5062
5063 /* Try using /proc. Don't bother for one word. */
5064 if (len >= 3 * sizeof (long))
5065 {
4934b29e
MR
5066 int bytes;
5067
fd462a61
DJ
5068 /* We could keep this file open and cache it - possibly one per
5069 thread. That requires some juggling, but is even faster. */
95954743 5070 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
5071 fd = open (filename, O_RDONLY | O_LARGEFILE);
5072 if (fd == -1)
5073 goto no_proc;
5074
5075 /* If pread64 is available, use it. It's faster if the kernel
5076 supports it (only one syscall), and it's 64-bit safe even on
5077 32-bit platforms (for instance, SPARC debugging a SPARC64
5078 application). */
5079#ifdef HAVE_PREAD64
4934b29e 5080 bytes = pread64 (fd, myaddr, len, memaddr);
fd462a61 5081#else
4934b29e
MR
5082 bytes = -1;
5083 if (lseek (fd, memaddr, SEEK_SET) != -1)
5084 bytes = read (fd, myaddr, len);
fd462a61 5085#endif
fd462a61
DJ
5086
5087 close (fd);
4934b29e
MR
5088 if (bytes == len)
5089 return 0;
5090
5091 /* Some data was read, we'll try to get the rest with ptrace. */
5092 if (bytes > 0)
5093 {
5094 memaddr += bytes;
5095 myaddr += bytes;
5096 len -= bytes;
5097 }
fd462a61 5098 }
da6d8c04 5099
fd462a61 5100 no_proc:
4934b29e
MR
5101 /* Round starting address down to longword boundary. */
5102 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5103 /* Round ending address up; get number of longwords that makes. */
5104 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5105 / sizeof (PTRACE_XFER_TYPE));
5106 /* Allocate buffer of that many longwords. */
5107 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5108
da6d8c04 5109 /* Read all the longwords */
4934b29e 5110 errno = 0;
da6d8c04
DJ
5111 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5112 {
14ce3065
DE
5113 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5114 about coercing an 8 byte integer to a 4 byte pointer. */
5115 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5116 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5117 (PTRACE_TYPE_ARG4) 0);
c3e735a6 5118 if (errno)
4934b29e 5119 break;
da6d8c04 5120 }
4934b29e 5121 ret = errno;
da6d8c04
DJ
5122
5123 /* Copy appropriate bytes out of the buffer. */
8d409d16
MR
5124 if (i > 0)
5125 {
5126 i *= sizeof (PTRACE_XFER_TYPE);
5127 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5128 memcpy (myaddr,
5129 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5130 i < len ? i : len);
5131 }
c3e735a6 5132
4934b29e 5133 return ret;
da6d8c04
DJ
5134}
5135
93ae6fdc
PA
5136/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5137 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5138 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5139
ce3a066d 5140static int
f450004a 5141linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5142{
5143 register int i;
5144 /* Round starting address down to longword boundary. */
5145 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5146 /* Round ending address up; get number of longwords that makes. */
5147 register int count
493e2a69
MS
5148 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5149 / sizeof (PTRACE_XFER_TYPE);
5150
da6d8c04 5151 /* Allocate buffer of that many longwords. */
493e2a69
MS
5152 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5153 alloca (count * sizeof (PTRACE_XFER_TYPE));
5154
0bfdf32f 5155 int pid = lwpid_of (current_thread);
da6d8c04 5156
f0ae6fc3
PA
5157 if (len == 0)
5158 {
5159 /* Zero length write always succeeds. */
5160 return 0;
5161 }
5162
0d62e5e8
DJ
5163 if (debug_threads)
5164 {
58d6951d
DJ
5165 /* Dump up to four bytes. */
5166 unsigned int val = * (unsigned int *) myaddr;
5167 if (len == 1)
5168 val = val & 0xff;
5169 else if (len == 2)
5170 val = val & 0xffff;
5171 else if (len == 3)
5172 val = val & 0xffffff;
de0d863e
DB
5173 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5174 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
0d62e5e8
DJ
5175 }
5176
da6d8c04
DJ
5177 /* Fill start and end extra bytes of buffer with existing memory data. */
5178
93ae6fdc 5179 errno = 0;
14ce3065
DE
5180 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5181 about coercing an 8 byte integer to a 4 byte pointer. */
5182 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5183 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5184 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5185 if (errno)
5186 return errno;
da6d8c04
DJ
5187
5188 if (count > 1)
5189 {
93ae6fdc 5190 errno = 0;
da6d8c04 5191 buffer[count - 1]
95954743 5192 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5193 /* Coerce to a uintptr_t first to avoid potential gcc warning
5194 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5195 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5196 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5197 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5198 if (errno)
5199 return errno;
da6d8c04
DJ
5200 }
5201
93ae6fdc 5202 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5203
493e2a69
MS
5204 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5205 myaddr, len);
da6d8c04
DJ
5206
5207 /* Write the entire buffer. */
5208
5209 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5210 {
5211 errno = 0;
14ce3065
DE
5212 ptrace (PTRACE_POKETEXT, pid,
5213 /* Coerce to a uintptr_t first to avoid potential gcc warning
5214 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5215 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5216 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5217 if (errno)
5218 return errno;
5219 }
5220
5221 return 0;
5222}
2f2893d9
DJ
5223
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Nothing to do if libthread_db is already hooked up for this
     process.  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, it reports new threads
     itself, so the magic thread event breakpoint is not needed.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5239
/* Interrupt the inferior: send a SIGINT to its process group, which
   acts just like the user typing ^C on the controlling terminal.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Cast to a signed type before negating: negating the unsigned
     value and letting the implicit conversion to pid_t wrap it back
     relies on implementation-defined behavior.  */
  kill (-(pid_t) signal_pid, SIGINT);
}
5249
aa691b87
RM
5250/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5251 to debugger memory starting at MYADDR. */
5252
5253static int
f450004a 5254linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5255{
5256 char filename[PATH_MAX];
5257 int fd, n;
0bfdf32f 5258 int pid = lwpid_of (current_thread);
aa691b87 5259
6cebaf6e 5260 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5261
5262 fd = open (filename, O_RDONLY);
5263 if (fd < 0)
5264 return -1;
5265
5266 if (offset != (CORE_ADDR) 0
5267 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5268 n = -1;
5269 else
5270 n = read (fd, myaddr, len);
5271
5272 close (fd);
5273
5274 return n;
5275}
5276
d993e290
PA
5277/* These breakpoint and watchpoint related wrapper functions simply
5278 pass on the function call if the target has registered a
5279 corresponding function. */
e013ee27
OF
5280
5281static int
802e8e6d
PA
5282linux_supports_z_point_type (char z_type)
5283{
5284 return (the_low_target.supports_z_point_type != NULL
5285 && the_low_target.supports_z_point_type (z_type));
5286}
5287
5288static int
5289linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5290 int size, struct raw_breakpoint *bp)
e013ee27 5291{
c8f4bfdd
YQ
5292 if (type == raw_bkpt_type_sw)
5293 return insert_memory_breakpoint (bp);
5294 else if (the_low_target.insert_point != NULL)
802e8e6d 5295 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5296 else
5297 /* Unsupported (see target.h). */
5298 return 1;
5299}
5300
5301static int
802e8e6d
PA
5302linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5303 int size, struct raw_breakpoint *bp)
e013ee27 5304{
c8f4bfdd
YQ
5305 if (type == raw_bkpt_type_sw)
5306 return remove_memory_breakpoint (bp);
5307 else if (the_low_target.remove_point != NULL)
802e8e6d 5308 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5309 else
5310 /* Unsupported (see target.h). */
5311 return 1;
5312}
5313
3e572f71
PA
5314/* Implement the to_stopped_by_sw_breakpoint target_ops
5315 method. */
5316
5317static int
5318linux_stopped_by_sw_breakpoint (void)
5319{
5320 struct lwp_info *lwp = get_thread_lwp (current_thread);
5321
5322 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5323}
5324
5325/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5326 method. */
5327
5328static int
5329linux_supports_stopped_by_sw_breakpoint (void)
5330{
5331 return USE_SIGTRAP_SIGINFO;
5332}
5333
5334/* Implement the to_stopped_by_hw_breakpoint target_ops
5335 method. */
5336
5337static int
5338linux_stopped_by_hw_breakpoint (void)
5339{
5340 struct lwp_info *lwp = get_thread_lwp (current_thread);
5341
5342 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5343}
5344
5345/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5346 method. */
5347
5348static int
5349linux_supports_stopped_by_hw_breakpoint (void)
5350{
5351 return USE_SIGTRAP_SIGINFO;
5352}
5353
45614f15
YQ
5354/* Implement the supports_conditional_breakpoints target_ops
5355 method. */
5356
/* Implement the supports_conditional_breakpoints target_ops method.
   GDBserver must step over a breakpoint whose condition evaluates to
   false; its software single-step support is too simple for that, so
   conditional breakpoints are only offered when hardware single-step
   is available.  */

static int
linux_supports_conditional_breakpoints (void)
{
  return can_hardware_single_step ();
}
5366
e013ee27
OF
5367static int
5368linux_stopped_by_watchpoint (void)
5369{
0bfdf32f 5370 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5371
15c66dd6 5372 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5373}
5374
5375static CORE_ADDR
5376linux_stopped_data_address (void)
5377{
0bfdf32f 5378 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5379
5380 return lwp->stopped_data_address;
e013ee27
OF
5381}
5382
db0dfaa0
LM
5383#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5384 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5385 && defined(PT_TEXT_END_ADDR)
5386
5387/* This is only used for targets that define PT_TEXT_ADDR,
5388 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5389 the target has different ways of acquiring this information, like
5390 loadmaps. */
52fb6437
NS
5391
5392/* Under uClinux, programs are loaded at non-zero offsets, which we need
5393 to tell gdb about. */
5394
5395static int
5396linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5397{
52fb6437 5398 unsigned long text, text_end, data;
62828379 5399 int pid = lwpid_of (current_thread);
52fb6437
NS
5400
5401 errno = 0;
5402
b8e1b30e
LM
5403 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5404 (PTRACE_TYPE_ARG4) 0);
5405 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5406 (PTRACE_TYPE_ARG4) 0);
5407 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5408 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5409
5410 if (errno == 0)
5411 {
5412 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5413 used by gdb) are relative to the beginning of the program,
5414 with the data segment immediately following the text segment.
5415 However, the actual runtime layout in memory may put the data
5416 somewhere else, so when we send gdb a data base-address, we
5417 use the real data base address and subtract the compile-time
5418 data base-address from it (which is just the length of the
5419 text segment). BSS immediately follows data in both
5420 cases. */
52fb6437
NS
5421 *text_p = text;
5422 *data_p = data - (text_end - text);
1b3f6016 5423
52fb6437
NS
5424 return 1;
5425 }
52fb6437
NS
5426 return 0;
5427}
5428#endif
5429
07e059b5
VP
5430static int
5431linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5432 unsigned char *readbuf, unsigned const char *writebuf,
5433 CORE_ADDR offset, int len)
07e059b5 5434{
d26e3629 5435 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5436}
5437
d0722149
DE
5438/* Convert a native/host siginfo object, into/from the siginfo in the
5439 layout of the inferiors' architecture. */
5440
5441static void
a5362b9a 5442siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5443{
5444 int done = 0;
5445
5446 if (the_low_target.siginfo_fixup != NULL)
5447 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5448
5449 /* If there was no callback, or the callback didn't do anything,
5450 then just do a straight memcpy. */
5451 if (!done)
5452 {
5453 if (direction == 1)
a5362b9a 5454 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5455 else
a5362b9a 5456 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5457 }
5458}
5459
4aa995e1
PA
5460static int
5461linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5462 unsigned const char *writebuf, CORE_ADDR offset, int len)
5463{
d0722149 5464 int pid;
a5362b9a
TS
5465 siginfo_t siginfo;
5466 char inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5467
0bfdf32f 5468 if (current_thread == NULL)
4aa995e1
PA
5469 return -1;
5470
0bfdf32f 5471 pid = lwpid_of (current_thread);
4aa995e1
PA
5472
5473 if (debug_threads)
87ce2a04
DE
5474 debug_printf ("%s siginfo for lwp %d.\n",
5475 readbuf != NULL ? "Reading" : "Writing",
5476 pid);
4aa995e1 5477
0adea5f7 5478 if (offset >= sizeof (siginfo))
4aa995e1
PA
5479 return -1;
5480
b8e1b30e 5481 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5482 return -1;
5483
d0722149
DE
5484 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5485 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5486 inferior with a 64-bit GDBSERVER should look the same as debugging it
5487 with a 32-bit GDBSERVER, we need to convert it. */
5488 siginfo_fixup (&siginfo, inf_siginfo, 0);
5489
4aa995e1
PA
5490 if (offset + len > sizeof (siginfo))
5491 len = sizeof (siginfo) - offset;
5492
5493 if (readbuf != NULL)
d0722149 5494 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5495 else
5496 {
d0722149
DE
5497 memcpy (inf_siginfo + offset, writebuf, len);
5498
5499 /* Convert back to ptrace layout before flushing it out. */
5500 siginfo_fixup (&siginfo, inf_siginfo, 1);
5501
b8e1b30e 5502 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5503 return -1;
5504 }
5505
5506 return len;
5507}
5508
bd99dc85
PA
5509/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5510 so we notice when children change state; as the handler for the
5511 sigsuspend in my_waitpid. */
5512
5513static void
5514sigchld_handler (int signo)
5515{
5516 int old_errno = errno;
5517
5518 if (debug_threads)
e581f2b4
PA
5519 {
5520 do
5521 {
5522 /* fprintf is not async-signal-safe, so call write
5523 directly. */
5524 if (write (2, "sigchld_handler\n",
5525 sizeof ("sigchld_handler\n") - 1) < 0)
5526 break; /* just ignore */
5527 } while (0);
5528 }
bd99dc85
PA
5529
5530 if (target_is_async_p ())
5531 async_file_mark (); /* trigger a linux_wait */
5532
5533 errno = old_errno;
5534}
5535
/* Non-stop mode is always available on GNU/Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5541
5542static int
5543linux_async (int enable)
5544{
7089dca4 5545 int previous = target_is_async_p ();
bd99dc85 5546
8336d594 5547 if (debug_threads)
87ce2a04
DE
5548 debug_printf ("linux_async (%d), previous=%d\n",
5549 enable, previous);
8336d594 5550
bd99dc85
PA
5551 if (previous != enable)
5552 {
5553 sigset_t mask;
5554 sigemptyset (&mask);
5555 sigaddset (&mask, SIGCHLD);
5556
5557 sigprocmask (SIG_BLOCK, &mask, NULL);
5558
5559 if (enable)
5560 {
5561 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
5562 {
5563 linux_event_pipe[0] = -1;
5564 linux_event_pipe[1] = -1;
5565 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5566
5567 warning ("creating event pipe failed.");
5568 return previous;
5569 }
bd99dc85
PA
5570
5571 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5572 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5573
5574 /* Register the event loop handler. */
5575 add_file_handler (linux_event_pipe[0],
5576 handle_target_event, NULL);
5577
5578 /* Always trigger a linux_wait. */
5579 async_file_mark ();
5580 }
5581 else
5582 {
5583 delete_file_handler (linux_event_pipe[0]);
5584
5585 close (linux_event_pipe[0]);
5586 close (linux_event_pipe[1]);
5587 linux_event_pipe[0] = -1;
5588 linux_event_pipe[1] = -1;
5589 }
5590
5591 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5592 }
5593
5594 return previous;
5595}
5596
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async state could not be brought in line with NONSTOP.  */

static int
linux_start_non_stop (int nonstop)
{
  /* (Un)register from the event loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
5608
cf8fd78b
PA
/* Multi-process debugging is always supported on GNU/Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
5614
89245bc0
DB
5615/* Check if fork events are supported. */
5616
/* Fork events are available exactly when PTRACE_O_TRACEFORK is.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
5622
/* Check if vfork events are supported.  Same kernel capability as
   fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
5630
de0d863e
DB
5631/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5632 options for the specified lwp. */
5633
5634static int
5635reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5636 void *args)
5637{
5638 struct thread_info *thread = (struct thread_info *) entry;
5639 struct lwp_info *lwp = get_thread_lwp (thread);
5640
5641 if (!lwp->stopped)
5642 {
5643 /* Stop the lwp so we can modify its ptrace options. */
5644 lwp->must_set_ptrace_flags = 1;
5645 linux_stop_lwp (lwp);
5646 }
5647 else
5648 {
5649 /* Already stopped; go ahead and set the ptrace options. */
5650 struct process_info *proc = find_process_pid (pid_of (thread));
5651 int options = linux_low_ptrace_options (proc->attached);
5652
5653 linux_enable_event_reporting (lwpid_of (thread), options);
5654 lwp->must_set_ptrace_flags = 0;
5655 }
5656
5657 return 0;
5658}
5659
5660/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5661 ptrace flags for all inferiors. This is in case the new GDB connection
5662 doesn't support the same set of events that the previous one did. */
5663
5664static void
5665linux_handle_new_gdb_connection (void)
5666{
5667 pid_t pid;
5668
5669 /* Request that all the lwps reset their ptrace options. */
5670 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
5671}
5672
/* Target hook: address-space randomization can be disabled only when
   personality(2) support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 5682
/* Target hook: the in-process agent is always usable on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5688
c2d6af84
PA
5689static int
5690linux_supports_range_stepping (void)
5691{
5692 if (*the_low_target.supports_range_stepping == NULL)
5693 return 0;
5694
5695 return (*the_low_target.supports_range_stepping) ();
5696}
5697
efcbbd14
UW
5698/* Enumerate spufs IDs for process PID. */
5699static int
5700spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5701{
5702 int pos = 0;
5703 int written = 0;
5704 char path[128];
5705 DIR *dir;
5706 struct dirent *entry;
5707
5708 sprintf (path, "/proc/%ld/fd", pid);
5709 dir = opendir (path);
5710 if (!dir)
5711 return -1;
5712
5713 rewinddir (dir);
5714 while ((entry = readdir (dir)) != NULL)
5715 {
5716 struct stat st;
5717 struct statfs stfs;
5718 int fd;
5719
5720 fd = atoi (entry->d_name);
5721 if (!fd)
5722 continue;
5723
5724 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5725 if (stat (path, &st) != 0)
5726 continue;
5727 if (!S_ISDIR (st.st_mode))
5728 continue;
5729
5730 if (statfs (path, &stfs) != 0)
5731 continue;
5732 if (stfs.f_type != SPUFS_MAGIC)
5733 continue;
5734
5735 if (pos >= offset && pos + 4 <= offset + len)
5736 {
5737 *(unsigned int *)(buf + pos - offset) = fd;
5738 written += 4;
5739 }
5740 pos += 4;
5741 }
5742
5743 closedir (dir);
5744 return written;
5745}
5746
5747/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5748 object type, using the /proc file system. */
5749static int
5750linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5751 unsigned const char *writebuf,
5752 CORE_ADDR offset, int len)
5753{
0bfdf32f 5754 long pid = lwpid_of (current_thread);
efcbbd14
UW
5755 char buf[128];
5756 int fd = 0;
5757 int ret = 0;
5758
5759 if (!writebuf && !readbuf)
5760 return -1;
5761
5762 if (!*annex)
5763 {
5764 if (!readbuf)
5765 return -1;
5766 else
5767 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5768 }
5769
5770 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5771 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5772 if (fd <= 0)
5773 return -1;
5774
5775 if (offset != 0
5776 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5777 {
5778 close (fd);
5779 return 0;
5780 }
5781
5782 if (writebuf)
5783 ret = write (fd, writebuf, (size_t) len);
5784 else
5785 ret = read (fd, readbuf, (size_t) len);
5786
5787 close (fd);
5788 return ret;
5789}
5790
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One load-map entry as exchanged with the kernel on FDPIC/DSBT
   targets.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

/* Fetch the load map for the executable ("exec") or the interpreter
   ("interp") via ptrace and copy up to LEN bytes starting at OFFSET
   into MYADDR.  Returns the number of bytes copied, or -1 on any
   failure.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5868
1570b33e
L
5869static void
5870linux_process_qsupported (const char *query)
5871{
5872 if (the_low_target.process_qsupported != NULL)
5873 the_low_target.process_qsupported (query);
5874}
5875
219f2f23
PA
5876static int
5877linux_supports_tracepoints (void)
5878{
5879 if (*the_low_target.supports_tracepoints == NULL)
5880 return 0;
5881
5882 return (*the_low_target.supports_tracepoints) ();
5883}
5884
5885static CORE_ADDR
5886linux_read_pc (struct regcache *regcache)
5887{
5888 if (the_low_target.get_pc == NULL)
5889 return 0;
5890
5891 return (*the_low_target.get_pc) (regcache);
5892}
5893
5894static void
5895linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5896{
5897 gdb_assert (the_low_target.set_pc != NULL);
5898
5899 (*the_low_target.set_pc) (regcache, pc);
5900}
5901
8336d594
PA
5902static int
5903linux_thread_stopped (struct thread_info *thread)
5904{
5905 return get_thread_lwp (thread)->stopped;
5906}
5907
5908/* This exposes stop-all-threads functionality to other modules. */
5909
5910static void
7984d532 5911linux_pause_all (int freeze)
8336d594 5912{
7984d532
PA
5913 stop_all_lwps (freeze, NULL);
5914}
5915
5916/* This exposes unstop-all-threads functionality to other gdbserver
5917 modules. */
5918
5919static void
5920linux_unpause_all (int unfreeze)
5921{
5922 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5923}
5924
90d74c30
PA
5925static int
5926linux_prepare_to_access_memory (void)
5927{
5928 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5929 running LWP. */
5930 if (non_stop)
5931 linux_pause_all (1);
5932 return 0;
5933}
5934
5935static void
0146f85b 5936linux_done_accessing_memory (void)
90d74c30
PA
5937{
5938 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5939 running LWP. */
5940 if (non_stop)
5941 linux_unpause_all (1);
5942}
5943
fa593d66
PA
5944static int
5945linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5946 CORE_ADDR collector,
5947 CORE_ADDR lockaddr,
5948 ULONGEST orig_size,
5949 CORE_ADDR *jump_entry,
405f8e94
SS
5950 CORE_ADDR *trampoline,
5951 ULONGEST *trampoline_size,
fa593d66
PA
5952 unsigned char *jjump_pad_insn,
5953 ULONGEST *jjump_pad_insn_size,
5954 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
5955 CORE_ADDR *adjusted_insn_addr_end,
5956 char *err)
fa593d66
PA
5957{
5958 return (*the_low_target.install_fast_tracepoint_jump_pad)
5959 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
5960 jump_entry, trampoline, trampoline_size,
5961 jjump_pad_insn, jjump_pad_insn_size,
5962 adjusted_insn_addr, adjusted_insn_addr_end,
5963 err);
fa593d66
PA
5964}
5965
6a271cae
PA
5966static struct emit_ops *
5967linux_emit_ops (void)
5968{
5969 if (the_low_target.emit_ops != NULL)
5970 return (*the_low_target.emit_ops) ();
5971 else
5972 return NULL;
5973}
5974
405f8e94
SS
5975static int
5976linux_get_min_fast_tracepoint_insn_len (void)
5977{
5978 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5979}
5980
2268b414
JK
5981/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5982
5983static int
5984get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5985 CORE_ADDR *phdr_memaddr, int *num_phdr)
5986{
5987 char filename[PATH_MAX];
5988 int fd;
5989 const int auxv_size = is_elf64
5990 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5991 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5992
5993 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5994
5995 fd = open (filename, O_RDONLY);
5996 if (fd < 0)
5997 return 1;
5998
5999 *phdr_memaddr = 0;
6000 *num_phdr = 0;
6001 while (read (fd, buf, auxv_size) == auxv_size
6002 && (*phdr_memaddr == 0 || *num_phdr == 0))
6003 {
6004 if (is_elf64)
6005 {
6006 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6007
6008 switch (aux->a_type)
6009 {
6010 case AT_PHDR:
6011 *phdr_memaddr = aux->a_un.a_val;
6012 break;
6013 case AT_PHNUM:
6014 *num_phdr = aux->a_un.a_val;
6015 break;
6016 }
6017 }
6018 else
6019 {
6020 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6021
6022 switch (aux->a_type)
6023 {
6024 case AT_PHDR:
6025 *phdr_memaddr = aux->a_un.a_val;
6026 break;
6027 case AT_PHNUM:
6028 *num_phdr = aux->a_un.a_val;
6029 break;
6030 }
6031 }
6032 }
6033
6034 close (fd);
6035
6036 if (*phdr_memaddr == 0 || *num_phdr == 0)
6037 {
6038 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6039 "phdr_memaddr = %ld, phdr_num = %d",
6040 (long) *phdr_memaddr, *num_phdr);
6041 return 2;
6042 }
6043
6044 return 0;
6045}
6046
6047/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6048
6049static CORE_ADDR
6050get_dynamic (const int pid, const int is_elf64)
6051{
6052 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6053 int num_phdr, i;
2268b414 6054 unsigned char *phdr_buf;
db1ff28b 6055 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6056
6057 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6058 return 0;
6059
6060 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6061 phdr_buf = alloca (num_phdr * phdr_size);
6062
6063 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6064 return 0;
6065
6066 /* Compute relocation: it is expected to be 0 for "regular" executables,
6067 non-zero for PIE ones. */
6068 relocation = -1;
db1ff28b
JK
6069 for (i = 0; relocation == -1 && i < num_phdr; i++)
6070 if (is_elf64)
6071 {
6072 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6073
6074 if (p->p_type == PT_PHDR)
6075 relocation = phdr_memaddr - p->p_vaddr;
6076 }
6077 else
6078 {
6079 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6080
6081 if (p->p_type == PT_PHDR)
6082 relocation = phdr_memaddr - p->p_vaddr;
6083 }
6084
2268b414
JK
6085 if (relocation == -1)
6086 {
e237a7e2
JK
6087 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6088 any real world executables, including PIE executables, have always
6089 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6090 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6091 or present DT_DEBUG anyway (fpc binaries are statically linked).
6092
6093 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6094
6095 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6096
2268b414
JK
6097 return 0;
6098 }
6099
db1ff28b
JK
6100 for (i = 0; i < num_phdr; i++)
6101 {
6102 if (is_elf64)
6103 {
6104 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6105
6106 if (p->p_type == PT_DYNAMIC)
6107 return p->p_vaddr + relocation;
6108 }
6109 else
6110 {
6111 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6112
db1ff28b
JK
6113 if (p->p_type == PT_DYNAMIC)
6114 return p->p_vaddr + relocation;
6115 }
6116 }
2268b414
JK
6117
6118 return 0;
6119}
6120
6121/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6122 can be 0 if the inferior does not yet have the library list initialized.
6123 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6124 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6125
6126static CORE_ADDR
6127get_r_debug (const int pid, const int is_elf64)
6128{
6129 CORE_ADDR dynamic_memaddr;
6130 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6131 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6132 CORE_ADDR map = -1;
2268b414
JK
6133
6134 dynamic_memaddr = get_dynamic (pid, is_elf64);
6135 if (dynamic_memaddr == 0)
367ba2c2 6136 return map;
2268b414
JK
6137
6138 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6139 {
6140 if (is_elf64)
6141 {
6142 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
75f62ce7 6143#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6144 union
6145 {
6146 Elf64_Xword map;
6147 unsigned char buf[sizeof (Elf64_Xword)];
6148 }
6149 rld_map;
6150
6151 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6152 {
6153 if (linux_read_memory (dyn->d_un.d_val,
6154 rld_map.buf, sizeof (rld_map.buf)) == 0)
6155 return rld_map.map;
6156 else
6157 break;
6158 }
75f62ce7 6159#endif /* DT_MIPS_RLD_MAP */
2268b414 6160
367ba2c2
MR
6161 if (dyn->d_tag == DT_DEBUG && map == -1)
6162 map = dyn->d_un.d_val;
2268b414
JK
6163
6164 if (dyn->d_tag == DT_NULL)
6165 break;
6166 }
6167 else
6168 {
6169 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
75f62ce7 6170#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6171 union
6172 {
6173 Elf32_Word map;
6174 unsigned char buf[sizeof (Elf32_Word)];
6175 }
6176 rld_map;
6177
6178 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6179 {
6180 if (linux_read_memory (dyn->d_un.d_val,
6181 rld_map.buf, sizeof (rld_map.buf)) == 0)
6182 return rld_map.map;
6183 else
6184 break;
6185 }
75f62ce7 6186#endif /* DT_MIPS_RLD_MAP */
2268b414 6187
367ba2c2
MR
6188 if (dyn->d_tag == DT_DEBUG && map == -1)
6189 map = dyn->d_un.d_val;
2268b414
JK
6190
6191 if (dyn->d_tag == DT_NULL)
6192 break;
6193 }
6194
6195 dynamic_memaddr += dyn_size;
6196 }
6197
367ba2c2 6198 return map;
2268b414
JK
6199}
6200
6201/* Read one pointer from MEMADDR in the inferior. */
6202
6203static int
6204read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6205{
485f1ee4
PA
6206 int ret;
6207
6208 /* Go through a union so this works on either big or little endian
6209 hosts, when the inferior's pointer size is smaller than the size
6210 of CORE_ADDR. It is assumed the inferior's endianness is the
6211 same of the superior's. */
6212 union
6213 {
6214 CORE_ADDR core_addr;
6215 unsigned int ui;
6216 unsigned char uc;
6217 } addr;
6218
6219 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6220 if (ret == 0)
6221 {
6222 if (ptr_size == sizeof (CORE_ADDR))
6223 *ptr = addr.core_addr;
6224 else if (ptr_size == sizeof (unsigned int))
6225 *ptr = addr.ui;
6226 else
6227 gdb_assert_not_reached ("unhandled pointer size");
6228 }
6229 return ret;
2268b414
JK
6230}
6231
/* Field offsets within the inferior's `struct r_debug' and `struct
   link_map', for one pointer size.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6255
fb723180 6256/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
6257
6258static int
6259linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6260 unsigned const char *writebuf,
6261 CORE_ADDR offset, int len)
6262{
6263 char *document;
6264 unsigned document_len;
fe978cb0 6265 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6266 char filename[PATH_MAX];
6267 int pid, is_elf64;
6268
6269 static const struct link_map_offsets lmo_32bit_offsets =
6270 {
6271 0, /* r_version offset. */
6272 4, /* r_debug.r_map offset. */
6273 0, /* l_addr offset in link_map. */
6274 4, /* l_name offset in link_map. */
6275 8, /* l_ld offset in link_map. */
6276 12, /* l_next offset in link_map. */
6277 16 /* l_prev offset in link_map. */
6278 };
6279
6280 static const struct link_map_offsets lmo_64bit_offsets =
6281 {
6282 0, /* r_version offset. */
6283 8, /* r_debug.r_map offset. */
6284 0, /* l_addr offset in link_map. */
6285 8, /* l_name offset in link_map. */
6286 16, /* l_ld offset in link_map. */
6287 24, /* l_next offset in link_map. */
6288 32 /* l_prev offset in link_map. */
6289 };
6290 const struct link_map_offsets *lmo;
214d508e 6291 unsigned int machine;
b1fbec62
GB
6292 int ptr_size;
6293 CORE_ADDR lm_addr = 0, lm_prev = 0;
6294 int allocated = 1024;
6295 char *p;
6296 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6297 int header_done = 0;
2268b414
JK
6298
6299 if (writebuf != NULL)
6300 return -2;
6301 if (readbuf == NULL)
6302 return -1;
6303
0bfdf32f 6304 pid = lwpid_of (current_thread);
2268b414 6305 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6306 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 6307 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 6308 ptr_size = is_elf64 ? 8 : 4;
2268b414 6309
b1fbec62
GB
6310 while (annex[0] != '\0')
6311 {
6312 const char *sep;
6313 CORE_ADDR *addrp;
6314 int len;
2268b414 6315
b1fbec62
GB
6316 sep = strchr (annex, '=');
6317 if (sep == NULL)
6318 break;
0c5bf5a9 6319
b1fbec62 6320 len = sep - annex;
61012eef 6321 if (len == 5 && startswith (annex, "start"))
b1fbec62 6322 addrp = &lm_addr;
61012eef 6323 else if (len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6324 addrp = &lm_prev;
6325 else
6326 {
6327 annex = strchr (sep, ';');
6328 if (annex == NULL)
6329 break;
6330 annex++;
6331 continue;
6332 }
6333
6334 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6335 }
b1fbec62
GB
6336
6337 if (lm_addr == 0)
2268b414 6338 {
b1fbec62
GB
6339 int r_version = 0;
6340
6341 if (priv->r_debug == 0)
6342 priv->r_debug = get_r_debug (pid, is_elf64);
6343
6344 /* We failed to find DT_DEBUG. Such situation will not change
6345 for this inferior - do not retry it. Report it to GDB as
6346 E01, see for the reasons at the GDB solib-svr4.c side. */
6347 if (priv->r_debug == (CORE_ADDR) -1)
6348 return -1;
6349
6350 if (priv->r_debug != 0)
2268b414 6351 {
b1fbec62
GB
6352 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6353 (unsigned char *) &r_version,
6354 sizeof (r_version)) != 0
6355 || r_version != 1)
6356 {
6357 warning ("unexpected r_debug version %d", r_version);
6358 }
6359 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6360 &lm_addr, ptr_size) != 0)
6361 {
6362 warning ("unable to read r_map from 0x%lx",
6363 (long) priv->r_debug + lmo->r_map_offset);
6364 }
2268b414 6365 }
b1fbec62 6366 }
2268b414 6367
b1fbec62
GB
6368 document = xmalloc (allocated);
6369 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6370 p = document + strlen (document);
6371
6372 while (lm_addr
6373 && read_one_ptr (lm_addr + lmo->l_name_offset,
6374 &l_name, ptr_size) == 0
6375 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6376 &l_addr, ptr_size) == 0
6377 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6378 &l_ld, ptr_size) == 0
6379 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6380 &l_prev, ptr_size) == 0
6381 && read_one_ptr (lm_addr + lmo->l_next_offset,
6382 &l_next, ptr_size) == 0)
6383 {
6384 unsigned char libname[PATH_MAX];
6385
6386 if (lm_prev != l_prev)
2268b414 6387 {
b1fbec62
GB
6388 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6389 (long) lm_prev, (long) l_prev);
6390 break;
2268b414
JK
6391 }
6392
d878444c
JK
6393 /* Ignore the first entry even if it has valid name as the first entry
6394 corresponds to the main executable. The first entry should not be
6395 skipped if the dynamic loader was loaded late by a static executable
6396 (see solib-svr4.c parameter ignore_first). But in such case the main
6397 executable does not have PT_DYNAMIC present and this function already
6398 exited above due to failed get_r_debug. */
6399 if (lm_prev == 0)
2268b414 6400 {
d878444c
JK
6401 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6402 p = p + strlen (p);
6403 }
6404 else
6405 {
6406 /* Not checking for error because reading may stop before
6407 we've got PATH_MAX worth of characters. */
6408 libname[0] = '\0';
6409 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6410 libname[sizeof (libname) - 1] = '\0';
6411 if (libname[0] != '\0')
2268b414 6412 {
d878444c
JK
6413 /* 6x the size for xml_escape_text below. */
6414 size_t len = 6 * strlen ((char *) libname);
6415 char *name;
2268b414 6416
d878444c
JK
6417 if (!header_done)
6418 {
6419 /* Terminate `<library-list-svr4'. */
6420 *p++ = '>';
6421 header_done = 1;
6422 }
2268b414 6423
db1ff28b 6424 while (allocated < p - document + len + 200)
d878444c
JK
6425 {
6426 /* Expand to guarantee sufficient storage. */
6427 uintptr_t document_len = p - document;
2268b414 6428
d878444c
JK
6429 document = xrealloc (document, 2 * allocated);
6430 allocated *= 2;
6431 p = document + document_len;
6432 }
6433
6434 name = xml_escape_text ((char *) libname);
6435 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
db1ff28b 6436 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
d878444c
JK
6437 name, (unsigned long) lm_addr,
6438 (unsigned long) l_addr, (unsigned long) l_ld);
6439 free (name);
6440 }
0afae3cf 6441 }
b1fbec62
GB
6442
6443 lm_prev = lm_addr;
6444 lm_addr = l_next;
2268b414
JK
6445 }
6446
b1fbec62
GB
6447 if (!header_done)
6448 {
6449 /* Empty list; terminate `<library-list-svr4'. */
6450 strcpy (p, "/>");
6451 }
6452 else
6453 strcpy (p, "</library-list-svr4>");
6454
2268b414
JK
6455 document_len = strlen (document);
6456 if (offset < document_len)
6457 document_len -= offset;
6458 else
6459 document_len = 0;
6460 if (len > document_len)
6461 len = document_len;
6462
6463 memcpy (readbuf, document + offset, len);
6464 xfree (document);
6465
6466 return len;
6467}
6468
9accd112
MM
6469#ifdef HAVE_LINUX_BTRACE
6470
969c39fb 6471/* See to_enable_btrace target method. */
9accd112
MM
6472
6473static struct btrace_target_info *
f4abbc16 6474linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
9accd112
MM
6475{
6476 struct btrace_target_info *tinfo;
6477
f4abbc16 6478 tinfo = linux_enable_btrace (ptid, conf);
3aee8918 6479
d68e53f4 6480 if (tinfo != NULL && tinfo->ptr_bits == 0)
3aee8918
PA
6481 {
6482 struct thread_info *thread = find_thread_ptid (ptid);
6483 struct regcache *regcache = get_thread_regcache (thread, 0);
6484
6485 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6486 }
9accd112
MM
6487
6488 return tinfo;
6489}
6490
969c39fb 6491/* See to_disable_btrace target method. */
9accd112 6492
969c39fb
MM
6493static int
6494linux_low_disable_btrace (struct btrace_target_info *tinfo)
6495{
6496 enum btrace_error err;
6497
6498 err = linux_disable_btrace (tinfo);
6499 return (err == BTRACE_ERR_NONE ? 0 : -1);
6500}
6501
b20a6524
MM
6502/* Encode an Intel(R) Processor Trace configuration. */
6503
6504static void
6505linux_low_encode_pt_config (struct buffer *buffer,
6506 const struct btrace_data_pt_config *config)
6507{
6508 buffer_grow_str (buffer, "<pt-config>\n");
6509
6510 switch (config->cpu.vendor)
6511 {
6512 case CV_INTEL:
6513 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6514 "model=\"%u\" stepping=\"%u\"/>\n",
6515 config->cpu.family, config->cpu.model,
6516 config->cpu.stepping);
6517 break;
6518
6519 default:
6520 break;
6521 }
6522
6523 buffer_grow_str (buffer, "</pt-config>\n");
6524}
6525
6526/* Encode a raw buffer. */
6527
6528static void
6529linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6530 unsigned int size)
6531{
6532 if (size == 0)
6533 return;
6534
6535 /* We use hex encoding - see common/rsp-low.h. */
6536 buffer_grow_str (buffer, "<raw>\n");
6537
6538 while (size-- > 0)
6539 {
6540 char elem[2];
6541
6542 elem[0] = tohex ((*data >> 4) & 0xf);
6543 elem[1] = tohex (*data++ & 0xf);
6544
6545 buffer_grow (buffer, elem, 2);
6546 }
6547
6548 buffer_grow_str (buffer, "</raw>\n");
6549}
6550
969c39fb
MM
6551/* See to_read_btrace target method. */
6552
6553static int
9accd112
MM
6554linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6555 int type)
6556{
734b0e4b 6557 struct btrace_data btrace;
9accd112 6558 struct btrace_block *block;
969c39fb 6559 enum btrace_error err;
9accd112
MM
6560 int i;
6561
734b0e4b
MM
6562 btrace_data_init (&btrace);
6563
969c39fb
MM
6564 err = linux_read_btrace (&btrace, tinfo, type);
6565 if (err != BTRACE_ERR_NONE)
6566 {
6567 if (err == BTRACE_ERR_OVERFLOW)
6568 buffer_grow_str0 (buffer, "E.Overflow.");
6569 else
6570 buffer_grow_str0 (buffer, "E.Generic Error.");
6571
b20a6524 6572 goto err;
969c39fb 6573 }
9accd112 6574
734b0e4b
MM
6575 switch (btrace.format)
6576 {
6577 case BTRACE_FORMAT_NONE:
6578 buffer_grow_str0 (buffer, "E.No Trace.");
b20a6524 6579 goto err;
734b0e4b
MM
6580
6581 case BTRACE_FORMAT_BTS:
6582 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6583 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 6584
734b0e4b
MM
6585 for (i = 0;
6586 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6587 i++)
6588 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6589 paddress (block->begin), paddress (block->end));
9accd112 6590
734b0e4b
MM
6591 buffer_grow_str0 (buffer, "</btrace>\n");
6592 break;
6593
b20a6524
MM
6594 case BTRACE_FORMAT_PT:
6595 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6596 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6597 buffer_grow_str (buffer, "<pt>\n");
6598
6599 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6600
b20a6524
MM
6601 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6602 btrace.variant.pt.size);
6603
6604 buffer_grow_str (buffer, "</pt>\n");
6605 buffer_grow_str0 (buffer, "</btrace>\n");
6606 break;
6607
6608 default:
6609 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6610 goto err;
734b0e4b 6611 }
969c39fb 6612
734b0e4b 6613 btrace_data_fini (&btrace);
969c39fb 6614 return 0;
b20a6524
MM
6615
6616err:
6617 btrace_data_fini (&btrace);
6618 return -1;
9accd112 6619}
f4abbc16
MM
6620
6621/* See to_btrace_conf target method. */
6622
6623static int
6624linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6625 struct buffer *buffer)
6626{
6627 const struct btrace_config *conf;
6628
6629 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6630 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6631
6632 conf = linux_btrace_conf (tinfo);
6633 if (conf != NULL)
6634 {
6635 switch (conf->format)
6636 {
6637 case BTRACE_FORMAT_NONE:
6638 break;
6639
6640 case BTRACE_FORMAT_BTS:
d33501a5
MM
6641 buffer_xml_printf (buffer, "<bts");
6642 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6643 buffer_xml_printf (buffer, " />\n");
f4abbc16 6644 break;
b20a6524
MM
6645
6646 case BTRACE_FORMAT_PT:
6647 buffer_xml_printf (buffer, "<pt");
6648 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6649 buffer_xml_printf (buffer, "/>\n");
6650 break;
f4abbc16
MM
6651 }
6652 }
6653
6654 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6655 return 0;
6656}
9accd112
MM
6657#endif /* HAVE_LINUX_BTRACE */
6658
7b669087
GB
6659/* See nat/linux-nat.h. */
6660
6661ptid_t
6662current_lwp_ptid (void)
6663{
6664 return ptid_of (current_thread);
6665}
6666
ce3a066d
DJ
6667static struct target_ops linux_target_ops = {
6668 linux_create_inferior,
c06cbd92 6669 linux_arch_setup,
ce3a066d
DJ
6670 linux_attach,
6671 linux_kill,
6ad8ae5c 6672 linux_detach,
8336d594 6673 linux_mourn,
444d6139 6674 linux_join,
ce3a066d
DJ
6675 linux_thread_alive,
6676 linux_resume,
6677 linux_wait,
6678 linux_fetch_registers,
6679 linux_store_registers,
90d74c30 6680 linux_prepare_to_access_memory,
0146f85b 6681 linux_done_accessing_memory,
ce3a066d
DJ
6682 linux_read_memory,
6683 linux_write_memory,
2f2893d9 6684 linux_look_up_symbols,
ef57601b 6685 linux_request_interrupt,
aa691b87 6686 linux_read_auxv,
802e8e6d 6687 linux_supports_z_point_type,
d993e290
PA
6688 linux_insert_point,
6689 linux_remove_point,
3e572f71
PA
6690 linux_stopped_by_sw_breakpoint,
6691 linux_supports_stopped_by_sw_breakpoint,
6692 linux_stopped_by_hw_breakpoint,
6693 linux_supports_stopped_by_hw_breakpoint,
45614f15 6694 linux_supports_conditional_breakpoints,
e013ee27
OF
6695 linux_stopped_by_watchpoint,
6696 linux_stopped_data_address,
db0dfaa0
LM
6697#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6698 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6699 && defined(PT_TEXT_END_ADDR)
52fb6437 6700 linux_read_offsets,
dae5f5cf
DJ
6701#else
6702 NULL,
6703#endif
6704#ifdef USE_THREAD_DB
6705 thread_db_get_tls_address,
6706#else
6707 NULL,
52fb6437 6708#endif
efcbbd14 6709 linux_qxfer_spu,
59a016f0 6710 hostio_last_error_from_errno,
07e059b5 6711 linux_qxfer_osdata,
4aa995e1 6712 linux_xfer_siginfo,
bd99dc85
PA
6713 linux_supports_non_stop,
6714 linux_async,
6715 linux_start_non_stop,
cdbfd419 6716 linux_supports_multi_process,
89245bc0
DB
6717 linux_supports_fork_events,
6718 linux_supports_vfork_events,
de0d863e 6719 linux_handle_new_gdb_connection,
cdbfd419 6720#ifdef USE_THREAD_DB
dc146f7c 6721 thread_db_handle_monitor_command,
cdbfd419 6722#else
dc146f7c 6723 NULL,
cdbfd419 6724#endif
d26e3629 6725 linux_common_core_of_thread,
78d85199 6726 linux_read_loadmap,
219f2f23
PA
6727 linux_process_qsupported,
6728 linux_supports_tracepoints,
6729 linux_read_pc,
8336d594
PA
6730 linux_write_pc,
6731 linux_thread_stopped,
7984d532 6732 NULL,
711e434b 6733 linux_pause_all,
7984d532 6734 linux_unpause_all,
fa593d66 6735 linux_stabilize_threads,
6a271cae 6736 linux_install_fast_tracepoint_jump_pad,
03583c20
UW
6737 linux_emit_ops,
6738 linux_supports_disable_randomization,
405f8e94 6739 linux_get_min_fast_tracepoint_insn_len,
2268b414 6740 linux_qxfer_libraries_svr4,
d1feda86 6741 linux_supports_agent,
9accd112
MM
6742#ifdef HAVE_LINUX_BTRACE
6743 linux_supports_btrace,
6744 linux_low_enable_btrace,
969c39fb 6745 linux_low_disable_btrace,
9accd112 6746 linux_low_read_btrace,
f4abbc16 6747 linux_low_btrace_conf,
9accd112
MM
6748#else
6749 NULL,
6750 NULL,
6751 NULL,
6752 NULL,
f4abbc16 6753 NULL,
9accd112 6754#endif
c2d6af84 6755 linux_supports_range_stepping,
e57f1de3 6756 linux_proc_pid_to_exec_file,
14d2069a
GB
6757 linux_mntns_open_cloexec,
6758 linux_mntns_unlink,
6759 linux_mntns_readlink,
ce3a066d
DJ
6760};
6761
0d62e5e8
DJ
static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6771
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets array; the array is terminated
   by an entry whose size is negative.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
6782
da6d8c04
DJ
6783void
6784initialize_low (void)
6785{
bd99dc85
PA
6786 struct sigaction sigchld_action;
6787 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 6788 set_target_ops (&linux_target_ops);
611cb4a5
DJ
6789 set_breakpoint_data (the_low_target.breakpoint,
6790 the_low_target.breakpoint_len);
0d62e5e8 6791 linux_init_signals ();
aa7c7447 6792 linux_ptrace_init_warnings ();
bd99dc85
PA
6793
6794 sigchld_action.sa_handler = sigchld_handler;
6795 sigemptyset (&sigchld_action.sa_mask);
6796 sigchld_action.sa_flags = SA_RESTART;
6797 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
6798
6799 initialize_low_arch ();
89245bc0
DB
6800
6801 linux_check_ptrace_features ();
da6d8c04 6802}