]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Fix one heap buffer overflow in aarch64_push_dummy_call
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
957f3f49
DE
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
14d2069a 56#include "nat/linux-namespaces.h"
efcbbd14
UW
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
da6d8c04 61
03583c20
UW
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
fd462a61
DJ
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
1a981360 72
db0dfaa0
LM
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
84/* BFIN already defines these since at least 2.6.32 kernels. */
85#elif defined(BFIN)
86#define PT_TEXT_ADDR 220
87#define PT_TEXT_END_ADDR 224
88#define PT_DATA_ADDR 228
89/* These are still undefined in 3.10 kernels. */
90#elif defined(__TMS320C6X__)
91#define PT_TEXT_ADDR (0x10000*4)
92#define PT_DATA_ADDR (0x10004*4)
93#define PT_TEXT_END_ADDR (0x10008*4)
94#endif
95#endif
96
9accd112 97#ifdef HAVE_LINUX_BTRACE
125f8a3d 98# include "nat/linux-btrace.h"
734b0e4b 99# include "btrace-common.h"
9accd112
MM
100#endif
101
8365dcf5
TJB
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition used when the host
   headers don't already provide Elf32_auxv_t.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
116
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition used when the host
   headers don't already provide Elf64_auxv_t.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
131
ded48a5e
YQ
132/* Does the current host support PTRACE_GETREGSET? */
133int have_ptrace_getregset = -1;
134
cff068da
GB
135/* LWP accessors. */
136
/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  /* The full ptid lives on the owning thread; the LWP only links back
     to it.  */
  return ptid_of (get_lwp_thread (lwp));
}
144
/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Plain setter; ownership of INFO passes to LWP (freed in
     delete_lwp via lwp->arch_private).  */
  lwp->arch_private = info;
}
153
/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Plain getter; may be NULL if no arch data was attached.  */
  return lwp->arch_private;
}
161
/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero when we consider the LWP halted under ptrace.  */
  return lwp->stopped;
}
169
/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, single-step...).  */
  return lwp->stop_reason;
}
177
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list; entries are pushed by add_to_pid_list and removed
   by pull_pid_from_list.  */
struct simple_pid_list *stopped_pids;
194
195/* Trivial list manipulation functions to keep track of a list of new
196 stopped processes. */
197
198static void
199add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
200{
8d749320 201 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
202
203 new_pid->pid = pid;
204 new_pid->status = status;
205 new_pid->next = *listp;
206 *listp = new_pid;
207}
208
209static int
210pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
211{
212 struct simple_pid_list **p;
213
214 for (p = listp; *p != NULL; p = &(*p)->next)
215 if ((*p)->pid == pid)
216 {
217 struct simple_pid_list *next = (*p)->next;
218
219 *statusp = (*p)->status;
220 xfree (*p);
221 *p = next;
222 return 1;
223 }
224 return 0;
225}
24a09b5f 226
bde24c0a
PA
/* The kinds of all-threads stop operation that can be in progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
241
242/* FIXME make into a target method? */
24a09b5f 243int using_threads = 1;
24a09b5f 244
fa593d66
PA
245/* True if we're presently stabilizing threads (moving them out of
246 jump pads). */
247static int stabilizing_threads;
248
2acc282a 249static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 250 int step, int signal, siginfo_t *info);
2bd7c093 251static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
252static void stop_all_lwps (int suspend, struct lwp_info *except);
253static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
254static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
255 int *wstat, int options);
95954743 256static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 257static struct lwp_info *add_lwp (ptid_t ptid);
94585166 258static void linux_mourn (struct process_info *process);
c35fafde 259static int linux_stopped_by_watchpoint (void);
95954743 260static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 261static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 262static void proceed_all_lwps (void);
d50171e4 263static int finish_step_over (struct lwp_info *lwp);
d50171e4 264static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
265static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
266static void complete_ongoing_step_over (void);
ece66d65 267static int linux_low_ptrace_options (int attached);
d50171e4 268
582511be
PA
269/* When the event-loop is doing a step-over, this points at the thread
270 being stepped. */
271ptid_t step_over_bkpt;
272
7d00775e 273/* True if the low target can hardware single-step. */
d50171e4
PA
274
275static int
276can_hardware_single_step (void)
277{
7d00775e
AT
278 if (the_low_target.supports_hardware_single_step != NULL)
279 return the_low_target.supports_hardware_single_step ();
280 else
281 return 0;
282}
283
284/* True if the low target can software single-step. Such targets
285 implement the BREAKPOINT_REINSERT_ADDR callback. */
286
287static int
288can_software_single_step (void)
289{
290 return (the_low_target.breakpoint_reinsert_addr != NULL);
d50171e4
PA
291}
292
/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  Other code (e.g. get_pc,
   check_stopped_by_breakpoint) keys off the same callback.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}
0d62e5e8 301
fa593d66
PA
/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  Support is signalled by the presence of the
   install_fast_tracepoint_jump_pad callback.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
311
c2d6af84
PA
312/* True if LWP is stopped in its stepping range. */
313
314static int
315lwp_in_step_range (struct lwp_info *lwp)
316{
317 CORE_ADDR pc = lwp->stop_pc;
318
319 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
320}
321
0d62e5e8
DJ
/* A signal, with its full siginfo, held for an LWP that cannot take
   it immediately; presumably re-delivered later (see
   enqueue_pending_signal) — entries chain through PREV.  */

struct pending_signals
{
  int signal;			/* Signal number.  */
  siginfo_t info;		/* Associated siginfo.  */
  struct pending_signals *prev;	/* Next-older queued entry.  */
};
611cb4a5 328
bd99dc85
PA
329/* The read/write ends of the pipe registered as waitable file in the
330 event loop. */
331static int linux_event_pipe[2] = { -1, -1 };
332
333/* True if we're currently in async mode. */
334#define target_is_async_p() (linux_event_pipe[0] != -1)
335
02fc4de7 336static void send_sigstop (struct lwp_info *lwp);
fa96cb38 337static void wait_for_sigstop (void);
bd99dc85 338
d0722149
DE
/* Return non-zero if HEADER is a 64-bit ELF file.  Stores the
   e_machine value in *MACHINE on a valid ELF header, EM_NONE (and a
   return of -1) otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  int is_elf = (header->e_ident[EI_MAG0] == ELFMAG0
		&& header->e_ident[EI_MAG1] == ELFMAG1
		&& header->e_ident[EI_MAG2] == ELFMAG2
		&& header->e_ident[EI_MAG3] == ELFMAG3);

  if (!is_elf)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
356
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;
  int read_ok;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read means the file cannot be a full ELF header, hence
     not a 64-bit ELF file.  */
  read_ok = (read (fd, &header, sizeof (header)) == sizeof (header));
  close (fd);

  if (!read_ok)
    return 0;

  return elf_64_header_p (&header, machine);
}
380
be07f1a2
PA
381/* Accepts an integer PID; Returns true if the executable PID is
382 running is a 64-bit ELF file.. */
383
384int
214d508e 385linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 386{
d8d2a3ee 387 char file[PATH_MAX];
be07f1a2
PA
388
389 sprintf (file, "/proc/%d/exe", pid);
214d508e 390 return elf_64_file_p (file, machine);
be07f1a2
PA
391}
392
bd99dc85
PA
393static void
394delete_lwp (struct lwp_info *lwp)
395{
fa96cb38
PA
396 struct thread_info *thr = get_lwp_thread (lwp);
397
398 if (debug_threads)
399 debug_printf ("deleting %ld\n", lwpid_of (thr));
400
401 remove_thread (thr);
aa5ca48f 402 free (lwp->arch_private);
bd99dc85
PA
403 free (lwp);
404}
405
95954743
PA
406/* Add a process to the common process list, and set its private
407 data. */
408
409static struct process_info *
410linux_add_process (int pid, int attached)
411{
412 struct process_info *proc;
413
95954743 414 proc = add_process (pid, attached);
8d749320 415 proc->priv = XCNEW (struct process_info_private);
95954743 416
aa5ca48f 417 if (the_low_target.new_process != NULL)
fe978cb0 418 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 419
95954743
PA
420 return proc;
421}
422
582511be
PA
423static CORE_ADDR get_pc (struct lwp_info *lwp);
424
ece66d65 425/* Call the target arch_setup function on the current thread. */
94585166
DB
426
427static void
428linux_arch_setup (void)
429{
430 the_low_target.arch_setup ();
431}
432
433/* Call the target arch_setup function on THREAD. */
434
435static void
436linux_arch_setup_thread (struct thread_info *thread)
437{
438 struct thread_info *saved_thread;
439
440 saved_thread = current_thread;
441 current_thread = thread;
442
443 linux_arch_setup ();
444
445 current_thread = saved_thread;
446}
447
/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.
	 Either way, STATUS below holds the child's initial wait
	 status.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      /* From here on: PTRACE_EVENT_CLONE (a new thread of the same
	 process).  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Delete the execing process and all its threads.  */
      proc = get_thread_process (event_thr);
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
658
d50171e4
PA
659/* Return the PC as read from the regcache of LWP, without any
660 adjustment. */
661
662static CORE_ADDR
663get_pc (struct lwp_info *lwp)
664{
0bfdf32f 665 struct thread_info *saved_thread;
d50171e4
PA
666 struct regcache *regcache;
667 CORE_ADDR pc;
668
669 if (the_low_target.get_pc == NULL)
670 return 0;
671
0bfdf32f
GB
672 saved_thread = current_thread;
673 current_thread = get_lwp_thread (lwp);
d50171e4 674
0bfdf32f 675 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
676 pc = (*the_low_target.get_pc) (regcache);
677
678 if (debug_threads)
87ce2a04 679 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 680
0bfdf32f 681 current_thread = saved_thread;
d50171e4
PA
682 return pc;
683}
684
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.

   Returns 1 (and records stop_pc/stop_reason in LWP) if the stop was
   caused by a breakpoint, 0 otherwise.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: the kernel tells us directly, via siginfo.si_code,
     why the SIGTRAP was raised.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Note: recorded but not returned as a breakpoint stop —
		 falls through to the "return 0" below.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* Fallback path: infer the cause by inspecting memory at the
     (possibly decremented) PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 838
b3312d80 839static struct lwp_info *
95954743 840add_lwp (ptid_t ptid)
611cb4a5 841{
54a0b537 842 struct lwp_info *lwp;
0d62e5e8 843
8d749320 844 lwp = XCNEW (struct lwp_info);
00db26fa
PA
845
846 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 847
aa5ca48f 848 if (the_low_target.new_thread != NULL)
34c703da 849 the_low_target.new_thread (lwp);
aa5ca48f 850
f7667f0d 851 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 852
54a0b537 853 return lwp;
0d62e5e8 854}
611cb4a5 855
da6d8c04
DJ
856/* Start an inferior process and returns its pid.
857 ALLARGS is a vector of program-name and args. */
858
ce3a066d
DJ
859static int
860linux_create_inferior (char *program, char **allargs)
da6d8c04 861{
a6dbe5df 862 struct lwp_info *new_lwp;
da6d8c04 863 int pid;
95954743 864 ptid_t ptid;
8cc73a39
SDJ
865 struct cleanup *restore_personality
866 = maybe_disable_address_space_randomization (disable_randomization);
03583c20 867
42c81e2a 868#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
869 pid = vfork ();
870#else
da6d8c04 871 pid = fork ();
52fb6437 872#endif
da6d8c04
DJ
873 if (pid < 0)
874 perror_with_name ("fork");
875
876 if (pid == 0)
877 {
602e3198 878 close_most_fds ();
b8e1b30e 879 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da6d8c04 880
1a981360 881#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 882 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 883#endif
0d62e5e8 884
a9fa9f7d
DJ
885 setpgid (0, 0);
886
e0f9f062
DE
887 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
888 stdout to stderr so that inferior i/o doesn't corrupt the connection.
889 Also, redirect stdin to /dev/null. */
890 if (remote_connection_is_stdio ())
891 {
892 close (0);
893 open ("/dev/null", O_RDONLY);
894 dup2 (2, 1);
3e52c33d
JK
895 if (write (2, "stdin/stdout redirected\n",
896 sizeof ("stdin/stdout redirected\n") - 1) < 0)
8c29b58e
YQ
897 {
898 /* Errors ignored. */;
899 }
e0f9f062
DE
900 }
901
2b876972
DJ
902 execv (program, allargs);
903 if (errno == ENOENT)
904 execvp (program, allargs);
da6d8c04
DJ
905
906 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 907 strerror (errno));
da6d8c04
DJ
908 fflush (stderr);
909 _exit (0177);
910 }
911
8cc73a39 912 do_cleanups (restore_personality);
03583c20 913
55d7b841 914 linux_add_process (pid, 0);
95954743
PA
915
916 ptid = ptid_build (pid, pid, 0);
917 new_lwp = add_lwp (ptid);
a6dbe5df 918 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 919
a9fa9f7d 920 return pid;
da6d8c04
DJ
921}
922
ece66d65
JS
923/* Implement the post_create_inferior target_ops method. */
924
925static void
926linux_post_create_inferior (void)
927{
928 struct lwp_info *lwp = get_thread_lwp (current_thread);
929
930 linux_arch_setup ();
931
932 if (lwp->must_set_ptrace_flags)
933 {
934 struct process_info *proc = current_process ();
935 int options = linux_low_ptrace_options (proc->attached);
936
937 linux_enable_event_reporting (lwpid_of (current_thread), options);
938 lwp->must_set_ptrace_flags = 0;
939 }
940}
941
8784d563
PA
942/* Attach to an inferior process. Returns 0 on success, ERRNO on
943 error. */
da6d8c04 944
7ae1a6a6
PA
945int
946linux_attach_lwp (ptid_t ptid)
da6d8c04 947{
54a0b537 948 struct lwp_info *new_lwp;
7ae1a6a6 949 int lwpid = ptid_get_lwp (ptid);
611cb4a5 950
b8e1b30e 951 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 952 != 0)
7ae1a6a6 953 return errno;
24a09b5f 954
b3312d80 955 new_lwp = add_lwp (ptid);
0d62e5e8 956
a6dbe5df
PA
957 /* We need to wait for SIGSTOP before being able to make the next
958 ptrace call on this LWP. */
959 new_lwp->must_set_ptrace_flags = 1;
960
644cebc9 961 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2
PA
962 {
963 if (debug_threads)
87ce2a04 964 debug_printf ("Attached to a stopped process\n");
c14d7ab2
PA
965
966 /* The process is definitely stopped. It is in a job control
967 stop, unless the kernel predates the TASK_STOPPED /
968 TASK_TRACED distinction, in which case it might be in a
969 ptrace stop. Make sure it is in a ptrace stop; from there we
970 can kill it, signal it, et cetera.
971
972 First make sure there is a pending SIGSTOP. Since we are
973 already attached, the process can not transition from stopped
974 to running without a PTRACE_CONT; so we know this signal will
975 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
976 probably already in the queue (unless this kernel is old
977 enough to use TASK_STOPPED for ptrace stops); but since
978 SIGSTOP is not an RT signal, it can only be queued once. */
979 kill_lwp (lwpid, SIGSTOP);
980
981 /* Finally, resume the stopped process. This will deliver the
982 SIGSTOP (or a higher priority signal, just like normal
983 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 984 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
985 }
986
0d62e5e8 987 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
988 brings it to a halt.
989
990 There are several cases to consider here:
991
992 1) gdbserver has already attached to the process and is being notified
1b3f6016 993 of a new thread that is being created.
d50171e4
PA
994 In this case we should ignore that SIGSTOP and resume the
995 process. This is handled below by setting stop_expected = 1,
8336d594 996 and the fact that add_thread sets last_resume_kind ==
d50171e4 997 resume_continue.
0e21c1ec
DE
998
999 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
1000 to it via attach_inferior.
1001 In this case we want the process thread to stop.
d50171e4
PA
1002 This is handled by having linux_attach set last_resume_kind ==
1003 resume_stop after we return.
e3deef73
LM
1004
1005 If the pid we are attaching to is also the tgid, we attach to and
1006 stop all the existing threads. Otherwise, we attach to pid and
1007 ignore any other threads in the same group as this pid.
0e21c1ec
DE
1008
1009 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
1010 existing threads.
1011 In this case we want the thread to stop.
1012 FIXME: This case is currently not properly handled.
1013 We should wait for the SIGSTOP but don't. Things work apparently
1014 because enough time passes between when we ptrace (ATTACH) and when
1015 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
1016
1017 On the other hand, if we are currently trying to stop all threads, we
1018 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 1019 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
1020 end of the list, and so the new thread has not yet reached
1021 wait_for_sigstop (but will). */
d50171e4 1022 new_lwp->stop_expected = 1;
0d62e5e8 1023
7ae1a6a6 1024 return 0;
95954743
PA
1025}
1026
8784d563
PA
1027/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1028 already attached. Returns true if a new LWP is found, false
1029 otherwise. */
1030
1031static int
1032attach_proc_task_lwp_callback (ptid_t ptid)
1033{
1034 /* Is this a new thread? */
1035 if (find_thread_ptid (ptid) == NULL)
1036 {
1037 int lwpid = ptid_get_lwp (ptid);
1038 int err;
1039
1040 if (debug_threads)
1041 debug_printf ("Found new lwp %d\n", lwpid);
1042
1043 err = linux_attach_lwp (ptid);
1044
1045 /* Be quiet if we simply raced with the thread exiting. EPERM
1046 is returned if the thread's task still exists, and is marked
1047 as exited or zombie, as well as other conditions, so in that
1048 case, confirm the status in /proc/PID/status. */
1049 if (err == ESRCH
1050 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1051 {
1052 if (debug_threads)
1053 {
1054 debug_printf ("Cannot attach to lwp %d: "
1055 "thread is gone (%d: %s)\n",
1056 lwpid, err, strerror (err));
1057 }
1058 }
1059 else if (err != 0)
1060 {
1061 warning (_("Cannot attach to lwp %d: %s"),
1062 lwpid,
1063 linux_ptrace_attach_fail_reason_string (ptid, err));
1064 }
1065
1066 return 1;
1067 }
1068 return 0;
1069}
1070
500c1d85
PA
1071static void async_file_mark (void);
1072
e3deef73
LM
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; calls error() on a failed
   initial attach.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      /* Any stop other than the expected initial SIGSTOP is stashed
	 away to be reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1142
/* Per-iteration state for second_thread_of_pid_p: the pid to match
   and how many matching threads have been seen so far.  */

struct counter
{
  int pid;
  int count;
};
1148
1149static int
1150second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1151{
9a3c8263 1152 struct counter *counter = (struct counter *) args;
95954743
PA
1153
1154 if (ptid_get_pid (entry->id) == counter->pid)
1155 {
1156 if (++counter->count > 1)
1157 return 1;
1158 }
d61ddec4 1159
da6d8c04
DJ
1160 return 0;
1161}
1162
95954743 1163static int
fa96cb38 1164last_thread_of_process_p (int pid)
95954743 1165{
95954743 1166 struct counter counter = { pid , 0 };
da6d8c04 1167
95954743
PA
1168 return (find_inferior (&all_threads,
1169 second_thread_of_pid_p, &counter) == NULL);
1170}
1171
/* Kill LWP.  Sends both SIGKILL and PTRACE_KILL; see the comment
   below for why both are needed.  Does not wait for the LWP to die;
   see kill_wait_lwp for that.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno immediately: debug_printf's own calls may
	 clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1216
/* Kill LWP and wait for it to die.  Loops killing and reaping until
   waitpid reports something other than a stop (i.e., an exit), or
   until the child disappears (ECHILD).  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1259
da84f473
PA
1260/* Callback for `find_inferior'. Kills an lwp of a given process,
1261 except the leader. */
95954743
PA
1262
1263static int
da84f473 1264kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 1265{
0d62e5e8 1266 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1267 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1268 int pid = * (int *) args;
1269
1270 if (ptid_get_pid (entry->id) != pid)
1271 return 0;
0d62e5e8 1272
fd500816
DJ
1273 /* We avoid killing the first thread here, because of a Linux kernel (at
1274 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1275 the children get a chance to be reaped, it will remain a zombie
1276 forever. */
95954743 1277
d86d4aaf 1278 if (lwpid_of (thread) == pid)
95954743
PA
1279 {
1280 if (debug_threads)
87ce2a04
DE
1281 debug_printf ("lkop: is last of process %s\n",
1282 target_pid_to_str (entry->id));
95954743
PA
1283 return 0;
1284 }
fd500816 1285
e76126e8 1286 kill_wait_lwp (lwp);
95954743 1287 return 0;
da6d8c04
DJ
1288}
1289
/* Kill the whole process PID: stop it, kill all of its LWPs (leader
   last), mourn it, and unstop everything else.  Returns 0 on
   success, -1 if PID is not a known process.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1326
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver, or 0 for none.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      /* GDB told us explicitly not to pass this signal.  */
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1406
/* Callback for find_inferior.  Detach from one LWP of process PID
   (pointed to by ARGS), delivering any pending signal to it.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1447
/* Detach from process PID and all of its LWPs.  Returns 0 on
   success, -1 if PID is not a known process.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to sucessfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1484
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct process_info *target_proc = (struct process_info *) proc;

  if (pid_of (thr) == pid_of (target_proc))
    delete_lwp (get_thread_lwp (thr));

  return 0;
}
1499
8336d594
PA
1500static void
1501linux_mourn (struct process_info *process)
1502{
1503 struct process_info_private *priv;
1504
1505#ifdef USE_THREAD_DB
1506 thread_db_mourn (process);
1507#endif
1508
d86d4aaf 1509 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1510
8336d594 1511 /* Freeing all private data. */
fe978cb0 1512 priv = process->priv;
8336d594
PA
1513 free (priv->arch_private);
1514 free (priv);
fe978cb0 1515 process->priv = NULL;
505106cd
PA
1516
1517 remove_process (process);
8336d594
PA
1518}
1519
/* Wait until process PID is reaped: loop until it exits or is
   signalled, or until waitpid reports no such child (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);

    /* Only inspect STATUS when waitpid actually reported an event;
       on failure (ret == -1) STATUS is indeterminate, and reading it
       through WIFEXITED/WIFSIGNALED was undefined behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1531
6ad8ae5c 1532/* Return nonzero if the given thread is still alive. */
0d62e5e8 1533static int
95954743 1534linux_thread_alive (ptid_t ptid)
0d62e5e8 1535{
95954743
PA
1536 struct lwp_info *lwp = find_lwp_pid (ptid);
1537
1538 /* We assume we always know if a thread exits. If a whole process
1539 exited but we still haven't been able to report it to GDB, we'll
1540 hold on to the last lwp of the dead process. */
1541 if (lwp != NULL)
00db26fa 1542 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1543 else
1544 return 0;
1545}
1546
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* A breakpoint stop is only still interesting if the breakpoint is
     still relevant: the thread hasn't moved and (without real siginfo
     support) the breakpoint still exists at the stop PC.  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the current thread; get_pc and the
	 breakpoint checks below operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1614
a681f9c9
PA
1615/* Returns true if LWP is resumed from the client's perspective. */
1616
1617static int
1618lwp_resumed (struct lwp_info *lwp)
1619{
1620 struct thread_info *thread = get_lwp_thread (lwp);
1621
1622 if (thread->last_resume_kind != resume_stop)
1623 return 1;
1624
1625 /* Did gdb send us a `vCont;t', but we haven't reported the
1626 corresponding stop to gdb yet? If so, the thread is still
1627 resumed/running from gdb's perspective. */
1628 if (thread->last_resume_kind == resume_stop
1629 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1630 return 1;
1631
1632 return 0;
1633}
1634
/* Return 1 if this lwp has an interesting status pending.  As a side
   effect, re-resumes an LWP whose pending event is no longer
   interesting (e.g., a vanished breakpoint).  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The pending stop is stale; resume the LWP (preserving its
	 stepping state) and report no event for it.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1660
95954743
PA
1661static int
1662same_lwp (struct inferior_list_entry *entry, void *data)
1663{
1664 ptid_t ptid = *(ptid_t *) data;
1665 int lwp;
1666
1667 if (ptid_get_lwp (ptid) != 0)
1668 lwp = ptid_get_lwp (ptid);
1669 else
1670 lwp = ptid_get_pid (ptid);
1671
1672 if (ptid_get_lwp (entry->id) == lwp)
1673 return 1;
1674
1675 return 0;
1676}
1677
1678struct lwp_info *
1679find_lwp_pid (ptid_t ptid)
1680{
d86d4aaf
DE
1681 struct inferior_list_entry *thread
1682 = find_inferior (&all_threads, same_lwp, &ptid);
1683
1684 if (thread == NULL)
1685 return NULL;
1686
1687 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1688}
1689
fa96cb38 1690/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1691
fa96cb38
PA
1692static int
1693num_lwps (int pid)
1694{
1695 struct inferior_list_entry *inf, *tmp;
1696 int count = 0;
0d62e5e8 1697
fa96cb38 1698 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1699 {
fa96cb38
PA
1700 if (ptid_get_pid (inf->id) == pid)
1701 count++;
24a09b5f 1702 }
3aee8918 1703
fa96cb38
PA
1704 return count;
1705}
d61ddec4 1706
/* The arguments passed to iterate_over_lwps, bundled so they can be
   threaded through find_inferior's single void * argument.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
1720
1721/* Callback for find_inferior used by iterate_over_lwps to filter
1722 calls to the callback supplied to that function. Returning a
1723 nonzero value causes find_inferiors to stop iterating and return
1724 the current inferior_list_entry. Returning zero indicates that
1725 find_inferiors should continue iterating. */
1726
1727static int
1728iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1729{
1730 struct iterate_over_lwps_args *args
1731 = (struct iterate_over_lwps_args *) args_p;
1732
1733 if (ptid_match (entry->id, args->filter))
1734 {
1735 struct thread_info *thr = (struct thread_info *) entry;
1736 struct lwp_info *lwp = get_thread_lwp (thr);
1737
1738 return (*args->callback) (lwp, args->data);
1739 }
1740
1741 return 0;
1742}
1743
1744/* See nat/linux-nat.h. */
1745
1746struct lwp_info *
1747iterate_over_lwps (ptid_t filter,
1748 iterate_over_lwps_ftype callback,
1749 void *data)
1750{
1751 struct iterate_over_lwps_args args = {filter, callback, data};
1752 struct inferior_list_entry *entry;
1753
1754 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1755 if (entry == NULL)
1756 return NULL;
1757
1758 return get_thread_lwp ((struct thread_info *) entry);
1759}
1760
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
c3adc08c 1826
fa96cb38
PA
1827/* Callback for `find_inferior'. Returns the first LWP that is not
1828 stopped. ARG is a PTID filter. */
d50171e4 1829
fa96cb38
PA
1830static int
1831not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1832{
1833 struct thread_info *thr = (struct thread_info *) entry;
1834 struct lwp_info *lwp;
1835 ptid_t filter = *(ptid_t *) arg;
47c0c975 1836
fa96cb38
PA
1837 if (!ptid_match (ptid_of (thr), filter))
1838 return 0;
bd99dc85 1839
fa96cb38
PA
1840 lwp = get_thread_lwp (thr);
1841 if (!lwp->stopped)
1842 return 1;
1843
1844 return 0;
0d62e5e8 1845}
611cb4a5 1846
863d01bd
PA
1847/* Increment LWP's suspend count. */
1848
1849static void
1850lwp_suspended_inc (struct lwp_info *lwp)
1851{
1852 lwp->suspended++;
1853
1854 if (debug_threads && lwp->suspended > 4)
1855 {
1856 struct thread_info *thread = get_lwp_thread (lwp);
1857
1858 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1859 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1860 }
1861}
1862
1863/* Decrement LWP's suspend count. */
1864
1865static void
1866lwp_suspended_decr (struct lwp_info *lwp)
1867{
1868 lwp->suspended--;
1869
1870 if (lwp->suspended < 0)
1871 {
1872 struct thread_info *thread = get_lwp_thread (lwp);
1873
1874 internal_error (__FILE__, __LINE__,
1875 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1876 lwp->suspended);
1877 }
1878}
1879
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1927
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On success, STATUS is filled in by
   fast_tracepoint_collecting.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Targets without a get_thread_area hook can't support fast
     tracepoints at all.  */
  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1950
1951/* The reason we resume in the caller, is because we want to be able
1952 to pass lwp->status_pending as WSTAT, and we need to clear
1953 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1954 refuses to resume. */
1955
1956static int
1957maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1958{
0bfdf32f 1959 struct thread_info *saved_thread;
fa593d66 1960
0bfdf32f
GB
1961 saved_thread = current_thread;
1962 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1963
1964 if ((wstat == NULL
1965 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1966 && supports_fast_tracepoints ()
58b4daa5 1967 && agent_loaded_p ())
fa593d66
PA
1968 {
1969 struct fast_tpoint_collect_status status;
1970 int r;
1971
1972 if (debug_threads)
87ce2a04
DE
1973 debug_printf ("Checking whether LWP %ld needs to move out of the "
1974 "jump pad.\n",
0bfdf32f 1975 lwpid_of (current_thread));
fa593d66
PA
1976
1977 r = linux_fast_tracepoint_collecting (lwp, &status);
1978
1979 if (wstat == NULL
1980 || (WSTOPSIG (*wstat) != SIGILL
1981 && WSTOPSIG (*wstat) != SIGFPE
1982 && WSTOPSIG (*wstat) != SIGSEGV
1983 && WSTOPSIG (*wstat) != SIGBUS))
1984 {
1985 lwp->collecting_fast_tracepoint = r;
1986
1987 if (r != 0)
1988 {
1989 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1990 {
1991 /* Haven't executed the original instruction yet.
1992 Set breakpoint there, and wait till it's hit,
1993 then single-step until exiting the jump pad. */
1994 lwp->exit_jump_pad_bkpt
1995 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1996 }
1997
1998 if (debug_threads)
87ce2a04
DE
1999 debug_printf ("Checking whether LWP %ld needs to move out of "
2000 "the jump pad...it does\n",
0bfdf32f
GB
2001 lwpid_of (current_thread));
2002 current_thread = saved_thread;
fa593d66
PA
2003
2004 return 1;
2005 }
2006 }
2007 else
2008 {
2009 /* If we get a synchronous signal while collecting, *and*
2010 while executing the (relocated) original instruction,
2011 reset the PC to point at the tpoint address, before
2012 reporting to GDB. Otherwise, it's an IPA lib bug: just
2013 report the signal to GDB, and pray for the best. */
2014
2015 lwp->collecting_fast_tracepoint = 0;
2016
2017 if (r != 0
2018 && (status.adjusted_insn_addr <= lwp->stop_pc
2019 && lwp->stop_pc < status.adjusted_insn_addr_end))
2020 {
2021 siginfo_t info;
2022 struct regcache *regcache;
2023
2024 /* The si_addr on a few signals references the address
2025 of the faulting instruction. Adjust that as
2026 well. */
2027 if ((WSTOPSIG (*wstat) == SIGILL
2028 || WSTOPSIG (*wstat) == SIGFPE
2029 || WSTOPSIG (*wstat) == SIGBUS
2030 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2031 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2032 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2033 /* Final check just to make sure we don't clobber
2034 the siginfo of non-kernel-sent signals. */
2035 && (uintptr_t) info.si_addr == lwp->stop_pc)
2036 {
2037 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2038 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2039 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2040 }
2041
0bfdf32f 2042 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
2043 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2044 lwp->stop_pc = status.tpoint_addr;
2045
2046 /* Cancel any fast tracepoint lock this thread was
2047 holding. */
2048 force_unlock_trace_buffer ();
2049 }
2050
2051 if (lwp->exit_jump_pad_bkpt != NULL)
2052 {
2053 if (debug_threads)
87ce2a04
DE
2054 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2055 "stopping all threads momentarily.\n");
fa593d66
PA
2056
2057 stop_all_lwps (1, lwp);
fa593d66
PA
2058
2059 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2060 lwp->exit_jump_pad_bkpt = NULL;
2061
2062 unstop_all_lwps (1, lwp);
2063
2064 gdb_assert (lwp->suspended >= 0);
2065 }
2066 }
2067 }
2068
2069 if (debug_threads)
87ce2a04
DE
2070 debug_printf ("Checking whether LWP %ld needs to move out of the "
2071 "jump pad...no\n",
0bfdf32f 2072 lwpid_of (current_thread));
0cccb683 2073
0bfdf32f 2074 current_thread = saved_thread;
fa593d66
PA
2075 return 0;
2076}
2077
2078/* Enqueue one signal in the "signals to report later when out of the
2079 jump pad" list. */
2080
2081static void
2082enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2083{
2084 struct pending_signals *p_sig;
d86d4aaf 2085 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2086
2087 if (debug_threads)
87ce2a04 2088 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2089 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2090
2091 if (debug_threads)
2092 {
2093 struct pending_signals *sig;
2094
2095 for (sig = lwp->pending_signals_to_report;
2096 sig != NULL;
2097 sig = sig->prev)
87ce2a04
DE
2098 debug_printf (" Already queued %d\n",
2099 sig->signal);
fa593d66 2100
87ce2a04 2101 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2102 }
2103
1a981360
PA
2104 /* Don't enqueue non-RT signals if they are already in the deferred
2105 queue. (SIGSTOP being the easiest signal to see ending up here
2106 twice) */
2107 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2108 {
2109 struct pending_signals *sig;
2110
2111 for (sig = lwp->pending_signals_to_report;
2112 sig != NULL;
2113 sig = sig->prev)
2114 {
2115 if (sig->signal == WSTOPSIG (*wstat))
2116 {
2117 if (debug_threads)
87ce2a04
DE
2118 debug_printf ("Not requeuing already queued non-RT signal %d"
2119 " for LWP %ld\n",
2120 sig->signal,
d86d4aaf 2121 lwpid_of (thread));
1a981360
PA
2122 return;
2123 }
2124 }
2125 }
2126
8d749320 2127 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2128 p_sig->prev = lwp->pending_signals_to_report;
2129 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2130
d86d4aaf 2131 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2132 &p_sig->info);
fa593d66
PA
2133
2134 lwp->pending_signals_to_report = p_sig;
2135}
2136
2137/* Dequeue one signal from the "signals to report later when out of
2138 the jump pad" list. */
2139
2140static int
2141dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2142{
d86d4aaf
DE
2143 struct thread_info *thread = get_lwp_thread (lwp);
2144
fa593d66
PA
2145 if (lwp->pending_signals_to_report != NULL)
2146 {
2147 struct pending_signals **p_sig;
2148
2149 p_sig = &lwp->pending_signals_to_report;
2150 while ((*p_sig)->prev != NULL)
2151 p_sig = &(*p_sig)->prev;
2152
2153 *wstat = W_STOPCODE ((*p_sig)->signal);
2154 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2155 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2156 &(*p_sig)->info);
fa593d66
PA
2157 free (*p_sig);
2158 *p_sig = NULL;
2159
2160 if (debug_threads)
87ce2a04 2161 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2162 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2163
2164 if (debug_threads)
2165 {
2166 struct pending_signals *sig;
2167
2168 for (sig = lwp->pending_signals_to_report;
2169 sig != NULL;
2170 sig = sig->prev)
87ce2a04
DE
2171 debug_printf (" Still queued %d\n",
2172 sig->signal);
fa593d66 2173
87ce2a04 2174 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2175 }
2176
2177 return 1;
2178 }
2179
2180 return 0;
2181}
2182
582511be
PA
2183/* Fetch the possibly triggered data watchpoint info and store it in
2184 CHILD.
d50171e4 2185
582511be
PA
2186 On some archs, like x86, that use debug registers to set
2187 watchpoints, it's possible that the way to know which watched
2188 address trapped, is to check the register that is used to select
2189 which address to watch. Problem is, between setting the watchpoint
2190 and reading back which data address trapped, the user may change
2191 the set of watchpoints, and, as a consequence, GDB changes the
2192 debug registers in the inferior. To avoid reading back a stale
2193 stopped-data-address when that happens, we cache in LP the fact
2194 that a watchpoint trapped, and the corresponding data address, as
2195 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2196 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2197
582511be
PA
2198static int
2199check_stopped_by_watchpoint (struct lwp_info *child)
2200{
2201 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2202 {
582511be 2203 struct thread_info *saved_thread;
d50171e4 2204
582511be
PA
2205 saved_thread = current_thread;
2206 current_thread = get_lwp_thread (child);
2207
2208 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2209 {
15c66dd6 2210 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2211
2212 if (the_low_target.stopped_data_address != NULL)
2213 child->stopped_data_address
2214 = the_low_target.stopped_data_address ();
2215 else
2216 child->stopped_data_address = 0;
d50171e4
PA
2217 }
2218
0bfdf32f 2219 current_thread = saved_thread;
d50171e4
PA
2220 }
2221
15c66dd6 2222 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2223}
2224
de0d863e
DB
2225/* Return the ptrace options that we want to try to enable. */
2226
2227static int
2228linux_low_ptrace_options (int attached)
2229{
2230 int options = 0;
2231
2232 if (!attached)
2233 options |= PTRACE_O_EXITKILL;
2234
2235 if (report_fork_events)
2236 options |= PTRACE_O_TRACEFORK;
2237
c269dbdb
DB
2238 if (report_vfork_events)
2239 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2240
94585166
DB
2241 if (report_exec_events)
2242 options |= PTRACE_O_TRACEEXEC;
2243
de0d863e
DB
2244 return options;
2245}
2246
fa96cb38
PA
2247/* Do low-level handling of the event, and check if we should go on
2248 and pass it to caller code. Return the affected lwp if we are, or
2249 NULL otherwise. */
2250
2251static struct lwp_info *
582511be 2252linux_low_filter_event (int lwpid, int wstat)
fa96cb38
PA
2253{
2254 struct lwp_info *child;
2255 struct thread_info *thread;
582511be 2256 int have_stop_pc = 0;
fa96cb38
PA
2257
2258 child = find_lwp_pid (pid_to_ptid (lwpid));
2259
94585166
DB
2260 /* Check for stop events reported by a process we didn't already
2261 know about - anything not already in our LWP list.
2262
2263 If we're expecting to receive stopped processes after
2264 fork, vfork, and clone events, then we'll just add the
2265 new one to our list and go back to waiting for the event
2266 to be reported - the stopped process might be returned
2267 from waitpid before or after the event is.
2268
2269 But note the case of a non-leader thread exec'ing after the
2270 leader having exited, and gone from our lists (because
2271 check_zombie_leaders deleted it). The non-leader thread
2272 changes its tid to the tgid. */
2273
2274 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2275 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2276 {
2277 ptid_t child_ptid;
2278
2279 /* A multi-thread exec after we had seen the leader exiting. */
2280 if (debug_threads)
2281 {
2282 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2283 "after exec.\n", lwpid);
2284 }
2285
2286 child_ptid = ptid_build (lwpid, lwpid, 0);
2287 child = add_lwp (child_ptid);
2288 child->stopped = 1;
2289 current_thread = child->thread;
2290 }
2291
fa96cb38
PA
2292 /* If we didn't find a process, one of two things presumably happened:
2293 - A process we started and then detached from has exited. Ignore it.
2294 - A process we are controlling has forked and the new child's stop
2295 was reported to us by the kernel. Save its PID. */
2296 if (child == NULL && WIFSTOPPED (wstat))
2297 {
2298 add_to_pid_list (&stopped_pids, lwpid, wstat);
2299 return NULL;
2300 }
2301 else if (child == NULL)
2302 return NULL;
2303
2304 thread = get_lwp_thread (child);
2305
2306 child->stopped = 1;
2307
2308 child->last_status = wstat;
2309
582511be
PA
2310 /* Check if the thread has exited. */
2311 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2312 {
2313 if (debug_threads)
2314 debug_printf ("LLFE: %d exited.\n", lwpid);
65706a29
PA
2315 /* If there is at least one more LWP, then the exit signal was
2316 not the end of the debugged application and should be
2317 ignored, unless GDB wants to hear about thread exits. */
2318 if (report_thread_events
2319 || last_thread_of_process_p (pid_of (thread)))
582511be 2320 {
65706a29
PA
2321 /* Since events are serialized to GDB core, and we can't
2322 report this one right now. Leave the status pending for
2323 the next time we're able to report it. */
2324 mark_lwp_dead (child, wstat);
2325 return child;
582511be
PA
2326 }
2327 else
2328 {
65706a29
PA
2329 delete_lwp (child);
2330 return NULL;
582511be
PA
2331 }
2332 }
2333
2334 gdb_assert (WIFSTOPPED (wstat));
2335
fa96cb38
PA
2336 if (WIFSTOPPED (wstat))
2337 {
2338 struct process_info *proc;
2339
c06cbd92 2340 /* Architecture-specific setup after inferior is running. */
fa96cb38 2341 proc = find_process_pid (pid_of (thread));
c06cbd92 2342 if (proc->tdesc == NULL)
fa96cb38 2343 {
c06cbd92
YQ
2344 if (proc->attached)
2345 {
c06cbd92
YQ
2346 /* This needs to happen after we have attached to the
2347 inferior and it is stopped for the first time, but
2348 before we access any inferior registers. */
94585166 2349 linux_arch_setup_thread (thread);
c06cbd92
YQ
2350 }
2351 else
2352 {
2353 /* The process is started, but GDBserver will do
2354 architecture-specific setup after the program stops at
2355 the first instruction. */
2356 child->status_pending_p = 1;
2357 child->status_pending = wstat;
2358 return child;
2359 }
fa96cb38
PA
2360 }
2361 }
2362
fa96cb38
PA
2363 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2364 {
beed38b8 2365 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2366 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2367
de0d863e 2368 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2369 child->must_set_ptrace_flags = 0;
2370 }
2371
582511be
PA
2372 /* Be careful to not overwrite stop_pc until
2373 check_stopped_by_breakpoint is called. */
fa96cb38 2374 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2375 && linux_is_extended_waitstatus (wstat))
fa96cb38 2376 {
582511be 2377 child->stop_pc = get_pc (child);
94585166 2378 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2379 {
2380 /* The event has been handled, so just return without
2381 reporting it. */
2382 return NULL;
2383 }
fa96cb38
PA
2384 }
2385
3e572f71
PA
2386 /* Check first whether this was a SW/HW breakpoint before checking
2387 watchpoints, because at least s390 can't tell the data address of
2388 hardware watchpoint hits, and returns stopped-by-watchpoint as
2389 long as there's a watchpoint set. */
2390 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
582511be
PA
2391 {
2392 if (check_stopped_by_breakpoint (child))
2393 have_stop_pc = 1;
2394 }
2395
3e572f71
PA
2396 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2397 or hardware watchpoint. Check which is which if we got
863d01bd
PA
2398 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2399 stepped an instruction that triggered a watchpoint. In that
2400 case, on some architectures (such as x86), instead of
2401 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2402 the debug registers separately. */
3e572f71 2403 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
863d01bd 2404 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
3e572f71
PA
2405 check_stopped_by_watchpoint (child);
2406
582511be
PA
2407 if (!have_stop_pc)
2408 child->stop_pc = get_pc (child);
2409
fa96cb38
PA
2410 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2411 && child->stop_expected)
2412 {
2413 if (debug_threads)
2414 debug_printf ("Expected stop.\n");
2415 child->stop_expected = 0;
2416
2417 if (thread->last_resume_kind == resume_stop)
2418 {
2419 /* We want to report the stop to the core. Treat the
2420 SIGSTOP as a normal event. */
2bf6fb9d
PA
2421 if (debug_threads)
2422 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2423 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2424 }
2425 else if (stopping_threads != NOT_STOPPING_THREADS)
2426 {
2427 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2428 pending. */
2bf6fb9d
PA
2429 if (debug_threads)
2430 debug_printf ("LLW: SIGSTOP caught for %s "
2431 "while stopping threads.\n",
2432 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2433 return NULL;
2434 }
2435 else
2436 {
2bf6fb9d
PA
2437 /* This is a delayed SIGSTOP. Filter out the event. */
2438 if (debug_threads)
2439 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2440 child->stepping ? "step" : "continue",
2441 target_pid_to_str (ptid_of (thread)));
2442
fa96cb38
PA
2443 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2444 return NULL;
2445 }
2446 }
2447
582511be
PA
2448 child->status_pending_p = 1;
2449 child->status_pending = wstat;
fa96cb38
PA
2450 return child;
2451}
2452
20ba1ce6
PA
2453/* Resume LWPs that are currently stopped without any pending status
2454 to report, but are resumed from the core's perspective. */
2455
2456static void
2457resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2458{
2459 struct thread_info *thread = (struct thread_info *) entry;
2460 struct lwp_info *lp = get_thread_lwp (thread);
2461
2462 if (lp->stopped
863d01bd 2463 && !lp->suspended
20ba1ce6 2464 && !lp->status_pending_p
20ba1ce6
PA
2465 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2466 {
2467 int step = thread->last_resume_kind == resume_step;
2468
2469 if (debug_threads)
2470 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2471 target_pid_to_str (ptid_of (thread)),
2472 paddress (lp->stop_pc),
2473 step);
2474
2475 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2476 }
2477}
2478
fa96cb38
PA
2479/* Wait for an event from child(ren) WAIT_PTID, and return any that
2480 match FILTER_PTID (leaving others pending). The PTIDs can be:
2481 minus_one_ptid, to specify any child; a pid PTID, specifying all
2482 lwps of a thread group; or a PTID representing a single lwp. Store
2483 the stop status through the status pointer WSTAT. OPTIONS is
2484 passed to the waitpid call. Return 0 if no event was found and
2485 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2486 was found. Return the PID of the stopped child otherwise. */
bd99dc85 2487
0d62e5e8 2488static int
fa96cb38
PA
2489linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2490 int *wstatp, int options)
0d62e5e8 2491{
d86d4aaf 2492 struct thread_info *event_thread;
d50171e4 2493 struct lwp_info *event_child, *requested_child;
fa96cb38 2494 sigset_t block_mask, prev_mask;
d50171e4 2495
fa96cb38 2496 retry:
d86d4aaf
DE
2497 /* N.B. event_thread points to the thread_info struct that contains
2498 event_child. Keep them in sync. */
2499 event_thread = NULL;
d50171e4
PA
2500 event_child = NULL;
2501 requested_child = NULL;
0d62e5e8 2502
95954743 2503 /* Check for a lwp with a pending status. */
bd99dc85 2504
fa96cb38 2505 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
0d62e5e8 2506 {
d86d4aaf 2507 event_thread = (struct thread_info *)
fa96cb38 2508 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
d86d4aaf
DE
2509 if (event_thread != NULL)
2510 event_child = get_thread_lwp (event_thread);
2511 if (debug_threads && event_thread)
2512 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2513 }
fa96cb38 2514 else if (!ptid_equal (filter_ptid, null_ptid))
0d62e5e8 2515 {
fa96cb38 2516 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2517
bde24c0a 2518 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66
PA
2519 && requested_child->status_pending_p
2520 && requested_child->collecting_fast_tracepoint)
2521 {
2522 enqueue_one_deferred_signal (requested_child,
2523 &requested_child->status_pending);
2524 requested_child->status_pending_p = 0;
2525 requested_child->status_pending = 0;
2526 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2527 }
2528
2529 if (requested_child->suspended
2530 && requested_child->status_pending_p)
38e08fca
GB
2531 {
2532 internal_error (__FILE__, __LINE__,
2533 "requesting an event out of a"
2534 " suspended child?");
2535 }
fa593d66 2536
d50171e4 2537 if (requested_child->status_pending_p)
d86d4aaf
DE
2538 {
2539 event_child = requested_child;
2540 event_thread = get_lwp_thread (event_child);
2541 }
0d62e5e8 2542 }
611cb4a5 2543
0d62e5e8
DJ
2544 if (event_child != NULL)
2545 {
bd99dc85 2546 if (debug_threads)
87ce2a04 2547 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2548 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2549 *wstatp = event_child->status_pending;
bd99dc85
PA
2550 event_child->status_pending_p = 0;
2551 event_child->status_pending = 0;
0bfdf32f 2552 current_thread = event_thread;
d86d4aaf 2553 return lwpid_of (event_thread);
0d62e5e8
DJ
2554 }
2555
fa96cb38
PA
2556 /* But if we don't find a pending event, we'll have to wait.
2557
2558 We only enter this loop if no process has a pending wait status.
2559 Thus any action taken in response to a wait status inside this
2560 loop is responding as soon as we detect the status, not after any
2561 pending events. */
d8301ad1 2562
fa96cb38
PA
2563 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2564 all signals while here. */
2565 sigfillset (&block_mask);
2566 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2567
582511be
PA
2568 /* Always pull all events out of the kernel. We'll randomly select
2569 an event LWP out of all that have events, to prevent
2570 starvation. */
fa96cb38 2571 while (event_child == NULL)
0d62e5e8 2572 {
fa96cb38 2573 pid_t ret = 0;
0d62e5e8 2574
fa96cb38
PA
2575 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2576 quirks:
0d62e5e8 2577
fa96cb38
PA
2578 - If the thread group leader exits while other threads in the
2579 thread group still exist, waitpid(TGID, ...) hangs. That
2580 waitpid won't return an exit status until the other threads
2581 in the group are reaped.
611cb4a5 2582
fa96cb38
PA
2583 - When a non-leader thread execs, that thread just vanishes
2584 without reporting an exit (so we'd hang if we waited for it
2585 explicitly in that case). The exec event is reported to
94585166 2586 the TGID pid. */
fa96cb38
PA
2587 errno = 0;
2588 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2589
fa96cb38
PA
2590 if (debug_threads)
2591 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2592 ret, errno ? strerror (errno) : "ERRNO-OK");
0d62e5e8 2593
fa96cb38 2594 if (ret > 0)
0d62e5e8 2595 {
89be2091 2596 if (debug_threads)
bd99dc85 2597 {
fa96cb38
PA
2598 debug_printf ("LLW: waitpid %ld received %s\n",
2599 (long) ret, status_to_str (*wstatp));
bd99dc85 2600 }
89be2091 2601
582511be
PA
2602 /* Filter all events. IOW, leave all events pending. We'll
2603 randomly select an event LWP out of all that have events
2604 below. */
2605 linux_low_filter_event (ret, *wstatp);
fa96cb38
PA
2606 /* Retry until nothing comes out of waitpid. A single
2607 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2608 continue;
2609 }
2610
20ba1ce6
PA
2611 /* Now that we've pulled all events out of the kernel, resume
2612 LWPs that don't have an interesting event to report. */
2613 if (stopping_threads == NOT_STOPPING_THREADS)
2614 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2615
2616 /* ... and find an LWP with a status to report to the core, if
2617 any. */
582511be
PA
2618 event_thread = (struct thread_info *)
2619 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2620 if (event_thread != NULL)
2621 {
2622 event_child = get_thread_lwp (event_thread);
2623 *wstatp = event_child->status_pending;
2624 event_child->status_pending_p = 0;
2625 event_child->status_pending = 0;
2626 break;
2627 }
2628
fa96cb38
PA
2629 /* Check for zombie thread group leaders. Those can't be reaped
2630 until all other threads in the thread group are. */
2631 check_zombie_leaders ();
2632
2633 /* If there are no resumed children left in the set of LWPs we
2634 want to wait for, bail. We can't just block in
2635 waitpid/sigsuspend, because lwps might have been left stopped
2636 in trace-stop state, and we'd be stuck forever waiting for
2637 their status to change (which would only happen if we resumed
2638 them). Even if WNOHANG is set, this return code is preferred
2639 over 0 (below), as it is more detailed. */
2640 if ((find_inferior (&all_threads,
2641 not_stopped_callback,
2642 &wait_ptid) == NULL))
a6dbe5df 2643 {
fa96cb38
PA
2644 if (debug_threads)
2645 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2646 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2647 return -1;
a6dbe5df
PA
2648 }
2649
fa96cb38
PA
2650 /* No interesting event to report to the caller. */
2651 if ((options & WNOHANG))
24a09b5f 2652 {
fa96cb38
PA
2653 if (debug_threads)
2654 debug_printf ("WNOHANG set, no event found\n");
2655
2656 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2657 return 0;
24a09b5f
DJ
2658 }
2659
fa96cb38
PA
2660 /* Block until we get an event reported with SIGCHLD. */
2661 if (debug_threads)
2662 debug_printf ("sigsuspend'ing\n");
d50171e4 2663
fa96cb38
PA
2664 sigsuspend (&prev_mask);
2665 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2666 goto retry;
2667 }
d50171e4 2668
fa96cb38 2669 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2670
0bfdf32f 2671 current_thread = event_thread;
d50171e4 2672
fa96cb38
PA
2673 return lwpid_of (event_thread);
2674}
2675
2676/* Wait for an event from child(ren) PTID. PTIDs can be:
2677 minus_one_ptid, to specify any child; a pid PTID, specifying all
2678 lwps of a thread group; or a PTID representing a single lwp. Store
2679 the stop status through the status pointer WSTAT. OPTIONS is
2680 passed to the waitpid call. Return 0 if no event was found and
2681 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2682 was found. Return the PID of the stopped child otherwise. */
2683
2684static int
2685linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2686{
2687 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2688}
2689
6bf5e0ba
PA
2690/* Count the LWP's that have had events. */
2691
2692static int
2693count_events_callback (struct inferior_list_entry *entry, void *data)
2694{
d86d4aaf 2695 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2696 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2697 int *count = (int *) data;
6bf5e0ba
PA
2698
2699 gdb_assert (count != NULL);
2700
582511be 2701 /* Count only resumed LWPs that have an event pending. */
8336d594 2702 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2703 && lp->status_pending_p)
6bf5e0ba
PA
2704 (*count)++;
2705
2706 return 0;
2707}
2708
2709/* Select the LWP (if any) that is currently being single-stepped. */
2710
2711static int
2712select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2713{
d86d4aaf
DE
2714 struct thread_info *thread = (struct thread_info *) entry;
2715 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2716
8336d594
PA
2717 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2718 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2719 && lp->status_pending_p)
2720 return 1;
2721 else
2722 return 0;
2723}
2724
b90fc188 2725/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2726
2727static int
2728select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2729{
d86d4aaf 2730 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2731 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2732 int *selector = (int *) data;
6bf5e0ba
PA
2733
2734 gdb_assert (selector != NULL);
2735
582511be 2736 /* Select only resumed LWPs that have an event pending. */
91baf43f 2737 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2738 && lp->status_pending_p)
6bf5e0ba
PA
2739 if ((*selector)-- == 0)
2740 return 1;
2741
2742 return 0;
2743}
2744
6bf5e0ba
PA
2745/* Select one LWP out of those that have events pending. */
2746
2747static void
2748select_event_lwp (struct lwp_info **orig_lp)
2749{
2750 int num_events = 0;
2751 int random_selector;
582511be
PA
2752 struct thread_info *event_thread = NULL;
2753
2754 /* In all-stop, give preference to the LWP that is being
2755 single-stepped. There will be at most one, and it's the LWP that
2756 the core is most interested in. If we didn't do this, then we'd
2757 have to handle pending step SIGTRAPs somehow in case the core
2758 later continues the previously-stepped thread, otherwise we'd
2759 report the pending SIGTRAP, and the core, not having stepped the
2760 thread, wouldn't understand what the trap was for, and therefore
2761 would report it to the user as a random signal. */
2762 if (!non_stop)
6bf5e0ba 2763 {
582511be
PA
2764 event_thread
2765 = (struct thread_info *) find_inferior (&all_threads,
2766 select_singlestep_lwp_callback,
2767 NULL);
2768 if (event_thread != NULL)
2769 {
2770 if (debug_threads)
2771 debug_printf ("SEL: Select single-step %s\n",
2772 target_pid_to_str (ptid_of (event_thread)));
2773 }
6bf5e0ba 2774 }
582511be 2775 if (event_thread == NULL)
6bf5e0ba
PA
2776 {
2777 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2778 which have had events. */
6bf5e0ba 2779
b90fc188 2780 /* First see how many events we have. */
d86d4aaf 2781 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2782 gdb_assert (num_events > 0);
6bf5e0ba 2783
b90fc188
PA
2784 /* Now randomly pick a LWP out of those that have had
2785 events. */
6bf5e0ba
PA
2786 random_selector = (int)
2787 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2788
2789 if (debug_threads && num_events > 1)
87ce2a04
DE
2790 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2791 num_events, random_selector);
6bf5e0ba 2792
d86d4aaf
DE
2793 event_thread
2794 = (struct thread_info *) find_inferior (&all_threads,
2795 select_event_lwp_callback,
2796 &random_selector);
6bf5e0ba
PA
2797 }
2798
d86d4aaf 2799 if (event_thread != NULL)
6bf5e0ba 2800 {
d86d4aaf
DE
2801 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2802
6bf5e0ba
PA
2803 /* Switch the event LWP. */
2804 *orig_lp = event_lp;
2805 }
2806}
2807
/* Decrement the suspend count of an LWP.  Callback for find_inferior;
   EXCEPT identifies an LWP to leave untouched.  Always returns 0 so
   the whole list is traversed.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp_suspended_decr (lwp);
  return 0;
}
2823
2824/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2825 NULL. */
2826
2827static void
2828unsuspend_all_lwps (struct lwp_info *except)
2829{
d86d4aaf 2830 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2831}
2832
fa593d66
PA
2833static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2834static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2835 void *data);
2836static int lwp_running (struct inferior_list_entry *entry, void *data);
2837static ptid_t linux_wait_1 (ptid_t ptid,
2838 struct target_waitstatus *ourstatus,
2839 int target_options);
2840
2841/* Stabilize threads (move out of jump pads).
2842
2843 If a thread is midway collecting a fast tracepoint, we need to
2844 finish the collection and move it out of the jump pad before
2845 reporting the signal.
2846
2847 This avoids recursion while collecting (when a signal arrives
2848 midway, and the signal handler itself collects), which would trash
2849 the trace buffer. In case the user set a breakpoint in a signal
2850 handler, this avoids the backtrace showing the jump pad, etc..
2851 Most importantly, there are certain things we can't do safely if
2852 threads are stopped in a jump pad (or in its callee's). For
2853 example:
2854
2855 - starting a new trace run. A thread still collecting the
2856 previous run, could trash the trace buffer when resumed. The trace
2857 buffer control structures would have been reset but the thread had
2858 no way to tell. The thread could even midway memcpy'ing to the
2859 buffer, which would mean that when resumed, it would clobber the
2860 trace buffer that had been set for a new run.
2861
2862 - we can't rewrite/reuse the jump pads for new tracepoints
2863 safely. Say you do tstart while a thread is stopped midway while
2864 collecting. When the thread is later resumed, it finishes the
2865 collection, and returns to the jump pad, to execute the original
2866 instruction that was under the tracepoint jump at the time the
2867 older run had been started. If the jump pad had been rewritten
2868 since for something else in the new run, the thread would now
2869 execute the wrong / random instructions. */
2870
2871static void
2872linux_stabilize_threads (void)
2873{
0bfdf32f 2874 struct thread_info *saved_thread;
d86d4aaf 2875 struct thread_info *thread_stuck;
fa593d66 2876
d86d4aaf
DE
2877 thread_stuck
2878 = (struct thread_info *) find_inferior (&all_threads,
2879 stuck_in_jump_pad_callback,
2880 NULL);
2881 if (thread_stuck != NULL)
fa593d66 2882 {
b4d51a55 2883 if (debug_threads)
87ce2a04 2884 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2885 lwpid_of (thread_stuck));
fa593d66
PA
2886 return;
2887 }
2888
0bfdf32f 2889 saved_thread = current_thread;
fa593d66
PA
2890
2891 stabilizing_threads = 1;
2892
2893 /* Kick 'em all. */
d86d4aaf 2894 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
fa593d66
PA
2895
2896 /* Loop until all are stopped out of the jump pads. */
d86d4aaf 2897 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
fa593d66
PA
2898 {
2899 struct target_waitstatus ourstatus;
2900 struct lwp_info *lwp;
fa593d66
PA
2901 int wstat;
2902
2903 /* Note that we go through the full wait even loop. While
2904 moving threads out of jump pad, we need to be able to step
2905 over internal breakpoints and such. */
32fcada3 2906 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2907
2908 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2909 {
0bfdf32f 2910 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2911
2912 /* Lock it. */
863d01bd 2913 lwp_suspended_inc (lwp);
fa593d66 2914
a493e3e2 2915 if (ourstatus.value.sig != GDB_SIGNAL_0
0bfdf32f 2916 || current_thread->last_resume_kind == resume_stop)
fa593d66 2917 {
2ea28649 2918 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
fa593d66
PA
2919 enqueue_one_deferred_signal (lwp, &wstat);
2920 }
2921 }
2922 }
2923
d86d4aaf 2924 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
fa593d66
PA
2925
2926 stabilizing_threads = 0;
2927
0bfdf32f 2928 current_thread = saved_thread;
fa593d66 2929
b4d51a55 2930 if (debug_threads)
fa593d66 2931 {
d86d4aaf
DE
2932 thread_stuck
2933 = (struct thread_info *) find_inferior (&all_threads,
2934 stuck_in_jump_pad_callback,
2935 NULL);
2936 if (thread_stuck != NULL)
87ce2a04 2937 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2938 lwpid_of (thread_stuck));
fa593d66
PA
2939 }
2940}
2941
582511be
PA
2942/* Convenience function that is called when the kernel reports an
2943 event that is not passed out to GDB. */
2944
2945static ptid_t
2946ignore_event (struct target_waitstatus *ourstatus)
2947{
2948 /* If we got an event, there may still be others, as a single
2949 SIGCHLD can indicate more than one child stopped. This forces
2950 another target_wait call. */
2951 async_file_mark ();
2952
2953 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2954 return null_ptid;
2955}
2956
65706a29
PA
2957/* Convenience function that is called when the kernel reports an exit
2958 event. This decides whether to report the event to GDB as a
2959 process exit event, a thread exit event, or to suppress the
2960 event. */
2961
2962static ptid_t
2963filter_exit_event (struct lwp_info *event_child,
2964 struct target_waitstatus *ourstatus)
2965{
2966 struct thread_info *thread = get_lwp_thread (event_child);
2967 ptid_t ptid = ptid_of (thread);
2968
2969 if (!last_thread_of_process_p (pid_of (thread)))
2970 {
2971 if (report_thread_events)
2972 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2973 else
2974 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2975
2976 delete_lwp (event_child);
2977 }
2978 return ptid;
2979}
2980
0d62e5e8 2981/* Wait for process, returns status. */
da6d8c04 2982
95954743
PA
2983static ptid_t
2984linux_wait_1 (ptid_t ptid,
2985 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2986{
e5f1222d 2987 int w;
fc7238bb 2988 struct lwp_info *event_child;
bd99dc85 2989 int options;
bd99dc85 2990 int pid;
6bf5e0ba
PA
2991 int step_over_finished;
2992 int bp_explains_trap;
2993 int maybe_internal_trap;
2994 int report_to_gdb;
219f2f23 2995 int trace_event;
c2d6af84 2996 int in_step_range;
f2faf941 2997 int any_resumed;
bd99dc85 2998
87ce2a04
DE
2999 if (debug_threads)
3000 {
3001 debug_enter ();
3002 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3003 }
3004
bd99dc85
PA
3005 /* Translate generic target options into linux options. */
3006 options = __WALL;
3007 if (target_options & TARGET_WNOHANG)
3008 options |= WNOHANG;
0d62e5e8 3009
fa593d66
PA
3010 bp_explains_trap = 0;
3011 trace_event = 0;
c2d6af84 3012 in_step_range = 0;
bd99dc85
PA
3013 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3014
f2faf941
PA
3015 /* Find a resumed LWP, if any. */
3016 if (find_inferior (&all_threads,
3017 status_pending_p_callback,
3018 &minus_one_ptid) != NULL)
3019 any_resumed = 1;
3020 else if ((find_inferior (&all_threads,
3021 not_stopped_callback,
3022 &minus_one_ptid) != NULL))
3023 any_resumed = 1;
3024 else
3025 any_resumed = 0;
3026
6bf5e0ba
PA
3027 if (ptid_equal (step_over_bkpt, null_ptid))
3028 pid = linux_wait_for_event (ptid, &w, options);
3029 else
3030 {
3031 if (debug_threads)
87ce2a04
DE
3032 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3033 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
3034 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3035 }
3036
f2faf941 3037 if (pid == 0 || (pid == -1 && !any_resumed))
87ce2a04 3038 {
fa96cb38
PA
3039 gdb_assert (target_options & TARGET_WNOHANG);
3040
87ce2a04
DE
3041 if (debug_threads)
3042 {
fa96cb38
PA
3043 debug_printf ("linux_wait_1 ret = null_ptid, "
3044 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
3045 debug_exit ();
3046 }
fa96cb38
PA
3047
3048 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
3049 return null_ptid;
3050 }
fa96cb38
PA
3051 else if (pid == -1)
3052 {
3053 if (debug_threads)
3054 {
3055 debug_printf ("linux_wait_1 ret = null_ptid, "
3056 "TARGET_WAITKIND_NO_RESUMED\n");
3057 debug_exit ();
3058 }
bd99dc85 3059
fa96cb38
PA
3060 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3061 return null_ptid;
3062 }
0d62e5e8 3063
0bfdf32f 3064 event_child = get_thread_lwp (current_thread);
0d62e5e8 3065
fa96cb38
PA
3066 /* linux_wait_for_event only returns an exit status for the last
3067 child of a process. Report it. */
3068 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3069 {
fa96cb38 3070 if (WIFEXITED (w))
0d62e5e8 3071 {
fa96cb38
PA
3072 ourstatus->kind = TARGET_WAITKIND_EXITED;
3073 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 3074
fa96cb38 3075 if (debug_threads)
bd99dc85 3076 {
fa96cb38
PA
3077 debug_printf ("linux_wait_1 ret = %s, exited with "
3078 "retcode %d\n",
0bfdf32f 3079 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3080 WEXITSTATUS (w));
3081 debug_exit ();
bd99dc85 3082 }
fa96cb38
PA
3083 }
3084 else
3085 {
3086 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3087 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 3088
fa96cb38
PA
3089 if (debug_threads)
3090 {
3091 debug_printf ("linux_wait_1 ret = %s, terminated with "
3092 "signal %d\n",
0bfdf32f 3093 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3094 WTERMSIG (w));
3095 debug_exit ();
3096 }
0d62e5e8 3097 }
fa96cb38 3098
65706a29
PA
3099 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3100 return filter_exit_event (event_child, ourstatus);
3101
0bfdf32f 3102 return ptid_of (current_thread);
da6d8c04
DJ
3103 }
3104
2d97cd35
AT
3105 /* If step-over executes a breakpoint instruction, in the case of a
3106 hardware single step it means a gdb/gdbserver breakpoint had been
3107 planted on top of a permanent breakpoint, in the case of a software
3108 single step it may just mean that gdbserver hit the reinsert breakpoint.
3109 The PC has been adjusted by check_stopped_by_breakpoint to point at
3110 the breakpoint address.
3111 So in the case of the hardware single step advance the PC manually
3112 past the breakpoint and in the case of software single step advance only
3113 if it's not the reinsert_breakpoint we are hitting.
3114 This avoids that a program would keep trapping a permanent breakpoint
3115 forever. */
8090aef2 3116 if (!ptid_equal (step_over_bkpt, null_ptid)
2d97cd35
AT
3117 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3118 && (event_child->stepping
3119 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3120 {
dd373349
AT
3121 int increment_pc = 0;
3122 int breakpoint_kind = 0;
3123 CORE_ADDR stop_pc = event_child->stop_pc;
3124
769ef81f
AT
3125 breakpoint_kind =
3126 the_target->breakpoint_kind_from_current_state (&stop_pc);
dd373349 3127 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3128
3129 if (debug_threads)
3130 {
3131 debug_printf ("step-over for %s executed software breakpoint\n",
3132 target_pid_to_str (ptid_of (current_thread)));
3133 }
3134
3135 if (increment_pc != 0)
3136 {
3137 struct regcache *regcache
3138 = get_thread_regcache (current_thread, 1);
3139
3140 event_child->stop_pc += increment_pc;
3141 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3142
3143 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3144 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3145 }
3146 }
3147
6bf5e0ba
PA
3148 /* If this event was not handled before, and is not a SIGTRAP, we
3149 report it. SIGILL and SIGSEGV are also treated as traps in case
3150 a breakpoint is inserted at the current PC. If this target does
3151 not support internal breakpoints at all, we also report the
3152 SIGTRAP without further processing; it's of no concern to us. */
3153 maybe_internal_trap
3154 = (supports_breakpoints ()
3155 && (WSTOPSIG (w) == SIGTRAP
3156 || ((WSTOPSIG (w) == SIGILL
3157 || WSTOPSIG (w) == SIGSEGV)
3158 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3159
3160 if (maybe_internal_trap)
3161 {
3162 /* Handle anything that requires bookkeeping before deciding to
3163 report the event or continue waiting. */
3164
3165 /* First check if we can explain the SIGTRAP with an internal
3166 breakpoint, or if we should possibly report the event to GDB.
3167 Do this before anything that may remove or insert a
3168 breakpoint. */
3169 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3170
3171 /* We have a SIGTRAP, possibly a step-over dance has just
3172 finished. If so, tweak the state machine accordingly,
3173 reinsert breakpoints and delete any reinsert (software
3174 single-step) breakpoints. */
3175 step_over_finished = finish_step_over (event_child);
3176
3177 /* Now invoke the callbacks of any internal breakpoints there. */
3178 check_breakpoints (event_child->stop_pc);
3179
219f2f23
PA
3180 /* Handle tracepoint data collecting. This may overflow the
3181 trace buffer, and cause a tracing stop, removing
3182 breakpoints. */
3183 trace_event = handle_tracepoints (event_child);
3184
6bf5e0ba
PA
3185 if (bp_explains_trap)
3186 {
3187 /* If we stepped or ran into an internal breakpoint, we've
3188 already handled it. So next time we resume (from this
3189 PC), we should step over it. */
3190 if (debug_threads)
87ce2a04 3191 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3192
8b07ae33
PA
3193 if (breakpoint_here (event_child->stop_pc))
3194 event_child->need_step_over = 1;
6bf5e0ba
PA
3195 }
3196 }
3197 else
3198 {
3199 /* We have some other signal, possibly a step-over dance was in
3200 progress, and it should be cancelled too. */
3201 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3202 }
3203
3204 /* We have all the data we need. Either report the event to GDB, or
3205 resume threads and keep waiting for more. */
3206
3207 /* If we're collecting a fast tracepoint, finish the collection and
3208 move out of the jump pad before delivering a signal. See
3209 linux_stabilize_threads. */
3210
3211 if (WIFSTOPPED (w)
3212 && WSTOPSIG (w) != SIGTRAP
3213 && supports_fast_tracepoints ()
58b4daa5 3214 && agent_loaded_p ())
fa593d66
PA
3215 {
3216 if (debug_threads)
87ce2a04
DE
3217 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3218 "to defer or adjust it.\n",
0bfdf32f 3219 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3220
3221 /* Allow debugging the jump pad itself. */
0bfdf32f 3222 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3223 && maybe_move_out_of_jump_pad (event_child, &w))
3224 {
3225 enqueue_one_deferred_signal (event_child, &w);
3226
3227 if (debug_threads)
87ce2a04 3228 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3229 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3230
3231 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3232
3233 return ignore_event (ourstatus);
fa593d66
PA
3234 }
3235 }
219f2f23 3236
fa593d66
PA
3237 if (event_child->collecting_fast_tracepoint)
3238 {
3239 if (debug_threads)
87ce2a04
DE
3240 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3241 "Check if we're already there.\n",
0bfdf32f 3242 lwpid_of (current_thread),
87ce2a04 3243 event_child->collecting_fast_tracepoint);
fa593d66
PA
3244
3245 trace_event = 1;
3246
3247 event_child->collecting_fast_tracepoint
3248 = linux_fast_tracepoint_collecting (event_child, NULL);
3249
3250 if (event_child->collecting_fast_tracepoint != 1)
3251 {
3252 /* No longer need this breakpoint. */
3253 if (event_child->exit_jump_pad_bkpt != NULL)
3254 {
3255 if (debug_threads)
87ce2a04
DE
3256 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3257 "stopping all threads momentarily.\n");
fa593d66
PA
3258
3259 /* Other running threads could hit this breakpoint.
3260 We don't handle moribund locations like GDB does,
3261 instead we always pause all threads when removing
3262 breakpoints, so that any step-over or
3263 decr_pc_after_break adjustment is always taken
3264 care of while the breakpoint is still
3265 inserted. */
3266 stop_all_lwps (1, event_child);
fa593d66
PA
3267
3268 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3269 event_child->exit_jump_pad_bkpt = NULL;
3270
3271 unstop_all_lwps (1, event_child);
3272
3273 gdb_assert (event_child->suspended >= 0);
3274 }
3275 }
3276
3277 if (event_child->collecting_fast_tracepoint == 0)
3278 {
3279 if (debug_threads)
87ce2a04
DE
3280 debug_printf ("fast tracepoint finished "
3281 "collecting successfully.\n");
fa593d66
PA
3282
3283 /* We may have a deferred signal to report. */
3284 if (dequeue_one_deferred_signal (event_child, &w))
3285 {
3286 if (debug_threads)
87ce2a04 3287 debug_printf ("dequeued one signal.\n");
fa593d66 3288 }
3c11dd79 3289 else
fa593d66 3290 {
3c11dd79 3291 if (debug_threads)
87ce2a04 3292 debug_printf ("no deferred signals.\n");
fa593d66
PA
3293
3294 if (stabilizing_threads)
3295 {
3296 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3297 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3298
3299 if (debug_threads)
3300 {
3301 debug_printf ("linux_wait_1 ret = %s, stopped "
3302 "while stabilizing threads\n",
0bfdf32f 3303 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3304 debug_exit ();
3305 }
3306
0bfdf32f 3307 return ptid_of (current_thread);
fa593d66
PA
3308 }
3309 }
3310 }
6bf5e0ba
PA
3311 }
3312
e471f25b
PA
3313 /* Check whether GDB would be interested in this event. */
3314
3315 /* If GDB is not interested in this signal, don't stop other
3316 threads, and don't report it to GDB. Just resume the inferior
3317 right away. We do this for threading-related signals as well as
3318 any that GDB specifically requested we ignore. But never ignore
3319 SIGSTOP if we sent it ourselves, and do not ignore signals when
3320 stepping - they may require special handling to skip the signal
c9587f88
AT
3321 handler. Also never ignore signals that could be caused by a
3322 breakpoint. */
e471f25b
PA
3323 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3324 thread library? */
3325 if (WIFSTOPPED (w)
0bfdf32f 3326 && current_thread->last_resume_kind != resume_step
e471f25b 3327 && (
1a981360 3328#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3329 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3330 && (WSTOPSIG (w) == __SIGRTMIN
3331 || WSTOPSIG (w) == __SIGRTMIN + 1))
3332 ||
3333#endif
2ea28649 3334 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3335 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3336 && current_thread->last_resume_kind == resume_stop)
3337 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3338 {
3339 siginfo_t info, *info_p;
3340
3341 if (debug_threads)
87ce2a04 3342 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3343 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3344
0bfdf32f 3345 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3346 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3347 info_p = &info;
3348 else
3349 info_p = NULL;
863d01bd
PA
3350
3351 if (step_over_finished)
3352 {
3353 /* We cancelled this thread's step-over above. We still
3354 need to unsuspend all other LWPs, and set them back
3355 running again while the signal handler runs. */
3356 unsuspend_all_lwps (event_child);
3357
3358 /* Enqueue the pending signal info so that proceed_all_lwps
3359 doesn't lose it. */
3360 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3361
3362 proceed_all_lwps ();
3363 }
3364 else
3365 {
3366 linux_resume_one_lwp (event_child, event_child->stepping,
3367 WSTOPSIG (w), info_p);
3368 }
582511be 3369 return ignore_event (ourstatus);
e471f25b
PA
3370 }
3371
c2d6af84
PA
3372 /* Note that all addresses are always "out of the step range" when
3373 there's no range to begin with. */
3374 in_step_range = lwp_in_step_range (event_child);
3375
3376 /* If GDB wanted this thread to single step, and the thread is out
3377 of the step range, we always want to report the SIGTRAP, and let
3378 GDB handle it. Watchpoints should always be reported. So should
3379 signals we can't explain. A SIGTRAP we can't explain could be a
3380 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3381 do, we're be able to handle GDB breakpoints on top of internal
3382 breakpoints, by handling the internal breakpoint and still
3383 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3384 won't see the breakpoint hit. If we see a single-step event but
3385 the thread should be continuing, don't pass the trap to gdb.
3386 That indicates that we had previously finished a single-step but
3387 left the single-step pending -- see
3388 complete_ongoing_step_over. */
6bf5e0ba 3389 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3390 || (current_thread->last_resume_kind == resume_step
c2d6af84 3391 && !in_step_range)
15c66dd6 3392 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3393 || (!in_step_range
3394 && !bp_explains_trap
3395 && !trace_event
3396 && !step_over_finished
3397 && !(current_thread->last_resume_kind == resume_continue
3398 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3399 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3400 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3401 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3402 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3403
3404 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3405
3406 /* We found no reason GDB would want us to stop. We either hit one
3407 of our own breakpoints, or finished an internal step GDB
3408 shouldn't know about. */
3409 if (!report_to_gdb)
3410 {
3411 if (debug_threads)
3412 {
3413 if (bp_explains_trap)
87ce2a04 3414 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3415 if (step_over_finished)
87ce2a04 3416 debug_printf ("Step-over finished.\n");
219f2f23 3417 if (trace_event)
87ce2a04 3418 debug_printf ("Tracepoint event.\n");
c2d6af84 3419 if (lwp_in_step_range (event_child))
87ce2a04
DE
3420 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3421 paddress (event_child->stop_pc),
3422 paddress (event_child->step_range_start),
3423 paddress (event_child->step_range_end));
6bf5e0ba
PA
3424 }
3425
3426 /* We're not reporting this breakpoint to GDB, so apply the
3427 decr_pc_after_break adjustment to the inferior's regcache
3428 ourselves. */
3429
3430 if (the_low_target.set_pc != NULL)
3431 {
3432 struct regcache *regcache
0bfdf32f 3433 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3434 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3435 }
3436
7984d532
PA
3437 /* We may have finished stepping over a breakpoint. If so,
3438 we've stopped and suspended all LWPs momentarily except the
3439 stepping one. This is where we resume them all again. We're
3440 going to keep waiting, so use proceed, which handles stepping
3441 over the next breakpoint. */
6bf5e0ba 3442 if (debug_threads)
87ce2a04 3443 debug_printf ("proceeding all threads.\n");
7984d532
PA
3444
3445 if (step_over_finished)
3446 unsuspend_all_lwps (event_child);
3447
6bf5e0ba 3448 proceed_all_lwps ();
582511be 3449 return ignore_event (ourstatus);
6bf5e0ba
PA
3450 }
3451
3452 if (debug_threads)
3453 {
00db26fa 3454 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30
PA
3455 {
3456 char *str;
3457
3458 str = target_waitstatus_to_string (&event_child->waitstatus);
3459 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3460 lwpid_of (get_lwp_thread (event_child)), str);
3461 xfree (str);
3462 }
0bfdf32f 3463 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3464 {
3465 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3466 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3467 else if (!lwp_in_step_range (event_child))
87ce2a04 3468 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3469 }
15c66dd6 3470 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3471 debug_printf ("Stopped by watchpoint.\n");
582511be 3472 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3473 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3474 if (debug_threads)
87ce2a04 3475 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3476 }
3477
3478 /* Alright, we're going to report a stop. */
3479
582511be 3480 if (!stabilizing_threads)
6bf5e0ba
PA
3481 {
3482 /* In all-stop, stop all threads. */
582511be
PA
3483 if (!non_stop)
3484 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3485
3486 /* If we're not waiting for a specific LWP, choose an event LWP
3487 from among those that have had events. Giving equal priority
3488 to all LWPs that have had events helps prevent
3489 starvation. */
3490 if (ptid_equal (ptid, minus_one_ptid))
3491 {
3492 event_child->status_pending_p = 1;
3493 event_child->status_pending = w;
3494
3495 select_event_lwp (&event_child);
3496
0bfdf32f
GB
3497 /* current_thread and event_child must stay in sync. */
3498 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3499
6bf5e0ba
PA
3500 event_child->status_pending_p = 0;
3501 w = event_child->status_pending;
3502 }
3503
c03e6ccc 3504 if (step_over_finished)
582511be
PA
3505 {
3506 if (!non_stop)
3507 {
3508 /* If we were doing a step-over, all other threads but
3509 the stepping one had been paused in start_step_over,
3510 with their suspend counts incremented. We don't want
3511 to do a full unstop/unpause, because we're in
3512 all-stop mode (so we want threads stopped), but we
3513 still need to unsuspend the other threads, to
3514 decrement their `suspended' count back. */
3515 unsuspend_all_lwps (event_child);
3516 }
3517 else
3518 {
3519 /* If we just finished a step-over, then all threads had
3520 been momentarily paused. In all-stop, that's fine,
3521 we want threads stopped by now anyway. In non-stop,
3522 we need to re-resume threads that GDB wanted to be
3523 running. */
3524 unstop_all_lwps (1, event_child);
3525 }
3526 }
c03e6ccc 3527
fa593d66 3528 /* Stabilize threads (move out of jump pads). */
582511be
PA
3529 if (!non_stop)
3530 stabilize_threads ();
6bf5e0ba
PA
3531 }
3532 else
3533 {
3534 /* If we just finished a step-over, then all threads had been
3535 momentarily paused. In all-stop, that's fine, we want
3536 threads stopped by now anyway. In non-stop, we need to
3537 re-resume threads that GDB wanted to be running. */
3538 if (step_over_finished)
7984d532 3539 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3540 }
3541
00db26fa 3542 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3543 {
00db26fa
PA
3544 /* If the reported event is an exit, fork, vfork or exec, let
3545 GDB know. */
3546 *ourstatus = event_child->waitstatus;
de0d863e
DB
3547 /* Clear the event lwp's waitstatus since we handled it already. */
3548 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3549 }
3550 else
3551 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3552
582511be 3553 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3554 it was a software breakpoint, and the client doesn't know we can
3555 adjust the breakpoint ourselves. */
3556 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3557 && !swbreak_feature)
582511be
PA
3558 {
3559 int decr_pc = the_low_target.decr_pc_after_break;
3560
3561 if (decr_pc != 0)
3562 {
3563 struct regcache *regcache
3564 = get_thread_regcache (current_thread, 1);
3565 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3566 }
3567 }
3568
0bfdf32f 3569 if (current_thread->last_resume_kind == resume_stop
8336d594 3570 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3571 {
3572 /* A thread that has been requested to stop by GDB with vCont;t,
3573 and it stopped cleanly, so report as SIG0. The use of
3574 SIGSTOP is an implementation detail. */
a493e3e2 3575 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3576 }
0bfdf32f 3577 else if (current_thread->last_resume_kind == resume_stop
8336d594 3578 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3579 {
3580 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3581 but, it stopped for other reasons. */
2ea28649 3582 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3583 }
de0d863e 3584 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3585 {
2ea28649 3586 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3587 }
3588
d50171e4
PA
3589 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3590
bd99dc85 3591 if (debug_threads)
87ce2a04
DE
3592 {
3593 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3594 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3595 ourstatus->kind, ourstatus->value.sig);
3596 debug_exit ();
3597 }
bd99dc85 3598
65706a29
PA
3599 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3600 return filter_exit_event (event_child, ourstatus);
3601
0bfdf32f 3602 return ptid_of (current_thread);
bd99dc85
PA
3603}
3604
3605/* Get rid of any pending event in the pipe. */
3606static void
3607async_file_flush (void)
3608{
3609 int ret;
3610 char buf;
3611
3612 do
3613 ret = read (linux_event_pipe[0], &buf, 1);
3614 while (ret >= 0 || (ret == -1 && errno == EINTR));
3615}
3616
3617/* Put something in the pipe, so the event loop wakes up. */
3618static void
3619async_file_mark (void)
3620{
3621 int ret;
3622
3623 async_file_flush ();
3624
3625 do
3626 ret = write (linux_event_pipe[1], "+", 1);
3627 while (ret == 0 || (ret == -1 && errno == EINTR));
3628
3629 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3630 be awakened anyway. */
3631}
3632
95954743
PA
3633static ptid_t
3634linux_wait (ptid_t ptid,
3635 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3636{
95954743 3637 ptid_t event_ptid;
bd99dc85 3638
bd99dc85
PA
3639 /* Flush the async file first. */
3640 if (target_is_async_p ())
3641 async_file_flush ();
3642
582511be
PA
3643 do
3644 {
3645 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3646 }
3647 while ((target_options & TARGET_WNOHANG) == 0
3648 && ptid_equal (event_ptid, null_ptid)
3649 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3650
3651 /* If at least one stop was reported, there may be more. A single
3652 SIGCHLD can signal more than one child stop. */
3653 if (target_is_async_p ()
3654 && (target_options & TARGET_WNOHANG) != 0
95954743 3655 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3656 async_file_mark ();
3657
3658 return event_ptid;
da6d8c04
DJ
3659}
3660
/* Send signal SIGNO to the LWP with id LWPID.  Use tkill, if
   possible, in case we are using nptl threads; if tkill turns out to
   be unimplemented by the kernel, fall back to kill, which addresses
   the whole process.  Returns the syscall's result.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    /* Remember a failed probe so the ENOSYS path is only taken
       once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3688
/* Request a stop of LWP by queueing a SIGSTOP for it (see
   send_sigstop below).  Exported entry point.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3694
0d62e5e8 3695static void
02fc4de7 3696send_sigstop (struct lwp_info *lwp)
0d62e5e8 3697{
bd99dc85 3698 int pid;
0d62e5e8 3699
d86d4aaf 3700 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3701
0d62e5e8
DJ
3702 /* If we already have a pending stop signal for this process, don't
3703 send another. */
54a0b537 3704 if (lwp->stop_expected)
0d62e5e8 3705 {
ae13219e 3706 if (debug_threads)
87ce2a04 3707 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3708
0d62e5e8
DJ
3709 return;
3710 }
3711
3712 if (debug_threads)
87ce2a04 3713 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3714
d50171e4 3715 lwp->stop_expected = 1;
bd99dc85 3716 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3717}
3718
7984d532
PA
3719static int
3720send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3721{
d86d4aaf
DE
3722 struct thread_info *thread = (struct thread_info *) entry;
3723 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3724
7984d532
PA
3725 /* Ignore EXCEPT. */
3726 if (lwp == except)
3727 return 0;
3728
02fc4de7 3729 if (lwp->stopped)
7984d532 3730 return 0;
02fc4de7
PA
3731
3732 send_sigstop (lwp);
7984d532
PA
3733 return 0;
3734}
3735
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  find_inferior callback; EXCEPT is skipped entirely.  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
				   void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp_suspended_inc (lwp);

  /* Delegate the actual stopping (which also re-checks EXCEPT and
     the stopped flag) to the plain callback.  */
  return send_sigstop_callback (entry, except);
}
3753
95954743
PA
3754static void
3755mark_lwp_dead (struct lwp_info *lwp, int wstat)
3756{
95954743
PA
3757 /* Store the exit status for later. */
3758 lwp->status_pending_p = 1;
3759 lwp->status_pending = wstat;
3760
00db26fa
PA
3761 /* Store in waitstatus as well, as there's nothing else to process
3762 for this event. */
3763 if (WIFEXITED (wstat))
3764 {
3765 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3766 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3767 }
3768 else if (WIFSIGNALED (wstat))
3769 {
3770 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3771 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3772 }
3773
95954743
PA
3774 /* Prevent trying to stop it. */
3775 lwp->stopped = 1;
3776
3777 /* No further stops are expected from a dead lwp. */
3778 lwp->stop_expected = 0;
3779}
3780
00db26fa
PA
3781/* Return true if LWP has exited already, and has a pending exit event
3782 to report to GDB. */
3783
3784static int
3785lwp_is_marked_dead (struct lwp_info *lwp)
3786{
3787 return (lwp->status_pending_p
3788 && (WIFEXITED (lwp->status_pending)
3789 || WIFSIGNALED (lwp->status_pending)));
3790}
3791
fa96cb38
PA
3792/* Wait for all children to stop for the SIGSTOPs we just queued. */
3793
0d62e5e8 3794static void
fa96cb38 3795wait_for_sigstop (void)
0d62e5e8 3796{
0bfdf32f 3797 struct thread_info *saved_thread;
95954743 3798 ptid_t saved_tid;
fa96cb38
PA
3799 int wstat;
3800 int ret;
0d62e5e8 3801
0bfdf32f
GB
3802 saved_thread = current_thread;
3803 if (saved_thread != NULL)
3804 saved_tid = saved_thread->entry.id;
bd99dc85 3805 else
95954743 3806 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3807
d50171e4 3808 if (debug_threads)
fa96cb38 3809 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3810
fa96cb38
PA
3811 /* Passing NULL_PTID as filter indicates we want all events to be
3812 left pending. Eventually this returns when there are no
3813 unwaited-for children left. */
3814 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3815 &wstat, __WALL);
3816 gdb_assert (ret == -1);
0d62e5e8 3817
0bfdf32f
GB
3818 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3819 current_thread = saved_thread;
0d62e5e8
DJ
3820 else
3821 {
3822 if (debug_threads)
87ce2a04 3823 debug_printf ("Previously current thread died.\n");
0d62e5e8 3824
f0db101d
PA
3825 /* We can't change the current inferior behind GDB's back,
3826 otherwise, a subsequent command may apply to the wrong
3827 process. */
3828 current_thread = NULL;
0d62e5e8
DJ
3829 }
3830}
3831
fa593d66
PA
3832/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3833 move it out, because we need to report the stop event to GDB. For
3834 example, if the user puts a breakpoint in the jump pad, it's
3835 because she wants to debug it. */
3836
3837static int
3838stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3839{
d86d4aaf
DE
3840 struct thread_info *thread = (struct thread_info *) entry;
3841 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3842
863d01bd
PA
3843 if (lwp->suspended != 0)
3844 {
3845 internal_error (__FILE__, __LINE__,
3846 "LWP %ld is suspended, suspended=%d\n",
3847 lwpid_of (thread), lwp->suspended);
3848 }
fa593d66
PA
3849 gdb_assert (lwp->stopped);
3850
3851 /* Allow debugging the jump pad, gdb_collect, etc.. */
3852 return (supports_fast_tracepoints ()
58b4daa5 3853 && agent_loaded_p ()
fa593d66 3854 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3855 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3856 || thread->last_resume_kind == resume_step)
3857 && linux_fast_tracepoint_collecting (lwp, NULL));
3858}
3859
3860static void
3861move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3862{
d86d4aaf 3863 struct thread_info *thread = (struct thread_info *) entry;
f0ce0d3a 3864 struct thread_info *saved_thread;
d86d4aaf 3865 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3866 int *wstat;
3867
863d01bd
PA
3868 if (lwp->suspended != 0)
3869 {
3870 internal_error (__FILE__, __LINE__,
3871 "LWP %ld is suspended, suspended=%d\n",
3872 lwpid_of (thread), lwp->suspended);
3873 }
fa593d66
PA
3874 gdb_assert (lwp->stopped);
3875
f0ce0d3a
PA
3876 /* For gdb_breakpoint_here. */
3877 saved_thread = current_thread;
3878 current_thread = thread;
3879
fa593d66
PA
3880 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3881
3882 /* Allow debugging the jump pad, gdb_collect, etc. */
3883 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3884 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3885 && thread->last_resume_kind != resume_step
3886 && maybe_move_out_of_jump_pad (lwp, wstat))
3887 {
3888 if (debug_threads)
87ce2a04 3889 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3890 lwpid_of (thread));
fa593d66
PA
3891
3892 if (wstat)
3893 {
3894 lwp->status_pending_p = 0;
3895 enqueue_one_deferred_signal (lwp, wstat);
3896
3897 if (debug_threads)
87ce2a04
DE
3898 debug_printf ("Signal %d for LWP %ld deferred "
3899 "(in jump pad)\n",
d86d4aaf 3900 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3901 }
3902
3903 linux_resume_one_lwp (lwp, 0, 0, NULL);
3904 }
3905 else
863d01bd 3906 lwp_suspended_inc (lwp);
f0ce0d3a
PA
3907
3908 current_thread = saved_thread;
fa593d66
PA
3909}
3910
3911static int
3912lwp_running (struct inferior_list_entry *entry, void *data)
3913{
d86d4aaf
DE
3914 struct thread_info *thread = (struct thread_info *) entry;
3915 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3916
00db26fa 3917 if (lwp_is_marked_dead (lwp))
fa593d66
PA
3918 return 0;
3919 if (lwp->stopped)
3920 return 0;
3921 return 1;
3922}
3923
7984d532
PA
3924/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3925 If SUSPEND, then also increase the suspend count of every LWP,
3926 except EXCEPT. */
3927
0d62e5e8 3928static void
7984d532 3929stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3930{
bde24c0a
PA
3931 /* Should not be called recursively. */
3932 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3933
87ce2a04
DE
3934 if (debug_threads)
3935 {
3936 debug_enter ();
3937 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3938 suspend ? "stop-and-suspend" : "stop",
3939 except != NULL
d86d4aaf 3940 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3941 : "none");
3942 }
3943
bde24c0a
PA
3944 stopping_threads = (suspend
3945 ? STOPPING_AND_SUSPENDING_THREADS
3946 : STOPPING_THREADS);
7984d532
PA
3947
3948 if (suspend)
d86d4aaf 3949 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3950 else
d86d4aaf 3951 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3952 wait_for_sigstop ();
bde24c0a 3953 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3954
3955 if (debug_threads)
3956 {
3957 debug_printf ("stop_all_lwps done, setting stopping_threads "
3958 "back to !stopping\n");
3959 debug_exit ();
3960 }
0d62e5e8
DJ
3961}
3962
863d01bd
PA
3963/* Enqueue one signal in the chain of signals which need to be
3964 delivered to this process on next resume. */
3965
3966static void
3967enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3968{
8d749320 3969 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 3970
863d01bd
PA
3971 p_sig->prev = lwp->pending_signals;
3972 p_sig->signal = signal;
3973 if (info == NULL)
3974 memset (&p_sig->info, 0, sizeof (siginfo_t));
3975 else
3976 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3977 lwp->pending_signals = p_sig;
3978}
3979
23f238d3
PA
3980/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3981 SIGNAL is nonzero, give it that signal. */
da6d8c04 3982
ce3a066d 3983static void
23f238d3
PA
3984linux_resume_one_lwp_throw (struct lwp_info *lwp,
3985 int step, int signal, siginfo_t *info)
da6d8c04 3986{
d86d4aaf 3987 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3988 struct thread_info *saved_thread;
fa593d66 3989 int fast_tp_collecting;
c06cbd92
YQ
3990 struct process_info *proc = get_thread_process (thread);
3991
3992 /* Note that target description may not be initialised
3993 (proc->tdesc == NULL) at this point because the program hasn't
3994 stopped at the first instruction yet. It means GDBserver skips
3995 the extra traps from the wrapper program (see option --wrapper).
3996 Code in this function that requires register access should be
3997 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 3998
54a0b537 3999 if (lwp->stopped == 0)
0d62e5e8
DJ
4000 return;
4001
65706a29
PA
4002 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4003
fa593d66
PA
4004 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4005
4006 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4007
219f2f23
PA
4008 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4009 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4010 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4011 {
4012 /* Collecting 'while-stepping' actions doesn't make sense
4013 anymore. */
d86d4aaf 4014 release_while_stepping_state_list (thread);
219f2f23
PA
4015 }
4016
0d62e5e8
DJ
4017 /* If we have pending signals or status, and a new signal, enqueue the
4018 signal. Also enqueue the signal if we are waiting to reinsert a
4019 breakpoint; it will be picked up again below. */
4020 if (signal != 0
fa593d66
PA
4021 && (lwp->status_pending_p
4022 || lwp->pending_signals != NULL
4023 || lwp->bp_reinsert != 0
4024 || fast_tp_collecting))
0d62e5e8 4025 {
8d749320
SM
4026 struct pending_signals *p_sig = XNEW (struct pending_signals);
4027
54a0b537 4028 p_sig->prev = lwp->pending_signals;
0d62e5e8 4029 p_sig->signal = signal;
32ca6d61
DJ
4030 if (info == NULL)
4031 memset (&p_sig->info, 0, sizeof (siginfo_t));
4032 else
4033 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 4034 lwp->pending_signals = p_sig;
0d62e5e8
DJ
4035 }
4036
d50171e4
PA
4037 if (lwp->status_pending_p)
4038 {
4039 if (debug_threads)
87ce2a04
DE
4040 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4041 " has pending status\n",
d86d4aaf 4042 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 4043 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4044 return;
4045 }
0d62e5e8 4046
0bfdf32f
GB
4047 saved_thread = current_thread;
4048 current_thread = thread;
0d62e5e8
DJ
4049
4050 if (debug_threads)
87ce2a04 4051 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 4052 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 4053 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
4054
4055 /* This bit needs some thinking about. If we get a signal that
4056 we must report while a single-step reinsert is still pending,
4057 we often end up resuming the thread. It might be better to
4058 (ew) allow a stack of pending events; then we could be sure that
4059 the reinsert happened right away and not lose any signals.
4060
4061 Making this stack would also shrink the window in which breakpoints are
54a0b537 4062 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4063 complete correctness, so it won't solve that problem. It may be
4064 worthwhile just to solve this one, however. */
54a0b537 4065 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
4066 {
4067 if (debug_threads)
87ce2a04
DE
4068 debug_printf (" pending reinsert at 0x%s\n",
4069 paddress (lwp->bp_reinsert));
d50171e4 4070
85e00e85 4071 if (can_hardware_single_step ())
d50171e4 4072 {
fa593d66
PA
4073 if (fast_tp_collecting == 0)
4074 {
4075 if (step == 0)
4076 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4077 if (lwp->suspended)
4078 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4079 lwp->suspended);
4080 }
d50171e4
PA
4081
4082 step = 1;
4083 }
0d62e5e8
DJ
4084
4085 /* Postpone any pending signal. It was enqueued above. */
4086 signal = 0;
4087 }
4088
fa593d66
PA
4089 if (fast_tp_collecting == 1)
4090 {
4091 if (debug_threads)
87ce2a04
DE
4092 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4093 " (exit-jump-pad-bkpt)\n",
d86d4aaf 4094 lwpid_of (thread));
fa593d66
PA
4095
4096 /* Postpone any pending signal. It was enqueued above. */
4097 signal = 0;
4098 }
4099 else if (fast_tp_collecting == 2)
4100 {
4101 if (debug_threads)
87ce2a04
DE
4102 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4103 " single-stepping\n",
d86d4aaf 4104 lwpid_of (thread));
fa593d66
PA
4105
4106 if (can_hardware_single_step ())
4107 step = 1;
4108 else
38e08fca
GB
4109 {
4110 internal_error (__FILE__, __LINE__,
4111 "moving out of jump pad single-stepping"
4112 " not implemented on this target");
4113 }
fa593d66
PA
4114
4115 /* Postpone any pending signal. It was enqueued above. */
4116 signal = 0;
4117 }
4118
219f2f23
PA
4119 /* If we have while-stepping actions in this thread set it stepping.
4120 If we have a signal to deliver, it may or may not be set to
4121 SIG_IGN, we don't know. Assume so, and allow collecting
4122 while-stepping into a signal handler. A possible smart thing to
4123 do would be to set an internal breakpoint at the signal return
4124 address, continue, and carry on catching this while-stepping
4125 action only when that breakpoint is hit. A future
4126 enhancement. */
d86d4aaf 4127 if (thread->while_stepping != NULL
219f2f23
PA
4128 && can_hardware_single_step ())
4129 {
4130 if (debug_threads)
87ce2a04 4131 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4132 lwpid_of (thread));
219f2f23
PA
4133 step = 1;
4134 }
4135
c06cbd92 4136 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 4137 {
0bfdf32f 4138 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
4139
4140 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4141
4142 if (debug_threads)
4143 {
4144 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4145 (long) lwp->stop_pc);
4146 }
0d62e5e8
DJ
4147 }
4148
fa593d66
PA
4149 /* If we have pending signals, consume one unless we are trying to
4150 reinsert a breakpoint or we're trying to finish a fast tracepoint
4151 collect. */
4152 if (lwp->pending_signals != NULL
4153 && lwp->bp_reinsert == 0
4154 && fast_tp_collecting == 0)
0d62e5e8
DJ
4155 {
4156 struct pending_signals **p_sig;
4157
54a0b537 4158 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
4159 while ((*p_sig)->prev != NULL)
4160 p_sig = &(*p_sig)->prev;
4161
4162 signal = (*p_sig)->signal;
32ca6d61 4163 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 4164 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4165 &(*p_sig)->info);
32ca6d61 4166
0d62e5e8
DJ
4167 free (*p_sig);
4168 *p_sig = NULL;
4169 }
4170
aa5ca48f
DE
4171 if (the_low_target.prepare_to_resume != NULL)
4172 the_low_target.prepare_to_resume (lwp);
4173
d86d4aaf 4174 regcache_invalidate_thread (thread);
da6d8c04 4175 errno = 0;
54a0b537 4176 lwp->stepping = step;
d86d4aaf 4177 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 4178 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4179 /* Coerce to a uintptr_t first to avoid potential gcc warning
4180 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4181 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4182
0bfdf32f 4183 current_thread = saved_thread;
da6d8c04 4184 if (errno)
23f238d3
PA
4185 perror_with_name ("resuming thread");
4186
4187 /* Successfully resumed. Clear state that no longer makes sense,
4188 and mark the LWP as running. Must not do this before resuming
4189 otherwise if that fails other code will be confused. E.g., we'd
4190 later try to stop the LWP and hang forever waiting for a stop
4191 status. Note that we must not throw after this is cleared,
4192 otherwise handle_zombie_lwp_error would get confused. */
4193 lwp->stopped = 0;
4194 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4195}
4196
4197/* Called when we try to resume a stopped LWP and that errors out. If
4198 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4199 or about to become), discard the error, clear any pending status
4200 the LWP may have, and return true (we'll collect the exit status
4201 soon enough). Otherwise, return false. */
4202
4203static int
4204check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4205{
4206 struct thread_info *thread = get_lwp_thread (lp);
4207
4208 /* If we get an error after resuming the LWP successfully, we'd
4209 confuse !T state for the LWP being gone. */
4210 gdb_assert (lp->stopped);
4211
4212 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4213 because even if ptrace failed with ESRCH, the tracee may be "not
4214 yet fully dead", but already refusing ptrace requests. In that
4215 case the tracee has 'R (Running)' state for a little bit
4216 (observed in Linux 3.18). See also the note on ESRCH in the
4217 ptrace(2) man page. Instead, check whether the LWP has any state
4218 other than ptrace-stopped. */
4219
4220 /* Don't assume anything if /proc/PID/status can't be read. */
4221 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4222 {
23f238d3
PA
4223 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4224 lp->status_pending_p = 0;
4225 return 1;
4226 }
4227 return 0;
4228}
4229
4230/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4231 disappears while we try to resume it. */
3221518c 4232
23f238d3
PA
4233static void
4234linux_resume_one_lwp (struct lwp_info *lwp,
4235 int step, int signal, siginfo_t *info)
4236{
4237 TRY
4238 {
4239 linux_resume_one_lwp_throw (lwp, step, signal, info);
4240 }
4241 CATCH (ex, RETURN_MASK_ERROR)
4242 {
4243 if (!check_ptrace_stopped_lwp_gone (lwp))
4244 throw_exception (ex);
3221518c 4245 }
23f238d3 4246 END_CATCH
da6d8c04
DJ
4247}
4248
/* Bundle of resume requests from GDB, handed through find_inferior to
   linux_set_resume_request.  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 4254
ebcf782c
DE
4255/* This function is called once per thread via find_inferior.
4256 ARG is a pointer to a thread_resume_array struct.
4257 We look up the thread specified by ENTRY in ARG, and mark the thread
4258 with a pointer to the appropriate resume request.
5544ad89
DJ
4259
4260 This algorithm is O(threads * resume elements), but resume elements
4261 is small (and will remain small at least until GDB supports thread
4262 suspension). */
ebcf782c 4263
2bd7c093
PA
4264static int
4265linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 4266{
d86d4aaf
DE
4267 struct thread_info *thread = (struct thread_info *) entry;
4268 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4269 int ndx;
2bd7c093 4270 struct thread_resume_array *r;
64386c31 4271
9a3c8263 4272 r = (struct thread_resume_array *) arg;
64386c31 4273
2bd7c093 4274 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
4275 {
4276 ptid_t ptid = r->resume[ndx].thread;
4277 if (ptid_equal (ptid, minus_one_ptid)
4278 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
4279 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4280 of PID'. */
d86d4aaf 4281 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
4282 && (ptid_is_pid (ptid)
4283 || ptid_get_lwp (ptid) == -1)))
95954743 4284 {
d50171e4 4285 if (r->resume[ndx].kind == resume_stop
8336d594 4286 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4287 {
4288 if (debug_threads)
87ce2a04
DE
4289 debug_printf ("already %s LWP %ld at GDB's request\n",
4290 (thread->last_status.kind
4291 == TARGET_WAITKIND_STOPPED)
4292 ? "stopped"
4293 : "stopping",
d86d4aaf 4294 lwpid_of (thread));
d50171e4
PA
4295
4296 continue;
4297 }
4298
95954743 4299 lwp->resume = &r->resume[ndx];
8336d594 4300 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4301
c2d6af84
PA
4302 lwp->step_range_start = lwp->resume->step_range_start;
4303 lwp->step_range_end = lwp->resume->step_range_end;
4304
fa593d66
PA
4305 /* If we had a deferred signal to report, dequeue one now.
4306 This can happen if LWP gets more than one signal while
4307 trying to get out of a jump pad. */
4308 if (lwp->stopped
4309 && !lwp->status_pending_p
4310 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4311 {
4312 lwp->status_pending_p = 1;
4313
4314 if (debug_threads)
87ce2a04
DE
4315 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4316 "leaving status pending.\n",
d86d4aaf
DE
4317 WSTOPSIG (lwp->status_pending),
4318 lwpid_of (thread));
fa593d66
PA
4319 }
4320
95954743
PA
4321 return 0;
4322 }
4323 }
2bd7c093
PA
4324
4325 /* No resume action for this thread. */
4326 lwp->resume = NULL;
64386c31 4327
2bd7c093 4328 return 0;
5544ad89
DJ
4329}
4330
20ad9378
DE
4331/* find_inferior callback for linux_resume.
4332 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4333
bd99dc85
PA
4334static int
4335resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4336{
d86d4aaf
DE
4337 struct thread_info *thread = (struct thread_info *) entry;
4338 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4339
bd99dc85
PA
4340 /* LWPs which will not be resumed are not interesting, because
4341 we might not wait for them next time through linux_wait. */
2bd7c093 4342 if (lwp->resume == NULL)
bd99dc85 4343 return 0;
64386c31 4344
582511be 4345 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4346 * (int *) flag_p = 1;
4347
4348 return 0;
4349}
4350
4351/* Return 1 if this lwp that GDB wants running is stopped at an
4352 internal breakpoint that we need to step over. It assumes that any
4353 required STOP_PC adjustment has already been propagated to the
4354 inferior's regcache. */
4355
4356static int
4357need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4358{
d86d4aaf
DE
4359 struct thread_info *thread = (struct thread_info *) entry;
4360 struct lwp_info *lwp = get_thread_lwp (thread);
0bfdf32f 4361 struct thread_info *saved_thread;
d50171e4 4362 CORE_ADDR pc;
c06cbd92
YQ
4363 struct process_info *proc = get_thread_process (thread);
4364
4365 /* GDBserver is skipping the extra traps from the wrapper program,
4366 don't have to do step over. */
4367 if (proc->tdesc == NULL)
4368 return 0;
d50171e4
PA
4369
4370 /* LWPs which will not be resumed are not interesting, because we
4371 might not wait for them next time through linux_wait. */
4372
4373 if (!lwp->stopped)
4374 {
4375 if (debug_threads)
87ce2a04 4376 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
d86d4aaf 4377 lwpid_of (thread));
d50171e4
PA
4378 return 0;
4379 }
4380
8336d594 4381 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
4382 {
4383 if (debug_threads)
87ce2a04
DE
4384 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4385 " stopped\n",
d86d4aaf 4386 lwpid_of (thread));
d50171e4
PA
4387 return 0;
4388 }
4389
7984d532
PA
4390 gdb_assert (lwp->suspended >= 0);
4391
4392 if (lwp->suspended)
4393 {
4394 if (debug_threads)
87ce2a04 4395 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
d86d4aaf 4396 lwpid_of (thread));
7984d532
PA
4397 return 0;
4398 }
4399
d50171e4
PA
4400 if (!lwp->need_step_over)
4401 {
4402 if (debug_threads)
d86d4aaf 4403 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
d50171e4 4404 }
5544ad89 4405
bd99dc85 4406 if (lwp->status_pending_p)
d50171e4
PA
4407 {
4408 if (debug_threads)
87ce2a04
DE
4409 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4410 " status.\n",
d86d4aaf 4411 lwpid_of (thread));
d50171e4
PA
4412 return 0;
4413 }
4414
4415 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4416 or we have. */
4417 pc = get_pc (lwp);
4418
4419 /* If the PC has changed since we stopped, then don't do anything,
4420 and let the breakpoint/tracepoint be hit. This happens if, for
4421 instance, GDB handled the decr_pc_after_break subtraction itself,
4422 GDB is OOL stepping this thread, or the user has issued a "jump"
4423 command, or poked thread's registers herself. */
4424 if (pc != lwp->stop_pc)
4425 {
4426 if (debug_threads)
87ce2a04
DE
4427 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4428 "Old stop_pc was 0x%s, PC is now 0x%s\n",
d86d4aaf
DE
4429 lwpid_of (thread),
4430 paddress (lwp->stop_pc), paddress (pc));
d50171e4
PA
4431
4432 lwp->need_step_over = 0;
4433 return 0;
4434 }
4435
0bfdf32f
GB
4436 saved_thread = current_thread;
4437 current_thread = thread;
d50171e4 4438
8b07ae33 4439 /* We can only step over breakpoints we know about. */
fa593d66 4440 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4441 {
8b07ae33 4442 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4443 though. If the condition is being evaluated on the target's side
4444 and it evaluate to false, step over this breakpoint as well. */
4445 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4446 && gdb_condition_true_at_breakpoint (pc)
4447 && gdb_no_commands_at_breakpoint (pc))
8b07ae33
PA
4448 {
4449 if (debug_threads)
87ce2a04
DE
4450 debug_printf ("Need step over [LWP %ld]? yes, but found"
4451 " GDB breakpoint at 0x%s; skipping step over\n",
d86d4aaf 4452 lwpid_of (thread), paddress (pc));
d50171e4 4453
0bfdf32f 4454 current_thread = saved_thread;
8b07ae33
PA
4455 return 0;
4456 }
4457 else
4458 {
4459 if (debug_threads)
87ce2a04
DE
4460 debug_printf ("Need step over [LWP %ld]? yes, "
4461 "found breakpoint at 0x%s\n",
d86d4aaf 4462 lwpid_of (thread), paddress (pc));
d50171e4 4463
8b07ae33
PA
4464 /* We've found an lwp that needs stepping over --- return 1 so
4465 that find_inferior stops looking. */
0bfdf32f 4466 current_thread = saved_thread;
8b07ae33
PA
4467
4468 /* If the step over is cancelled, this is set again. */
4469 lwp->need_step_over = 0;
4470 return 1;
4471 }
d50171e4
PA
4472 }
4473
0bfdf32f 4474 current_thread = saved_thread;
d50171e4
PA
4475
4476 if (debug_threads)
87ce2a04
DE
4477 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4478 " at 0x%s\n",
d86d4aaf 4479 lwpid_of (thread), paddress (pc));
c6ecbae5 4480
bd99dc85 4481 return 0;
5544ad89
DJ
4482}
4483
d50171e4
PA
4484/* Start a step-over operation on LWP. When LWP stopped at a
4485 breakpoint, to make progress, we need to remove the breakpoint out
4486 of the way. If we let other threads run while we do that, they may
4487 pass by the breakpoint location and miss hitting it. To avoid
4488 that, a step-over momentarily stops all threads while LWP is
4489 single-stepped while the breakpoint is temporarily uninserted from
4490 the inferior. When the single-step finishes, we reinsert the
4491 breakpoint, and let all threads that are supposed to be running,
4492 run again.
4493
4494 On targets that don't support hardware single-step, we don't
4495 currently support full software single-stepping. Instead, we only
4496 support stepping over the thread event breakpoint, by asking the
4497 low target where to place a reinsert breakpoint. Since this
4498 routine assumes the breakpoint being stepped over is a thread event
4499 breakpoint, it usually assumes the return address of the current
4500 function is a good enough place to set the reinsert breakpoint. */
4501
4502static int
4503start_step_over (struct lwp_info *lwp)
4504{
d86d4aaf 4505 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4506 struct thread_info *saved_thread;
d50171e4
PA
4507 CORE_ADDR pc;
4508 int step;
4509
4510 if (debug_threads)
87ce2a04 4511 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
d86d4aaf 4512 lwpid_of (thread));
d50171e4 4513
7984d532 4514 stop_all_lwps (1, lwp);
863d01bd
PA
4515
4516 if (lwp->suspended != 0)
4517 {
4518 internal_error (__FILE__, __LINE__,
4519 "LWP %ld suspended=%d\n", lwpid_of (thread),
4520 lwp->suspended);
4521 }
d50171e4
PA
4522
4523 if (debug_threads)
87ce2a04 4524 debug_printf ("Done stopping all threads for step-over.\n");
d50171e4
PA
4525
4526 /* Note, we should always reach here with an already adjusted PC,
4527 either by GDB (if we're resuming due to GDB's request), or by our
4528 caller, if we just finished handling an internal breakpoint GDB
4529 shouldn't care about. */
4530 pc = get_pc (lwp);
4531
0bfdf32f
GB
4532 saved_thread = current_thread;
4533 current_thread = thread;
d50171e4
PA
4534
4535 lwp->bp_reinsert = pc;
4536 uninsert_breakpoints_at (pc);
fa593d66 4537 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
4538
4539 if (can_hardware_single_step ())
4540 {
4541 step = 1;
4542 }
7d00775e 4543 else if (can_software_single_step ())
d50171e4
PA
4544 {
4545 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4546 set_reinsert_breakpoint (raddr);
4547 step = 0;
4548 }
7d00775e
AT
4549 else
4550 {
4551 internal_error (__FILE__, __LINE__,
4552 "stepping is not implemented on this target");
4553 }
d50171e4 4554
0bfdf32f 4555 current_thread = saved_thread;
d50171e4
PA
4556
4557 linux_resume_one_lwp (lwp, step, 0, NULL);
4558
4559 /* Require next event from this LWP. */
d86d4aaf 4560 step_over_bkpt = thread->entry.id;
d50171e4
PA
4561 return 1;
4562}
4563
4564/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4565 start_step_over, if still there, and delete any reinsert
4566 breakpoints we've set, on non hardware single-step targets. */
4567
4568static int
4569finish_step_over (struct lwp_info *lwp)
4570{
4571 if (lwp->bp_reinsert != 0)
4572 {
4573 if (debug_threads)
87ce2a04 4574 debug_printf ("Finished step over.\n");
d50171e4
PA
4575
4576 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4577 may be no breakpoint to reinsert there by now. */
4578 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4579 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4580
4581 lwp->bp_reinsert = 0;
4582
4583 /* Delete any software-single-step reinsert breakpoints. No
4584 longer needed. We don't have to worry about other threads
4585 hitting this trap, and later not being able to explain it,
4586 because we were stepping over a breakpoint, and we hold all
4587 threads but LWP stopped while doing that. */
4588 if (!can_hardware_single_step ())
4589 delete_reinsert_breakpoints ();
4590
4591 step_over_bkpt = null_ptid;
4592 return 1;
4593 }
4594 else
4595 return 0;
4596}
4597
863d01bd
PA
4598/* If there's a step over in progress, wait until all threads stop
4599 (that is, until the stepping thread finishes its step), and
4600 unsuspend all lwps. The stepping thread ends with its status
4601 pending, which is processed later when we get back to processing
4602 events. */
4603
4604static void
4605complete_ongoing_step_over (void)
4606{
4607 if (!ptid_equal (step_over_bkpt, null_ptid))
4608 {
4609 struct lwp_info *lwp;
4610 int wstat;
4611 int ret;
4612
4613 if (debug_threads)
4614 debug_printf ("detach: step over in progress, finish it first\n");
4615
4616 /* Passing NULL_PTID as filter indicates we want all events to
4617 be left pending. Eventually this returns when there are no
4618 unwaited-for children left. */
4619 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4620 &wstat, __WALL);
4621 gdb_assert (ret == -1);
4622
4623 lwp = find_lwp_pid (step_over_bkpt);
4624 if (lwp != NULL)
4625 finish_step_over (lwp);
4626 step_over_bkpt = null_ptid;
4627 unsuspend_all_lwps (lwp);
4628 }
4629}
4630
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG points at the leave_all_stopped flag computed by linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No resume request recorded for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig = XCNEW (struct pending_signals);

	  /* Push onto the head of the pending-signals list.  */
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4754
/* Resume (or queue resumption of) all threads according to the
   RESUME_INFO array of N requests received from the client.  Decides
   globally whether any thread may actually run: if some thread has a
   pending status to report, or needs a step-over first, all threads
   are left stopped and only signals are queued.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Record each thread's matching resume request (lwp->resume).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4823
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the one LWP the caller wants skipped (may be NULL).  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* The client asked this thread to stay stopped and we have already
     reported the stop; leave it alone.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  /* Decide whether to single-step: either the client asked for a
     step, or we must step off a breakpoint being reinserted.  */
  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));
      step = 1;
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));
      step = 1;
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
4921
/* find_inferior callback: decrement ENTRY's suspend count and then
   let proceed_one_lwp decide whether it should run.  EXCEPT, if
   non-NULL, names one LWP to leave untouched.  */

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = get_thread_lwp ((struct thread_info *) entry);

  if (lwp != except)
    {
      lwp_suspended_decr (lwp);
      return proceed_one_lwp (entry, except);
    }

  return 0;
}
4935
4936/* When we finish a step-over, set threads running again. If there's
4937 another thread that may need a step-over, now's the time to start
4938 it. Eventually, we'll move all threads past their breakpoints. */
4939
4940static void
4941proceed_all_lwps (void)
4942{
d86d4aaf 4943 struct thread_info *need_step_over;
d50171e4
PA
4944
4945 /* If there is a thread which would otherwise be resumed, which is
4946 stopped at a breakpoint that needs stepping over, then don't
4947 resume any threads - have it step over the breakpoint with all
4948 other threads stopped, then resume all threads again. */
4949
4950 if (supports_breakpoints ())
4951 {
4952 need_step_over
d86d4aaf
DE
4953 = (struct thread_info *) find_inferior (&all_threads,
4954 need_step_over_p, NULL);
d50171e4
PA
4955
4956 if (need_step_over != NULL)
4957 {
4958 if (debug_threads)
87ce2a04
DE
4959 debug_printf ("proceed_all_lwps: found "
4960 "thread %ld needing a step-over\n",
4961 lwpid_of (need_step_over));
d50171e4 4962
d86d4aaf 4963 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4964 return;
4965 }
4966 }
5544ad89 4967
d50171e4 4968 if (debug_threads)
87ce2a04 4969 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4970
d86d4aaf 4971 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4972}
4973
4974/* Stopped LWPs that the client wanted to be running, that don't have
4975 pending statuses, are set to run again, except for EXCEPT, if not
4976 NULL. This undoes a stop_all_lwps call. */
4977
4978static void
7984d532 4979unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4980{
5544ad89
DJ
4981 if (debug_threads)
4982 {
87ce2a04 4983 debug_enter ();
d50171e4 4984 if (except)
87ce2a04 4985 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4986 lwpid_of (get_lwp_thread (except)));
5544ad89 4987 else
87ce2a04 4988 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4989 }
4990
7984d532 4991 if (unsuspend)
d86d4aaf 4992 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4993 else
d86d4aaf 4994 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4995
4996 if (debug_threads)
4997 {
4998 debug_printf ("unstop_all_lwps done\n");
4999 debug_exit ();
5000 }
0d62e5e8
DJ
5001}
5002
58caa3dc
DJ
5003
5004#ifdef HAVE_LINUX_REGSETS
5005
1faeff08
MR
5006#define use_linux_regsets 1
5007
030031ee
PA
5008/* Returns true if REGSET has been disabled. */
5009
5010static int
5011regset_disabled (struct regsets_info *info, struct regset_info *regset)
5012{
5013 return (info->disabled_regsets != NULL
5014 && info->disabled_regsets[regset - info->regsets]);
5015}
5016
5017/* Disable REGSET. */
5018
5019static void
5020disable_regset (struct regsets_info *info, struct regset_info *regset)
5021{
5022 int dr_offset;
5023
5024 dr_offset = regset - info->regsets;
5025 if (info->disabled_regsets == NULL)
224c3ddb 5026 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5027 info->disabled_regsets[dr_offset] = 1;
5028}
5029
58caa3dc 5030static int
3aee8918
PA
5031regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5032 struct regcache *regcache)
58caa3dc
DJ
5033{
5034 struct regset_info *regset;
e9d25b98 5035 int saw_general_regs = 0;
95954743 5036 int pid;
1570b33e 5037 struct iovec iov;
58caa3dc 5038
0bfdf32f 5039 pid = lwpid_of (current_thread);
28eef672 5040 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5041 {
1570b33e
L
5042 void *buf, *data;
5043 int nt_type, res;
58caa3dc 5044
030031ee 5045 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5046 continue;
58caa3dc 5047
bca929d3 5048 buf = xmalloc (regset->size);
1570b33e
L
5049
5050 nt_type = regset->nt_type;
5051 if (nt_type)
5052 {
5053 iov.iov_base = buf;
5054 iov.iov_len = regset->size;
5055 data = (void *) &iov;
5056 }
5057 else
5058 data = buf;
5059
dfb64f85 5060#ifndef __sparc__
f15f9948 5061 res = ptrace (regset->get_request, pid,
b8e1b30e 5062 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5063#else
1570b33e 5064 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5065#endif
58caa3dc
DJ
5066 if (res < 0)
5067 {
5068 if (errno == EIO)
5069 {
52fa2412 5070 /* If we get EIO on a regset, do not try it again for
3aee8918 5071 this process mode. */
030031ee 5072 disable_regset (regsets_info, regset);
58caa3dc 5073 }
e5a9158d
AA
5074 else if (errno == ENODATA)
5075 {
5076 /* ENODATA may be returned if the regset is currently
5077 not "active". This can happen in normal operation,
5078 so suppress the warning in this case. */
5079 }
58caa3dc
DJ
5080 else
5081 {
0d62e5e8 5082 char s[256];
95954743
PA
5083 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5084 pid);
0d62e5e8 5085 perror (s);
58caa3dc
DJ
5086 }
5087 }
098dbe61
AA
5088 else
5089 {
5090 if (regset->type == GENERAL_REGS)
5091 saw_general_regs = 1;
5092 regset->store_function (regcache, buf);
5093 }
fdeb2a12 5094 free (buf);
58caa3dc 5095 }
e9d25b98
DJ
5096 if (saw_general_regs)
5097 return 0;
5098 else
5099 return 1;
58caa3dc
DJ
5100}
5101
/* Write the regcache contents back to the inferior via regset ptrace
   calls, using a read-modify-write so kernel-side fields not tracked
   in the regcache are preserved.  Returns 0 if a general-purpose
   regset was written, 1 otherwise (caller then falls back to
   PTRACE_POKEUSER).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Read-only regsets have no fill_function; skip them.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5189
1faeff08 5190#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5191
1faeff08 5192#define use_linux_regsets 0
3aee8918
PA
5193#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5194#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5195
58caa3dc 5196#endif
1faeff08
MR
5197
5198/* Return 1 if register REGNO is supported by one of the regset ptrace
5199 calls or 0 if it has to be transferred individually. */
5200
5201static int
3aee8918 5202linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5203{
5204 unsigned char mask = 1 << (regno % 8);
5205 size_t index = regno / 8;
5206
5207 return (use_linux_regsets
3aee8918
PA
5208 && (regs_info->regset_bitmap == NULL
5209 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5210}
5211
58caa3dc 5212#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
5213
5214int
3aee8918 5215register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5216{
5217 int addr;
5218
3aee8918 5219 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5220 error ("Invalid register number %d.", regnum);
5221
3aee8918 5222 addr = usrregs->regmap[regnum];
1faeff08
MR
5223
5224 return addr;
5225}
5226
/* Fetch one register (REGNO) from the inferior's USER area with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Silently returns for registers the low target cannot fetch or that
   have no USER-area address.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  /* -1 in the regmap means "no USER-area slot" for this register.  */
  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* ptrace signals failure only through errno; clear it first.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5270
/* Store one register (REGNO) from REGCACHE into the inferior's USER
   area with PTRACE_POKEUSER, one word at a time.  Silently returns
   for registers the low target cannot store or that have no
   USER-area address; also ignores ESRCH (process already gone).  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  /* -1 in the regmap means "no USER-area slot" for this register.  */
  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words;
     zero-fill so the tail padding written back is deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  /* Let the low target massage the bytes if it needs to.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5325
5326/* Fetch all registers, or just one, from the child process.
5327 If REGNO is -1, do this for all registers, skipping any that are
5328 assumed to have been retrieved by regsets_fetch_inferior_registers,
5329 unless ALL is non-zero.
5330 Otherwise, REGNO specifies which register (so we can save time). */
5331static void
3aee8918
PA
5332usr_fetch_inferior_registers (const struct regs_info *regs_info,
5333 struct regcache *regcache, int regno, int all)
1faeff08 5334{
3aee8918
PA
5335 struct usrregs_info *usr = regs_info->usrregs;
5336
1faeff08
MR
5337 if (regno == -1)
5338 {
3aee8918
PA
5339 for (regno = 0; regno < usr->num_regs; regno++)
5340 if (all || !linux_register_in_regsets (regs_info, regno))
5341 fetch_register (usr, regcache, regno);
1faeff08
MR
5342 }
5343 else
3aee8918 5344 fetch_register (usr, regcache, regno);
1faeff08
MR
5345}
5346
5347/* Store our register values back into the inferior.
5348 If REGNO is -1, do this for all registers, skipping any that are
5349 assumed to have been saved by regsets_store_inferior_registers,
5350 unless ALL is non-zero.
5351 Otherwise, REGNO specifies which register (so we can save time). */
5352static void
3aee8918
PA
5353usr_store_inferior_registers (const struct regs_info *regs_info,
5354 struct regcache *regcache, int regno, int all)
1faeff08 5355{
3aee8918
PA
5356 struct usrregs_info *usr = regs_info->usrregs;
5357
1faeff08
MR
5358 if (regno == -1)
5359 {
3aee8918
PA
5360 for (regno = 0; regno < usr->num_regs; regno++)
5361 if (all || !linux_register_in_regsets (regs_info, regno))
5362 store_register (usr, regcache, regno);
1faeff08
MR
5363 }
5364 else
3aee8918 5365 store_register (usr, regcache, regno);
1faeff08
MR
5366}
5367
5368#else /* !HAVE_LINUX_USRREGS */
5369
3aee8918
PA
5370#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5371#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 5372
58caa3dc 5373#endif
1faeff08
MR
5374
5375
/* Target hook: read register REGNO (or all registers if REGNO is -1)
   from the inferior into REGCACHE.  Tries, in order: the low
   target's custom fetch_register hook, regset-based ptrace calls,
   and the per-register USER-area fallback.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the low target first crack at every register it wants
	 to handle specially.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL is 1 when no general regset was fetched, forcing the
	 USER-area path to cover everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A nonzero return from the hook means it fully handled REGNO.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      /* Fall back to USER area if regsets don't cover REGNO, or the
	 regset fetch didn't find the general regs.  */
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5408
5409void
442ea881 5410linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 5411{
1faeff08
MR
5412 int use_regsets;
5413 int all = 0;
3aee8918 5414 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5415
5416 if (regno == -1)
5417 {
3aee8918
PA
5418 all = regsets_store_inferior_registers (regs_info->regsets_info,
5419 regcache);
5420 if (regs_info->usrregs != NULL)
5421 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5422 }
5423 else
5424 {
3aee8918 5425 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5426 if (use_regsets)
3aee8918
PA
5427 all = regsets_store_inferior_registers (regs_info->regsets_info,
5428 regcache);
5429 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5430 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5431 }
58caa3dc
DJ
5432}
5433
da6d8c04 5434
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   otherwise the errno of the failing ptrace peek.  Prefers a bulk
   read of /proc/PID/mem and falls back to word-at-a-time
   PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  Even on a partial
     failure, hand back whatever complete words were read.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5522
93ae6fdc
PA
5523/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5524 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5525 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5526
ce3a066d 5527static int
f450004a 5528linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5529{
5530 register int i;
5531 /* Round starting address down to longword boundary. */
5532 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5533 /* Round ending address up; get number of longwords that makes. */
5534 register int count
493e2a69
MS
5535 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5536 / sizeof (PTRACE_XFER_TYPE);
5537
da6d8c04 5538 /* Allocate buffer of that many longwords. */
8d749320 5539 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5540
0bfdf32f 5541 int pid = lwpid_of (current_thread);
da6d8c04 5542
f0ae6fc3
PA
5543 if (len == 0)
5544 {
5545 /* Zero length write always succeeds. */
5546 return 0;
5547 }
5548
0d62e5e8
DJ
5549 if (debug_threads)
5550 {
58d6951d 5551 /* Dump up to four bytes. */
bf47e248
PA
5552 char str[4 * 2 + 1];
5553 char *p = str;
5554 int dump = len < 4 ? len : 4;
5555
5556 for (i = 0; i < dump; i++)
5557 {
5558 sprintf (p, "%02x", myaddr[i]);
5559 p += 2;
5560 }
5561 *p = '\0';
5562
5563 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5564 str, (long) memaddr, pid);
0d62e5e8
DJ
5565 }
5566
da6d8c04
DJ
5567 /* Fill start and end extra bytes of buffer with existing memory data. */
5568
93ae6fdc 5569 errno = 0;
14ce3065
DE
5570 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5571 about coercing an 8 byte integer to a 4 byte pointer. */
5572 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5573 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5574 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5575 if (errno)
5576 return errno;
da6d8c04
DJ
5577
5578 if (count > 1)
5579 {
93ae6fdc 5580 errno = 0;
da6d8c04 5581 buffer[count - 1]
95954743 5582 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5583 /* Coerce to a uintptr_t first to avoid potential gcc warning
5584 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5585 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5586 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5587 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5588 if (errno)
5589 return errno;
da6d8c04
DJ
5590 }
5591
93ae6fdc 5592 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5593
493e2a69
MS
5594 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5595 myaddr, len);
da6d8c04
DJ
5596
5597 /* Write the entire buffer. */
5598
5599 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5600 {
5601 errno = 0;
14ce3065
DE
5602 ptrace (PTRACE_POKETEXT, pid,
5603 /* Coerce to a uintptr_t first to avoid potential gcc warning
5604 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5605 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5606 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5607 if (errno)
5608 return errno;
5609 }
5610
5611 return 0;
5612}
2f2893d9
DJ
5613
/* Initialize libthread_db support for the current process, unless it
   has already been set up.  Compiled out when thread_db is not
   available.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Nothing to do if thread_db was already initialized for this
     process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5626
/* Interrupt the inferior, as if the user had typed ^C on its
   controlling terminal.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* A negative pid sends SIGINT to the whole process group.  */
  kill (-signal_pid, SIGINT);
}
5636
aa691b87
RM
5637/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5638 to debugger memory starting at MYADDR. */
5639
5640static int
f450004a 5641linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5642{
5643 char filename[PATH_MAX];
5644 int fd, n;
0bfdf32f 5645 int pid = lwpid_of (current_thread);
aa691b87 5646
6cebaf6e 5647 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5648
5649 fd = open (filename, O_RDONLY);
5650 if (fd < 0)
5651 return -1;
5652
5653 if (offset != (CORE_ADDR) 0
5654 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5655 n = -1;
5656 else
5657 n = read (fd, myaddr, len);
5658
5659 close (fd);
5660
5661 return n;
5662}
5663
d993e290
PA
5664/* These breakpoint and watchpoint related wrapper functions simply
5665 pass on the function call if the target has registered a
5666 corresponding function. */
e013ee27
OF
5667
5668static int
802e8e6d
PA
5669linux_supports_z_point_type (char z_type)
5670{
5671 return (the_low_target.supports_z_point_type != NULL
5672 && the_low_target.supports_z_point_type (z_type));
5673}
5674
5675static int
5676linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5677 int size, struct raw_breakpoint *bp)
e013ee27 5678{
c8f4bfdd
YQ
5679 if (type == raw_bkpt_type_sw)
5680 return insert_memory_breakpoint (bp);
5681 else if (the_low_target.insert_point != NULL)
802e8e6d 5682 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5683 else
5684 /* Unsupported (see target.h). */
5685 return 1;
5686}
5687
5688static int
802e8e6d
PA
5689linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5690 int size, struct raw_breakpoint *bp)
e013ee27 5691{
c8f4bfdd
YQ
5692 if (type == raw_bkpt_type_sw)
5693 return remove_memory_breakpoint (bp);
5694 else if (the_low_target.remove_point != NULL)
802e8e6d 5695 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5696 else
5697 /* Unsupported (see target.h). */
5698 return 1;
5699}
5700
3e572f71
PA
5701/* Implement the to_stopped_by_sw_breakpoint target_ops
5702 method. */
5703
5704static int
5705linux_stopped_by_sw_breakpoint (void)
5706{
5707 struct lwp_info *lwp = get_thread_lwp (current_thread);
5708
5709 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5710}
5711
5712/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5713 method. */
5714
5715static int
5716linux_supports_stopped_by_sw_breakpoint (void)
5717{
5718 return USE_SIGTRAP_SIGINFO;
5719}
5720
5721/* Implement the to_stopped_by_hw_breakpoint target_ops
5722 method. */
5723
5724static int
5725linux_stopped_by_hw_breakpoint (void)
5726{
5727 struct lwp_info *lwp = get_thread_lwp (current_thread);
5728
5729 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5730}
5731
5732/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5733 method. */
5734
5735static int
5736linux_supports_stopped_by_hw_breakpoint (void)
5737{
5738 return USE_SIGTRAP_SIGINFO;
5739}
5740
/* Implement the supports_hardware_single_step target_ops method.  */

static int
linux_supports_hardware_single_step (void)
{
  return can_hardware_single_step ();
}
5748
/* Implement the supports_software_single_step target_ops method.  */

static int
linux_supports_software_single_step (void)
{
  return can_software_single_step ();
}
5754
e013ee27
OF
5755static int
5756linux_stopped_by_watchpoint (void)
5757{
0bfdf32f 5758 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5759
15c66dd6 5760 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5761}
5762
5763static CORE_ADDR
5764linux_stopped_data_address (void)
5765{
0bfdf32f 5766 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5767
5768 return lwp->stopped_data_address;
e013ee27
OF
5769}
5770
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  /* Pull the runtime segment addresses out of the user area.  */
  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif
5817
07e059b5
VP
5818static int
5819linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5820 unsigned char *readbuf, unsigned const char *writebuf,
5821 CORE_ADDR offset, int len)
07e059b5 5822{
d26e3629 5823 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5824}
5825
d0722149
DE
5826/* Convert a native/host siginfo object, into/from the siginfo in the
5827 layout of the inferiors' architecture. */
5828
5829static void
a5362b9a 5830siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5831{
5832 int done = 0;
5833
5834 if (the_low_target.siginfo_fixup != NULL)
5835 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5836
5837 /* If there was no callback, or the callback didn't do anything,
5838 then just do a straight memcpy. */
5839 if (!done)
5840 {
5841 if (direction == 1)
a5362b9a 5842 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5843 else
a5362b9a 5844 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5845 }
5846}
5847
4aa995e1
PA
5848static int
5849linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5850 unsigned const char *writebuf, CORE_ADDR offset, int len)
5851{
d0722149 5852 int pid;
a5362b9a
TS
5853 siginfo_t siginfo;
5854 char inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5855
0bfdf32f 5856 if (current_thread == NULL)
4aa995e1
PA
5857 return -1;
5858
0bfdf32f 5859 pid = lwpid_of (current_thread);
4aa995e1
PA
5860
5861 if (debug_threads)
87ce2a04
DE
5862 debug_printf ("%s siginfo for lwp %d.\n",
5863 readbuf != NULL ? "Reading" : "Writing",
5864 pid);
4aa995e1 5865
0adea5f7 5866 if (offset >= sizeof (siginfo))
4aa995e1
PA
5867 return -1;
5868
b8e1b30e 5869 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5870 return -1;
5871
d0722149
DE
5872 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5873 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5874 inferior with a 64-bit GDBSERVER should look the same as debugging it
5875 with a 32-bit GDBSERVER, we need to convert it. */
5876 siginfo_fixup (&siginfo, inf_siginfo, 0);
5877
4aa995e1
PA
5878 if (offset + len > sizeof (siginfo))
5879 len = sizeof (siginfo) - offset;
5880
5881 if (readbuf != NULL)
d0722149 5882 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5883 else
5884 {
d0722149
DE
5885 memcpy (inf_siginfo + offset, writebuf, len);
5886
5887 /* Convert back to ptrace layout before flushing it out. */
5888 siginfo_fixup (&siginfo, inf_siginfo, 1);
5889
b8e1b30e 5890 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5891 return -1;
5892 }
5893
5894 return len;
5895}
5896
bd99dc85
PA
5897/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5898 so we notice when children change state; as the handler for the
5899 sigsuspend in my_waitpid. */
5900
5901static void
5902sigchld_handler (int signo)
5903{
5904 int old_errno = errno;
5905
5906 if (debug_threads)
e581f2b4
PA
5907 {
5908 do
5909 {
5910 /* fprintf is not async-signal-safe, so call write
5911 directly. */
5912 if (write (2, "sigchld_handler\n",
5913 sizeof ("sigchld_handler\n") - 1) < 0)
5914 break; /* just ignore */
5915 } while (0);
5916 }
bd99dc85
PA
5917
5918 if (target_is_async_p ())
5919 async_file_mark (); /* trigger a linux_wait */
5920
5921 errno = old_errno;
5922}
5923
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5929
5930static int
5931linux_async (int enable)
5932{
7089dca4 5933 int previous = target_is_async_p ();
bd99dc85 5934
8336d594 5935 if (debug_threads)
87ce2a04
DE
5936 debug_printf ("linux_async (%d), previous=%d\n",
5937 enable, previous);
8336d594 5938
bd99dc85
PA
5939 if (previous != enable)
5940 {
5941 sigset_t mask;
5942 sigemptyset (&mask);
5943 sigaddset (&mask, SIGCHLD);
5944
5945 sigprocmask (SIG_BLOCK, &mask, NULL);
5946
5947 if (enable)
5948 {
5949 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
5950 {
5951 linux_event_pipe[0] = -1;
5952 linux_event_pipe[1] = -1;
5953 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5954
5955 warning ("creating event pipe failed.");
5956 return previous;
5957 }
bd99dc85
PA
5958
5959 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5960 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5961
5962 /* Register the event loop handler. */
5963 add_file_handler (linux_event_pipe[0],
5964 handle_target_event, NULL);
5965
5966 /* Always trigger a linux_wait. */
5967 async_file_mark ();
5968 }
5969 else
5970 {
5971 delete_file_handler (linux_event_pipe[0]);
5972
5973 close (linux_event_pipe[0]);
5974 close (linux_event_pipe[1]);
5975 linux_event_pipe[0] = -1;
5976 linux_event_pipe[1] = -1;
5977 }
5978
5979 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5980 }
5981
5982 return previous;
5983}
5984
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async machinery could not be brought into the requested state.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
5996
/* Multi-process debugging is always available on Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
6002
/* Check if fork events are supported.  */

static int
linux_supports_fork_events (void)
{
  /* Fork events ride on the same kernel facility as tracefork.  */
  return linux_supports_tracefork ();
}
6010
/* Check if vfork events are supported.  */

static int
linux_supports_vfork_events (void)
{
  /* Same kernel requirement as fork events.  */
  return linux_supports_tracefork ();
}
6018
/* Check if exec events are supported.  */

static int
linux_supports_exec_events (void)
{
  return linux_supports_traceexec ();
}
6026
de0d863e
DB
6027/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6028 options for the specified lwp. */
6029
6030static int
6031reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6032 void *args)
6033{
6034 struct thread_info *thread = (struct thread_info *) entry;
6035 struct lwp_info *lwp = get_thread_lwp (thread);
6036
6037 if (!lwp->stopped)
6038 {
6039 /* Stop the lwp so we can modify its ptrace options. */
6040 lwp->must_set_ptrace_flags = 1;
6041 linux_stop_lwp (lwp);
6042 }
6043 else
6044 {
6045 /* Already stopped; go ahead and set the ptrace options. */
6046 struct process_info *proc = find_process_pid (pid_of (thread));
6047 int options = linux_low_ptrace_options (proc->attached);
6048
6049 linux_enable_event_reporting (lwpid_of (thread), options);
6050 lwp->must_set_ptrace_flags = 0;
6051 }
6052
6053 return 0;
6054}
6055
6056/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6057 ptrace flags for all inferiors. This is in case the new GDB connection
6058 doesn't support the same set of events that the previous one did. */
6059
6060static void
6061linux_handle_new_gdb_connection (void)
6062{
6063 pid_t pid;
6064
6065 /* Request that all the lwps reset their ptrace options. */
6066 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
6067}
6068
/* Address-space randomization can be disabled only when personality(2)
   support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 6078
/* The in-process agent is always usable on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
6084
c2d6af84
PA
6085static int
6086linux_supports_range_stepping (void)
6087{
6088 if (*the_low_target.supports_range_stepping == NULL)
6089 return 0;
6090
6091 return (*the_low_target.supports_range_stepping) ();
6092}
6093
efcbbd14
UW
6094/* Enumerate spufs IDs for process PID. */
6095static int
6096spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6097{
6098 int pos = 0;
6099 int written = 0;
6100 char path[128];
6101 DIR *dir;
6102 struct dirent *entry;
6103
6104 sprintf (path, "/proc/%ld/fd", pid);
6105 dir = opendir (path);
6106 if (!dir)
6107 return -1;
6108
6109 rewinddir (dir);
6110 while ((entry = readdir (dir)) != NULL)
6111 {
6112 struct stat st;
6113 struct statfs stfs;
6114 int fd;
6115
6116 fd = atoi (entry->d_name);
6117 if (!fd)
6118 continue;
6119
6120 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6121 if (stat (path, &st) != 0)
6122 continue;
6123 if (!S_ISDIR (st.st_mode))
6124 continue;
6125
6126 if (statfs (path, &stfs) != 0)
6127 continue;
6128 if (stfs.f_type != SPUFS_MAGIC)
6129 continue;
6130
6131 if (pos >= offset && pos + 4 <= offset + len)
6132 {
6133 *(unsigned int *)(buf + pos - offset) = fd;
6134 written += 4;
6135 }
6136 pos += 4;
6137 }
6138
6139 closedir (dir);
6140 return written;
6141}
6142
6143/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6144 object type, using the /proc file system. */
6145static int
6146linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6147 unsigned const char *writebuf,
6148 CORE_ADDR offset, int len)
6149{
0bfdf32f 6150 long pid = lwpid_of (current_thread);
efcbbd14
UW
6151 char buf[128];
6152 int fd = 0;
6153 int ret = 0;
6154
6155 if (!writebuf && !readbuf)
6156 return -1;
6157
6158 if (!*annex)
6159 {
6160 if (!readbuf)
6161 return -1;
6162 else
6163 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6164 }
6165
6166 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6167 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6168 if (fd <= 0)
6169 return -1;
6170
6171 if (offset != 0
6172 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6173 {
6174 close (fd);
6175 return 0;
6176 }
6177
6178 if (writebuf)
6179 ret = write (fd, writebuf, (size_t) len);
6180 else
6181 ret = read (fd, readbuf, (size_t) len);
6182
6183 close (fd);
6184 return ret;
6185}
6186
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

/* Read up to LEN bytes of the load map of the inferior (ANNEX is
   "exec" or "interp") starting at OFFSET into MYADDR.  Returns the
   number of bytes copied, or -1 on error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* OFFSET is unsigned (CORE_ADDR), so the old "offset < 0" test was
     always false; only the upper bound needs checking.  */
  if (offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6264
1570b33e 6265static void
06e03fff 6266linux_process_qsupported (char **features, int count)
1570b33e
L
6267{
6268 if (the_low_target.process_qsupported != NULL)
06e03fff 6269 the_low_target.process_qsupported (features, count);
1570b33e
L
6270}
6271
219f2f23
PA
6272static int
6273linux_supports_tracepoints (void)
6274{
6275 if (*the_low_target.supports_tracepoints == NULL)
6276 return 0;
6277
6278 return (*the_low_target.supports_tracepoints) ();
6279}
6280
6281static CORE_ADDR
6282linux_read_pc (struct regcache *regcache)
6283{
6284 if (the_low_target.get_pc == NULL)
6285 return 0;
6286
6287 return (*the_low_target.get_pc) (regcache);
6288}
6289
6290static void
6291linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6292{
6293 gdb_assert (the_low_target.set_pc != NULL);
6294
6295 (*the_low_target.set_pc) (regcache, pc);
6296}
6297
8336d594
PA
6298static int
6299linux_thread_stopped (struct thread_info *thread)
6300{
6301 return get_thread_lwp (thread)->stopped;
6302}
6303
6304/* This exposes stop-all-threads functionality to other modules. */
6305
6306static void
7984d532 6307linux_pause_all (int freeze)
8336d594 6308{
7984d532
PA
6309 stop_all_lwps (freeze, NULL);
6310}
6311
6312/* This exposes unstop-all-threads functionality to other gdbserver
6313 modules. */
6314
6315static void
6316linux_unpause_all (int unfreeze)
6317{
6318 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6319}
6320
90d74c30
PA
6321static int
6322linux_prepare_to_access_memory (void)
6323{
6324 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6325 running LWP. */
6326 if (non_stop)
6327 linux_pause_all (1);
6328 return 0;
6329}
6330
6331static void
0146f85b 6332linux_done_accessing_memory (void)
90d74c30
PA
6333{
6334 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6335 running LWP. */
6336 if (non_stop)
6337 linux_unpause_all (1);
6338}
6339
fa593d66
PA
6340static int
6341linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6342 CORE_ADDR collector,
6343 CORE_ADDR lockaddr,
6344 ULONGEST orig_size,
6345 CORE_ADDR *jump_entry,
405f8e94
SS
6346 CORE_ADDR *trampoline,
6347 ULONGEST *trampoline_size,
fa593d66
PA
6348 unsigned char *jjump_pad_insn,
6349 ULONGEST *jjump_pad_insn_size,
6350 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
6351 CORE_ADDR *adjusted_insn_addr_end,
6352 char *err)
fa593d66
PA
6353{
6354 return (*the_low_target.install_fast_tracepoint_jump_pad)
6355 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
6356 jump_entry, trampoline, trampoline_size,
6357 jjump_pad_insn, jjump_pad_insn_size,
6358 adjusted_insn_addr, adjusted_insn_addr_end,
6359 err);
fa593d66
PA
6360}
6361
6a271cae
PA
6362static struct emit_ops *
6363linux_emit_ops (void)
6364{
6365 if (the_low_target.emit_ops != NULL)
6366 return (*the_low_target.emit_ops) ();
6367 else
6368 return NULL;
6369}
6370
405f8e94
SS
6371static int
6372linux_get_min_fast_tracepoint_insn_len (void)
6373{
6374 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6375}
6376
2268b414
JK
6377/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6378
6379static int
6380get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6381 CORE_ADDR *phdr_memaddr, int *num_phdr)
6382{
6383 char filename[PATH_MAX];
6384 int fd;
6385 const int auxv_size = is_elf64
6386 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6387 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6388
6389 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6390
6391 fd = open (filename, O_RDONLY);
6392 if (fd < 0)
6393 return 1;
6394
6395 *phdr_memaddr = 0;
6396 *num_phdr = 0;
6397 while (read (fd, buf, auxv_size) == auxv_size
6398 && (*phdr_memaddr == 0 || *num_phdr == 0))
6399 {
6400 if (is_elf64)
6401 {
6402 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6403
6404 switch (aux->a_type)
6405 {
6406 case AT_PHDR:
6407 *phdr_memaddr = aux->a_un.a_val;
6408 break;
6409 case AT_PHNUM:
6410 *num_phdr = aux->a_un.a_val;
6411 break;
6412 }
6413 }
6414 else
6415 {
6416 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6417
6418 switch (aux->a_type)
6419 {
6420 case AT_PHDR:
6421 *phdr_memaddr = aux->a_un.a_val;
6422 break;
6423 case AT_PHNUM:
6424 *num_phdr = aux->a_un.a_val;
6425 break;
6426 }
6427 }
6428 }
6429
6430 close (fd);
6431
6432 if (*phdr_memaddr == 0 || *num_phdr == 0)
6433 {
6434 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6435 "phdr_memaddr = %ld, phdr_num = %d",
6436 (long) *phdr_memaddr, *num_phdr);
6437 return 2;
6438 }
6439
6440 return 0;
6441}
6442
6443/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6444
6445static CORE_ADDR
6446get_dynamic (const int pid, const int is_elf64)
6447{
6448 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6449 int num_phdr, i;
2268b414 6450 unsigned char *phdr_buf;
db1ff28b 6451 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6452
6453 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6454 return 0;
6455
6456 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6457 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6458
6459 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6460 return 0;
6461
6462 /* Compute relocation: it is expected to be 0 for "regular" executables,
6463 non-zero for PIE ones. */
6464 relocation = -1;
db1ff28b
JK
6465 for (i = 0; relocation == -1 && i < num_phdr; i++)
6466 if (is_elf64)
6467 {
6468 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6469
6470 if (p->p_type == PT_PHDR)
6471 relocation = phdr_memaddr - p->p_vaddr;
6472 }
6473 else
6474 {
6475 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6476
6477 if (p->p_type == PT_PHDR)
6478 relocation = phdr_memaddr - p->p_vaddr;
6479 }
6480
2268b414
JK
6481 if (relocation == -1)
6482 {
e237a7e2
JK
6483 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6484 any real world executables, including PIE executables, have always
6485 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6486 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6487 or present DT_DEBUG anyway (fpc binaries are statically linked).
6488
6489 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6490
6491 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6492
2268b414
JK
6493 return 0;
6494 }
6495
db1ff28b
JK
6496 for (i = 0; i < num_phdr; i++)
6497 {
6498 if (is_elf64)
6499 {
6500 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6501
6502 if (p->p_type == PT_DYNAMIC)
6503 return p->p_vaddr + relocation;
6504 }
6505 else
6506 {
6507 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6508
db1ff28b
JK
6509 if (p->p_type == PT_DYNAMIC)
6510 return p->p_vaddr + relocation;
6511 }
6512 }
2268b414
JK
6513
6514 return 0;
6515}
6516
6517/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6518 can be 0 if the inferior does not yet have the library list initialized.
6519 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6520 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6521
6522static CORE_ADDR
6523get_r_debug (const int pid, const int is_elf64)
6524{
6525 CORE_ADDR dynamic_memaddr;
6526 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6527 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6528 CORE_ADDR map = -1;
2268b414
JK
6529
6530 dynamic_memaddr = get_dynamic (pid, is_elf64);
6531 if (dynamic_memaddr == 0)
367ba2c2 6532 return map;
2268b414
JK
6533
6534 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6535 {
6536 if (is_elf64)
6537 {
6538 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6539#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6540 union
6541 {
6542 Elf64_Xword map;
6543 unsigned char buf[sizeof (Elf64_Xword)];
6544 }
6545 rld_map;
a738da3a
MF
6546#endif
6547#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6548 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6549 {
6550 if (linux_read_memory (dyn->d_un.d_val,
6551 rld_map.buf, sizeof (rld_map.buf)) == 0)
6552 return rld_map.map;
6553 else
6554 break;
6555 }
75f62ce7 6556#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6557#ifdef DT_MIPS_RLD_MAP_REL
6558 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6559 {
6560 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6561 rld_map.buf, sizeof (rld_map.buf)) == 0)
6562 return rld_map.map;
6563 else
6564 break;
6565 }
6566#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6567
367ba2c2
MR
6568 if (dyn->d_tag == DT_DEBUG && map == -1)
6569 map = dyn->d_un.d_val;
2268b414
JK
6570
6571 if (dyn->d_tag == DT_NULL)
6572 break;
6573 }
6574 else
6575 {
6576 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6577#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6578 union
6579 {
6580 Elf32_Word map;
6581 unsigned char buf[sizeof (Elf32_Word)];
6582 }
6583 rld_map;
a738da3a
MF
6584#endif
6585#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6586 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6587 {
6588 if (linux_read_memory (dyn->d_un.d_val,
6589 rld_map.buf, sizeof (rld_map.buf)) == 0)
6590 return rld_map.map;
6591 else
6592 break;
6593 }
75f62ce7 6594#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6595#ifdef DT_MIPS_RLD_MAP_REL
6596 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6597 {
6598 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6599 rld_map.buf, sizeof (rld_map.buf)) == 0)
6600 return rld_map.map;
6601 else
6602 break;
6603 }
6604#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6605
367ba2c2
MR
6606 if (dyn->d_tag == DT_DEBUG && map == -1)
6607 map = dyn->d_un.d_val;
2268b414
JK
6608
6609 if (dyn->d_tag == DT_NULL)
6610 break;
6611 }
6612
6613 dynamic_memaddr += dyn_size;
6614 }
6615
367ba2c2 6616 return map;
2268b414
JK
6617}
6618
6619/* Read one pointer from MEMADDR in the inferior. */
6620
6621static int
6622read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6623{
485f1ee4
PA
6624 int ret;
6625
6626 /* Go through a union so this works on either big or little endian
6627 hosts, when the inferior's pointer size is smaller than the size
6628 of CORE_ADDR. It is assumed the inferior's endianness is the
6629 same of the superior's. */
6630 union
6631 {
6632 CORE_ADDR core_addr;
6633 unsigned int ui;
6634 unsigned char uc;
6635 } addr;
6636
6637 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6638 if (ret == 0)
6639 {
6640 if (ptr_size == sizeof (CORE_ADDR))
6641 *ptr = addr.core_addr;
6642 else if (ptr_size == sizeof (unsigned int))
6643 *ptr = addr.ui;
6644 else
6645 gdb_assert_not_reached ("unhandled pointer size");
6646 }
6647 return ret;
2268b414
JK
6648}
6649
/* Byte offsets of the fields of the SVR4 r_debug / link_map
   structures for a given inferior word size; instantiated for 32-bit
   and 64-bit layouts by linux_qxfer_libraries_svr4.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6673
/* Construct qXfer:libraries-svr4:read reply.

   Builds an XML <library-list-svr4> document by walking the
   inferior's dynamic linker link_map list, then copies LEN bytes of
   it starting at OFFSET into READBUF.  ANNEX may carry
   "start=ADDR;prev=ADDR;" hints to resume the walk mid-list.
   Returns the number of bytes copied, -1 on error, or -2 if WRITEBUF
   is non-NULL (the object is read-only).  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Field offsets for a 32-bit inferior.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  /* Field offsets for a 64-bit inferior.  */
  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit offsets from the inferior executable's ELF
     class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "start=HEXADDR;" and "prev=HEXADDR;" pairs out of ANNEX;
     unrecognized keys are skipped up to the next ';'.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No explicit start address: locate the list head via r_debug.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = (char *) xmalloc (allocated);
  /* Header is left unterminated until we know whether the list is
     empty (self-closing tag) or not.  */
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link_map list; stop on the first entry whose fields
     cannot all be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity check: the entry's back pointer must match where we
	 came from, else the list is corrupted.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      /* Grow DOCUMENT (doubling) until the escaped name plus
		 attribute boilerplate is guaranteed to fit.  */
	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = (char *) xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Clamp LEN to the bytes remaining past OFFSET.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
6886
9accd112
MM
6887#ifdef HAVE_LINUX_BTRACE
6888
969c39fb 6889/* See to_disable_btrace target method. */
9accd112 6890
969c39fb
MM
6891static int
6892linux_low_disable_btrace (struct btrace_target_info *tinfo)
6893{
6894 enum btrace_error err;
6895
6896 err = linux_disable_btrace (tinfo);
6897 return (err == BTRACE_ERR_NONE ? 0 : -1);
6898}
6899
b20a6524
MM
6900/* Encode an Intel(R) Processor Trace configuration. */
6901
6902static void
6903linux_low_encode_pt_config (struct buffer *buffer,
6904 const struct btrace_data_pt_config *config)
6905{
6906 buffer_grow_str (buffer, "<pt-config>\n");
6907
6908 switch (config->cpu.vendor)
6909 {
6910 case CV_INTEL:
6911 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6912 "model=\"%u\" stepping=\"%u\"/>\n",
6913 config->cpu.family, config->cpu.model,
6914 config->cpu.stepping);
6915 break;
6916
6917 default:
6918 break;
6919 }
6920
6921 buffer_grow_str (buffer, "</pt-config>\n");
6922}
6923
6924/* Encode a raw buffer. */
6925
6926static void
6927linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6928 unsigned int size)
6929{
6930 if (size == 0)
6931 return;
6932
6933 /* We use hex encoding - see common/rsp-low.h. */
6934 buffer_grow_str (buffer, "<raw>\n");
6935
6936 while (size-- > 0)
6937 {
6938 char elem[2];
6939
6940 elem[0] = tohex ((*data >> 4) & 0xf);
6941 elem[1] = tohex (*data++ & 0xf);
6942
6943 buffer_grow (buffer, elem, 2);
6944 }
6945
6946 buffer_grow_str (buffer, "</raw>\n");
6947}
6948
969c39fb
MM
/* See to_read_btrace target method.

   Read branch trace data of kind TYPE for TINFO and append an XML
   rendering (or an "E.*" error string) to BUFFER.  Returns 0 on
   success, -1 on failure.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       enum btrace_read_type type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure to GDB as a textual "E." reply.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      goto err;
    }

  /* Render the trace according to the format the kernel gave us.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      goto err;

    case BTRACE_FORMAT_BTS:
      /* Branch Trace Store: one <block> element per trace block.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* Intel Processor Trace: CPU config plus the raw trace bytes.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      goto err;
    }

  btrace_data_fini (&btrace);
  return 0;

err:
  /* Release the trace data on every error path before reporting
     failure.  */
  btrace_data_fini (&btrace);
  return -1;
}
f4abbc16
MM
7018
7019/* See to_btrace_conf target method. */
7020
7021static int
7022linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7023 struct buffer *buffer)
7024{
7025 const struct btrace_config *conf;
7026
7027 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7028 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7029
7030 conf = linux_btrace_conf (tinfo);
7031 if (conf != NULL)
7032 {
7033 switch (conf->format)
7034 {
7035 case BTRACE_FORMAT_NONE:
7036 break;
7037
7038 case BTRACE_FORMAT_BTS:
d33501a5
MM
7039 buffer_xml_printf (buffer, "<bts");
7040 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7041 buffer_xml_printf (buffer, " />\n");
f4abbc16 7042 break;
b20a6524
MM
7043
7044 case BTRACE_FORMAT_PT:
7045 buffer_xml_printf (buffer, "<pt");
7046 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7047 buffer_xml_printf (buffer, "/>\n");
7048 break;
f4abbc16
MM
7049 }
7050 }
7051
7052 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7053 return 0;
7054}
9accd112
MM
7055#endif /* HAVE_LINUX_BTRACE */
7056
7b669087
GB
7057/* See nat/linux-nat.h. */
7058
7059ptid_t
7060current_lwp_ptid (void)
7061{
7062 return ptid_of (current_thread);
7063}
7064
dd373349
AT
7065/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7066
7067static int
7068linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7069{
7070 if (the_low_target.breakpoint_kind_from_pc != NULL)
7071 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7072 else
1652a986 7073 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
7074}
7075
7076/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7077
7078static const gdb_byte *
7079linux_sw_breakpoint_from_kind (int kind, int *size)
7080{
7081 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7082
7083 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7084}
7085
769ef81f
AT
7086/* Implementation of the target_ops method
7087 "breakpoint_kind_from_current_state". */
7088
7089static int
7090linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7091{
7092 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7093 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7094 else
7095 return linux_breakpoint_kind_from_pc (pcptr);
7096}
7097
ce3a066d
DJ
/* The Linux target_ops vector.  The initializers are positional and
   must stay in exactly the order of the struct target_ops members; a
   NULL entry means the corresponding operation is unsupported on this
   configuration.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_post_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on uClinux no-MMU targets that
     expose the PT_TEXT/PT_DATA ptrace addresses.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NOTE(review): unsupported slot following thread_stopped — confirm
     against struct target_ops which member this NULL fills.  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
  /* Branch-tracing operations; all five slots are NULL when btrace
     support was not compiled in.  */
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step
};
7198
0d62e5e8
DJ
/* Set up the signal dispositions gdbserver needs on Linux: ignore
   the glibc/LinuxThreads cancellation signal so it does not kill
   gdbserver.  (Fixed to use a proper `(void)' prototype instead of
   the pre-ANSI empty parameter list.)  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
7208
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table (terminated by a negative
   size) and record the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7219
da6d8c04
DJ
/* Top-level initialization of the Linux low target: install the
   target vector, signal dispositions, the SIGCHLD handler, and the
   architecture-specific state, then probe ptrace features.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  linux_init_signals ();
  linux_ptrace_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed rather than
     failing with EINTR when a child status change arrives.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}