]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Introduce basic LWP accessors
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
da6d8c04 23
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
8bdce1ff 26#include "gdb_wait.h"
da6d8c04 27#include <sys/ptrace.h>
125f8a3d
GB
28#include "nat/linux-ptrace.h"
29#include "nat/linux-procfs.h"
8cc73a39 30#include "nat/linux-personality.h"
da6d8c04
DJ
31#include <signal.h>
32#include <sys/ioctl.h>
33#include <fcntl.h>
0a30fbc4 34#include <unistd.h>
fd500816 35#include <sys/syscall.h>
f9387fc3 36#include <sched.h>
07e059b5
VP
37#include <ctype.h>
38#include <pwd.h>
39#include <sys/types.h>
40#include <dirent.h>
53ce3c39 41#include <sys/stat.h>
efcbbd14 42#include <sys/vfs.h>
1570b33e 43#include <sys/uio.h>
602e3198 44#include "filestuff.h"
c144c7a0 45#include "tracepoint.h"
533b0600 46#include "hostio.h"
957f3f49
DE
47#ifndef ELFMAG0
48/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52#include <elf.h>
53#endif
efcbbd14
UW
54
55#ifndef SPUFS_MAGIC
56#define SPUFS_MAGIC 0x23c9b64e
57#endif
da6d8c04 58
03583c20
UW
59#ifdef HAVE_PERSONALITY
60# include <sys/personality.h>
61# if !HAVE_DECL_ADDR_NO_RANDOMIZE
62# define ADDR_NO_RANDOMIZE 0x0040000
63# endif
64#endif
65
fd462a61
DJ
66#ifndef O_LARGEFILE
67#define O_LARGEFILE 0
68#endif
69
ec8ebe72
DE
70#ifndef W_STOPCODE
71#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72#endif
73
1a981360
PA
74/* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76#ifndef __SIGRTMIN
77#define __SIGRTMIN 32
78#endif
79
db0dfaa0
LM
80/* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83#if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86#if defined(__mcoldfire__)
87/* These are still undefined in 3.10 kernels. */
88#define PT_TEXT_ADDR 49*4
89#define PT_DATA_ADDR 50*4
90#define PT_TEXT_END_ADDR 51*4
91/* BFIN already defines these since at least 2.6.32 kernels. */
92#elif defined(BFIN)
93#define PT_TEXT_ADDR 220
94#define PT_TEXT_END_ADDR 224
95#define PT_DATA_ADDR 228
96/* These are still undefined in 3.10 kernels. */
97#elif defined(__TMS320C6X__)
98#define PT_TEXT_ADDR (0x10000*4)
99#define PT_DATA_ADDR (0x10004*4)
100#define PT_TEXT_END_ADDR (0x10008*4)
101#endif
102#endif
103
9accd112 104#ifdef HAVE_LINUX_BTRACE
125f8a3d 105# include "nat/linux-btrace.h"
734b0e4b 106# include "btrace-common.h"
9accd112
MM
107#endif
108
8365dcf5
TJB
109#ifndef HAVE_ELF32_AUXV_T
110/* Copied from glibc's elf.h. */
111typedef struct
112{
113 uint32_t a_type; /* Entry type */
114 union
115 {
116 uint32_t a_val; /* Integer value */
117 /* We use to have pointer elements added here. We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
120 } a_un;
121} Elf32_auxv_t;
122#endif
123
124#ifndef HAVE_ELF64_AUXV_T
125/* Copied from glibc's elf.h. */
126typedef struct
127{
128 uint64_t a_type; /* Entry type */
129 union
130 {
131 uint64_t a_val; /* Integer value */
132 /* We use to have pointer elements added here. We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
135 } a_un;
136} Elf64_auxv_t;
137#endif
138
cff068da
GB
139/* LWP accessors. */
140
141/* See nat/linux-nat.h. */
142
143ptid_t
144ptid_of_lwp (struct lwp_info *lwp)
145{
146 return ptid_of (get_lwp_thread (lwp));
147}
148
149/* See nat/linux-nat.h. */
150
151int
152lwp_is_stopped (struct lwp_info *lwp)
153{
154 return lwp->stopped;
155}
156
157/* See nat/linux-nat.h. */
158
159enum target_stop_reason
160lwp_stop_reason (struct lwp_info *lwp)
161{
162 return lwp->stop_reason;
163}
164
05044653
PA
165/* A list of all unknown processes which receive stop signals. Some
166 other process will presumably claim each of these as forked
167 children momentarily. */
24a09b5f 168
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stop events received from as-yet-unknown
   children (see the comment above).  */
struct simple_pid_list *stopped_pids;
181
182/* Trivial list manipulation functions to keep track of a list of new
183 stopped processes. */
184
185static void
186add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
187{
188 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
189
190 new_pid->pid = pid;
191 new_pid->status = status;
192 new_pid->next = *listp;
193 *listp = new_pid;
194}
195
196static int
197pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
198{
199 struct simple_pid_list **p;
200
201 for (p = listp; *p != NULL; p = &(*p)->next)
202 if ((*p)->pid == pid)
203 {
204 struct simple_pid_list *next = (*p)->next;
205
206 *statusp = (*p)->status;
207 xfree (*p);
208 *p = next;
209 return 1;
210 }
211 return 0;
212}
24a09b5f 213
bde24c0a
PA
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  Records whether
   threads are merely being stopped, or stopped and suspended.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
228
229/* FIXME make into a target method? */
24a09b5f 230int using_threads = 1;
24a09b5f 231
fa593d66
PA
232/* True if we're presently stabilizing threads (moving them out of
233 jump pads). */
234static int stabilizing_threads;
235
2acc282a 236static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 237 int step, int signal, siginfo_t *info);
2bd7c093 238static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
239static void stop_all_lwps (int suspend, struct lwp_info *except);
240static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
241static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
242 int *wstat, int options);
95954743 243static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 244static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 245static int linux_stopped_by_watchpoint (void);
95954743 246static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 247static void proceed_all_lwps (void);
d50171e4 248static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
249static int kill_lwp (unsigned long lwpid, int signo);
250
582511be
PA
251/* When the event-loop is doing a step-over, this points at the thread
252 being stepped. */
253ptid_t step_over_bkpt;
254
d50171e4
PA
255/* True if the low target can hardware single-step. Such targets
256 don't need a BREAKPOINT_REINSERT_ADDR callback. */
257
258static int
259can_hardware_single_step (void)
260{
261 return (the_low_target.breakpoint_reinsert_addr == NULL);
262}
263
264/* True if the low target supports memory breakpoints. If so, we'll
265 have a GET_PC implementation. */
266
267static int
268supports_breakpoints (void)
269{
270 return (the_low_target.get_pc != NULL);
271}
0d62e5e8 272
fa593d66
PA
273/* Returns true if this target can support fast tracepoints. This
274 does not mean that the in-process agent has been loaded in the
275 inferior. */
276
277static int
278supports_fast_tracepoints (void)
279{
280 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
281}
282
c2d6af84
PA
283/* True if LWP is stopped in its stepping range. */
284
285static int
286lwp_in_step_range (struct lwp_info *lwp)
287{
288 CORE_ADDR pc = lwp->stop_pc;
289
290 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
291}
292
0d62e5e8
DJ
struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* Extra info accompanying the signal.  */
  siginfo_t info;
  /* Previously queued pending signal, if any.  */
  struct pending_signals *prev;
};
611cb4a5 299
bd99dc85
PA
300/* The read/write ends of the pipe registered as waitable file in the
301 event loop. */
302static int linux_event_pipe[2] = { -1, -1 };
303
304/* True if we're currently in async mode. */
305#define target_is_async_p() (linux_event_pipe[0] != -1)
306
02fc4de7 307static void send_sigstop (struct lwp_info *lwp);
fa96cb38 308static void wait_for_sigstop (void);
bd99dc85 309
d0722149
DE
310/* Return non-zero if HEADER is a 64-bit ELF file. */
311
static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  /* Reject anything that does not carry the ELF magic bytes; report
     EM_NONE and -1 in that case.  */
  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  /* It is ELF; extract the machine and report the class.  */
  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
327
328/* Return non-zero if FILE is a 64-bit ELF file,
329 zero if the file is not a 64-bit ELF file,
330 and -1 if the file is not accessible or doesn't exist. */
331
static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  /* -1 means the file is not accessible or doesn't exist.  */
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      /* Too short to hold an ELF64 header: not a 64-bit ELF file.  */
      close (fd);
      return 0;
    }

  close (fd);
  return elf_64_header_p (&header, machine);
}
351
be07f1a2
PA
352/* Accepts an integer PID; Returns true if the executable PID is
353 running is a 64-bit ELF file.. */
354
355int
214d508e 356linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 357{
d8d2a3ee 358 char file[PATH_MAX];
be07f1a2
PA
359
360 sprintf (file, "/proc/%d/exe", pid);
214d508e 361 return elf_64_file_p (file, machine);
be07f1a2
PA
362}
363
bd99dc85
PA
364static void
365delete_lwp (struct lwp_info *lwp)
366{
fa96cb38
PA
367 struct thread_info *thr = get_lwp_thread (lwp);
368
369 if (debug_threads)
370 debug_printf ("deleting %ld\n", lwpid_of (thr));
371
372 remove_thread (thr);
aa5ca48f 373 free (lwp->arch_private);
bd99dc85
PA
374 free (lwp);
375}
376
95954743
PA
377/* Add a process to the common process list, and set its private
378 data. */
379
static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  /* Zero-initialized Linux-specific private data.  */
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  /* Give the low target a chance to set up its own per-process
     state.  */
  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
396
582511be
PA
397static CORE_ADDR get_pc (struct lwp_info *lwp);
398
bd99dc85
PA
399/* Handle a GNU/Linux extended wait response. If we see a clone
400 event, we need to add the new LWP to our list (and not report the
401 trap to higher layers). */
0d62e5e8 402
static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Fetch the LWP id of the new clone child.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.
	 STATUS was filled in by either pull_pid_from_list or
	 my_waitpid above, so it is valid on both paths here.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}
465
d50171e4
PA
466/* Return the PC as read from the regcache of LWP, without any
467 adjustment. */
468
469static CORE_ADDR
470get_pc (struct lwp_info *lwp)
471{
0bfdf32f 472 struct thread_info *saved_thread;
d50171e4
PA
473 struct regcache *regcache;
474 CORE_ADDR pc;
475
476 if (the_low_target.get_pc == NULL)
477 return 0;
478
0bfdf32f
GB
479 saved_thread = current_thread;
480 current_thread = get_lwp_thread (lwp);
d50171e4 481
0bfdf32f 482 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
483 pc = (*the_low_target.get_pc) (regcache);
484
485 if (debug_threads)
87ce2a04 486 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 487
0bfdf32f 488 current_thread = saved_thread;
d50171e4
PA
489 return pc;
490}
491
492/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
493 The SIGTRAP could mean several things.
494
495 On i386, where decr_pc_after_break is non-zero:
582511be
PA
496
497 If we were single-stepping this process using PTRACE_SINGLESTEP, we
498 will get only the one SIGTRAP. The value of $eip will be the next
499 instruction. If the instruction we stepped over was a breakpoint,
500 we need to decrement the PC.
501
0d62e5e8
DJ
502 If we continue the process using PTRACE_CONT, we will get a
503 SIGTRAP when we hit a breakpoint. The value of $eip will be
504 the instruction after the breakpoint (i.e. needs to be
505 decremented). If we report the SIGTRAP to GDB, we must also
582511be 506 report the undecremented PC. If the breakpoint is removed, we
0d62e5e8
DJ
507 must resume at the decremented PC.
508
582511be
PA
509 On a non-decr_pc_after_break machine with hardware or kernel
510 single-step:
511
512 If we either single-step a breakpoint instruction, or continue and
513 hit a breakpoint instruction, our PC will point at the breakpoint
0d62e5e8
DJ
514 instruction. */
515
582511be
PA
/* Check whether LWP's SIGTRAP stop corresponds to a software or
   hardware breakpoint; if so, record the stop reason and stop PC on
   LWP (re-adjusting the PC when decr_pc_after_break applies) and
   return 1.  Returns 0 for all other stops.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  /* Where a software breakpoint stop would have its PC after backing
     up over the breakpoint instruction.  */
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Prefer the kernel-reported si_code, which distinguishes
     breakpoint traps reliably.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware breakpoint stops need no PC adjustment.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* No siginfo available: fall back to inspecting the breakpoint
     tables at the stop PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 633
b3312d80 634static struct lwp_info *
95954743 635add_lwp (ptid_t ptid)
611cb4a5 636{
54a0b537 637 struct lwp_info *lwp;
0d62e5e8 638
54a0b537
PA
639 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
640 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 641
aa5ca48f
DE
642 if (the_low_target.new_thread != NULL)
643 lwp->arch_private = the_low_target.new_thread ();
644
f7667f0d 645 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 646
54a0b537 647 return lwp;
0d62e5e8 648}
611cb4a5 649
da6d8c04
DJ
650/* Start an inferior process and returns its pid.
651 ALLARGS is a vector of program-name and args. */
652
ce3a066d
DJ
static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; restored below once the
     child has been forked.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; vfork shares the address space until
     exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: become a tracee and exec the program.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try PROGRAM as given; fall back to a PATH search if it does
	 not exist as named.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: restore the personality changed above, then register the
     new process and its initial LWP.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* Ptrace options are set once the LWP first stops.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
716
8784d563
PA
717/* Attach to an inferior process. Returns 0 on success, ERRNO on
718 error. */
da6d8c04 719
7ae1a6a6
PA
int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  /* Attach; on failure return errno so the caller can decide how to
     report it.  */
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
801
8784d563
PA
802/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
803 already attached. Returns true if a new LWP is found, false
804 otherwise. */
805
806static int
807attach_proc_task_lwp_callback (ptid_t ptid)
808{
809 /* Is this a new thread? */
810 if (find_thread_ptid (ptid) == NULL)
811 {
812 int lwpid = ptid_get_lwp (ptid);
813 int err;
814
815 if (debug_threads)
816 debug_printf ("Found new lwp %d\n", lwpid);
817
818 err = linux_attach_lwp (ptid);
819
820 /* Be quiet if we simply raced with the thread exiting. EPERM
821 is returned if the thread's task still exists, and is marked
822 as exited or zombie, as well as other conditions, so in that
823 case, confirm the status in /proc/PID/status. */
824 if (err == ESRCH
825 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
826 {
827 if (debug_threads)
828 {
829 debug_printf ("Cannot attach to lwp %d: "
830 "thread is gone (%d: %s)\n",
831 lwpid, err, strerror (err));
832 }
833 }
834 else if (err != 0)
835 {
836 warning (_("Cannot attach to lwp %d: %s"),
837 lwpid,
838 linux_ptrace_attach_fail_reason_string (ptid, err));
839 }
840
841 return 1;
842 }
843 return 0;
844}
845
e3deef73
LM
846/* Attach to PID. If PID is the tgid, attach to it and all
847 of its threads. */
848
static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
886
/* Helper for second_thread_of_pid_p: counts threads belonging to
   process PID during a find_inferior walk.  */

struct counter
{
  /* The process whose threads are being counted.  */
  int pid;
  /* Number of matching threads seen so far.  */
  int count;
};
892
893static int
894second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
895{
896 struct counter *counter = args;
897
898 if (ptid_get_pid (entry->id) == counter->pid)
899 {
900 if (++counter->count > 1)
901 return 1;
902 }
d61ddec4 903
da6d8c04
DJ
904 return 0;
905}
906
95954743 907static int
fa96cb38 908last_thread_of_process_p (int pid)
95954743 909{
95954743 910 struct counter counter = { pid , 0 };
da6d8c04 911
95954743
PA
912 return (find_inferior (&all_threads,
913 second_thread_of_pid_p, &counter) == NULL);
914}
915
da84f473
PA
916/* Kill LWP. */
917
static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Capture errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      /* Capture errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
960
e76126e8
PA
961/* Kill LWP and wait for it to die. */
962
static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* We must have collected a final (non-stop) status by now.  */
  gdb_assert (res > 0);
}
1000
da84f473
PA
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  ARGS is a pointer to the int pid of the process
   whose LWPs should be killed.  Always returns 0 so iteration
   continues over all threads.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  /* Skip threads belonging to other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
1030
95954743
PA
/* Kill process PID and all of its LWPs, then mourn it.  Returns 0 on
   success, -1 if no such process is known.  Implements the target op
   `kill'.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill every non-leader LWP of the process.  */
  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1067
9b224c5e
PA
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver on PTRACE_DETACH, or 0 if no
   signal should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* GDB may have told us this signal is in "nopass" state; honor
     that and suppress it.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1147
95954743
PA
/* Callback for `find_inferior'.  Detaches from one LWP of the process
   whose pid ARGS points to: clears any pending SIGSTOP, flushes
   registers, computes the signal to deliver (see get_detach_signal),
   PTRACE_DETACHes, and deletes the LWP.  Always returns 0 so that
   iteration continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1188
95954743
PA
/* Detach from process PID, leaving it running on its own.  Returns 0
   on success, -1 if no such process is known.  Implements the target
   op `detach'.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1220
/* Remove all LWPs that belong to process PROC from the lwp list.
   Callback for `find_inferior'; always returns 0 so the whole thread
   list is walked.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct process_info *target_proc = proc;

  /* Ignore threads of unrelated processes.  */
  if (pid_of (thr) != pid_of (target_proc))
    return 0;

  delete_lwp (get_thread_lwp (thr));
  return 0;
}
1235
8336d594
PA
/* Forget everything about PROCESS after it has been killed or has
   detached/exited: tear down thread_db state, delete all its LWPs,
   free its private data, and remove it from the process list.
   Implements the target op `mourn'.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1255
/* Join with process PID: loop until its final exit/termination status
   has been collected, or until waitpid reports there is no such child
   left (ECHILD).  Implements the target op `join'.  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);

    /* Only inspect STATUS when my_waitpid actually collected one.
       On failure (ret == -1) STATUS is uninitialized, and testing
       WIFEXITED/WIFSIGNALED on it would read indeterminate data.  */
    if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1267
6ad8ae5c 1268/* Return nonzero if the given thread is still alive. */
0d62e5e8 1269static int
95954743 1270linux_thread_alive (ptid_t ptid)
0d62e5e8 1271{
95954743
PA
1272 struct lwp_info *lwp = find_lwp_pid (ptid);
1273
1274 /* We assume we always know if a thread exits. If a whole process
1275 exited but we still haven't been able to report it to GDB, we'll
1276 hold on to the last lwp of the dead process. */
1277 if (lwp != NULL)
1278 return !lwp->dead;
0d62e5e8
DJ
1279 else
1280 return 0;
1281}
1282
582511be
PA
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  May clear the LWP's status_pending_p and resume it as a
   side effect when a stale breakpoint stop is discarded.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the current thread, as the checks below
	 operate on the selected thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop-reason detection, re-check that
	 the breakpoint that caused the stop is still inserted.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1356
/* Return 1 if this lwp has an interesting status pending.  Callback
   for `find_inferior'; ARG is the ptid filter to match against.
   Re-resumes the LWP if its previously pending status turned out to
   be stale (see thread_still_has_status_pending_p).  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The pending stop is no longer interesting; let the LWP
	 continue the way it was going (stepping or running).  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1379
95954743
PA
1380static int
1381same_lwp (struct inferior_list_entry *entry, void *data)
1382{
1383 ptid_t ptid = *(ptid_t *) data;
1384 int lwp;
1385
1386 if (ptid_get_lwp (ptid) != 0)
1387 lwp = ptid_get_lwp (ptid);
1388 else
1389 lwp = ptid_get_pid (ptid);
1390
1391 if (ptid_get_lwp (entry->id) == lwp)
1392 return 1;
1393
1394 return 0;
1395}
1396
1397struct lwp_info *
1398find_lwp_pid (ptid_t ptid)
1399{
d86d4aaf
DE
1400 struct inferior_list_entry *thread
1401 = find_inferior (&all_threads, same_lwp, &ptid);
1402
1403 if (thread == NULL)
1404 return NULL;
1405
1406 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1407}
1408
fa96cb38 1409/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1410
fa96cb38
PA
1411static int
1412num_lwps (int pid)
1413{
1414 struct inferior_list_entry *inf, *tmp;
1415 int count = 0;
0d62e5e8 1416
fa96cb38 1417 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1418 {
fa96cb38
PA
1419 if (ptid_get_pid (inf->id) == pid)
1420 count++;
24a09b5f 1421 }
3aee8918 1422
fa96cb38
PA
1423 return count;
1424}
d61ddec4 1425
6d4ee8c6
GB
/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      /* Forward to the user-supplied callback; its return value
	 controls whether iteration stops.  */
      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  Calls CALLBACK with DATA for every LWP
   matching FILTER; returns the LWP for which CALLBACK returned
   nonzero, or NULL if none did.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
1479
fa96cb38
PA
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.
   Deletes the leader's LWP entry so that the "new" leader (after an
   exec) or the final exit can be picked up later.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
c3adc08c 1545
fa96cb38
PA
1546/* Callback for `find_inferior'. Returns the first LWP that is not
1547 stopped. ARG is a PTID filter. */
d50171e4 1548
fa96cb38
PA
1549static int
1550not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1551{
1552 struct thread_info *thr = (struct thread_info *) entry;
1553 struct lwp_info *lwp;
1554 ptid_t filter = *(ptid_t *) arg;
47c0c975 1555
fa96cb38
PA
1556 if (!ptid_match (ptid_of (thr), filter))
1557 return 0;
bd99dc85 1558
fa96cb38
PA
1559 lwp = get_thread_lwp (thr);
1560 if (!lwp->stopped)
1561 return 1;
1562
1563 return 0;
0d62e5e8 1564}
611cb4a5 1565
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  LWP must not be suspended on
   entry, and is left unsuspended on exit (asserted below).  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1613
fa593d66
PA
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On success, *STATUS is filled in with the
   collection state (see fast_tracepoint_collecting).  Returns 0 when
   the low target cannot report thread areas.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1636
/* Check whether LWP stopped inside a fast-tracepoint jump pad, and if
   so arrange for it to be moved out before its stop (WSTAT, may be
   NULL) is reported to GDB.  Returns 1 if the LWP needs to be moved
   out (caller must resume it), 0 otherwise.

   The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Operate with LWP's thread selected; restored before returning.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
1763
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  WSTAT is the wait status carrying the stop signal.
   Non-realtime signals already present in the queue are not queued a
   second time.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Prepend the new entry, capturing the signal's siginfo so it can
     be re-injected faithfully when dequeued.  */
  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1822
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Dequeues the OLDEST entry (the tail of the
   prev-linked list), stores its wait status in *WSTAT, re-injects its
   saved siginfo, and returns 1.  Returns 0 when the queue is
   empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (FIFO order).  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
1868
582511be
PA
/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.  Returns nonzero if CHILD stopped for a watchpoint.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      /* The low target hooks operate on the selected thread; switch
	 to CHILD's thread temporarily.  */
      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
1910
fa96cb38
PA
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.

   LWPID is the id waitpid reported the event for; WSTAT is the raw
   wait status.  On return, if non-NULL, the LWP has its pending
   status recorded (status_pending_p set), ready for the caller to
   pick an event LWP out of all that have one.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{

	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  /* WIFSTOPPED was asserted just above; the explicit re-check below
     is redundant but harmless.  */
  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->priv->new_inferior)
	{
	  struct thread_info *saved_thread;

	  /* arch_setup reads registers of the current thread, so
	     temporarily switch to the event thread.  */
	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->priv->new_inferior = 0;
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));

      linux_enable_event_reporting (lwpid, proc->attached);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      /* Extended waitstatus (fork/vfork/clone...); handled entirely
	 here, never reported to the caller.  */
      child->stop_pc = get_pc (child);
      handle_extended_wait (child, wstat);
      return NULL;
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	  || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    check_stopped_by_watchpoint (child);

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      /* This SIGSTOP is one we sent ourselves to stop the LWP.  */
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2064
20ba1ce6
PA
2065/* Resume LWPs that are currently stopped without any pending status
2066 to report, but are resumed from the core's perspective. */
2067
2068static void
2069resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2070{
2071 struct thread_info *thread = (struct thread_info *) entry;
2072 struct lwp_info *lp = get_thread_lwp (thread);
2073
2074 if (lp->stopped
2075 && !lp->status_pending_p
2076 && thread->last_resume_kind != resume_stop
2077 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2078 {
2079 int step = thread->last_resume_kind == resume_step;
2080
2081 if (debug_threads)
2082 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2083 target_pid_to_str (ptid_of (thread)),
2084 paddress (lp->stop_pc),
2085 step);
2086
2087 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2088 }
2089}
2090
fa96cb38
PA
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      /* Any-child / whole-process filter: pick any thread with a
	 pending status matching FILTER_PTID.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Single-LWP filter.  */
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  /* The LWP stopped midway through a fast tracepoint
	     collection; defer the signal and let it finish the
	     collection first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      /* Found a pending event; consume it and make its thread
	 current.  */
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid (although we don't currently enable exec
	   events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning.  */
  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
	debug_printf ("LWP %d is the last lwp of process.  "
		      "Process %ld exiting.\n",
		      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
2300
2301/* Wait for an event from child(ren) PTID. PTIDs can be:
2302 minus_one_ptid, to specify any child; a pid PTID, specifying all
2303 lwps of a thread group; or a PTID representing a single lwp. Store
2304 the stop status through the status pointer WSTAT. OPTIONS is
2305 passed to the waitpid call. Return 0 if no event was found and
2306 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2307 was found. Return the PID of the stopped child otherwise. */
2308
2309static int
2310linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2311{
2312 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2313}
2314
6bf5e0ba
PA
2315/* Count the LWP's that have had events. */
2316
2317static int
2318count_events_callback (struct inferior_list_entry *entry, void *data)
2319{
d86d4aaf 2320 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2321 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2322 int *count = data;
2323
2324 gdb_assert (count != NULL);
2325
582511be 2326 /* Count only resumed LWPs that have an event pending. */
8336d594 2327 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2328 && lp->status_pending_p)
6bf5e0ba
PA
2329 (*count)++;
2330
2331 return 0;
2332}
2333
2334/* Select the LWP (if any) that is currently being single-stepped. */
2335
2336static int
2337select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2338{
d86d4aaf
DE
2339 struct thread_info *thread = (struct thread_info *) entry;
2340 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2341
8336d594
PA
2342 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2343 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2344 && lp->status_pending_p)
2345 return 1;
2346 else
2347 return 0;
2348}
2349
b90fc188 2350/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2351
2352static int
2353select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2354{
d86d4aaf 2355 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2356 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2357 int *selector = data;
2358
2359 gdb_assert (selector != NULL);
2360
582511be 2361 /* Select only resumed LWPs that have an event pending. */
91baf43f 2362 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2363 && lp->status_pending_p)
6bf5e0ba
PA
2364 if ((*selector)-- == 0)
2365 return 1;
2366
2367 return 0;
2368}
2369
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP is replaced with the chosen LWP (left untouched if none
   was found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_singlestep_lwp_callback,
						NULL);
      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      /* First see how many events we have.  */
      find_inferior (&all_threads, count_events_callback, &num_events);
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  Scale rand() into [0, num_events).  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
		      num_events, random_selector);

      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_event_lwp_callback,
						&random_selector);
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2432
7984d532
PA
2433/* Decrement the suspend count of an LWP. */
2434
2435static int
2436unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2437{
d86d4aaf
DE
2438 struct thread_info *thread = (struct thread_info *) entry;
2439 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2440
2441 /* Ignore EXCEPT. */
2442 if (lwp == except)
2443 return 0;
2444
2445 lwp->suspended--;
2446
2447 gdb_assert (lwp->suspended >= 0);
2448 return 0;
2449}
2450
2451/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2452 NULL. */
2453
2454static void
2455unsuspend_all_lwps (struct lwp_info *except)
2456{
d86d4aaf 2457 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2458}
2459
fa593d66
PA
static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
				       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
			    struct target_waitstatus *ourstatus,
			    int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread is blocked at a point it can't be moved out of,
     give up immediately -- stabilizing is best-effort.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp->suspended++;

	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      /* Re-queue the signal so it is delivered once the
		 thread is out of the jump pad.  */
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspended++ done in the loop above.  */
  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2568
582511be
PA
2569static void async_file_mark (void);
2570
2571/* Convenience function that is called when the kernel reports an
2572 event that is not passed out to GDB. */
2573
2574static ptid_t
2575ignore_event (struct target_waitstatus *ourstatus)
2576{
2577 /* If we got an event, there may still be others, as a single
2578 SIGCHLD can indicate more than one child stopped. This forces
2579 another target_wait call. */
2580 async_file_mark ();
2581
2582 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2583 return null_ptid;
2584}
2585
0d62e5e8 2586/* Wait for process, returns status. */
da6d8c04 2587
95954743
PA
2588static ptid_t
2589linux_wait_1 (ptid_t ptid,
2590 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2591{
e5f1222d 2592 int w;
fc7238bb 2593 struct lwp_info *event_child;
bd99dc85 2594 int options;
bd99dc85 2595 int pid;
6bf5e0ba
PA
2596 int step_over_finished;
2597 int bp_explains_trap;
2598 int maybe_internal_trap;
2599 int report_to_gdb;
219f2f23 2600 int trace_event;
c2d6af84 2601 int in_step_range;
bd99dc85 2602
87ce2a04
DE
2603 if (debug_threads)
2604 {
2605 debug_enter ();
2606 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2607 }
2608
bd99dc85
PA
2609 /* Translate generic target options into linux options. */
2610 options = __WALL;
2611 if (target_options & TARGET_WNOHANG)
2612 options |= WNOHANG;
0d62e5e8 2613
fa593d66
PA
2614 bp_explains_trap = 0;
2615 trace_event = 0;
c2d6af84 2616 in_step_range = 0;
bd99dc85
PA
2617 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2618
6bf5e0ba
PA
2619 if (ptid_equal (step_over_bkpt, null_ptid))
2620 pid = linux_wait_for_event (ptid, &w, options);
2621 else
2622 {
2623 if (debug_threads)
87ce2a04
DE
2624 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2625 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2626 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2627 }
2628
fa96cb38 2629 if (pid == 0)
87ce2a04 2630 {
fa96cb38
PA
2631 gdb_assert (target_options & TARGET_WNOHANG);
2632
87ce2a04
DE
2633 if (debug_threads)
2634 {
fa96cb38
PA
2635 debug_printf ("linux_wait_1 ret = null_ptid, "
2636 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2637 debug_exit ();
2638 }
fa96cb38
PA
2639
2640 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2641 return null_ptid;
2642 }
fa96cb38
PA
2643 else if (pid == -1)
2644 {
2645 if (debug_threads)
2646 {
2647 debug_printf ("linux_wait_1 ret = null_ptid, "
2648 "TARGET_WAITKIND_NO_RESUMED\n");
2649 debug_exit ();
2650 }
bd99dc85 2651
fa96cb38
PA
2652 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2653 return null_ptid;
2654 }
0d62e5e8 2655
0bfdf32f 2656 event_child = get_thread_lwp (current_thread);
0d62e5e8 2657
fa96cb38
PA
2658 /* linux_wait_for_event only returns an exit status for the last
2659 child of a process. Report it. */
2660 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2661 {
fa96cb38 2662 if (WIFEXITED (w))
0d62e5e8 2663 {
fa96cb38
PA
2664 ourstatus->kind = TARGET_WAITKIND_EXITED;
2665 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2666
fa96cb38 2667 if (debug_threads)
bd99dc85 2668 {
fa96cb38
PA
2669 debug_printf ("linux_wait_1 ret = %s, exited with "
2670 "retcode %d\n",
0bfdf32f 2671 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2672 WEXITSTATUS (w));
2673 debug_exit ();
bd99dc85 2674 }
fa96cb38
PA
2675 }
2676 else
2677 {
2678 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2679 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2680
fa96cb38
PA
2681 if (debug_threads)
2682 {
2683 debug_printf ("linux_wait_1 ret = %s, terminated with "
2684 "signal %d\n",
0bfdf32f 2685 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2686 WTERMSIG (w));
2687 debug_exit ();
2688 }
0d62e5e8 2689 }
fa96cb38 2690
0bfdf32f 2691 return ptid_of (current_thread);
da6d8c04
DJ
2692 }
2693
8090aef2
PA
2694 /* If step-over executes a breakpoint instruction, it means a
2695 gdb/gdbserver breakpoint had been planted on top of a permanent
2696 breakpoint. The PC has been adjusted by
2697 check_stopped_by_breakpoint to point at the breakpoint address.
2698 Advance the PC manually past the breakpoint, otherwise the
2699 program would keep trapping the permanent breakpoint forever. */
2700 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2701 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2702 {
9beb7c4e 2703 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2704
2705 if (debug_threads)
2706 {
2707 debug_printf ("step-over for %s executed software breakpoint\n",
2708 target_pid_to_str (ptid_of (current_thread)));
2709 }
2710
2711 if (increment_pc != 0)
2712 {
2713 struct regcache *regcache
2714 = get_thread_regcache (current_thread, 1);
2715
2716 event_child->stop_pc += increment_pc;
2717 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2718
2719 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2720 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2721 }
2722 }
2723
6bf5e0ba
PA
2724 /* If this event was not handled before, and is not a SIGTRAP, we
2725 report it. SIGILL and SIGSEGV are also treated as traps in case
2726 a breakpoint is inserted at the current PC. If this target does
2727 not support internal breakpoints at all, we also report the
2728 SIGTRAP without further processing; it's of no concern to us. */
2729 maybe_internal_trap
2730 = (supports_breakpoints ()
2731 && (WSTOPSIG (w) == SIGTRAP
2732 || ((WSTOPSIG (w) == SIGILL
2733 || WSTOPSIG (w) == SIGSEGV)
2734 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2735
2736 if (maybe_internal_trap)
2737 {
2738 /* Handle anything that requires bookkeeping before deciding to
2739 report the event or continue waiting. */
2740
2741 /* First check if we can explain the SIGTRAP with an internal
2742 breakpoint, or if we should possibly report the event to GDB.
2743 Do this before anything that may remove or insert a
2744 breakpoint. */
2745 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2746
2747 /* We have a SIGTRAP, possibly a step-over dance has just
2748 finished. If so, tweak the state machine accordingly,
2749 reinsert breakpoints and delete any reinsert (software
2750 single-step) breakpoints. */
2751 step_over_finished = finish_step_over (event_child);
2752
2753 /* Now invoke the callbacks of any internal breakpoints there. */
2754 check_breakpoints (event_child->stop_pc);
2755
219f2f23
PA
2756 /* Handle tracepoint data collecting. This may overflow the
2757 trace buffer, and cause a tracing stop, removing
2758 breakpoints. */
2759 trace_event = handle_tracepoints (event_child);
2760
6bf5e0ba
PA
2761 if (bp_explains_trap)
2762 {
2763 /* If we stepped or ran into an internal breakpoint, we've
2764 already handled it. So next time we resume (from this
2765 PC), we should step over it. */
2766 if (debug_threads)
87ce2a04 2767 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2768
8b07ae33
PA
2769 if (breakpoint_here (event_child->stop_pc))
2770 event_child->need_step_over = 1;
6bf5e0ba
PA
2771 }
2772 }
2773 else
2774 {
2775 /* We have some other signal, possibly a step-over dance was in
2776 progress, and it should be cancelled too. */
2777 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2778 }
2779
2780 /* We have all the data we need. Either report the event to GDB, or
2781 resume threads and keep waiting for more. */
2782
2783 /* If we're collecting a fast tracepoint, finish the collection and
2784 move out of the jump pad before delivering a signal. See
2785 linux_stabilize_threads. */
2786
2787 if (WIFSTOPPED (w)
2788 && WSTOPSIG (w) != SIGTRAP
2789 && supports_fast_tracepoints ()
58b4daa5 2790 && agent_loaded_p ())
fa593d66
PA
2791 {
2792 if (debug_threads)
87ce2a04
DE
2793 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2794 "to defer or adjust it.\n",
0bfdf32f 2795 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2796
2797 /* Allow debugging the jump pad itself. */
0bfdf32f 2798 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2799 && maybe_move_out_of_jump_pad (event_child, &w))
2800 {
2801 enqueue_one_deferred_signal (event_child, &w);
2802
2803 if (debug_threads)
87ce2a04 2804 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2805 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2806
2807 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2808
2809 return ignore_event (ourstatus);
fa593d66
PA
2810 }
2811 }
219f2f23 2812
fa593d66
PA
2813 if (event_child->collecting_fast_tracepoint)
2814 {
2815 if (debug_threads)
87ce2a04
DE
2816 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2817 "Check if we're already there.\n",
0bfdf32f 2818 lwpid_of (current_thread),
87ce2a04 2819 event_child->collecting_fast_tracepoint);
fa593d66
PA
2820
2821 trace_event = 1;
2822
2823 event_child->collecting_fast_tracepoint
2824 = linux_fast_tracepoint_collecting (event_child, NULL);
2825
2826 if (event_child->collecting_fast_tracepoint != 1)
2827 {
2828 /* No longer need this breakpoint. */
2829 if (event_child->exit_jump_pad_bkpt != NULL)
2830 {
2831 if (debug_threads)
87ce2a04
DE
2832 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2833 "stopping all threads momentarily.\n");
fa593d66
PA
2834
2835 /* Other running threads could hit this breakpoint.
2836 We don't handle moribund locations like GDB does,
2837 instead we always pause all threads when removing
2838 breakpoints, so that any step-over or
2839 decr_pc_after_break adjustment is always taken
2840 care of while the breakpoint is still
2841 inserted. */
2842 stop_all_lwps (1, event_child);
fa593d66
PA
2843
2844 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2845 event_child->exit_jump_pad_bkpt = NULL;
2846
2847 unstop_all_lwps (1, event_child);
2848
2849 gdb_assert (event_child->suspended >= 0);
2850 }
2851 }
2852
2853 if (event_child->collecting_fast_tracepoint == 0)
2854 {
2855 if (debug_threads)
87ce2a04
DE
2856 debug_printf ("fast tracepoint finished "
2857 "collecting successfully.\n");
fa593d66
PA
2858
2859 /* We may have a deferred signal to report. */
2860 if (dequeue_one_deferred_signal (event_child, &w))
2861 {
2862 if (debug_threads)
87ce2a04 2863 debug_printf ("dequeued one signal.\n");
fa593d66 2864 }
3c11dd79 2865 else
fa593d66 2866 {
3c11dd79 2867 if (debug_threads)
87ce2a04 2868 debug_printf ("no deferred signals.\n");
fa593d66
PA
2869
2870 if (stabilizing_threads)
2871 {
2872 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 2873 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
2874
2875 if (debug_threads)
2876 {
2877 debug_printf ("linux_wait_1 ret = %s, stopped "
2878 "while stabilizing threads\n",
0bfdf32f 2879 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
2880 debug_exit ();
2881 }
2882
0bfdf32f 2883 return ptid_of (current_thread);
fa593d66
PA
2884 }
2885 }
2886 }
6bf5e0ba
PA
2887 }
2888
e471f25b
PA
2889 /* Check whether GDB would be interested in this event. */
2890
2891 /* If GDB is not interested in this signal, don't stop other
2892 threads, and don't report it to GDB. Just resume the inferior
2893 right away. We do this for threading-related signals as well as
2894 any that GDB specifically requested we ignore. But never ignore
2895 SIGSTOP if we sent it ourselves, and do not ignore signals when
2896 stepping - they may require special handling to skip the signal
c9587f88
AT
2897 handler. Also never ignore signals that could be caused by a
2898 breakpoint. */
e471f25b
PA
2899 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2900 thread library? */
2901 if (WIFSTOPPED (w)
0bfdf32f 2902 && current_thread->last_resume_kind != resume_step
e471f25b 2903 && (
1a981360 2904#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 2905 (current_process ()->priv->thread_db != NULL
e471f25b
PA
2906 && (WSTOPSIG (w) == __SIGRTMIN
2907 || WSTOPSIG (w) == __SIGRTMIN + 1))
2908 ||
2909#endif
2ea28649 2910 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 2911 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
2912 && current_thread->last_resume_kind == resume_stop)
2913 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
2914 {
2915 siginfo_t info, *info_p;
2916
2917 if (debug_threads)
87ce2a04 2918 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 2919 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 2920
0bfdf32f 2921 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2922 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
2923 info_p = &info;
2924 else
2925 info_p = NULL;
2926 linux_resume_one_lwp (event_child, event_child->stepping,
2927 WSTOPSIG (w), info_p);
582511be 2928 return ignore_event (ourstatus);
e471f25b
PA
2929 }
2930
c2d6af84
PA
2931 /* Note that all addresses are always "out of the step range" when
2932 there's no range to begin with. */
2933 in_step_range = lwp_in_step_range (event_child);
2934
2935 /* If GDB wanted this thread to single step, and the thread is out
2936 of the step range, we always want to report the SIGTRAP, and let
2937 GDB handle it. Watchpoints should always be reported. So should
2938 signals we can't explain. A SIGTRAP we can't explain could be a
2939 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2940 do, we're be able to handle GDB breakpoints on top of internal
2941 breakpoints, by handling the internal breakpoint and still
2942 reporting the event to GDB. If we don't, we're out of luck, GDB
2943 won't see the breakpoint hit. */
6bf5e0ba 2944 report_to_gdb = (!maybe_internal_trap
0bfdf32f 2945 || (current_thread->last_resume_kind == resume_step
c2d6af84 2946 && !in_step_range)
15c66dd6 2947 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 2948 || (!step_over_finished && !in_step_range
493e2a69 2949 && !bp_explains_trap && !trace_event)
9f3a5c85 2950 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5
SS
2951 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2952 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2953
2954 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
2955
2956 /* We found no reason GDB would want us to stop. We either hit one
2957 of our own breakpoints, or finished an internal step GDB
2958 shouldn't know about. */
2959 if (!report_to_gdb)
2960 {
2961 if (debug_threads)
2962 {
2963 if (bp_explains_trap)
87ce2a04 2964 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2965 if (step_over_finished)
87ce2a04 2966 debug_printf ("Step-over finished.\n");
219f2f23 2967 if (trace_event)
87ce2a04 2968 debug_printf ("Tracepoint event.\n");
c2d6af84 2969 if (lwp_in_step_range (event_child))
87ce2a04
DE
2970 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2971 paddress (event_child->stop_pc),
2972 paddress (event_child->step_range_start),
2973 paddress (event_child->step_range_end));
6bf5e0ba
PA
2974 }
2975
2976 /* We're not reporting this breakpoint to GDB, so apply the
2977 decr_pc_after_break adjustment to the inferior's regcache
2978 ourselves. */
2979
2980 if (the_low_target.set_pc != NULL)
2981 {
2982 struct regcache *regcache
0bfdf32f 2983 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
2984 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2985 }
2986
7984d532
PA
2987 /* We may have finished stepping over a breakpoint. If so,
2988 we've stopped and suspended all LWPs momentarily except the
2989 stepping one. This is where we resume them all again. We're
2990 going to keep waiting, so use proceed, which handles stepping
2991 over the next breakpoint. */
6bf5e0ba 2992 if (debug_threads)
87ce2a04 2993 debug_printf ("proceeding all threads.\n");
7984d532
PA
2994
2995 if (step_over_finished)
2996 unsuspend_all_lwps (event_child);
2997
6bf5e0ba 2998 proceed_all_lwps ();
582511be 2999 return ignore_event (ourstatus);
6bf5e0ba
PA
3000 }
3001
3002 if (debug_threads)
3003 {
0bfdf32f 3004 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3005 {
3006 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3007 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3008 else if (!lwp_in_step_range (event_child))
87ce2a04 3009 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3010 }
15c66dd6 3011 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3012 debug_printf ("Stopped by watchpoint.\n");
582511be 3013 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3014 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3015 if (debug_threads)
87ce2a04 3016 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3017 }
3018
3019 /* Alright, we're going to report a stop. */
3020
582511be 3021 if (!stabilizing_threads)
6bf5e0ba
PA
3022 {
3023 /* In all-stop, stop all threads. */
582511be
PA
3024 if (!non_stop)
3025 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3026
3027 /* If we're not waiting for a specific LWP, choose an event LWP
3028 from among those that have had events. Giving equal priority
3029 to all LWPs that have had events helps prevent
3030 starvation. */
3031 if (ptid_equal (ptid, minus_one_ptid))
3032 {
3033 event_child->status_pending_p = 1;
3034 event_child->status_pending = w;
3035
3036 select_event_lwp (&event_child);
3037
0bfdf32f
GB
3038 /* current_thread and event_child must stay in sync. */
3039 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3040
6bf5e0ba
PA
3041 event_child->status_pending_p = 0;
3042 w = event_child->status_pending;
3043 }
3044
c03e6ccc 3045 if (step_over_finished)
582511be
PA
3046 {
3047 if (!non_stop)
3048 {
3049 /* If we were doing a step-over, all other threads but
3050 the stepping one had been paused in start_step_over,
3051 with their suspend counts incremented. We don't want
3052 to do a full unstop/unpause, because we're in
3053 all-stop mode (so we want threads stopped), but we
3054 still need to unsuspend the other threads, to
3055 decrement their `suspended' count back. */
3056 unsuspend_all_lwps (event_child);
3057 }
3058 else
3059 {
3060 /* If we just finished a step-over, then all threads had
3061 been momentarily paused. In all-stop, that's fine,
3062 we want threads stopped by now anyway. In non-stop,
3063 we need to re-resume threads that GDB wanted to be
3064 running. */
3065 unstop_all_lwps (1, event_child);
3066 }
3067 }
c03e6ccc 3068
fa593d66 3069 /* Stabilize threads (move out of jump pads). */
582511be
PA
3070 if (!non_stop)
3071 stabilize_threads ();
6bf5e0ba
PA
3072 }
3073 else
3074 {
3075 /* If we just finished a step-over, then all threads had been
3076 momentarily paused. In all-stop, that's fine, we want
3077 threads stopped by now anyway. In non-stop, we need to
3078 re-resume threads that GDB wanted to be running. */
3079 if (step_over_finished)
7984d532 3080 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3081 }
3082
5b1c542e 3083 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3084
582511be 3085 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3086 it was a software breakpoint, and the client doesn't know we can
3087 adjust the breakpoint ourselves. */
3088 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3089 && !swbreak_feature)
582511be
PA
3090 {
3091 int decr_pc = the_low_target.decr_pc_after_break;
3092
3093 if (decr_pc != 0)
3094 {
3095 struct regcache *regcache
3096 = get_thread_regcache (current_thread, 1);
3097 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3098 }
3099 }
3100
0bfdf32f 3101 if (current_thread->last_resume_kind == resume_stop
8336d594 3102 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3103 {
3104 /* A thread that has been requested to stop by GDB with vCont;t,
3105 and it stopped cleanly, so report as SIG0. The use of
3106 SIGSTOP is an implementation detail. */
a493e3e2 3107 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3108 }
0bfdf32f 3109 else if (current_thread->last_resume_kind == resume_stop
8336d594 3110 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3111 {
3112 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3113 but, it stopped for other reasons. */
2ea28649 3114 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3115 }
3116 else
3117 {
2ea28649 3118 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3119 }
3120
d50171e4
PA
3121 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3122
bd99dc85 3123 if (debug_threads)
87ce2a04
DE
3124 {
3125 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3126 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3127 ourstatus->kind, ourstatus->value.sig);
3128 debug_exit ();
3129 }
bd99dc85 3130
0bfdf32f 3131 return ptid_of (current_thread);
bd99dc85
PA
3132}
3133
3134/* Get rid of any pending event in the pipe. */
3135static void
3136async_file_flush (void)
3137{
3138 int ret;
3139 char buf;
3140
3141 do
3142 ret = read (linux_event_pipe[0], &buf, 1);
3143 while (ret >= 0 || (ret == -1 && errno == EINTR));
3144}
3145
3146/* Put something in the pipe, so the event loop wakes up. */
3147static void
3148async_file_mark (void)
3149{
3150 int ret;
3151
3152 async_file_flush ();
3153
3154 do
3155 ret = write (linux_event_pipe[1], "+", 1);
3156 while (ret == 0 || (ret == -1 && errno == EINTR));
3157
3158 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3159 be awakened anyway. */
3160}
3161
95954743
PA
3162static ptid_t
3163linux_wait (ptid_t ptid,
3164 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3165{
95954743 3166 ptid_t event_ptid;
bd99dc85 3167
bd99dc85
PA
3168 /* Flush the async file first. */
3169 if (target_is_async_p ())
3170 async_file_flush ();
3171
582511be
PA
3172 do
3173 {
3174 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3175 }
3176 while ((target_options & TARGET_WNOHANG) == 0
3177 && ptid_equal (event_ptid, null_ptid)
3178 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3179
3180 /* If at least one stop was reported, there may be more. A single
3181 SIGCHLD can signal more than one child stop. */
3182 if (target_is_async_p ()
3183 && (target_options & TARGET_WNOHANG) != 0
95954743 3184 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3185 async_file_mark ();
3186
3187 return event_ptid;
da6d8c04
DJ
3188}
3189
c5f62d5f 3190/* Send a signal to an LWP. */
fd500816
DJ
3191
3192static int
a1928bad 3193kill_lwp (unsigned long lwpid, int signo)
fd500816 3194{
c5f62d5f
DE
3195 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3196 fails, then we are not using nptl threads and we should be using kill. */
fd500816 3197
c5f62d5f
DE
3198#ifdef __NR_tkill
3199 {
3200 static int tkill_failed;
fd500816 3201
c5f62d5f
DE
3202 if (!tkill_failed)
3203 {
3204 int ret;
3205
3206 errno = 0;
3207 ret = syscall (__NR_tkill, lwpid, signo);
3208 if (errno != ENOSYS)
3209 return ret;
3210 tkill_failed = 1;
3211 }
3212 }
fd500816
DJ
3213#endif
3214
3215 return kill (lwpid, signo);
3216}
3217
964e4306
PA
3218void
3219linux_stop_lwp (struct lwp_info *lwp)
3220{
3221 send_sigstop (lwp);
3222}
3223
0d62e5e8 3224static void
02fc4de7 3225send_sigstop (struct lwp_info *lwp)
0d62e5e8 3226{
bd99dc85 3227 int pid;
0d62e5e8 3228
d86d4aaf 3229 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3230
0d62e5e8
DJ
3231 /* If we already have a pending stop signal for this process, don't
3232 send another. */
54a0b537 3233 if (lwp->stop_expected)
0d62e5e8 3234 {
ae13219e 3235 if (debug_threads)
87ce2a04 3236 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3237
0d62e5e8
DJ
3238 return;
3239 }
3240
3241 if (debug_threads)
87ce2a04 3242 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3243
d50171e4 3244 lwp->stop_expected = 1;
bd99dc85 3245 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3246}
3247
7984d532
PA
3248static int
3249send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3250{
d86d4aaf
DE
3251 struct thread_info *thread = (struct thread_info *) entry;
3252 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3253
7984d532
PA
3254 /* Ignore EXCEPT. */
3255 if (lwp == except)
3256 return 0;
3257
02fc4de7 3258 if (lwp->stopped)
7984d532 3259 return 0;
02fc4de7
PA
3260
3261 send_sigstop (lwp);
7984d532
PA
3262 return 0;
3263}
3264
3265/* Increment the suspend count of an LWP, and stop it, if not stopped
3266 yet. */
3267static int
3268suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3269 void *except)
3270{
d86d4aaf
DE
3271 struct thread_info *thread = (struct thread_info *) entry;
3272 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3273
3274 /* Ignore EXCEPT. */
3275 if (lwp == except)
3276 return 0;
3277
3278 lwp->suspended++;
3279
3280 return send_sigstop_callback (entry, except);
02fc4de7
PA
3281}
3282
95954743
PA
3283static void
3284mark_lwp_dead (struct lwp_info *lwp, int wstat)
3285{
3286 /* It's dead, really. */
3287 lwp->dead = 1;
3288
3289 /* Store the exit status for later. */
3290 lwp->status_pending_p = 1;
3291 lwp->status_pending = wstat;
3292
95954743
PA
3293 /* Prevent trying to stop it. */
3294 lwp->stopped = 1;
3295
3296 /* No further stops are expected from a dead lwp. */
3297 lwp->stop_expected = 0;
3298}
3299
fa96cb38
PA
3300/* Wait for all children to stop for the SIGSTOPs we just queued. */
3301
0d62e5e8 3302static void
fa96cb38 3303wait_for_sigstop (void)
0d62e5e8 3304{
0bfdf32f 3305 struct thread_info *saved_thread;
95954743 3306 ptid_t saved_tid;
fa96cb38
PA
3307 int wstat;
3308 int ret;
0d62e5e8 3309
0bfdf32f
GB
3310 saved_thread = current_thread;
3311 if (saved_thread != NULL)
3312 saved_tid = saved_thread->entry.id;
bd99dc85 3313 else
95954743 3314 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3315
d50171e4 3316 if (debug_threads)
fa96cb38 3317 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3318
fa96cb38
PA
3319 /* Passing NULL_PTID as filter indicates we want all events to be
3320 left pending. Eventually this returns when there are no
3321 unwaited-for children left. */
3322 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3323 &wstat, __WALL);
3324 gdb_assert (ret == -1);
0d62e5e8 3325
0bfdf32f
GB
3326 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3327 current_thread = saved_thread;
0d62e5e8
DJ
3328 else
3329 {
3330 if (debug_threads)
87ce2a04 3331 debug_printf ("Previously current thread died.\n");
0d62e5e8 3332
bd99dc85
PA
3333 if (non_stop)
3334 {
3335 /* We can't change the current inferior behind GDB's back,
3336 otherwise, a subsequent command may apply to the wrong
3337 process. */
0bfdf32f 3338 current_thread = NULL;
bd99dc85
PA
3339 }
3340 else
3341 {
3342 /* Set a valid thread as current. */
0bfdf32f 3343 set_desired_thread (0);
bd99dc85 3344 }
0d62e5e8
DJ
3345 }
3346}
3347
fa593d66
PA
3348/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3349 move it out, because we need to report the stop event to GDB. For
3350 example, if the user puts a breakpoint in the jump pad, it's
3351 because she wants to debug it. */
3352
3353static int
3354stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3355{
d86d4aaf
DE
3356 struct thread_info *thread = (struct thread_info *) entry;
3357 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3358
3359 gdb_assert (lwp->suspended == 0);
3360 gdb_assert (lwp->stopped);
3361
3362 /* Allow debugging the jump pad, gdb_collect, etc.. */
3363 return (supports_fast_tracepoints ()
58b4daa5 3364 && agent_loaded_p ()
fa593d66 3365 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3366 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3367 || thread->last_resume_kind == resume_step)
3368 && linux_fast_tracepoint_collecting (lwp, NULL));
3369}
3370
3371static void
3372move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3373{
d86d4aaf
DE
3374 struct thread_info *thread = (struct thread_info *) entry;
3375 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3376 int *wstat;
3377
3378 gdb_assert (lwp->suspended == 0);
3379 gdb_assert (lwp->stopped);
3380
3381 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3382
3383 /* Allow debugging the jump pad, gdb_collect, etc. */
3384 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3385 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3386 && thread->last_resume_kind != resume_step
3387 && maybe_move_out_of_jump_pad (lwp, wstat))
3388 {
3389 if (debug_threads)
87ce2a04 3390 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3391 lwpid_of (thread));
fa593d66
PA
3392
3393 if (wstat)
3394 {
3395 lwp->status_pending_p = 0;
3396 enqueue_one_deferred_signal (lwp, wstat);
3397
3398 if (debug_threads)
87ce2a04
DE
3399 debug_printf ("Signal %d for LWP %ld deferred "
3400 "(in jump pad)\n",
d86d4aaf 3401 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3402 }
3403
3404 linux_resume_one_lwp (lwp, 0, 0, NULL);
3405 }
3406 else
3407 lwp->suspended++;
3408}
3409
3410static int
3411lwp_running (struct inferior_list_entry *entry, void *data)
3412{
d86d4aaf
DE
3413 struct thread_info *thread = (struct thread_info *) entry;
3414 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3415
3416 if (lwp->dead)
3417 return 0;
3418 if (lwp->stopped)
3419 return 0;
3420 return 1;
3421}
3422
7984d532
PA
3423/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3424 If SUSPEND, then also increase the suspend count of every LWP,
3425 except EXCEPT. */
3426
0d62e5e8 3427static void
7984d532 3428stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3429{
bde24c0a
PA
3430 /* Should not be called recursively. */
3431 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3432
87ce2a04
DE
3433 if (debug_threads)
3434 {
3435 debug_enter ();
3436 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3437 suspend ? "stop-and-suspend" : "stop",
3438 except != NULL
d86d4aaf 3439 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3440 : "none");
3441 }
3442
bde24c0a
PA
3443 stopping_threads = (suspend
3444 ? STOPPING_AND_SUSPENDING_THREADS
3445 : STOPPING_THREADS);
7984d532
PA
3446
3447 if (suspend)
d86d4aaf 3448 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3449 else
d86d4aaf 3450 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3451 wait_for_sigstop ();
bde24c0a 3452 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3453
3454 if (debug_threads)
3455 {
3456 debug_printf ("stop_all_lwps done, setting stopping_threads "
3457 "back to !stopping\n");
3458 debug_exit ();
3459 }
0d62e5e8
DJ
3460}
3461
23f238d3
PA
3462/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3463 SIGNAL is nonzero, give it that signal. */
da6d8c04 3464
ce3a066d 3465static void
23f238d3
PA
3466linux_resume_one_lwp_throw (struct lwp_info *lwp,
3467 int step, int signal, siginfo_t *info)
da6d8c04 3468{
d86d4aaf 3469 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3470 struct thread_info *saved_thread;
fa593d66 3471 int fast_tp_collecting;
0d62e5e8 3472
54a0b537 3473 if (lwp->stopped == 0)
0d62e5e8
DJ
3474 return;
3475
fa593d66
PA
3476 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3477
3478 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3479
219f2f23
PA
3480 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3481 user used the "jump" command, or "set $pc = foo"). */
3482 if (lwp->stop_pc != get_pc (lwp))
3483 {
3484 /* Collecting 'while-stepping' actions doesn't make sense
3485 anymore. */
d86d4aaf 3486 release_while_stepping_state_list (thread);
219f2f23
PA
3487 }
3488
0d62e5e8
DJ
3489 /* If we have pending signals or status, and a new signal, enqueue the
3490 signal. Also enqueue the signal if we are waiting to reinsert a
3491 breakpoint; it will be picked up again below. */
3492 if (signal != 0
fa593d66
PA
3493 && (lwp->status_pending_p
3494 || lwp->pending_signals != NULL
3495 || lwp->bp_reinsert != 0
3496 || fast_tp_collecting))
0d62e5e8
DJ
3497 {
3498 struct pending_signals *p_sig;
bca929d3 3499 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3500 p_sig->prev = lwp->pending_signals;
0d62e5e8 3501 p_sig->signal = signal;
32ca6d61
DJ
3502 if (info == NULL)
3503 memset (&p_sig->info, 0, sizeof (siginfo_t));
3504 else
3505 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3506 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3507 }
3508
d50171e4
PA
3509 if (lwp->status_pending_p)
3510 {
3511 if (debug_threads)
87ce2a04
DE
3512 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3513 " has pending status\n",
d86d4aaf 3514 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3515 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3516 return;
3517 }
0d62e5e8 3518
0bfdf32f
GB
3519 saved_thread = current_thread;
3520 current_thread = thread;
0d62e5e8
DJ
3521
3522 if (debug_threads)
87ce2a04 3523 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3524 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3525 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3526
3527 /* This bit needs some thinking about. If we get a signal that
3528 we must report while a single-step reinsert is still pending,
3529 we often end up resuming the thread. It might be better to
3530 (ew) allow a stack of pending events; then we could be sure that
3531 the reinsert happened right away and not lose any signals.
3532
3533 Making this stack would also shrink the window in which breakpoints are
54a0b537 3534 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3535 complete correctness, so it won't solve that problem. It may be
3536 worthwhile just to solve this one, however. */
54a0b537 3537 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3538 {
3539 if (debug_threads)
87ce2a04
DE
3540 debug_printf (" pending reinsert at 0x%s\n",
3541 paddress (lwp->bp_reinsert));
d50171e4 3542
85e00e85 3543 if (can_hardware_single_step ())
d50171e4 3544 {
fa593d66
PA
3545 if (fast_tp_collecting == 0)
3546 {
3547 if (step == 0)
3548 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3549 if (lwp->suspended)
3550 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3551 lwp->suspended);
3552 }
d50171e4
PA
3553
3554 step = 1;
3555 }
0d62e5e8
DJ
3556
3557 /* Postpone any pending signal. It was enqueued above. */
3558 signal = 0;
3559 }
3560
fa593d66
PA
3561 if (fast_tp_collecting == 1)
3562 {
3563 if (debug_threads)
87ce2a04
DE
3564 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3565 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3566 lwpid_of (thread));
fa593d66
PA
3567
3568 /* Postpone any pending signal. It was enqueued above. */
3569 signal = 0;
3570 }
3571 else if (fast_tp_collecting == 2)
3572 {
3573 if (debug_threads)
87ce2a04
DE
3574 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3575 " single-stepping\n",
d86d4aaf 3576 lwpid_of (thread));
fa593d66
PA
3577
3578 if (can_hardware_single_step ())
3579 step = 1;
3580 else
38e08fca
GB
3581 {
3582 internal_error (__FILE__, __LINE__,
3583 "moving out of jump pad single-stepping"
3584 " not implemented on this target");
3585 }
fa593d66
PA
3586
3587 /* Postpone any pending signal. It was enqueued above. */
3588 signal = 0;
3589 }
3590
219f2f23
PA
3591 /* If we have while-stepping actions in this thread set it stepping.
3592 If we have a signal to deliver, it may or may not be set to
3593 SIG_IGN, we don't know. Assume so, and allow collecting
3594 while-stepping into a signal handler. A possible smart thing to
3595 do would be to set an internal breakpoint at the signal return
3596 address, continue, and carry on catching this while-stepping
3597 action only when that breakpoint is hit. A future
3598 enhancement. */
d86d4aaf 3599 if (thread->while_stepping != NULL
219f2f23
PA
3600 && can_hardware_single_step ())
3601 {
3602 if (debug_threads)
87ce2a04 3603 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3604 lwpid_of (thread));
219f2f23
PA
3605 step = 1;
3606 }
3607
582511be 3608 if (the_low_target.get_pc != NULL)
0d62e5e8 3609 {
0bfdf32f 3610 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
3611
3612 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3613
3614 if (debug_threads)
3615 {
3616 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3617 (long) lwp->stop_pc);
3618 }
0d62e5e8
DJ
3619 }
3620
fa593d66
PA
3621 /* If we have pending signals, consume one unless we are trying to
3622 reinsert a breakpoint or we're trying to finish a fast tracepoint
3623 collect. */
3624 if (lwp->pending_signals != NULL
3625 && lwp->bp_reinsert == 0
3626 && fast_tp_collecting == 0)
0d62e5e8
DJ
3627 {
3628 struct pending_signals **p_sig;
3629
54a0b537 3630 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3631 while ((*p_sig)->prev != NULL)
3632 p_sig = &(*p_sig)->prev;
3633
3634 signal = (*p_sig)->signal;
32ca6d61 3635 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3636 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3637 &(*p_sig)->info);
32ca6d61 3638
0d62e5e8
DJ
3639 free (*p_sig);
3640 *p_sig = NULL;
3641 }
3642
aa5ca48f
DE
3643 if (the_low_target.prepare_to_resume != NULL)
3644 the_low_target.prepare_to_resume (lwp);
3645
d86d4aaf 3646 regcache_invalidate_thread (thread);
da6d8c04 3647 errno = 0;
54a0b537 3648 lwp->stepping = step;
d86d4aaf 3649 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3650 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3651 /* Coerce to a uintptr_t first to avoid potential gcc warning
3652 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3653 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 3654
0bfdf32f 3655 current_thread = saved_thread;
da6d8c04 3656 if (errno)
23f238d3
PA
3657 perror_with_name ("resuming thread");
3658
3659 /* Successfully resumed. Clear state that no longer makes sense,
3660 and mark the LWP as running. Must not do this before resuming
3661 otherwise if that fails other code will be confused. E.g., we'd
3662 later try to stop the LWP and hang forever waiting for a stop
3663 status. Note that we must not throw after this is cleared,
3664 otherwise handle_zombie_lwp_error would get confused. */
3665 lwp->stopped = 0;
3666 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3667}
3668
3669/* Called when we try to resume a stopped LWP and that errors out. If
3670 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3671 or about to become), discard the error, clear any pending status
3672 the LWP may have, and return true (we'll collect the exit status
3673 soon enough). Otherwise, return false. */
3674
3675static int
3676check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3677{
3678 struct thread_info *thread = get_lwp_thread (lp);
3679
3680 /* If we get an error after resuming the LWP successfully, we'd
3681 confuse !T state for the LWP being gone. */
3682 gdb_assert (lp->stopped);
3683
3684 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3685 because even if ptrace failed with ESRCH, the tracee may be "not
3686 yet fully dead", but already refusing ptrace requests. In that
3687 case the tracee has 'R (Running)' state for a little bit
3688 (observed in Linux 3.18). See also the note on ESRCH in the
3689 ptrace(2) man page. Instead, check whether the LWP has any state
3690 other than ptrace-stopped. */
3691
3692 /* Don't assume anything if /proc/PID/status can't be read. */
3693 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3694 {
23f238d3
PA
3695 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3696 lp->status_pending_p = 0;
3697 return 1;
3698 }
3699 return 0;
3700}
3701
3702/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3703 disappears while we try to resume it. */
3221518c 3704
23f238d3
PA
3705static void
3706linux_resume_one_lwp (struct lwp_info *lwp,
3707 int step, int signal, siginfo_t *info)
3708{
3709 TRY
3710 {
3711 linux_resume_one_lwp_throw (lwp, step, signal, info);
3712 }
3713 CATCH (ex, RETURN_MASK_ERROR)
3714 {
3715 if (!check_ptrace_stopped_lwp_gone (lwp))
3716 throw_exception (ex);
3221518c 3717 }
23f238d3 3718 END_CATCH
da6d8c04
DJ
3719}
3720
2bd7c093
PA
3721struct thread_resume_array
3722{
3723 struct thread_resume *resume;
3724 size_t n;
3725};
64386c31 3726
ebcf782c
DE
3727/* This function is called once per thread via find_inferior.
3728 ARG is a pointer to a thread_resume_array struct.
3729 We look up the thread specified by ENTRY in ARG, and mark the thread
3730 with a pointer to the appropriate resume request.
5544ad89
DJ
3731
3732 This algorithm is O(threads * resume elements), but resume elements
3733 is small (and will remain small at least until GDB supports thread
3734 suspension). */
ebcf782c 3735
2bd7c093
PA
3736static int
3737linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3738{
d86d4aaf
DE
3739 struct thread_info *thread = (struct thread_info *) entry;
3740 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3741 int ndx;
2bd7c093 3742 struct thread_resume_array *r;
64386c31 3743
2bd7c093 3744 r = arg;
64386c31 3745
2bd7c093 3746 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3747 {
3748 ptid_t ptid = r->resume[ndx].thread;
3749 if (ptid_equal (ptid, minus_one_ptid)
3750 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
3751 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3752 of PID'. */
d86d4aaf 3753 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
3754 && (ptid_is_pid (ptid)
3755 || ptid_get_lwp (ptid) == -1)))
95954743 3756 {
d50171e4 3757 if (r->resume[ndx].kind == resume_stop
8336d594 3758 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3759 {
3760 if (debug_threads)
87ce2a04
DE
3761 debug_printf ("already %s LWP %ld at GDB's request\n",
3762 (thread->last_status.kind
3763 == TARGET_WAITKIND_STOPPED)
3764 ? "stopped"
3765 : "stopping",
d86d4aaf 3766 lwpid_of (thread));
d50171e4
PA
3767
3768 continue;
3769 }
3770
95954743 3771 lwp->resume = &r->resume[ndx];
8336d594 3772 thread->last_resume_kind = lwp->resume->kind;
fa593d66 3773
c2d6af84
PA
3774 lwp->step_range_start = lwp->resume->step_range_start;
3775 lwp->step_range_end = lwp->resume->step_range_end;
3776
fa593d66
PA
3777 /* If we had a deferred signal to report, dequeue one now.
3778 This can happen if LWP gets more than one signal while
3779 trying to get out of a jump pad. */
3780 if (lwp->stopped
3781 && !lwp->status_pending_p
3782 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3783 {
3784 lwp->status_pending_p = 1;
3785
3786 if (debug_threads)
87ce2a04
DE
3787 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3788 "leaving status pending.\n",
d86d4aaf
DE
3789 WSTOPSIG (lwp->status_pending),
3790 lwpid_of (thread));
fa593d66
PA
3791 }
3792
95954743
PA
3793 return 0;
3794 }
3795 }
2bd7c093
PA
3796
3797 /* No resume action for this thread. */
3798 lwp->resume = NULL;
64386c31 3799
2bd7c093 3800 return 0;
5544ad89
DJ
3801}
3802
20ad9378
DE
3803/* find_inferior callback for linux_resume.
3804 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 3805
bd99dc85
PA
3806static int
3807resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3808{
d86d4aaf
DE
3809 struct thread_info *thread = (struct thread_info *) entry;
3810 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3811
bd99dc85
PA
3812 /* LWPs which will not be resumed are not interesting, because
3813 we might not wait for them next time through linux_wait. */
2bd7c093 3814 if (lwp->resume == NULL)
bd99dc85 3815 return 0;
64386c31 3816
582511be 3817 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
3818 * (int *) flag_p = 1;
3819
3820 return 0;
3821}
3822
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Returning 1 makes find_inferior stop
   scanning; every early-out returns 0.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* GDB asked this thread to stay stopped; no stepping needed.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
      /* NOTE(review): unlike every other early-out branch in this
	 function, this one does not return 0 — control falls through
	 to the pending-status and breakpoint checks below even though
	 NEED_STEP_OVER is clear.  Confirm whether the missing return
	 is intentional.  */
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* The breakpoint queries below are per-process; temporarily switch
     the current thread, restoring it on every exit path.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
3949
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Always returns 1; step_over_bkpt records which LWP the next event
   must come from.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Suspend everything but LWP; stop_all_lwps must leave LWP itself
     unsuspended, which the assert below checks.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* The breakpoint manipulation below operates on the current thread;
     switch and restore around it.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint once the step-over
     finishes (see finish_step_over).  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests and resume normally.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4018
4019/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4020 start_step_over, if still there, and delete any reinsert
4021 breakpoints we've set, on non hardware single-step targets. */
4022
4023static int
4024finish_step_over (struct lwp_info *lwp)
4025{
4026 if (lwp->bp_reinsert != 0)
4027 {
4028 if (debug_threads)
87ce2a04 4029 debug_printf ("Finished step over.\n");
d50171e4
PA
4030
4031 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4032 may be no breakpoint to reinsert there by now. */
4033 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4034 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4035
4036 lwp->bp_reinsert = 0;
4037
4038 /* Delete any software-single-step reinsert breakpoints. No
4039 longer needed. We don't have to worry about other threads
4040 hitting this trap, and later not being able to explain it,
4041 because we were stepping over a breakpoint, and we hold all
4042 threads but LWP stopped while doing that. */
4043 if (!can_hardware_single_step ())
4044 delete_reinsert_breakpoints ();
4045
4046 step_over_bkpt = null_ptid;
4047 return 1;
4048 }
4049 else
4050 return 0;
4051}
4052
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ARG points to the LEAVE_ALL_STOPPED flag computed by linux_resume.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* linux_set_resume_request left RESUME NULL for threads with no
     matching vCont action.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  /* Mark the request consumed either way.  */
  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4174
/* Top-level resume entry point: apply the N resume requests in
   RESUME_INFO to all threads, deciding between resuming everything,
   holding everything for a pending status, or running a single
   step-over.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each thread's matching resume request (or NULL).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  /* Kick off the step-over last, after all other threads have been
     left in the desired state.  */
  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
4238
4239/* This function is called once per thread. We check the thread's
4240 last resume request, which will tell us whether to resume, step, or
4241 leave the thread stopped. Any signal the client requested to be
4242 delivered has already been enqueued at this point.
4243
4244 If any thread that GDB wants running is stopped at an internal
4245 breakpoint that needs stepping over, we start a step-over operation
4246 on that particular thread, and leave all others stopped. */
4247
7984d532
PA
4248static int
4249proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 4250{
d86d4aaf
DE
4251 struct thread_info *thread = (struct thread_info *) entry;
4252 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4253 int step;
4254
7984d532
PA
4255 if (lwp == except)
4256 return 0;
d50171e4
PA
4257
4258 if (debug_threads)
d86d4aaf 4259 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4260
4261 if (!lwp->stopped)
4262 {
4263 if (debug_threads)
d86d4aaf 4264 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
7984d532 4265 return 0;
d50171e4
PA
4266 }
4267
02fc4de7
PA
4268 if (thread->last_resume_kind == resume_stop
4269 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4270 {
4271 if (debug_threads)
87ce2a04 4272 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4273 lwpid_of (thread));
7984d532 4274 return 0;
d50171e4
PA
4275 }
4276
4277 if (lwp->status_pending_p)
4278 {
4279 if (debug_threads)
87ce2a04 4280 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4281 lwpid_of (thread));
7984d532 4282 return 0;
d50171e4
PA
4283 }
4284
7984d532
PA
4285 gdb_assert (lwp->suspended >= 0);
4286
d50171e4
PA
4287 if (lwp->suspended)
4288 {
4289 if (debug_threads)
d86d4aaf 4290 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
7984d532 4291 return 0;
d50171e4
PA
4292 }
4293
1a981360
PA
4294 if (thread->last_resume_kind == resume_stop
4295 && lwp->pending_signals_to_report == NULL
4296 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
4297 {
4298 /* We haven't reported this LWP as stopped yet (otherwise, the
4299 last_status.kind check above would catch it, and we wouldn't
4300 reach here. This LWP may have been momentarily paused by a
4301 stop_all_lwps call while handling for example, another LWP's
4302 step-over. In that case, the pending expected SIGSTOP signal
4303 that was queued at vCont;t handling time will have already
4304 been consumed by wait_for_sigstop, and so we need to requeue
4305 another one here. Note that if the LWP already has a SIGSTOP
4306 pending, this is a no-op. */
4307
4308 if (debug_threads)
87ce2a04
DE
4309 debug_printf ("Client wants LWP %ld to stop. "
4310 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 4311 lwpid_of (thread));
02fc4de7
PA
4312
4313 send_sigstop (lwp);
4314 }
4315
8336d594 4316 step = thread->last_resume_kind == resume_step;
d50171e4 4317 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4318 return 0;
4319}
4320
4321static int
4322unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4323{
d86d4aaf
DE
4324 struct thread_info *thread = (struct thread_info *) entry;
4325 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4326
4327 if (lwp == except)
4328 return 0;
4329
4330 lwp->suspended--;
4331 gdb_assert (lwp->suspended >= 0);
4332
4333 return proceed_one_lwp (entry, except);
d50171e4
PA
4334}
4335
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct thread_info *) find_inferior (&all_threads,
						need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  /* Run the step-over instead of resuming everyone; the
	     remaining threads are proceeded once it finishes.  */
	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  /* No step-over pending: let every eligible LWP run.  */
  find_inferior (&all_threads, proceed_one_lwp, NULL);
}
4373
4374/* Stopped LWPs that the client wanted to be running, that don't have
4375 pending statuses, are set to run again, except for EXCEPT, if not
4376 NULL. This undoes a stop_all_lwps call. */
4377
4378static void
7984d532 4379unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4380{
5544ad89
DJ
4381 if (debug_threads)
4382 {
87ce2a04 4383 debug_enter ();
d50171e4 4384 if (except)
87ce2a04 4385 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4386 lwpid_of (get_lwp_thread (except)));
5544ad89 4387 else
87ce2a04 4388 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4389 }
4390
7984d532 4391 if (unsuspend)
d86d4aaf 4392 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4393 else
d86d4aaf 4394 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4395
4396 if (debug_threads)
4397 {
4398 debug_printf ("unstop_all_lwps done\n");
4399 debug_exit ();
4400 }
0d62e5e8
DJ
4401}
4402
58caa3dc
DJ
4403
4404#ifdef HAVE_LINUX_REGSETS
4405
1faeff08
MR
4406#define use_linux_regsets 1
4407
030031ee
PA
4408/* Returns true if REGSET has been disabled. */
4409
4410static int
4411regset_disabled (struct regsets_info *info, struct regset_info *regset)
4412{
4413 return (info->disabled_regsets != NULL
4414 && info->disabled_regsets[regset - info->regsets]);
4415}
4416
4417/* Disable REGSET. */
4418
4419static void
4420disable_regset (struct regsets_info *info, struct regset_info *regset)
4421{
4422 int dr_offset;
4423
4424 dr_offset = regset - info->regsets;
4425 if (info->disabled_regsets == NULL)
4426 info->disabled_regsets = xcalloc (1, info->num_regsets);
4427 info->disabled_regsets[dr_offset] = 1;
4428}
4429
/* Fetch registers for the current thread via the regset ptrace
   requests in REGSETS_INFO, storing them into REGCACHE.  Returns 0 if
   a general-registers regset was successfully read, 1 otherwise (so
   the caller can fall back to other methods).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with negative size.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* A nonzero NT_* note type selects the PTRACE_GETREGSET-style
	 interface, which takes an iovec instead of a raw buffer.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the data and addr arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Hand the raw kernel buffer to the regset's decoder.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4501
/* Write registers for the current thread from REGCACHE back to the
   inferior via the regset ptrace requests in REGSETS_INFO.  Returns 0
   if a general-registers regset was handled (or the process is
   already gone), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with negative size.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* NT_* regsets use the iovec-based GETREGSET/SETREGSET
	     interface.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the data and addr arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4589
1faeff08 4590#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4591
1faeff08 4592#define use_linux_regsets 0
3aee8918
PA
4593#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4594#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4595
58caa3dc 4596#endif
1faeff08
MR
4597
4598/* Return 1 if register REGNO is supported by one of the regset ptrace
4599 calls or 0 if it has to be transferred individually. */
4600
4601static int
3aee8918 4602linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4603{
4604 unsigned char mask = 1 << (regno % 8);
4605 size_t index = regno / 8;
4606
4607 return (use_linux_regsets
3aee8918
PA
4608 && (regs_info->regset_bitmap == NULL
4609 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4610}
4611
58caa3dc 4612#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4613
4614int
3aee8918 4615register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4616{
4617 int addr;
4618
3aee8918 4619 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4620 error ("Invalid register number %d.", regnum);
4621
3aee8918 4622 addr = usrregs->regmap[regnum];
1faeff08
MR
4623
4624 return addr;
4625}
4626
/* Fetch one register, REGNO, for the current thread via
   PTRACE_PEEKUSER and supply it to REGCACHE.  Silently returns for
   registers that are out of range, unfetchable, or unmapped.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  /* -1 in the regmap marks a register with no USER-area slot.  */
  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  /* Read the register one ptrace word at a time.  */
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      /* PEEKUSER returns the data in the return value, so errors are
	 only detectable via errno.  */
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4670
/* Store one register.  Writes register REGNO from REGCACHE into the
   current LWP via PTRACE_POKEUSER, one PTRACE_XFER_TYPE word at a
   time.  Silently returns if REGNO is out of range, cannot be stored,
   or has no ptrace address.  A vanished inferior (ESRCH) is ignored;
   other ptrace failures error out unless the low target says the
   register is unstorable anyway.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round up to whole ptrace words; the padding bytes are zeroed so
     the final partial word writes deterministic data.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Optional low-target hook mirrors supply_ptrace_register on the
     fetch side.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4725
4726/* Fetch all registers, or just one, from the child process.
4727 If REGNO is -1, do this for all registers, skipping any that are
4728 assumed to have been retrieved by regsets_fetch_inferior_registers,
4729 unless ALL is non-zero.
4730 Otherwise, REGNO specifies which register (so we can save time). */
4731static void
3aee8918
PA
4732usr_fetch_inferior_registers (const struct regs_info *regs_info,
4733 struct regcache *regcache, int regno, int all)
1faeff08 4734{
3aee8918
PA
4735 struct usrregs_info *usr = regs_info->usrregs;
4736
1faeff08
MR
4737 if (regno == -1)
4738 {
3aee8918
PA
4739 for (regno = 0; regno < usr->num_regs; regno++)
4740 if (all || !linux_register_in_regsets (regs_info, regno))
4741 fetch_register (usr, regcache, regno);
1faeff08
MR
4742 }
4743 else
3aee8918 4744 fetch_register (usr, regcache, regno);
1faeff08
MR
4745}
4746
4747/* Store our register values back into the inferior.
4748 If REGNO is -1, do this for all registers, skipping any that are
4749 assumed to have been saved by regsets_store_inferior_registers,
4750 unless ALL is non-zero.
4751 Otherwise, REGNO specifies which register (so we can save time). */
4752static void
3aee8918
PA
4753usr_store_inferior_registers (const struct regs_info *regs_info,
4754 struct regcache *regcache, int regno, int all)
1faeff08 4755{
3aee8918
PA
4756 struct usrregs_info *usr = regs_info->usrregs;
4757
1faeff08
MR
4758 if (regno == -1)
4759 {
3aee8918
PA
4760 for (regno = 0; regno < usr->num_regs; regno++)
4761 if (all || !linux_register_in_regsets (regs_info, regno))
4762 store_register (usr, regcache, regno);
1faeff08
MR
4763 }
4764 else
3aee8918 4765 store_register (usr, regcache, regno);
1faeff08
MR
4766}
4767
#else /* !HAVE_LINUX_USRREGS */

/* Without PTRACE_PEEKUSER support, the regsets interface handles all
   register traffic, so the usrregs entry points expand to no-ops.  */
#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)

#endif
1faeff08
MR
4774
4775
/* Fetch registers from the inferior into REGCACHE.  REGNO is a
   specific register, or -1 for all registers.  Tries, in order: the
   low target's fetch_register hook, the regsets interface, and
   finally PTRACE_PEEKUSER for anything the regsets did not cover.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the low target first crack at each register.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL becomes non-zero if some regset failed, in which case the
	 usrregs pass below re-fetches everything, not just the
	 registers outside the regsets.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A non-zero return from the hook means it handled REGNO.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4808
/* Store registers from REGCACHE back into the inferior.  REGNO is a
   specific register, or -1 for all registers.  Mirrors
   linux_fetch_registers: regsets first, then PTRACE_POKEUSER for
   whatever the regsets did not cover (or everything, on regset
   failure).  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Non-zero ALL means some regset failed; re-store everything
	 via usrregs below.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4833
da6d8c04 4834
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno of the failing ptrace word read.  Prefers /proc/PID/mem
   (one syscall for the whole range); falls back to word-at-a-time
   PTRACE_PEEKTEXT for whatever /proc could not supply.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  errno distinguishes failure from a
     legitimate -1 word value; stop at the first failing word but keep
     what was read so far.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer, compensating for the
     word-alignment of the first read relative to MEMADDR.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
4922
93ae6fdc
PA
4923/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4924 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 4925 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 4926
ce3a066d 4927static int
f450004a 4928linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4929{
4930 register int i;
4931 /* Round starting address down to longword boundary. */
4932 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4933 /* Round ending address up; get number of longwords that makes. */
4934 register int count
493e2a69
MS
4935 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4936 / sizeof (PTRACE_XFER_TYPE);
4937
da6d8c04 4938 /* Allocate buffer of that many longwords. */
493e2a69
MS
4939 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4940 alloca (count * sizeof (PTRACE_XFER_TYPE));
4941
0bfdf32f 4942 int pid = lwpid_of (current_thread);
da6d8c04 4943
f0ae6fc3
PA
4944 if (len == 0)
4945 {
4946 /* Zero length write always succeeds. */
4947 return 0;
4948 }
4949
0d62e5e8
DJ
4950 if (debug_threads)
4951 {
58d6951d
DJ
4952 /* Dump up to four bytes. */
4953 unsigned int val = * (unsigned int *) myaddr;
4954 if (len == 1)
4955 val = val & 0xff;
4956 else if (len == 2)
4957 val = val & 0xffff;
4958 else if (len == 3)
4959 val = val & 0xffffff;
87ce2a04
DE
4960 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4961 val, (long)memaddr);
0d62e5e8
DJ
4962 }
4963
da6d8c04
DJ
4964 /* Fill start and end extra bytes of buffer with existing memory data. */
4965
93ae6fdc 4966 errno = 0;
14ce3065
DE
4967 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4968 about coercing an 8 byte integer to a 4 byte pointer. */
4969 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
4970 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4971 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4972 if (errno)
4973 return errno;
da6d8c04
DJ
4974
4975 if (count > 1)
4976 {
93ae6fdc 4977 errno = 0;
da6d8c04 4978 buffer[count - 1]
95954743 4979 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4980 /* Coerce to a uintptr_t first to avoid potential gcc warning
4981 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4982 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 4983 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 4984 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4985 if (errno)
4986 return errno;
da6d8c04
DJ
4987 }
4988
93ae6fdc 4989 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4990
493e2a69
MS
4991 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4992 myaddr, len);
da6d8c04
DJ
4993
4994 /* Write the entire buffer. */
4995
4996 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4997 {
4998 errno = 0;
14ce3065
DE
4999 ptrace (PTRACE_POKETEXT, pid,
5000 /* Coerce to a uintptr_t first to avoid potential gcc warning
5001 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5002 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5003 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5004 if (errno)
5005 return errno;
5006 }
5007
5008 return 0;
5009}
2f2893d9
DJ
5010
/* Hook up libthread_db for the current process, once, so gdbserver
   can enumerate and debug its threads.  No-op when built without
   USE_THREAD_DB or when thread_db is already initialized.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5026
/* Target op: interrupt the inferior, as if the user pressed ^C.  */
static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  (Negative pid targets
     the whole process group.)  */
  kill (-signal_pid, SIGINT);
}
5036
aa691b87
RM
5037/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5038 to debugger memory starting at MYADDR. */
5039
5040static int
f450004a 5041linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5042{
5043 char filename[PATH_MAX];
5044 int fd, n;
0bfdf32f 5045 int pid = lwpid_of (current_thread);
aa691b87 5046
6cebaf6e 5047 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5048
5049 fd = open (filename, O_RDONLY);
5050 if (fd < 0)
5051 return -1;
5052
5053 if (offset != (CORE_ADDR) 0
5054 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5055 n = -1;
5056 else
5057 n = read (fd, myaddr, len);
5058
5059 close (fd);
5060
5061 return n;
5062}
5063
d993e290
PA
5064/* These breakpoint and watchpoint related wrapper functions simply
5065 pass on the function call if the target has registered a
5066 corresponding function. */
e013ee27
OF
5067
5068static int
802e8e6d
PA
5069linux_supports_z_point_type (char z_type)
5070{
5071 return (the_low_target.supports_z_point_type != NULL
5072 && the_low_target.supports_z_point_type (z_type));
5073}
5074
5075static int
5076linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5077 int size, struct raw_breakpoint *bp)
e013ee27 5078{
d993e290 5079 if (the_low_target.insert_point != NULL)
802e8e6d 5080 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5081 else
5082 /* Unsupported (see target.h). */
5083 return 1;
5084}
5085
5086static int
802e8e6d
PA
5087linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5088 int size, struct raw_breakpoint *bp)
e013ee27 5089{
d993e290 5090 if (the_low_target.remove_point != NULL)
802e8e6d 5091 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5092 else
5093 /* Unsupported (see target.h). */
5094 return 1;
5095}
5096
3e572f71
PA
/* Implement the to_stopped_by_sw_breakpoint target_ops
   method.  Reports whether the current LWP last stopped because of a
   software breakpoint.  */

static int
linux_stopped_by_sw_breakpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  True when the kernel reports the trap cause via
   siginfo (USE_SIGTRAP_SIGINFO).  */

static int
linux_supports_stopped_by_sw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the to_stopped_by_hw_breakpoint target_ops
   method.  Reports whether the current LWP last stopped because of a
   hardware breakpoint.  */

static int
linux_stopped_by_hw_breakpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  */

static int
linux_supports_stopped_by_hw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}

/* Target op: did the current LWP stop because a watchpoint
   triggered?  */
static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Target op: address that triggered the watchpoint, recorded when
   the stop was processed.  */
static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5152
db0dfaa0
LM
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_thread));

  /* A single errno check covers all three PEEKUSER reads below.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif
5199
07e059b5
VP
/* Target op: handle qXfer:osdata reads by delegating to the shared
   linux-osdata implementation.  WRITEBUF is accepted for interface
   symmetry but writing osdata is not supported.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5207
d0722149
DE
5208/* Convert a native/host siginfo object, into/from the siginfo in the
5209 layout of the inferiors' architecture. */
5210
5211static void
a5362b9a 5212siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5213{
5214 int done = 0;
5215
5216 if (the_low_target.siginfo_fixup != NULL)
5217 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5218
5219 /* If there was no callback, or the callback didn't do anything,
5220 then just do a straight memcpy. */
5221 if (!done)
5222 {
5223 if (direction == 1)
a5362b9a 5224 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5225 else
a5362b9a 5226 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5227 }
5228}
5229
4aa995e1
PA
/* Target op: read or write the pending signal's siginfo for the
   current LWP (qXfer:siginfo).  Exactly one of READBUF/WRITEBUF is
   non-NULL.  Returns the number of bytes transferred, or -1 on
   error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo even for a write, so a partial write
     only modifies the bytes in [OFFSET, OFFSET+LEN).  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5278
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Only async-signal-safe calls (write,
   async_file_mark's pipe write) are made here, and errno is
   preserved across the handler.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
5305
/* Target op: the Linux target always supports non-stop mode.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
5311
/* Enable or disable async (non-stop) event handling.  Creates or
   tears down the self-pipe that wakes the event loop from
   sigchld_handler.  Returns the previous async state.  SIGCHLD is
   blocked across the transition so the handler never sees a
   half-initialized pipe.  */
static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Pipe creation failed: stay in the previous mode.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Non-blocking so the signal handler's write never stalls
	     and a full pipe is simply coalesced.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5366
/* Target op: switch non-stop mode on or off.  Returns 0 on success,
   -1 if the async machinery could not be brought into the requested
   state.  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  return (target_is_async_p () != (nonstop != 0)) ? -1 : 0;
}
5378
cf8fd78b
PA
/* Target op: multiple simultaneous inferiors are supported.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}

/* Target op: address-space randomization can be disabled when the
   host provides personality(2) support.  */
static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

/* Target op: the in-process agent is supported.  */
static int
linux_supports_agent (void)
{
  return 1;
}
5400
c2d6af84
PA
5401static int
5402linux_supports_range_stepping (void)
5403{
5404 if (*the_low_target.supports_range_stepping == NULL)
5405 return 0;
5406
5407 return (*the_low_target.supports_range_stepping) ();
5408}
5409
efcbbd14
UW
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for file
   descriptors that are directories on an spufs filesystem and packs
   each such fd number as a 4-byte value into BUF, honoring the
   [OFFSET, OFFSET+LEN) window.  Returns the number of bytes written
   into BUF, or -1 if /proc/PID/fd cannot be opened.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip non-numeric entries ("." and "..") -- atoi yields 0,
	 and fd 0 is never an SPU context.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      /* Only descriptors living on an spufs mount qualify.  */
      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Emit the id only when it lies entirely inside the window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
5458
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  An empty ANNEX means
   "enumerate SPU context ids"; otherwise ANNEX names an entry under
   /proc/PID/fd to read or write.  Returns bytes transferred, 0 on a
   failed seek, or -1 on error.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_thread);
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      /* Enumeration is read-only.  */
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
5502
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Loadmap layouts shared with the kernel ptrace interface on
   no-MMU/FDPIC targets; used by linux_read_loadmap below.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped. */
  Elf32_Addr addr;
  /* VMA recorded in the program header. */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory. */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
/* DSBT variant (e.g. TIC6X): includes the DSBT table description.  */
struct target_loadmap
{
  /* Protocol version number, must be zero. */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index. */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map. */
  Elf32_Word nsegs;
  /* The actual memory map. */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
/* FDPIC variant (e.g. FR-V, Blackfin).  */
struct target_loadmap
{
  /* Protocol version number, must be zero. */
  Elf32_Half version;
  /* Number of segments in this map. */
  Elf32_Half nsegs;
  /* The actual memory map. */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 5544
78d85199
YQ
/* Target op: read the load map (qXfer:fdpic) of the executable
   ("exec") or its interpreter ("interp") via the kernel's
   loadmap ptrace request.  Returns bytes copied, or -1 on error.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel hands back a pointer to its map in DATA.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is unsigned (CORE_ADDR), so the "< 0" arm
     can never fire; the "> actual_length" check is what matters.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5580
1570b33e
L
5581static void
5582linux_process_qsupported (const char *query)
5583{
5584 if (the_low_target.process_qsupported != NULL)
5585 the_low_target.process_qsupported (query);
5586}
5587
219f2f23
PA
/* Target op: report whether the low target implements tracepoints.  */
static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}

/* Target op: read the program counter out of REGCACHE via the low
   target; 0 when the low target has no get_pc hook.  */
static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

/* Target op: write PC into REGCACHE via the low target, which must
   provide set_pc.  */
static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
5613
8336d594
PA
/* Target op: report whether THREAD's LWP is currently stopped.  */
static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

/* Target op: quiesce the inferior before a memory access.  Returns
   0 (success).  */
static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

/* Target op: undo linux_prepare_to_access_memory.  */
static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
5655
fa593d66
PA
/* Target op: build the jump pad for a fast tracepoint by delegating
   to the low target's (mandatory for this path) hook; all parameters
   are passed through unchanged.  */
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
5677
6a271cae
PA
5678static struct emit_ops *
5679linux_emit_ops (void)
5680{
5681 if (the_low_target.emit_ops != NULL)
5682 return (*the_low_target.emit_ops) ();
5683 else
5684 return NULL;
5685}
5686
405f8e94
SS
/* Target op: minimum instruction length usable for a fast tracepoint
   jump, as reported by the low target (hook must be present).  */
static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
5692
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for the AT_PHDR and AT_PHNUM entries, using
   the 64- or 32-bit auxv record layout according to IS_ELF64.
   Returns 1 if the auxv file cannot be opened, 2 if either entry is
   missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two. */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop early once both entries have been seen.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
5758
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  /* Read the whole program header table from the inferior in one go.  */
  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from PT_PHDR, whose p_vaddr is the
     link-time address of the table we just read from PHDR_MEMADDR.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */
      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and return its relocated address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
5832
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL or a read
     failure.  DT_MIPS_RLD_MAP wins over DT_DEBUG when both exist.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP's value is a pointer to the r_debug address;
	     dereference it through this union to avoid aliasing issues.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  /* Same as the 64-bit case above, with 32-bit layout.  */
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
5912
5913/* Read one pointer from MEMADDR in the inferior. */
5914
5915static int
5916read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5917{
485f1ee4
PA
5918 int ret;
5919
5920 /* Go through a union so this works on either big or little endian
5921 hosts, when the inferior's pointer size is smaller than the size
5922 of CORE_ADDR. It is assumed the inferior's endianness is the
5923 same of the superior's. */
5924 union
5925 {
5926 CORE_ADDR core_addr;
5927 unsigned int ui;
5928 unsigned char uc;
5929 } addr;
5930
5931 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5932 if (ret == 0)
5933 {
5934 if (ptr_size == sizeof (CORE_ADDR))
5935 *ptr = addr.core_addr;
5936 else if (ptr_size == sizeof (unsigned int))
5937 *ptr = addr.ui;
5938 else
5939 gdb_assert_not_reached ("unhandled pointer size");
5940 }
5941 return ret;
2268b414
JK
5942}
5943
/* Byte offsets of the fields of the SVR4 dynamic linker's `struct
   r_debug' and `struct link_map' that we need, for one word size.
   Concrete 32- and 64-bit instances are defined in
   linux_qxfer_libraries_svr4 below.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5967
/* Construct qXfer:libraries-svr4:read reply.  ANNEX may carry
   "start=ADDR;prev=ADDR;" arguments to resume a partial list walk.
   Returns the number of bytes copied into READBUF, -1 on error, or
   -2 if a write was requested (unsupported).  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset. */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the 32- or 64-bit layout from the inferior's executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=value;" pairs from the annex; only "start" and "prev"
     are recognized, anything else is skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No explicit start address: find the head of the link map via
     the cached (or freshly located) r_debug.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link map, appending one <library> element per entry.
     Any failed pointer read terminates the walk.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Consistency check: the entry's back pointer must match where
	 we came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Serve the slice [OFFSET, OFFSET+LEN) of the generated document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
6180
9accd112
MM
6181#ifdef HAVE_LINUX_BTRACE
6182
969c39fb 6183/* See to_enable_btrace target method. */
9accd112
MM
6184
6185static struct btrace_target_info *
f4abbc16 6186linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
9accd112
MM
6187{
6188 struct btrace_target_info *tinfo;
6189
f4abbc16 6190 tinfo = linux_enable_btrace (ptid, conf);
3aee8918 6191
d68e53f4 6192 if (tinfo != NULL && tinfo->ptr_bits == 0)
3aee8918
PA
6193 {
6194 struct thread_info *thread = find_thread_ptid (ptid);
6195 struct regcache *regcache = get_thread_regcache (thread, 0);
6196
6197 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6198 }
9accd112
MM
6199
6200 return tinfo;
6201}
6202
969c39fb 6203/* See to_disable_btrace target method. */
9accd112 6204
969c39fb
MM
6205static int
6206linux_low_disable_btrace (struct btrace_target_info *tinfo)
6207{
6208 enum btrace_error err;
6209
6210 err = linux_disable_btrace (tinfo);
6211 return (err == BTRACE_ERR_NONE ? 0 : -1);
6212}
6213
/* See to_read_btrace target method.  Renders the branch trace of
   TINFO as XML into BUFFER, or an "E.<message>" error string.
   Returns 0 on success, -1 on error.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      /* Release the trace data before bailing out.  */
      btrace_data_fini (&btrace);
      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      break;

    case BTRACE_FORMAT_BTS:
      /* Emit one <block> element per recorded branch trace block.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unknown Trace Format.");

      btrace_data_fini (&btrace);
      return -1;
    }

  btrace_data_fini (&btrace);
  return 0;
}
f4abbc16
MM
6268
6269/* See to_btrace_conf target method. */
6270
6271static int
6272linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6273 struct buffer *buffer)
6274{
6275 const struct btrace_config *conf;
6276
6277 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6278 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6279
6280 conf = linux_btrace_conf (tinfo);
6281 if (conf != NULL)
6282 {
6283 switch (conf->format)
6284 {
6285 case BTRACE_FORMAT_NONE:
6286 break;
6287
6288 case BTRACE_FORMAT_BTS:
d33501a5
MM
6289 buffer_xml_printf (buffer, "<bts");
6290 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6291 buffer_xml_printf (buffer, " />\n");
f4abbc16
MM
6292 break;
6293 }
6294 }
6295
6296 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6297 return 0;
6298}
9accd112
MM
6299#endif /* HAVE_LINUX_BTRACE */
6300
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
6308
ce3a066d
DJ
/* The Linux implementation of the target operations vector.  NOTE:
   this is a positional initializer - the order of the entries must
   match the field order of struct target_ops exactly; consult the
   struct definition before inserting or reordering entries.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on uClinux/no-MMU targets.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	/* Hook not implemented here; see target.h for this slot.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};
6394
0d62e5e8
DJ
/* Set up the signal dispositions gdbserver needs before tracing:
   ignore the LinuxThreads/NPTL cancel signal so it does not kill us.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6404
3aee8918
PA
6405#ifdef HAVE_LINUX_REGSETS
6406void
6407initialize_regsets_info (struct regsets_info *info)
6408{
6409 for (info->num_regsets = 0;
6410 info->regsets[info->num_regsets].size >= 0;
6411 info->num_regsets++)
6412 ;
3aee8918
PA
6413}
6414#endif
6415
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, breakpoint data and signal handling, then let the
   architecture-specific code initialize itself.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  /* SIGCHLD drives the event loop; SA_RESTART so interrupted
     syscalls are resumed transparently.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}