]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Make linux_stop_lwp be a shared function
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
da6d8c04 23
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
8bdce1ff 26#include "gdb_wait.h"
da6d8c04 27#include <sys/ptrace.h>
125f8a3d
GB
28#include "nat/linux-ptrace.h"
29#include "nat/linux-procfs.h"
8cc73a39 30#include "nat/linux-personality.h"
da6d8c04
DJ
31#include <signal.h>
32#include <sys/ioctl.h>
33#include <fcntl.h>
0a30fbc4 34#include <unistd.h>
fd500816 35#include <sys/syscall.h>
f9387fc3 36#include <sched.h>
07e059b5
VP
37#include <ctype.h>
38#include <pwd.h>
39#include <sys/types.h>
40#include <dirent.h>
53ce3c39 41#include <sys/stat.h>
efcbbd14 42#include <sys/vfs.h>
1570b33e 43#include <sys/uio.h>
602e3198 44#include "filestuff.h"
c144c7a0 45#include "tracepoint.h"
533b0600 46#include "hostio.h"
957f3f49
DE
47#ifndef ELFMAG0
48/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52#include <elf.h>
53#endif
efcbbd14
UW
54
55#ifndef SPUFS_MAGIC
56#define SPUFS_MAGIC 0x23c9b64e
57#endif
da6d8c04 58
03583c20
UW
59#ifdef HAVE_PERSONALITY
60# include <sys/personality.h>
61# if !HAVE_DECL_ADDR_NO_RANDOMIZE
62# define ADDR_NO_RANDOMIZE 0x0040000
63# endif
64#endif
65
fd462a61
DJ
66#ifndef O_LARGEFILE
67#define O_LARGEFILE 0
68#endif
69
ec8ebe72
DE
70#ifndef W_STOPCODE
71#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72#endif
73
1a981360
PA
74/* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76#ifndef __SIGRTMIN
77#define __SIGRTMIN 32
78#endif
79
db0dfaa0
LM
80/* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83#if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86#if defined(__mcoldfire__)
87/* These are still undefined in 3.10 kernels. */
88#define PT_TEXT_ADDR 49*4
89#define PT_DATA_ADDR 50*4
90#define PT_TEXT_END_ADDR 51*4
91/* BFIN already defines these since at least 2.6.32 kernels. */
92#elif defined(BFIN)
93#define PT_TEXT_ADDR 220
94#define PT_TEXT_END_ADDR 224
95#define PT_DATA_ADDR 228
96/* These are still undefined in 3.10 kernels. */
97#elif defined(__TMS320C6X__)
98#define PT_TEXT_ADDR (0x10000*4)
99#define PT_DATA_ADDR (0x10004*4)
100#define PT_TEXT_END_ADDR (0x10008*4)
101#endif
102#endif
103
9accd112 104#ifdef HAVE_LINUX_BTRACE
125f8a3d 105# include "nat/linux-btrace.h"
734b0e4b 106# include "btrace-common.h"
9accd112
MM
107#endif
108
8365dcf5
TJB
109#ifndef HAVE_ELF32_AUXV_T
110/* Copied from glibc's elf.h. */
111typedef struct
112{
113 uint32_t a_type; /* Entry type */
114 union
115 {
116 uint32_t a_val; /* Integer value */
117 /* We use to have pointer elements added here. We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
120 } a_un;
121} Elf32_auxv_t;
122#endif
123
124#ifndef HAVE_ELF64_AUXV_T
125/* Copied from glibc's elf.h. */
126typedef struct
127{
128 uint64_t a_type; /* Entry type */
129 union
130 {
131 uint64_t a_val; /* Integer value */
132 /* We use to have pointer elements added here. We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
135 } a_un;
136} Elf64_auxv_t;
137#endif
138
05044653
PA
139/* A list of all unknown processes which receive stop signals. Some
140 other process will presumably claim each of these as forked
141 children momentarily. */
24a09b5f 142
05044653
PA
143struct simple_pid_list
144{
145 /* The process ID. */
146 int pid;
147
148 /* The status as reported by waitpid. */
149 int status;
150
151 /* Next in chain. */
152 struct simple_pid_list *next;
153};
154struct simple_pid_list *stopped_pids;
155
156/* Trivial list manipulation functions to keep track of a list of new
157 stopped processes. */
158
159static void
160add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
161{
162 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
163
164 new_pid->pid = pid;
165 new_pid->status = status;
166 new_pid->next = *listp;
167 *listp = new_pid;
168}
169
170static int
171pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
172{
173 struct simple_pid_list **p;
174
175 for (p = listp; *p != NULL; p = &(*p)->next)
176 if ((*p)->pid == pid)
177 {
178 struct simple_pid_list *next = (*p)->next;
179
180 *statusp = (*p)->status;
181 xfree (*p);
182 *p = next;
183 return 1;
184 }
185 return 0;
186}
24a09b5f 187
bde24c0a
PA
188enum stopping_threads_kind
189 {
190 /* Not stopping threads presently. */
191 NOT_STOPPING_THREADS,
192
193 /* Stopping threads. */
194 STOPPING_THREADS,
195
196 /* Stopping and suspending threads. */
197 STOPPING_AND_SUSPENDING_THREADS
198 };
199
200/* This is set while stop_all_lwps is in effect. */
201enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
202
203/* FIXME make into a target method? */
24a09b5f 204int using_threads = 1;
24a09b5f 205
fa593d66
PA
206/* True if we're presently stabilizing threads (moving them out of
207 jump pads). */
208static int stabilizing_threads;
209
2acc282a 210static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 211 int step, int signal, siginfo_t *info);
2bd7c093 212static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
213static void stop_all_lwps (int suspend, struct lwp_info *except);
214static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
215static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
216 int *wstat, int options);
95954743 217static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 218static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 219static int linux_stopped_by_watchpoint (void);
95954743 220static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 221static void proceed_all_lwps (void);
d50171e4 222static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
223static int kill_lwp (unsigned long lwpid, int signo);
224
582511be
PA
225/* When the event-loop is doing a step-over, this points at the thread
226 being stepped. */
227ptid_t step_over_bkpt;
228
d50171e4
PA
229/* True if the low target can hardware single-step. Such targets
230 don't need a BREAKPOINT_REINSERT_ADDR callback. */
231
232static int
233can_hardware_single_step (void)
234{
235 return (the_low_target.breakpoint_reinsert_addr == NULL);
236}
237
238/* True if the low target supports memory breakpoints. If so, we'll
239 have a GET_PC implementation. */
240
241static int
242supports_breakpoints (void)
243{
244 return (the_low_target.get_pc != NULL);
245}
0d62e5e8 246
fa593d66
PA
247/* Returns true if this target can support fast tracepoints. This
248 does not mean that the in-process agent has been loaded in the
249 inferior. */
250
251static int
252supports_fast_tracepoints (void)
253{
254 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
255}
256
c2d6af84
PA
257/* True if LWP is stopped in its stepping range. */
258
259static int
260lwp_in_step_range (struct lwp_info *lwp)
261{
262 CORE_ADDR pc = lwp->stop_pc;
263
264 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
265}
266
0d62e5e8
DJ
267struct pending_signals
268{
269 int signal;
32ca6d61 270 siginfo_t info;
0d62e5e8
DJ
271 struct pending_signals *prev;
272};
611cb4a5 273
bd99dc85
PA
274/* The read/write ends of the pipe registered as waitable file in the
275 event loop. */
276static int linux_event_pipe[2] = { -1, -1 };
277
278/* True if we're currently in async mode. */
279#define target_is_async_p() (linux_event_pipe[0] != -1)
280
02fc4de7 281static void send_sigstop (struct lwp_info *lwp);
fa96cb38 282static void wait_for_sigstop (void);
bd99dc85 283
d0722149
DE
/* Return non-zero if HEADER is a 64-bit ELF file.  On a valid ELF
   header, store the machine type in *MACHINE and return 1 for
   ELFCLASS64 or 0 otherwise; on an invalid header, store EM_NONE and
   return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  int is_elf = (header->e_ident[EI_MAG0] == ELFMAG0
		&& header->e_ident[EI_MAG1] == ELFMAG1
		&& header->e_ident[EI_MAG2] == ELFMAG2
		&& header->e_ident[EI_MAG3] == ELFMAG3);

  if (!is_elf)
    {
      /* Not an ELF image at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
301
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  On
   success, the machine type is stored in *MACHINE.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  ssize_t nread;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Read the ELF header; the descriptor is not needed afterwards,
     whatever the outcome.  */
  nread = read (fd, &header, sizeof (header));
  close (fd);

  if (nread != sizeof (header))
    return 0;

  return elf_64_header_p (&header, machine);
}
325
be07f1a2
PA
326/* Accepts an integer PID; Returns true if the executable PID is
327 running is a 64-bit ELF file.. */
328
329int
214d508e 330linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 331{
d8d2a3ee 332 char file[PATH_MAX];
be07f1a2
PA
333
334 sprintf (file, "/proc/%d/exe", pid);
214d508e 335 return elf_64_file_p (file, machine);
be07f1a2
PA
336}
337
bd99dc85
PA
338static void
339delete_lwp (struct lwp_info *lwp)
340{
fa96cb38
PA
341 struct thread_info *thr = get_lwp_thread (lwp);
342
343 if (debug_threads)
344 debug_printf ("deleting %ld\n", lwpid_of (thr));
345
346 remove_thread (thr);
aa5ca48f 347 free (lwp->arch_private);
bd99dc85
PA
348 free (lwp);
349}
350
95954743
PA
351/* Add a process to the common process list, and set its private
352 data. */
353
354static struct process_info *
355linux_add_process (int pid, int attached)
356{
357 struct process_info *proc;
358
95954743 359 proc = add_process (pid, attached);
fe978cb0 360 proc->priv = xcalloc (1, sizeof (*proc->priv));
95954743 361
3aee8918 362 /* Set the arch when the first LWP stops. */
fe978cb0 363 proc->priv->new_inferior = 1;
3aee8918 364
aa5ca48f 365 if (the_low_target.new_process != NULL)
fe978cb0 366 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 367
95954743
PA
368 return proc;
369}
370
582511be
PA
371static CORE_ADDR get_pc (struct lwp_info *lwp);
372
bd99dc85
PA
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  EVENT_CHILD is the LWP that reported the
   extended event in WSTAT (its waitpid status).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* PTRACE_GETEVENTMSG retrieves the LWP id of the new clone.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.
	 (The initial stop may instead have been recorded earlier in
	 stopped_pids by the SIGCHLD machinery.)  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      /* The clone shares the parent's pid; its own id becomes the
	 lwp part of the ptid.  */
      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}
439
d50171e4
PA
440/* Return the PC as read from the regcache of LWP, without any
441 adjustment. */
442
443static CORE_ADDR
444get_pc (struct lwp_info *lwp)
445{
0bfdf32f 446 struct thread_info *saved_thread;
d50171e4
PA
447 struct regcache *regcache;
448 CORE_ADDR pc;
449
450 if (the_low_target.get_pc == NULL)
451 return 0;
452
0bfdf32f
GB
453 saved_thread = current_thread;
454 current_thread = get_lwp_thread (lwp);
d50171e4 455
0bfdf32f 456 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
457 pc = (*the_low_target.get_pc) (regcache);
458
459 if (debug_threads)
87ce2a04 460 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 461
0bfdf32f 462 current_thread = saved_thread;
d50171e4
PA
463 return pc;
464}
465
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.

   Returns 1 if LWP's stop was caused by a software or hardware
   breakpoint/watchpoint (recording stop_pc and stop_reason in LWP,
   and backing up the PC when needed), 0 otherwise.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  /* Where the PC would be after a software breakpoint trap, once
     adjusted back over the breakpoint instruction.  */
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: the kernel tells us directly via siginfo whether
     the trap came from a software or hardware breakpoint.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware traps report the precise PC; no back-up
		 needed.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* Fallback path: infer the stop reason by checking whether a
     breakpoint is inserted at the (adjusted) PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 607
b3312d80 608static struct lwp_info *
95954743 609add_lwp (ptid_t ptid)
611cb4a5 610{
54a0b537 611 struct lwp_info *lwp;
0d62e5e8 612
54a0b537
PA
613 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
614 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 615
aa5ca48f
DE
616 if (the_low_target.new_thread != NULL)
617 lwp->arch_private = the_low_target.new_thread ();
618
f7667f0d 619 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 620
54a0b537 621 return lwp;
0d62e5e8 622}
611cb4a5 623
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.
   The child is started under PTRACE_TRACEME in its own process
   group; address-space randomization is disabled for it when
   requested by the user.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Disabling ASLR (when requested) must wrap the fork; the cleanup
     restores the original personality afterwards.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; vfork shares the address space until
     exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: prepare for exec while traced.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group so terminal signals
	 don't hit gdbserver.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try the literal path first; fall back to a PATH search only
	 when the file itself was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      /* 0177 mimics the shell's "command not executable" style exit.  */
      _exit (0177);
    }

  /* Parent: restore personality, then record the new process and its
     initial LWP.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* Ptrace options are set once the LWP reports its first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
690
8784d563
PA
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  On success the LWP is registered in our thread list and
   left with a SIGSTOP pending/expected.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* A process that was already in job-control stop before we attached
     needs special handling to be converted into a ptrace stop.  */
  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
775
8784d563
PA
776/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
777 already attached. Returns true if a new LWP is found, false
778 otherwise. */
779
780static int
781attach_proc_task_lwp_callback (ptid_t ptid)
782{
783 /* Is this a new thread? */
784 if (find_thread_ptid (ptid) == NULL)
785 {
786 int lwpid = ptid_get_lwp (ptid);
787 int err;
788
789 if (debug_threads)
790 debug_printf ("Found new lwp %d\n", lwpid);
791
792 err = linux_attach_lwp (ptid);
793
794 /* Be quiet if we simply raced with the thread exiting. EPERM
795 is returned if the thread's task still exists, and is marked
796 as exited or zombie, as well as other conditions, so in that
797 case, confirm the status in /proc/PID/status. */
798 if (err == ESRCH
799 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
800 {
801 if (debug_threads)
802 {
803 debug_printf ("Cannot attach to lwp %d: "
804 "thread is gone (%d: %s)\n",
805 lwpid, err, strerror (err));
806 }
807 }
808 else if (err != 0)
809 {
810 warning (_("Cannot attach to lwp %d: %s"),
811 lwpid,
812 linux_ptrace_attach_fail_reason_string (ptid, err));
813 }
814
815 return 1;
816 }
817 return 0;
818}
819
e3deef73
LM
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Calls error() (does not return) if the initial
   attach fails; returns 0 on success.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  /* In all-stop mode the initial stop must be reported to the client;
     in non-stop mode it is swallowed instead.  */
  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
860
861struct counter
862{
863 int pid;
864 int count;
865};
866
867static int
868second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
869{
870 struct counter *counter = args;
871
872 if (ptid_get_pid (entry->id) == counter->pid)
873 {
874 if (++counter->count > 1)
875 return 1;
876 }
d61ddec4 877
da6d8c04
DJ
878 return 0;
879}
880
95954743 881static int
fa96cb38 882last_thread_of_process_p (int pid)
95954743 883{
95954743 884 struct counter counter = { pid , 0 };
da6d8c04 885
95954743
PA
886 return (find_inferior (&all_threads,
887 second_thread_of_pid_p, &counter) == NULL);
888}
889
da84f473
PA
/* Kill LWP.  Sends SIGKILL first and PTRACE_KILL second; does not
   wait for the LWP to die (see kill_wait_lwp for that).  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      /* Same errno-preservation dance as above.  */
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
934
e76126e8
PA
/* Kill LWP and wait for it to die.  Loops killing and reaping until
   waitpid reports something other than a stop; asserts that the LWP
   was actually reaped.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD means the LWP is a clone child reapable only with
	 __WCLONE; retry with that flag.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}
974
da84f473
PA
975/* Callback for `find_inferior'. Kills an lwp of a given process,
976 except the leader. */
95954743
PA
977
978static int
da84f473 979kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 980{
0d62e5e8 981 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 982 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
983 int pid = * (int *) args;
984
985 if (ptid_get_pid (entry->id) != pid)
986 return 0;
0d62e5e8 987
fd500816
DJ
988 /* We avoid killing the first thread here, because of a Linux kernel (at
989 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
990 the children get a chance to be reaped, it will remain a zombie
991 forever. */
95954743 992
d86d4aaf 993 if (lwpid_of (thread) == pid)
95954743
PA
994 {
995 if (debug_threads)
87ce2a04
DE
996 debug_printf ("lkop: is last of process %s\n",
997 target_pid_to_str (entry->id));
95954743
PA
998 return 0;
999 }
fd500816 1000
e76126e8 1001 kill_wait_lwp (lwp);
95954743 1002 return 0;
da6d8c04
DJ
1003}
1004
95954743
PA
1005static int
1006linux_kill (int pid)
0d62e5e8 1007{
95954743 1008 struct process_info *process;
54a0b537 1009 struct lwp_info *lwp;
fd500816 1010
95954743
PA
1011 process = find_process_pid (pid);
1012 if (process == NULL)
1013 return -1;
9d606399 1014
f9e39928
PA
1015 /* If we're killing a running inferior, make sure it is stopped
1016 first, as PTRACE_KILL will not work otherwise. */
7984d532 1017 stop_all_lwps (0, NULL);
f9e39928 1018
da84f473 1019 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 1020
54a0b537 1021 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1022 thread in the list, so do so now. */
95954743 1023 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 1024
784867a5 1025 if (lwp == NULL)
fd500816 1026 {
784867a5 1027 if (debug_threads)
d86d4aaf
DE
1028 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1029 pid);
784867a5
JK
1030 }
1031 else
e76126e8 1032 kill_wait_lwp (lwp);
2d717e4f 1033
8336d594 1034 the_target->mourn (process);
f9e39928
PA
1035
1036 /* Since we presently can only stop all lwps of all processes, we
1037 need to unstop lwps of other processes. */
7984d532 1038 unstop_all_lwps (0, NULL);
95954743 1039 return 0;
0d62e5e8
DJ
1040}
1041
9b224c5e
PA
1042/* Get pending signal of THREAD, for detaching purposes. This is the
1043 signal the thread last stopped for, which we need to deliver to the
1044 thread when detaching, otherwise, it'd be suppressed/lost. */
1045
1046static int
1047get_detach_signal (struct thread_info *thread)
1048{
a493e3e2 1049 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1050 int status;
1051 struct lwp_info *lp = get_thread_lwp (thread);
1052
1053 if (lp->status_pending_p)
1054 status = lp->status_pending;
1055 else
1056 {
1057 /* If the thread had been suspended by gdbserver, and it stopped
1058 cleanly, then it'll have stopped with SIGSTOP. But we don't
1059 want to deliver that SIGSTOP. */
1060 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
a493e3e2 1061 || thread->last_status.value.sig == GDB_SIGNAL_0)
9b224c5e
PA
1062 return 0;
1063
1064 /* Otherwise, we may need to deliver the signal we
1065 intercepted. */
1066 status = lp->last_status;
1067 }
1068
1069 if (!WIFSTOPPED (status))
1070 {
1071 if (debug_threads)
87ce2a04 1072 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1073 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1074 return 0;
1075 }
1076
1077 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1078 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1079 {
1080 if (debug_threads)
87ce2a04
DE
1081 debug_printf ("GPS: lwp %s had stopped with extended "
1082 "status: no pending signal\n",
d86d4aaf 1083 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1084 return 0;
1085 }
1086
2ea28649 1087 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e
PA
1088
1089 if (program_signals_p && !program_signals[signo])
1090 {
1091 if (debug_threads)
87ce2a04 1092 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1093 target_pid_to_str (ptid_of (thread)),
87ce2a04 1094 gdb_signal_to_string (signo));
9b224c5e
PA
1095 return 0;
1096 }
1097 else if (!program_signals_p
1098 /* If we have no way to know which signals GDB does not
1099 want to have passed to the program, assume
1100 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1101 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1102 {
1103 if (debug_threads)
87ce2a04
DE
1104 debug_printf ("GPS: lwp %s had signal %s, "
1105 "but we don't know if we should pass it. "
1106 "Default to not.\n",
d86d4aaf 1107 target_pid_to_str (ptid_of (thread)),
87ce2a04 1108 gdb_signal_to_string (signo));
9b224c5e
PA
1109 return 0;
1110 }
1111 else
1112 {
1113 if (debug_threads)
87ce2a04 1114 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1115 target_pid_to_str (ptid_of (thread)),
87ce2a04 1116 gdb_signal_to_string (signo));
9b224c5e
PA
1117
1118 return WSTOPSIG (status);
1119 }
1120}
1121
95954743
PA
1122static int
1123linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
1124{
1125 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1126 struct lwp_info *lwp = get_thread_lwp (thread);
95954743 1127 int pid = * (int *) args;
9b224c5e 1128 int sig;
95954743
PA
1129
1130 if (ptid_get_pid (entry->id) != pid)
1131 return 0;
6ad8ae5c 1132
9b224c5e 1133 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1134 if (lwp->stop_expected)
ae13219e 1135 {
9b224c5e 1136 if (debug_threads)
87ce2a04 1137 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1138 target_pid_to_str (ptid_of (thread)));
9b224c5e 1139
d86d4aaf 1140 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1141 lwp->stop_expected = 0;
ae13219e
DJ
1142 }
1143
1144 /* Flush any pending changes to the process's registers. */
d86d4aaf 1145 regcache_invalidate_thread (thread);
ae13219e 1146
9b224c5e
PA
1147 /* Pass on any pending signal for this thread. */
1148 sig = get_detach_signal (thread);
1149
ae13219e 1150 /* Finally, let it resume. */
82bfbe7e
PA
1151 if (the_low_target.prepare_to_resume != NULL)
1152 the_low_target.prepare_to_resume (lwp);
d86d4aaf 1153 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1154 (PTRACE_TYPE_ARG4) (long) sig) < 0)
9b224c5e 1155 error (_("Can't detach %s: %s"),
d86d4aaf 1156 target_pid_to_str (ptid_of (thread)),
9b224c5e 1157 strerror (errno));
bd99dc85
PA
1158
1159 delete_lwp (lwp);
95954743 1160 return 0;
6ad8ae5c
DJ
1161}
1162
95954743
PA
1163static int
1164linux_detach (int pid)
1165{
1166 struct process_info *process;
1167
1168 process = find_process_pid (pid);
1169 if (process == NULL)
1170 return -1;
1171
f9e39928
PA
1172 /* Stop all threads before detaching. First, ptrace requires that
1173 the thread is stopped to sucessfully detach. Second, thread_db
1174 may need to uninstall thread event breakpoints from memory, which
1175 only works with a stopped process anyway. */
7984d532 1176 stop_all_lwps (0, NULL);
f9e39928 1177
ca5c370d 1178#ifdef USE_THREAD_DB
8336d594 1179 thread_db_detach (process);
ca5c370d
PA
1180#endif
1181
fa593d66
PA
1182 /* Stabilize threads (move out of jump pads). */
1183 stabilize_threads ();
1184
95954743 1185 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1186
1187 the_target->mourn (process);
f9e39928
PA
1188
1189 /* Since we presently can only stop all lwps of all processes, we
1190 need to unstop lwps of other processes. */
7984d532 1191 unstop_all_lwps (0, NULL);
f9e39928
PA
1192 return 0;
1193}
1194
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  struct process_info *target_proc = proc;

  /* Only delete LWPs whose tgid matches the process being mourned.  */
  if (pid_of (thr) == pid_of (target_proc))
    delete_lwp (lwp);

  return 0;
}
1209
8336d594
PA
1210static void
1211linux_mourn (struct process_info *process)
1212{
1213 struct process_info_private *priv;
1214
1215#ifdef USE_THREAD_DB
1216 thread_db_mourn (process);
1217#endif
1218
d86d4aaf 1219 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1220
8336d594 1221 /* Freeing all private data. */
fe978cb0 1222 priv = process->priv;
8336d594
PA
1223 free (priv->arch_private);
1224 free (priv);
fe978cb0 1225 process->priv = NULL;
505106cd
PA
1226
1227 remove_process (process);
8336d594
PA
1228}
1229
444d6139 1230static void
95954743 1231linux_join (int pid)
444d6139 1232{
444d6139
PA
1233 int status, ret;
1234
1235 do {
95954743 1236 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1237 if (WIFEXITED (status) || WIFSIGNALED (status))
1238 break;
1239 } while (ret != -1 || errno != ECHILD);
1240}
1241
6ad8ae5c 1242/* Return nonzero if the given thread is still alive. */
0d62e5e8 1243static int
95954743 1244linux_thread_alive (ptid_t ptid)
0d62e5e8 1245{
95954743
PA
1246 struct lwp_info *lwp = find_lwp_pid (ptid);
1247
1248 /* We assume we always know if a thread exits. If a whole process
1249 exited but we still haven't been able to report it to GDB, we'll
1250 hold on to the last lwp of the dead process. */
1251 if (lwp != NULL)
1252 return !lwp->dead;
0d62e5e8
DJ
1253 else
1254 return 0;
1255}
1256
582511be
PA
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Nothing pending at all -> nothing to re-validate.  */
  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  /* For breakpoint stops, re-check that the breakpoint still explains
     the stop: GDB may have removed/changed breakpoints, or the PC may
     have been moved, since the event was collected.  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the global current thread; the low-target
	 breakpoint checks below operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  /* Someone moved the PC (e.g., GDB wrote registers); the old
	     breakpoint event no longer describes reality.  */
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based classification, verify the breakpoint
	 is still actually inserted at PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  /* Drop the stale event entirely.  */
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1330
6bf5e0ba 1331/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1332static int
d50171e4 1333status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1334{
d86d4aaf 1335 struct thread_info *thread = (struct thread_info *) entry;
582511be 1336 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1337 ptid_t ptid = * (ptid_t *) arg;
1338
1339 /* Check if we're only interested in events from a specific process
afa8d396
PA
1340 or a specific LWP. */
1341 if (!ptid_match (ptid_of (thread), ptid))
95954743 1342 return 0;
0d62e5e8 1343
582511be
PA
1344 if (lp->status_pending_p
1345 && !thread_still_has_status_pending_p (thread))
1346 {
1347 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1348 return 0;
1349 }
0d62e5e8 1350
582511be 1351 return lp->status_pending_p;
0d62e5e8
DJ
1352}
1353
95954743
PA
1354static int
1355same_lwp (struct inferior_list_entry *entry, void *data)
1356{
1357 ptid_t ptid = *(ptid_t *) data;
1358 int lwp;
1359
1360 if (ptid_get_lwp (ptid) != 0)
1361 lwp = ptid_get_lwp (ptid);
1362 else
1363 lwp = ptid_get_pid (ptid);
1364
1365 if (ptid_get_lwp (entry->id) == lwp)
1366 return 1;
1367
1368 return 0;
1369}
1370
1371struct lwp_info *
1372find_lwp_pid (ptid_t ptid)
1373{
d86d4aaf
DE
1374 struct inferior_list_entry *thread
1375 = find_inferior (&all_threads, same_lwp, &ptid);
1376
1377 if (thread == NULL)
1378 return NULL;
1379
1380 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1381}
1382
fa96cb38 1383/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1384
fa96cb38
PA
1385static int
1386num_lwps (int pid)
1387{
1388 struct inferior_list_entry *inf, *tmp;
1389 int count = 0;
0d62e5e8 1390
fa96cb38 1391 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1392 {
fa96cb38
PA
1393 if (ptid_get_pid (inf->id) == pid)
1394 count++;
24a09b5f 1395 }
3aee8918 1396
fa96cb38
PA
1397 return count;
1398}
d61ddec4 1399
6d4ee8c6
GB
/* The arguments passed to iterate_over_lwps.  Bundled into one struct
   so they can travel through find_inferior's single void* slot.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
1413
1414/* Callback for find_inferior used by iterate_over_lwps to filter
1415 calls to the callback supplied to that function. Returning a
1416 nonzero value causes find_inferiors to stop iterating and return
1417 the current inferior_list_entry. Returning zero indicates that
1418 find_inferiors should continue iterating. */
1419
1420static int
1421iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1422{
1423 struct iterate_over_lwps_args *args
1424 = (struct iterate_over_lwps_args *) args_p;
1425
1426 if (ptid_match (entry->id, args->filter))
1427 {
1428 struct thread_info *thr = (struct thread_info *) entry;
1429 struct lwp_info *lwp = get_thread_lwp (thr);
1430
1431 return (*args->callback) (lwp, args->data);
1432 }
1433
1434 return 0;
1435}
1436
1437/* See nat/linux-nat.h. */
1438
1439struct lwp_info *
1440iterate_over_lwps (ptid_t filter,
1441 iterate_over_lwps_ftype callback,
1442 void *data)
1443{
1444 struct iterate_over_lwps_args args = {filter, callback, data};
1445 struct inferior_list_entry *entry;
1446
1447 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1448 if (entry == NULL)
1449 return NULL;
1450
1451 return get_thread_lwp ((struct thread_info *) entry);
1452}
1453
fa96cb38
PA
1454/* Detect zombie thread group leaders, and "exit" them. We can't reap
1455 their exits until all other threads in the group have exited. */
c3adc08c 1456
fa96cb38
PA
1457static void
1458check_zombie_leaders (void)
1459{
1460 struct process_info *proc, *tmp;
c3adc08c 1461
fa96cb38 1462 ALL_PROCESSES (proc, tmp)
c3adc08c 1463 {
fa96cb38
PA
1464 pid_t leader_pid = pid_of (proc);
1465 struct lwp_info *leader_lp;
c3adc08c 1466
fa96cb38 1467 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1468
fa96cb38
PA
1469 if (debug_threads)
1470 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1471 "num_lwps=%d, zombie=%d\n",
1472 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1473 linux_proc_pid_is_zombie (leader_pid));
1474
1475 if (leader_lp != NULL
1476 /* Check if there are other threads in the group, as we may
1477 have raced with the inferior simply exiting. */
1478 && !last_thread_of_process_p (leader_pid)
1479 && linux_proc_pid_is_zombie (leader_pid))
1480 {
1481 /* A leader zombie can mean one of two things:
1482
1483 - It exited, and there's an exit status pending
1484 available, or only the leader exited (not the whole
1485 program). In the latter case, we can't waitpid the
1486 leader's exit status until all other threads are gone.
1487
1488 - There are 3 or more threads in the group, and a thread
1489 other than the leader exec'd. On an exec, the Linux
1490 kernel destroys all other threads (except the execing
1491 one) in the thread group, and resets the execing thread's
1492 tid to the tgid. No exit notification is sent for the
1493 execing thread -- from the ptracer's perspective, it
1494 appears as though the execing thread just vanishes.
1495 Until we reap all other threads except the leader and the
1496 execing thread, the leader will be zombie, and the
1497 execing thread will be in `D (disc sleep)'. As soon as
1498 all other threads are reaped, the execing thread changes
1499 it's tid to the tgid, and the previous (zombie) leader
1500 vanishes, giving place to the "new" leader. We could try
1501 distinguishing the exit and exec cases, by waiting once
1502 more, and seeing if something comes out, but it doesn't
1503 sound useful. The previous leader _does_ go away, and
1504 we'll re-add the new one once we see the exec event
1505 (which is just the same as what would happen if the
1506 previous leader did exit voluntarily before some other
1507 thread execs). */
c3adc08c 1508
fa96cb38
PA
1509 if (debug_threads)
1510 fprintf (stderr,
1511 "CZL: Thread group leader %d zombie "
1512 "(it exited, or another thread execd).\n",
1513 leader_pid);
c3adc08c 1514
fa96cb38 1515 delete_lwp (leader_lp);
c3adc08c
PA
1516 }
1517 }
fa96cb38 1518}
c3adc08c 1519
fa96cb38
PA
1520/* Callback for `find_inferior'. Returns the first LWP that is not
1521 stopped. ARG is a PTID filter. */
d50171e4 1522
fa96cb38
PA
1523static int
1524not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1525{
1526 struct thread_info *thr = (struct thread_info *) entry;
1527 struct lwp_info *lwp;
1528 ptid_t filter = *(ptid_t *) arg;
47c0c975 1529
fa96cb38
PA
1530 if (!ptid_match (ptid_of (thr), filter))
1531 return 0;
bd99dc85 1532
fa96cb38
PA
1533 lwp = get_thread_lwp (thr);
1534 if (!lwp->stopped)
1535 return 1;
1536
1537 return 0;
0d62e5e8 1538}
611cb4a5 1539
219f2f23
PA
1540/* This function should only be called if the LWP got a SIGTRAP.
1541
1542 Handle any tracepoint steps or hits. Return true if a tracepoint
1543 event was handled, 0 otherwise. */
1544
1545static int
1546handle_tracepoints (struct lwp_info *lwp)
1547{
1548 struct thread_info *tinfo = get_lwp_thread (lwp);
1549 int tpoint_related_event = 0;
1550
582511be
PA
1551 gdb_assert (lwp->suspended == 0);
1552
7984d532
PA
1553 /* If this tracepoint hit causes a tracing stop, we'll immediately
1554 uninsert tracepoints. To do this, we temporarily pause all
1555 threads, unpatch away, and then unpause threads. We need to make
1556 sure the unpausing doesn't resume LWP too. */
1557 lwp->suspended++;
1558
219f2f23
PA
1559 /* And we need to be sure that any all-threads-stopping doesn't try
1560 to move threads out of the jump pads, as it could deadlock the
1561 inferior (LWP could be in the jump pad, maybe even holding the
1562 lock.) */
1563
1564 /* Do any necessary step collect actions. */
1565 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1566
fa593d66
PA
1567 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1568
219f2f23
PA
1569 /* See if we just hit a tracepoint and do its main collect
1570 actions. */
1571 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1572
7984d532
PA
1573 lwp->suspended--;
1574
1575 gdb_assert (lwp->suspended == 0);
fa593d66 1576 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1577
219f2f23
PA
1578 if (tpoint_related_event)
1579 {
1580 if (debug_threads)
87ce2a04 1581 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1582 return 1;
1583 }
1584
1585 return 0;
1586}
1587
fa593d66
PA
1588/* Convenience wrapper. Returns true if LWP is presently collecting a
1589 fast tracepoint. */
1590
1591static int
1592linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1593 struct fast_tpoint_collect_status *status)
1594{
1595 CORE_ADDR thread_area;
d86d4aaf 1596 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1597
1598 if (the_low_target.get_thread_area == NULL)
1599 return 0;
1600
1601 /* Get the thread area address. This is used to recognize which
1602 thread is which when tracing with the in-process agent library.
1603 We don't read anything from the address, and treat it as opaque;
1604 it's the address itself that we assume is unique per-thread. */
d86d4aaf 1605 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
1606 return 0;
1607
1608 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1609}
1610
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.

   Returns 1 when LWP needs to finish moving out of a fast-tracepoint
   jump pad before its event can be reported (caller should resume it),
   0 otherwise.  WSTAT may be NULL when there is no wait status to
   consult.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Low-target calls below operate on the global current thread;
     switch to LWP's thread and restore before returning.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      /* r is 0 (not collecting), 1 (before the relocated original
	 instruction), or 2 (past it) -- see
	 fast_tracepoint_collecting.  */
      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address so GDB sees
		 the signal as having hit at the tracepoint itself.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
1737
1738/* Enqueue one signal in the "signals to report later when out of the
1739 jump pad" list. */
1740
1741static void
1742enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1743{
1744 struct pending_signals *p_sig;
d86d4aaf 1745 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1746
1747 if (debug_threads)
87ce2a04 1748 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 1749 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1750
1751 if (debug_threads)
1752 {
1753 struct pending_signals *sig;
1754
1755 for (sig = lwp->pending_signals_to_report;
1756 sig != NULL;
1757 sig = sig->prev)
87ce2a04
DE
1758 debug_printf (" Already queued %d\n",
1759 sig->signal);
fa593d66 1760
87ce2a04 1761 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
1762 }
1763
1a981360
PA
1764 /* Don't enqueue non-RT signals if they are already in the deferred
1765 queue. (SIGSTOP being the easiest signal to see ending up here
1766 twice) */
1767 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1768 {
1769 struct pending_signals *sig;
1770
1771 for (sig = lwp->pending_signals_to_report;
1772 sig != NULL;
1773 sig = sig->prev)
1774 {
1775 if (sig->signal == WSTOPSIG (*wstat))
1776 {
1777 if (debug_threads)
87ce2a04
DE
1778 debug_printf ("Not requeuing already queued non-RT signal %d"
1779 " for LWP %ld\n",
1780 sig->signal,
d86d4aaf 1781 lwpid_of (thread));
1a981360
PA
1782 return;
1783 }
1784 }
1785 }
1786
fa593d66
PA
1787 p_sig = xmalloc (sizeof (*p_sig));
1788 p_sig->prev = lwp->pending_signals_to_report;
1789 p_sig->signal = WSTOPSIG (*wstat);
1790 memset (&p_sig->info, 0, sizeof (siginfo_t));
d86d4aaf 1791 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1792 &p_sig->info);
fa593d66
PA
1793
1794 lwp->pending_signals_to_report = p_sig;
1795}
1796
1797/* Dequeue one signal from the "signals to report later when out of
1798 the jump pad" list. */
1799
1800static int
1801dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1802{
d86d4aaf
DE
1803 struct thread_info *thread = get_lwp_thread (lwp);
1804
fa593d66
PA
1805 if (lwp->pending_signals_to_report != NULL)
1806 {
1807 struct pending_signals **p_sig;
1808
1809 p_sig = &lwp->pending_signals_to_report;
1810 while ((*p_sig)->prev != NULL)
1811 p_sig = &(*p_sig)->prev;
1812
1813 *wstat = W_STOPCODE ((*p_sig)->signal);
1814 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 1815 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1816 &(*p_sig)->info);
fa593d66
PA
1817 free (*p_sig);
1818 *p_sig = NULL;
1819
1820 if (debug_threads)
87ce2a04 1821 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 1822 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1823
1824 if (debug_threads)
1825 {
1826 struct pending_signals *sig;
1827
1828 for (sig = lwp->pending_signals_to_report;
1829 sig != NULL;
1830 sig = sig->prev)
87ce2a04
DE
1831 debug_printf (" Still queued %d\n",
1832 sig->signal);
fa593d66 1833
87ce2a04 1834 debug_printf (" (no more queued signals)\n");
fa593d66
PA
1835 }
1836
1837 return 1;
1838 }
1839
1840 return 0;
1841}
1842
582511be
PA
1843/* Fetch the possibly triggered data watchpoint info and store it in
1844 CHILD.
d50171e4 1845
582511be
PA
1846 On some archs, like x86, that use debug registers to set
1847 watchpoints, it's possible that the way to know which watched
1848 address trapped, is to check the register that is used to select
1849 which address to watch. Problem is, between setting the watchpoint
1850 and reading back which data address trapped, the user may change
1851 the set of watchpoints, and, as a consequence, GDB changes the
1852 debug registers in the inferior. To avoid reading back a stale
1853 stopped-data-address when that happens, we cache in LP the fact
1854 that a watchpoint trapped, and the corresponding data address, as
1855 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1856 registers meanwhile, we have the cached data we can rely on. */
d50171e4 1857
582511be
PA
1858static int
1859check_stopped_by_watchpoint (struct lwp_info *child)
1860{
1861 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 1862 {
582511be 1863 struct thread_info *saved_thread;
d50171e4 1864
582511be
PA
1865 saved_thread = current_thread;
1866 current_thread = get_lwp_thread (child);
1867
1868 if (the_low_target.stopped_by_watchpoint ())
d50171e4 1869 {
15c66dd6 1870 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
1871
1872 if (the_low_target.stopped_data_address != NULL)
1873 child->stopped_data_address
1874 = the_low_target.stopped_data_address ();
1875 else
1876 child->stopped_data_address = 0;
d50171e4
PA
1877 }
1878
0bfdf32f 1879 current_thread = saved_thread;
d50171e4
PA
1880 }
1881
15c66dd6 1882 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
1883}
1884
fa96cb38
PA
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.

   LWPID is the LWP the kernel reported the event for; WSTAT is the
   raw waitpid status.  On a kept event, the LWP's stop_pc,
   stop_reason and pending status are filled in before returning.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{

	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  /* From here on the LWP is known to be stopped (exit/kill handled
     above).  The assert documents that invariant.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->priv->new_inferior)
	{
	  struct thread_info *saved_thread;

	  /* arch_setup reads registers of the current thread, so
	     temporarily switch to the event thread.  */
	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->priv->new_inferior = 0;
	}
    }

  /* First stop after attach/fork: turn on the ptrace event options
     for this LWP (done lazily here, once it is known stopped).  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));

      linux_enable_event_reporting (lwpid, proc->attached);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      handle_extended_wait (child, wstat);
      return NULL;
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	  || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    check_stopped_by_watchpoint (child);

  /* check_stopped_by_breakpoint may have already adjusted and cached
     the PC; only fetch it here if it did not.  */
  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      /* This SIGSTOP is one we sent ourselves (e.g., to stop the
	 thread).  Decide whether to report or swallow it.  */
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* Leave the event pending; the caller picks one pending event to
     report, leaving the rest queued.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2038
20ba1ce6
PA
2039/* Resume LWPs that are currently stopped without any pending status
2040 to report, but are resumed from the core's perspective. */
2041
2042static void
2043resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2044{
2045 struct thread_info *thread = (struct thread_info *) entry;
2046 struct lwp_info *lp = get_thread_lwp (thread);
2047
2048 if (lp->stopped
2049 && !lp->status_pending_p
2050 && thread->last_resume_kind != resume_stop
2051 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2052 {
2053 int step = thread->last_resume_kind == resume_step;
2054
2055 if (debug_threads)
2056 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2057 target_pid_to_str (ptid_of (thread)),
2058 paddress (lp->stop_pc),
2059 step);
2060
2061 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2062 }
2063}
2064
fa96cb38
PA
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.

   On a successful return, current_thread is switched to the thread
   whose event is being reported.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      /* Wildcard or whole-process filter: any thread with a pending
	 status qualifies.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Single-LWP filter.  NOTE(review): find_lwp_pid's result is
	 dereferenced without a NULL check below — presumably callers
	 only pass PTIDs of known-live LWPs; confirm.  */
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  /* The LWP is mid fast-tracepoint collection; defer the
	     pending signal and let it finish the collection first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      /* Found a pending event; consume it and report.  */
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	 thread group still exist, waitpid(TGID, ...) hangs.  That
	 waitpid won't return an exit status until the other threads
	 in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	 without reporting an exit (so we'd hang if we waited for it
	 explicitly in that case).  The exec event is reported to
	 the TGID pid (although we don't currently enable exec
	 events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* sigsuspend atomically restores PREV_MASK (which has SIGCHLD
	 unblocked) and waits; on wakeup, re-block and loop back to
	 drain waitpid again.  */
      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the caller's signal mask before reporting.  */
  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
	debug_printf ("LWP %d is the last lwp of process.  "
		      "Process %ld exiting.\n",
		      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
2274
2275/* Wait for an event from child(ren) PTID. PTIDs can be:
2276 minus_one_ptid, to specify any child; a pid PTID, specifying all
2277 lwps of a thread group; or a PTID representing a single lwp. Store
2278 the stop status through the status pointer WSTAT. OPTIONS is
2279 passed to the waitpid call. Return 0 if no event was found and
2280 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2281 was found. Return the PID of the stopped child otherwise. */
2282
2283static int
2284linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2285{
2286 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2287}
2288
6bf5e0ba
PA
2289/* Count the LWP's that have had events. */
2290
2291static int
2292count_events_callback (struct inferior_list_entry *entry, void *data)
2293{
d86d4aaf 2294 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2295 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2296 int *count = data;
2297
2298 gdb_assert (count != NULL);
2299
582511be 2300 /* Count only resumed LWPs that have an event pending. */
8336d594 2301 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2302 && lp->status_pending_p)
6bf5e0ba
PA
2303 (*count)++;
2304
2305 return 0;
2306}
2307
2308/* Select the LWP (if any) that is currently being single-stepped. */
2309
2310static int
2311select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2312{
d86d4aaf
DE
2313 struct thread_info *thread = (struct thread_info *) entry;
2314 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2315
8336d594
PA
2316 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2317 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2318 && lp->status_pending_p)
2319 return 1;
2320 else
2321 return 0;
2322}
2323
b90fc188 2324/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2325
2326static int
2327select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2328{
d86d4aaf 2329 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2330 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2331 int *selector = data;
2332
2333 gdb_assert (selector != NULL);
2334
582511be 2335 /* Select only resumed LWPs that have an event pending. */
91baf43f 2336 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2337 && lp->status_pending_p)
6bf5e0ba
PA
2338 if ((*selector)-- == 0)
2339 return 1;
2340
2341 return 0;
2342}
2343
6bf5e0ba
PA
2344/* Select one LWP out of those that have events pending. */
2345
2346static void
2347select_event_lwp (struct lwp_info **orig_lp)
2348{
2349 int num_events = 0;
2350 int random_selector;
582511be
PA
2351 struct thread_info *event_thread = NULL;
2352
2353 /* In all-stop, give preference to the LWP that is being
2354 single-stepped. There will be at most one, and it's the LWP that
2355 the core is most interested in. If we didn't do this, then we'd
2356 have to handle pending step SIGTRAPs somehow in case the core
2357 later continues the previously-stepped thread, otherwise we'd
2358 report the pending SIGTRAP, and the core, not having stepped the
2359 thread, wouldn't understand what the trap was for, and therefore
2360 would report it to the user as a random signal. */
2361 if (!non_stop)
6bf5e0ba 2362 {
582511be
PA
2363 event_thread
2364 = (struct thread_info *) find_inferior (&all_threads,
2365 select_singlestep_lwp_callback,
2366 NULL);
2367 if (event_thread != NULL)
2368 {
2369 if (debug_threads)
2370 debug_printf ("SEL: Select single-step %s\n",
2371 target_pid_to_str (ptid_of (event_thread)));
2372 }
6bf5e0ba 2373 }
582511be 2374 if (event_thread == NULL)
6bf5e0ba
PA
2375 {
2376 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2377 which have had events. */
6bf5e0ba 2378
b90fc188 2379 /* First see how many events we have. */
d86d4aaf 2380 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2381 gdb_assert (num_events > 0);
6bf5e0ba 2382
b90fc188
PA
2383 /* Now randomly pick a LWP out of those that have had
2384 events. */
6bf5e0ba
PA
2385 random_selector = (int)
2386 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2387
2388 if (debug_threads && num_events > 1)
87ce2a04
DE
2389 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2390 num_events, random_selector);
6bf5e0ba 2391
d86d4aaf
DE
2392 event_thread
2393 = (struct thread_info *) find_inferior (&all_threads,
2394 select_event_lwp_callback,
2395 &random_selector);
6bf5e0ba
PA
2396 }
2397
d86d4aaf 2398 if (event_thread != NULL)
6bf5e0ba 2399 {
d86d4aaf
DE
2400 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2401
6bf5e0ba
PA
2402 /* Switch the event LWP. */
2403 *orig_lp = event_lp;
2404 }
2405}
2406
7984d532
PA
2407/* Decrement the suspend count of an LWP. */
2408
2409static int
2410unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2411{
d86d4aaf
DE
2412 struct thread_info *thread = (struct thread_info *) entry;
2413 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2414
2415 /* Ignore EXCEPT. */
2416 if (lwp == except)
2417 return 0;
2418
2419 lwp->suspended--;
2420
2421 gdb_assert (lwp->suspended >= 0);
2422 return 0;
2423}
2424
2425/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2426 NULL. */
2427
2428static void
2429unsuspend_all_lwps (struct lwp_info *except)
2430{
d86d4aaf 2431 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2432}
2433
fa593d66
PA
2434static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2435static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2436 void *data);
2437static int lwp_running (struct inferior_list_entry *entry, void *data);
2438static ptid_t linux_wait_1 (ptid_t ptid,
2439 struct target_waitstatus *ourstatus,
2440 int target_options);
2441
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* Bail out early if some thread can never leave the jump pad
     (e.g., single-stepping a trap there); there is nothing useful we
     can do then.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Bump the suspend count so the LWP stays put
	     until the balancing unsuspend pass below.)  */
	  lwp->suspended++;

	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      /* Requeue the signal as a pending wait status so it is
		 re-reported once stabilization is done.  */
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Balance the suspend counts bumped in the loop above (EXCEPT is
     NULL, so every LWP is unsuspended).  */
  find_inferior (&all_threads, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: did everyone actually make it out?  */
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2542
582511be
PA
2543static void async_file_mark (void);
2544
2545/* Convenience function that is called when the kernel reports an
2546 event that is not passed out to GDB. */
2547
2548static ptid_t
2549ignore_event (struct target_waitstatus *ourstatus)
2550{
2551 /* If we got an event, there may still be others, as a single
2552 SIGCHLD can indicate more than one child stopped. This forces
2553 another target_wait call. */
2554 async_file_mark ();
2555
2556 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2557 return null_ptid;
2558}
2559
0d62e5e8 2560/* Wait for process, returns status. */
da6d8c04 2561
95954743
PA
2562static ptid_t
2563linux_wait_1 (ptid_t ptid,
2564 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2565{
e5f1222d 2566 int w;
fc7238bb 2567 struct lwp_info *event_child;
bd99dc85 2568 int options;
bd99dc85 2569 int pid;
6bf5e0ba
PA
2570 int step_over_finished;
2571 int bp_explains_trap;
2572 int maybe_internal_trap;
2573 int report_to_gdb;
219f2f23 2574 int trace_event;
c2d6af84 2575 int in_step_range;
bd99dc85 2576
87ce2a04
DE
2577 if (debug_threads)
2578 {
2579 debug_enter ();
2580 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2581 }
2582
bd99dc85
PA
2583 /* Translate generic target options into linux options. */
2584 options = __WALL;
2585 if (target_options & TARGET_WNOHANG)
2586 options |= WNOHANG;
0d62e5e8 2587
fa593d66
PA
2588 bp_explains_trap = 0;
2589 trace_event = 0;
c2d6af84 2590 in_step_range = 0;
bd99dc85
PA
2591 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2592
6bf5e0ba
PA
2593 if (ptid_equal (step_over_bkpt, null_ptid))
2594 pid = linux_wait_for_event (ptid, &w, options);
2595 else
2596 {
2597 if (debug_threads)
87ce2a04
DE
2598 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2599 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2600 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2601 }
2602
fa96cb38 2603 if (pid == 0)
87ce2a04 2604 {
fa96cb38
PA
2605 gdb_assert (target_options & TARGET_WNOHANG);
2606
87ce2a04
DE
2607 if (debug_threads)
2608 {
fa96cb38
PA
2609 debug_printf ("linux_wait_1 ret = null_ptid, "
2610 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2611 debug_exit ();
2612 }
fa96cb38
PA
2613
2614 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2615 return null_ptid;
2616 }
fa96cb38
PA
2617 else if (pid == -1)
2618 {
2619 if (debug_threads)
2620 {
2621 debug_printf ("linux_wait_1 ret = null_ptid, "
2622 "TARGET_WAITKIND_NO_RESUMED\n");
2623 debug_exit ();
2624 }
bd99dc85 2625
fa96cb38
PA
2626 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2627 return null_ptid;
2628 }
0d62e5e8 2629
0bfdf32f 2630 event_child = get_thread_lwp (current_thread);
0d62e5e8 2631
fa96cb38
PA
2632 /* linux_wait_for_event only returns an exit status for the last
2633 child of a process. Report it. */
2634 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2635 {
fa96cb38 2636 if (WIFEXITED (w))
0d62e5e8 2637 {
fa96cb38
PA
2638 ourstatus->kind = TARGET_WAITKIND_EXITED;
2639 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2640
fa96cb38 2641 if (debug_threads)
bd99dc85 2642 {
fa96cb38
PA
2643 debug_printf ("linux_wait_1 ret = %s, exited with "
2644 "retcode %d\n",
0bfdf32f 2645 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2646 WEXITSTATUS (w));
2647 debug_exit ();
bd99dc85 2648 }
fa96cb38
PA
2649 }
2650 else
2651 {
2652 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2653 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2654
fa96cb38
PA
2655 if (debug_threads)
2656 {
2657 debug_printf ("linux_wait_1 ret = %s, terminated with "
2658 "signal %d\n",
0bfdf32f 2659 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2660 WTERMSIG (w));
2661 debug_exit ();
2662 }
0d62e5e8 2663 }
fa96cb38 2664
0bfdf32f 2665 return ptid_of (current_thread);
da6d8c04
DJ
2666 }
2667
8090aef2
PA
2668 /* If step-over executes a breakpoint instruction, it means a
2669 gdb/gdbserver breakpoint had been planted on top of a permanent
2670 breakpoint. The PC has been adjusted by
2671 check_stopped_by_breakpoint to point at the breakpoint address.
2672 Advance the PC manually past the breakpoint, otherwise the
2673 program would keep trapping the permanent breakpoint forever. */
2674 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2675 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2676 {
9beb7c4e 2677 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2678
2679 if (debug_threads)
2680 {
2681 debug_printf ("step-over for %s executed software breakpoint\n",
2682 target_pid_to_str (ptid_of (current_thread)));
2683 }
2684
2685 if (increment_pc != 0)
2686 {
2687 struct regcache *regcache
2688 = get_thread_regcache (current_thread, 1);
2689
2690 event_child->stop_pc += increment_pc;
2691 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2692
2693 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2694 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2695 }
2696 }
2697
6bf5e0ba
PA
2698 /* If this event was not handled before, and is not a SIGTRAP, we
2699 report it. SIGILL and SIGSEGV are also treated as traps in case
2700 a breakpoint is inserted at the current PC. If this target does
2701 not support internal breakpoints at all, we also report the
2702 SIGTRAP without further processing; it's of no concern to us. */
2703 maybe_internal_trap
2704 = (supports_breakpoints ()
2705 && (WSTOPSIG (w) == SIGTRAP
2706 || ((WSTOPSIG (w) == SIGILL
2707 || WSTOPSIG (w) == SIGSEGV)
2708 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2709
2710 if (maybe_internal_trap)
2711 {
2712 /* Handle anything that requires bookkeeping before deciding to
2713 report the event or continue waiting. */
2714
2715 /* First check if we can explain the SIGTRAP with an internal
2716 breakpoint, or if we should possibly report the event to GDB.
2717 Do this before anything that may remove or insert a
2718 breakpoint. */
2719 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2720
2721 /* We have a SIGTRAP, possibly a step-over dance has just
2722 finished. If so, tweak the state machine accordingly,
2723 reinsert breakpoints and delete any reinsert (software
2724 single-step) breakpoints. */
2725 step_over_finished = finish_step_over (event_child);
2726
2727 /* Now invoke the callbacks of any internal breakpoints there. */
2728 check_breakpoints (event_child->stop_pc);
2729
219f2f23
PA
2730 /* Handle tracepoint data collecting. This may overflow the
2731 trace buffer, and cause a tracing stop, removing
2732 breakpoints. */
2733 trace_event = handle_tracepoints (event_child);
2734
6bf5e0ba
PA
2735 if (bp_explains_trap)
2736 {
2737 /* If we stepped or ran into an internal breakpoint, we've
2738 already handled it. So next time we resume (from this
2739 PC), we should step over it. */
2740 if (debug_threads)
87ce2a04 2741 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2742
8b07ae33
PA
2743 if (breakpoint_here (event_child->stop_pc))
2744 event_child->need_step_over = 1;
6bf5e0ba
PA
2745 }
2746 }
2747 else
2748 {
2749 /* We have some other signal, possibly a step-over dance was in
2750 progress, and it should be cancelled too. */
2751 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2752 }
2753
2754 /* We have all the data we need. Either report the event to GDB, or
2755 resume threads and keep waiting for more. */
2756
2757 /* If we're collecting a fast tracepoint, finish the collection and
2758 move out of the jump pad before delivering a signal. See
2759 linux_stabilize_threads. */
2760
2761 if (WIFSTOPPED (w)
2762 && WSTOPSIG (w) != SIGTRAP
2763 && supports_fast_tracepoints ()
58b4daa5 2764 && agent_loaded_p ())
fa593d66
PA
2765 {
2766 if (debug_threads)
87ce2a04
DE
2767 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2768 "to defer or adjust it.\n",
0bfdf32f 2769 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2770
2771 /* Allow debugging the jump pad itself. */
0bfdf32f 2772 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2773 && maybe_move_out_of_jump_pad (event_child, &w))
2774 {
2775 enqueue_one_deferred_signal (event_child, &w);
2776
2777 if (debug_threads)
87ce2a04 2778 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2779 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2780
2781 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2782
2783 return ignore_event (ourstatus);
fa593d66
PA
2784 }
2785 }
219f2f23 2786
fa593d66
PA
2787 if (event_child->collecting_fast_tracepoint)
2788 {
2789 if (debug_threads)
87ce2a04
DE
2790 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2791 "Check if we're already there.\n",
0bfdf32f 2792 lwpid_of (current_thread),
87ce2a04 2793 event_child->collecting_fast_tracepoint);
fa593d66
PA
2794
2795 trace_event = 1;
2796
2797 event_child->collecting_fast_tracepoint
2798 = linux_fast_tracepoint_collecting (event_child, NULL);
2799
2800 if (event_child->collecting_fast_tracepoint != 1)
2801 {
2802 /* No longer need this breakpoint. */
2803 if (event_child->exit_jump_pad_bkpt != NULL)
2804 {
2805 if (debug_threads)
87ce2a04
DE
2806 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2807 "stopping all threads momentarily.\n");
fa593d66
PA
2808
2809 /* Other running threads could hit this breakpoint.
2810 We don't handle moribund locations like GDB does,
2811 instead we always pause all threads when removing
2812 breakpoints, so that any step-over or
2813 decr_pc_after_break adjustment is always taken
2814 care of while the breakpoint is still
2815 inserted. */
2816 stop_all_lwps (1, event_child);
fa593d66
PA
2817
2818 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2819 event_child->exit_jump_pad_bkpt = NULL;
2820
2821 unstop_all_lwps (1, event_child);
2822
2823 gdb_assert (event_child->suspended >= 0);
2824 }
2825 }
2826
2827 if (event_child->collecting_fast_tracepoint == 0)
2828 {
2829 if (debug_threads)
87ce2a04
DE
2830 debug_printf ("fast tracepoint finished "
2831 "collecting successfully.\n");
fa593d66
PA
2832
2833 /* We may have a deferred signal to report. */
2834 if (dequeue_one_deferred_signal (event_child, &w))
2835 {
2836 if (debug_threads)
87ce2a04 2837 debug_printf ("dequeued one signal.\n");
fa593d66 2838 }
3c11dd79 2839 else
fa593d66 2840 {
3c11dd79 2841 if (debug_threads)
87ce2a04 2842 debug_printf ("no deferred signals.\n");
fa593d66
PA
2843
2844 if (stabilizing_threads)
2845 {
2846 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 2847 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
2848
2849 if (debug_threads)
2850 {
2851 debug_printf ("linux_wait_1 ret = %s, stopped "
2852 "while stabilizing threads\n",
0bfdf32f 2853 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
2854 debug_exit ();
2855 }
2856
0bfdf32f 2857 return ptid_of (current_thread);
fa593d66
PA
2858 }
2859 }
2860 }
6bf5e0ba
PA
2861 }
2862
e471f25b
PA
2863 /* Check whether GDB would be interested in this event. */
2864
2865 /* If GDB is not interested in this signal, don't stop other
2866 threads, and don't report it to GDB. Just resume the inferior
2867 right away. We do this for threading-related signals as well as
2868 any that GDB specifically requested we ignore. But never ignore
2869 SIGSTOP if we sent it ourselves, and do not ignore signals when
2870 stepping - they may require special handling to skip the signal
c9587f88
AT
2871 handler. Also never ignore signals that could be caused by a
2872 breakpoint. */
e471f25b
PA
2873 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2874 thread library? */
2875 if (WIFSTOPPED (w)
0bfdf32f 2876 && current_thread->last_resume_kind != resume_step
e471f25b 2877 && (
1a981360 2878#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 2879 (current_process ()->priv->thread_db != NULL
e471f25b
PA
2880 && (WSTOPSIG (w) == __SIGRTMIN
2881 || WSTOPSIG (w) == __SIGRTMIN + 1))
2882 ||
2883#endif
2ea28649 2884 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 2885 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
2886 && current_thread->last_resume_kind == resume_stop)
2887 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
2888 {
2889 siginfo_t info, *info_p;
2890
2891 if (debug_threads)
87ce2a04 2892 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 2893 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 2894
0bfdf32f 2895 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2896 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
2897 info_p = &info;
2898 else
2899 info_p = NULL;
2900 linux_resume_one_lwp (event_child, event_child->stepping,
2901 WSTOPSIG (w), info_p);
582511be 2902 return ignore_event (ourstatus);
e471f25b
PA
2903 }
2904
c2d6af84
PA
2905 /* Note that all addresses are always "out of the step range" when
2906 there's no range to begin with. */
2907 in_step_range = lwp_in_step_range (event_child);
2908
2909 /* If GDB wanted this thread to single step, and the thread is out
2910 of the step range, we always want to report the SIGTRAP, and let
2911 GDB handle it. Watchpoints should always be reported. So should
2912 signals we can't explain. A SIGTRAP we can't explain could be a
2913 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2914 do, we're be able to handle GDB breakpoints on top of internal
2915 breakpoints, by handling the internal breakpoint and still
2916 reporting the event to GDB. If we don't, we're out of luck, GDB
2917 won't see the breakpoint hit. */
6bf5e0ba 2918 report_to_gdb = (!maybe_internal_trap
0bfdf32f 2919 || (current_thread->last_resume_kind == resume_step
c2d6af84 2920 && !in_step_range)
15c66dd6 2921 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 2922 || (!step_over_finished && !in_step_range
493e2a69 2923 && !bp_explains_trap && !trace_event)
9f3a5c85 2924 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5
SS
2925 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2926 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2927
2928 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
2929
2930 /* We found no reason GDB would want us to stop. We either hit one
2931 of our own breakpoints, or finished an internal step GDB
2932 shouldn't know about. */
2933 if (!report_to_gdb)
2934 {
2935 if (debug_threads)
2936 {
2937 if (bp_explains_trap)
87ce2a04 2938 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2939 if (step_over_finished)
87ce2a04 2940 debug_printf ("Step-over finished.\n");
219f2f23 2941 if (trace_event)
87ce2a04 2942 debug_printf ("Tracepoint event.\n");
c2d6af84 2943 if (lwp_in_step_range (event_child))
87ce2a04
DE
2944 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2945 paddress (event_child->stop_pc),
2946 paddress (event_child->step_range_start),
2947 paddress (event_child->step_range_end));
6bf5e0ba
PA
2948 }
2949
2950 /* We're not reporting this breakpoint to GDB, so apply the
2951 decr_pc_after_break adjustment to the inferior's regcache
2952 ourselves. */
2953
2954 if (the_low_target.set_pc != NULL)
2955 {
2956 struct regcache *regcache
0bfdf32f 2957 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
2958 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2959 }
2960
7984d532
PA
2961 /* We may have finished stepping over a breakpoint. If so,
2962 we've stopped and suspended all LWPs momentarily except the
2963 stepping one. This is where we resume them all again. We're
2964 going to keep waiting, so use proceed, which handles stepping
2965 over the next breakpoint. */
6bf5e0ba 2966 if (debug_threads)
87ce2a04 2967 debug_printf ("proceeding all threads.\n");
7984d532
PA
2968
2969 if (step_over_finished)
2970 unsuspend_all_lwps (event_child);
2971
6bf5e0ba 2972 proceed_all_lwps ();
582511be 2973 return ignore_event (ourstatus);
6bf5e0ba
PA
2974 }
2975
2976 if (debug_threads)
2977 {
0bfdf32f 2978 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
2979 {
2980 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 2981 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 2982 else if (!lwp_in_step_range (event_child))
87ce2a04 2983 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 2984 }
15c66dd6 2985 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 2986 debug_printf ("Stopped by watchpoint.\n");
582511be 2987 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 2988 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 2989 if (debug_threads)
87ce2a04 2990 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
2991 }
2992
2993 /* Alright, we're going to report a stop. */
2994
582511be 2995 if (!stabilizing_threads)
6bf5e0ba
PA
2996 {
2997 /* In all-stop, stop all threads. */
582511be
PA
2998 if (!non_stop)
2999 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3000
3001 /* If we're not waiting for a specific LWP, choose an event LWP
3002 from among those that have had events. Giving equal priority
3003 to all LWPs that have had events helps prevent
3004 starvation. */
3005 if (ptid_equal (ptid, minus_one_ptid))
3006 {
3007 event_child->status_pending_p = 1;
3008 event_child->status_pending = w;
3009
3010 select_event_lwp (&event_child);
3011
0bfdf32f
GB
3012 /* current_thread and event_child must stay in sync. */
3013 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3014
6bf5e0ba
PA
3015 event_child->status_pending_p = 0;
3016 w = event_child->status_pending;
3017 }
3018
c03e6ccc 3019 if (step_over_finished)
582511be
PA
3020 {
3021 if (!non_stop)
3022 {
3023 /* If we were doing a step-over, all other threads but
3024 the stepping one had been paused in start_step_over,
3025 with their suspend counts incremented. We don't want
3026 to do a full unstop/unpause, because we're in
3027 all-stop mode (so we want threads stopped), but we
3028 still need to unsuspend the other threads, to
3029 decrement their `suspended' count back. */
3030 unsuspend_all_lwps (event_child);
3031 }
3032 else
3033 {
3034 /* If we just finished a step-over, then all threads had
3035 been momentarily paused. In all-stop, that's fine,
3036 we want threads stopped by now anyway. In non-stop,
3037 we need to re-resume threads that GDB wanted to be
3038 running. */
3039 unstop_all_lwps (1, event_child);
3040 }
3041 }
c03e6ccc 3042
fa593d66 3043 /* Stabilize threads (move out of jump pads). */
582511be
PA
3044 if (!non_stop)
3045 stabilize_threads ();
6bf5e0ba
PA
3046 }
3047 else
3048 {
3049 /* If we just finished a step-over, then all threads had been
3050 momentarily paused. In all-stop, that's fine, we want
3051 threads stopped by now anyway. In non-stop, we need to
3052 re-resume threads that GDB wanted to be running. */
3053 if (step_over_finished)
7984d532 3054 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3055 }
3056
5b1c542e 3057 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3058
582511be 3059 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3060 it was a software breakpoint, and the client doesn't know we can
3061 adjust the breakpoint ourselves. */
3062 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3063 && !swbreak_feature)
582511be
PA
3064 {
3065 int decr_pc = the_low_target.decr_pc_after_break;
3066
3067 if (decr_pc != 0)
3068 {
3069 struct regcache *regcache
3070 = get_thread_regcache (current_thread, 1);
3071 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3072 }
3073 }
3074
0bfdf32f 3075 if (current_thread->last_resume_kind == resume_stop
8336d594 3076 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3077 {
3078 /* A thread that has been requested to stop by GDB with vCont;t,
3079 and it stopped cleanly, so report as SIG0. The use of
3080 SIGSTOP is an implementation detail. */
a493e3e2 3081 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3082 }
0bfdf32f 3083 else if (current_thread->last_resume_kind == resume_stop
8336d594 3084 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3085 {
3086 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3087 but, it stopped for other reasons. */
2ea28649 3088 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3089 }
3090 else
3091 {
2ea28649 3092 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3093 }
3094
d50171e4
PA
3095 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3096
bd99dc85 3097 if (debug_threads)
87ce2a04
DE
3098 {
3099 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3100 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3101 ourstatus->kind, ourstatus->value.sig);
3102 debug_exit ();
3103 }
bd99dc85 3104
0bfdf32f 3105 return ptid_of (current_thread);
bd99dc85
PA
3106}
3107
3108/* Get rid of any pending event in the pipe. */
3109static void
3110async_file_flush (void)
3111{
3112 int ret;
3113 char buf;
3114
3115 do
3116 ret = read (linux_event_pipe[0], &buf, 1);
3117 while (ret >= 0 || (ret == -1 && errno == EINTR));
3118}
3119
3120/* Put something in the pipe, so the event loop wakes up. */
3121static void
3122async_file_mark (void)
3123{
3124 int ret;
3125
3126 async_file_flush ();
3127
3128 do
3129 ret = write (linux_event_pipe[1], "+", 1);
3130 while (ret == 0 || (ret == -1 && errno == EINTR));
3131
3132 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3133 be awakened anyway. */
3134}
3135
95954743
PA
3136static ptid_t
3137linux_wait (ptid_t ptid,
3138 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3139{
95954743 3140 ptid_t event_ptid;
bd99dc85 3141
bd99dc85
PA
3142 /* Flush the async file first. */
3143 if (target_is_async_p ())
3144 async_file_flush ();
3145
582511be
PA
3146 do
3147 {
3148 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3149 }
3150 while ((target_options & TARGET_WNOHANG) == 0
3151 && ptid_equal (event_ptid, null_ptid)
3152 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3153
3154 /* If at least one stop was reported, there may be more. A single
3155 SIGCHLD can signal more than one child stop. */
3156 if (target_is_async_p ()
3157 && (target_options & TARGET_WNOHANG) != 0
95954743 3158 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3159 async_file_mark ();
3160
3161 return event_ptid;
da6d8c04
DJ
3162}
3163
/* Send signal SIGNO to LWP LWPID.  Returns the syscall result (0 on
   success, -1 with errno set on failure).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Prefer tkill, which targets the exact thread, in case we are
     dealing with nptl threads.  If the kernel lacks tkill, fall back
     to plain kill and remember not to retry tkill.  */

#ifdef __NR_tkill
  {
    static int tkill_unavailable;

    if (!tkill_unavailable)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;
	tkill_unavailable = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3191
/* Request that LWP stop.  Exported (non-static) so callers outside
   this file can use it; it defers to send_sigstop, which records that
   a stop is expected before delivering SIGSTOP.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3197
0d62e5e8 3198static void
02fc4de7 3199send_sigstop (struct lwp_info *lwp)
0d62e5e8 3200{
bd99dc85 3201 int pid;
0d62e5e8 3202
d86d4aaf 3203 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3204
0d62e5e8
DJ
3205 /* If we already have a pending stop signal for this process, don't
3206 send another. */
54a0b537 3207 if (lwp->stop_expected)
0d62e5e8 3208 {
ae13219e 3209 if (debug_threads)
87ce2a04 3210 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3211
0d62e5e8
DJ
3212 return;
3213 }
3214
3215 if (debug_threads)
87ce2a04 3216 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3217
d50171e4 3218 lwp->stop_expected = 1;
bd99dc85 3219 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3220}
3221
7984d532
PA
3222static int
3223send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3224{
d86d4aaf
DE
3225 struct thread_info *thread = (struct thread_info *) entry;
3226 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3227
7984d532
PA
3228 /* Ignore EXCEPT. */
3229 if (lwp == except)
3230 return 0;
3231
02fc4de7 3232 if (lwp->stopped)
7984d532 3233 return 0;
02fc4de7
PA
3234
3235 send_sigstop (lwp);
7984d532
PA
3236 return 0;
3237}
3238
3239/* Increment the suspend count of an LWP, and stop it, if not stopped
3240 yet. */
3241static int
3242suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3243 void *except)
3244{
d86d4aaf
DE
3245 struct thread_info *thread = (struct thread_info *) entry;
3246 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3247
3248 /* Ignore EXCEPT. */
3249 if (lwp == except)
3250 return 0;
3251
3252 lwp->suspended++;
3253
3254 return send_sigstop_callback (entry, except);
02fc4de7
PA
3255}
3256
95954743
PA
3257static void
3258mark_lwp_dead (struct lwp_info *lwp, int wstat)
3259{
3260 /* It's dead, really. */
3261 lwp->dead = 1;
3262
3263 /* Store the exit status for later. */
3264 lwp->status_pending_p = 1;
3265 lwp->status_pending = wstat;
3266
95954743
PA
3267 /* Prevent trying to stop it. */
3268 lwp->stopped = 1;
3269
3270 /* No further stops are expected from a dead lwp. */
3271 lwp->stop_expected = 0;
3272}
3273
/* Wait for all children to stop for the SIGSTOPs we just queued.
   Collects every pending event, leaving them on the LWPs as pending
   statuses; on return no unwaited-for children remain.  Restores
   current_thread if it survived, otherwise picks a replacement (or
   NULL in non-stop mode).  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->entry.id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_thread = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_thread (0);
	}
    }
}
3321
fa593d66
PA
3322/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3323 move it out, because we need to report the stop event to GDB. For
3324 example, if the user puts a breakpoint in the jump pad, it's
3325 because she wants to debug it. */
3326
3327static int
3328stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3329{
d86d4aaf
DE
3330 struct thread_info *thread = (struct thread_info *) entry;
3331 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3332
3333 gdb_assert (lwp->suspended == 0);
3334 gdb_assert (lwp->stopped);
3335
3336 /* Allow debugging the jump pad, gdb_collect, etc.. */
3337 return (supports_fast_tracepoints ()
58b4daa5 3338 && agent_loaded_p ()
fa593d66 3339 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3340 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3341 || thread->last_resume_kind == resume_step)
3342 && linux_fast_tracepoint_collecting (lwp, NULL));
3343}
3344
/* for_each_inferior-style callback, run with all LWPs stopped: if the
   LWP behind ENTRY is parked in the fast tracepoint jump pad and its
   stop is not interesting to GDB, defer any pending signal and resume
   it so it can exit the pad; otherwise count it as suspended so the
   caller's later unsuspend balances out.  */

static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  /* Pointer to the pending wait status, or NULL if there is none.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* The status is consumed here; the signal is re-queued on
	     the LWP and will be reported once it is out of the
	     pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
3383
3384static int
3385lwp_running (struct inferior_list_entry *entry, void *data)
3386{
d86d4aaf
DE
3387 struct thread_info *thread = (struct thread_info *) entry;
3388 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3389
3390 if (lwp->dead)
3391 return 0;
3392 if (lwp->stopped)
3393 return 0;
3394 return 1;
3395}
3396
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  Sets the global stopping_threads for the duration
   so event-handling code knows a stop sweep is in progress; returns
   with every child's events collected (see wait_for_sigstop).  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* Must be set before queueing the SIGSTOPs below.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
  else
    find_inferior (&all_threads, send_sigstop_callback, except);
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
3435
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal.  INFO, when non-NULL, is
   the siginfo to deliver alongside SIGNAL.  May throw (via
   perror_with_name) if the ptrace resume fails; see
   linux_resume_one_lwp for the non-throwing wrapper.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lwp,
			    int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int fast_tp_collecting;

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before the LWP can run again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue", signal,
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      /* Sanity checks: a reinsert step should be a step, and
		 should not happen on a suspended LWP.  */
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* collecting_fast_tracepoint == 1: stopped at the exit-jump-pad
     breakpoint; == 2: needs to single-step out of the pad.  */
  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));
      step = 1;
    }

  /* Refresh stop_pc from the registers before resuming.  */
  if (the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = (*the_low_target.get_pc) (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued signal (tail of the list) so
	 signals are delivered in arrival order.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  /* Give the arch-specific code a chance (e.g. to update debug
     registers) before the LWP runs.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
3642
3643/* Called when we try to resume a stopped LWP and that errors out. If
3644 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3645 or about to become), discard the error, clear any pending status
3646 the LWP may have, and return true (we'll collect the exit status
3647 soon enough). Otherwise, return false. */
3648
3649static int
3650check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3651{
3652 struct thread_info *thread = get_lwp_thread (lp);
3653
3654 /* If we get an error after resuming the LWP successfully, we'd
3655 confuse !T state for the LWP being gone. */
3656 gdb_assert (lp->stopped);
3657
3658 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3659 because even if ptrace failed with ESRCH, the tracee may be "not
3660 yet fully dead", but already refusing ptrace requests. In that
3661 case the tracee has 'R (Running)' state for a little bit
3662 (observed in Linux 3.18). See also the note on ESRCH in the
3663 ptrace(2) man page. Instead, check whether the LWP has any state
3664 other than ptrace-stopped. */
3665
3666 /* Don't assume anything if /proc/PID/status can't be read. */
3667 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3668 {
23f238d3
PA
3669 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3670 lp->status_pending_p = 0;
3671 return 1;
3672 }
3673 return 0;
3674}
3675
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  Any other error is
   re-thrown to the caller.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow the error only if the LWP is verifiably gone (or on
	 its way out); its exit status will be reaped shortly.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
3694
/* Bundles an array of resume requests with its element count, so both
   fit through find_inferior's single void * argument (see
   linux_set_resume_request).  */

struct thread_resume_array
{
  /* Array of resume actions, one per request.  */
  struct thread_resume *resume;
  /* Number of elements in RESUME.  */
  size_t n;
};
64386c31 3700
/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = arg;

  /* The first matching request in the array wins.  */
  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  /* A stop request for a thread already stopping (or stopped)
	     at GDB's request is a no-op; keep scanning in case a
	     later element applies.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
3776
20ad9378
DE
3777/* find_inferior callback for linux_resume.
3778 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 3779
bd99dc85
PA
3780static int
3781resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3782{
d86d4aaf
DE
3783 struct thread_info *thread = (struct thread_info *) entry;
3784 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3785
bd99dc85
PA
3786 /* LWPs which will not be resumed are not interesting, because
3787 we might not wait for them next time through linux_wait. */
2bd7c093 3788 if (lwp->resume == NULL)
bd99dc85 3789 return 0;
64386c31 3790
582511be 3791 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
3792 * (int *) flag_p = 1;
3793
3794 return 0;
3795}
3796
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Threads GDB wants stopped should stay where they are.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  /* A suspended LWP is being held by some in-progress operation
     (e.g., another LWP's step-over); leave it alone.  */
  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }
  /* NOTE(review): there is no early return above, so even when
     NEED_STEP_OVER is clear we fall through and re-check whether the
     LWP sits on a breakpoint below.  It is unclear whether this is
     intentional (the flag acting only as a hint) or a missing
     "return 0" -- confirm against upstream history before changing,
     since adding the return could make threads re-hit breakpoints.  */

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* The breakpoint lookups below consult the current thread's
     process, so temporarily switch to this thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
3923
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Returns 1 (so a find_inferior caller would stop iterating).  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Pause every other thread so none can run past the uninserted
     breakpoint while we single-step LWP.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint bookkeeping below operates on the current thread's
     process; temporarily switch to LWP's thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert, then take the breakpoint (and any
     fast tracepoint jump) out of the inferior's text.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests (typically the return
	 address) and continue instead of stepping.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
3992
3993/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3994 start_step_over, if still there, and delete any reinsert
3995 breakpoints we've set, on non hardware single-step targets. */
3996
3997static int
3998finish_step_over (struct lwp_info *lwp)
3999{
4000 if (lwp->bp_reinsert != 0)
4001 {
4002 if (debug_threads)
87ce2a04 4003 debug_printf ("Finished step over.\n");
d50171e4
PA
4004
4005 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4006 may be no breakpoint to reinsert there by now. */
4007 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4008 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4009
4010 lwp->bp_reinsert = 0;
4011
4012 /* Delete any software-single-step reinsert breakpoints. No
4013 longer needed. We don't have to worry about other threads
4014 hitting this trap, and later not being able to explain it,
4015 because we were stepping over a breakpoint, and we hold all
4016 threads but LWP stopped while doing that. */
4017 if (!can_hardware_single_step ())
4018 delete_reinsert_breakpoints ();
4019
4020 step_over_bkpt = null_ptid;
4021 return 1;
4022 }
4023 else
4024 return 0;
4025}
4026
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG points at the leave_all_stopped flag computed by
     linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No resume request was recorded for this thread by
     linux_set_resume_request; leave it as-is.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  /* The request has been consumed; clear it so it isn't acted on
     again.  */
  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4148
/* Resume (or queue signals for) all threads per GDB's vCont-style
   RESUME_INFO array of N requests.  Decides globally whether any
   thread may actually run: if some to-be-resumed thread has a pending
   status (all-stop), or some thread needs a step-over, all threads
   are left stopped and only signals are queued.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each thread's matching resume request (lwp->resume).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
4212
4213/* This function is called once per thread. We check the thread's
4214 last resume request, which will tell us whether to resume, step, or
4215 leave the thread stopped. Any signal the client requested to be
4216 delivered has already been enqueued at this point.
4217
4218 If any thread that GDB wants running is stopped at an internal
4219 breakpoint that needs stepping over, we start a step-over operation
4220 on that particular thread, and leave all others stopped. */
4221
7984d532
PA
4222static int
4223proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 4224{
d86d4aaf
DE
4225 struct thread_info *thread = (struct thread_info *) entry;
4226 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4227 int step;
4228
7984d532
PA
4229 if (lwp == except)
4230 return 0;
d50171e4
PA
4231
4232 if (debug_threads)
d86d4aaf 4233 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4234
4235 if (!lwp->stopped)
4236 {
4237 if (debug_threads)
d86d4aaf 4238 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
7984d532 4239 return 0;
d50171e4
PA
4240 }
4241
02fc4de7
PA
4242 if (thread->last_resume_kind == resume_stop
4243 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4244 {
4245 if (debug_threads)
87ce2a04 4246 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4247 lwpid_of (thread));
7984d532 4248 return 0;
d50171e4
PA
4249 }
4250
4251 if (lwp->status_pending_p)
4252 {
4253 if (debug_threads)
87ce2a04 4254 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4255 lwpid_of (thread));
7984d532 4256 return 0;
d50171e4
PA
4257 }
4258
7984d532
PA
4259 gdb_assert (lwp->suspended >= 0);
4260
d50171e4
PA
4261 if (lwp->suspended)
4262 {
4263 if (debug_threads)
d86d4aaf 4264 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
7984d532 4265 return 0;
d50171e4
PA
4266 }
4267
1a981360
PA
4268 if (thread->last_resume_kind == resume_stop
4269 && lwp->pending_signals_to_report == NULL
4270 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
4271 {
4272 /* We haven't reported this LWP as stopped yet (otherwise, the
4273 last_status.kind check above would catch it, and we wouldn't
4274 reach here. This LWP may have been momentarily paused by a
4275 stop_all_lwps call while handling for example, another LWP's
4276 step-over. In that case, the pending expected SIGSTOP signal
4277 that was queued at vCont;t handling time will have already
4278 been consumed by wait_for_sigstop, and so we need to requeue
4279 another one here. Note that if the LWP already has a SIGSTOP
4280 pending, this is a no-op. */
4281
4282 if (debug_threads)
87ce2a04
DE
4283 debug_printf ("Client wants LWP %ld to stop. "
4284 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 4285 lwpid_of (thread));
02fc4de7
PA
4286
4287 send_sigstop (lwp);
4288 }
4289
8336d594 4290 step = thread->last_resume_kind == resume_step;
d50171e4 4291 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4292 return 0;
4293}
4294
4295static int
4296unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4297{
d86d4aaf
DE
4298 struct thread_info *thread = (struct thread_info *) entry;
4299 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4300
4301 if (lwp == except)
4302 return 0;
4303
4304 lwp->suspended--;
4305 gdb_assert (lwp->suspended >= 0);
4306
4307 return proceed_one_lwp (entry, except);
d50171e4
PA
4308}
4309
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over = NULL;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  if (need_step_over != NULL)
    {
      if (debug_threads)
	debug_printf ("proceed_all_lwps: found "
		      "thread %ld needing a step-over\n",
		      lwpid_of (need_step_over));

      start_step_over (get_thread_lwp (need_step_over));
      return;
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  find_inferior (&all_threads, proceed_one_lwp, NULL);
}
4347
4348/* Stopped LWPs that the client wanted to be running, that don't have
4349 pending statuses, are set to run again, except for EXCEPT, if not
4350 NULL. This undoes a stop_all_lwps call. */
4351
4352static void
7984d532 4353unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4354{
5544ad89
DJ
4355 if (debug_threads)
4356 {
87ce2a04 4357 debug_enter ();
d50171e4 4358 if (except)
87ce2a04 4359 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4360 lwpid_of (get_lwp_thread (except)));
5544ad89 4361 else
87ce2a04 4362 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4363 }
4364
7984d532 4365 if (unsuspend)
d86d4aaf 4366 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4367 else
d86d4aaf 4368 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4369
4370 if (debug_threads)
4371 {
4372 debug_printf ("unstop_all_lwps done\n");
4373 debug_exit ();
4374 }
0d62e5e8
DJ
4375}
4376
58caa3dc
DJ
4377
4378#ifdef HAVE_LINUX_REGSETS
4379
1faeff08
MR
4380#define use_linux_regsets 1
4381
030031ee
PA
4382/* Returns true if REGSET has been disabled. */
4383
4384static int
4385regset_disabled (struct regsets_info *info, struct regset_info *regset)
4386{
4387 return (info->disabled_regsets != NULL
4388 && info->disabled_regsets[regset - info->regsets]);
4389}
4390
4391/* Disable REGSET. */
4392
4393static void
4394disable_regset (struct regsets_info *info, struct regset_info *regset)
4395{
4396 int dr_offset;
4397
4398 dr_offset = regset - info->regsets;
4399 if (info->disabled_regsets == NULL)
4400 info->disabled_regsets = xcalloc (1, info->num_regsets);
4401 info->disabled_regsets[dr_offset] = 1;
4402}
4403
/* Fetch the current thread's registers via every known regset and
   store them into REGCACHE.  Returns 0 if a general-registers regset
   was successfully read, 1 otherwise (so the caller can fall back to
   fetching registers individually).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty regsets and ones previously disabled (EIO).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* A non-zero NT_* note type selects the PTRACE_GETREGSET-style
	 interface, which takes an iovec instead of a raw buffer.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the data and addr arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Copy the kernel's register data into the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4475
/* Write the current thread's registers from REGCACHE back to the
   inferior via every known regset.  Returns 0 if a general-registers
   regset was written (or the process vanished), 1 otherwise so the
   caller can fall back to storing registers individually.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the data and addr arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4563
1faeff08 4564#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4565
1faeff08 4566#define use_linux_regsets 0
3aee8918
PA
4567#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4568#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4569
58caa3dc 4570#endif
1faeff08
MR
4571
4572/* Return 1 if register REGNO is supported by one of the regset ptrace
4573 calls or 0 if it has to be transferred individually. */
4574
4575static int
3aee8918 4576linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4577{
4578 unsigned char mask = 1 << (regno % 8);
4579 size_t index = regno / 8;
4580
4581 return (use_linux_regsets
3aee8918
PA
4582 && (regs_info->regset_bitmap == NULL
4583 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4584}
4585
58caa3dc 4586#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4587
4588int
3aee8918 4589register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4590{
4591 int addr;
4592
3aee8918 4593 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4594 error ("Invalid register number %d.", regnum);
4595
3aee8918 4596 addr = usrregs->regmap[regnum];
1faeff08
MR
4597
4598 return addr;
4599}
4600
/* Fetch one register.  Reads register REGNO of the current thread
   word-by-word with PTRACE_PEEKUSER and supplies it to REGCACHE.
   Silently does nothing for registers past the usrregs table, ones
   the low target says cannot be fetched, or ones with no regmap
   offset.  Errors out if any ptrace read fails.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* Clear errno so we can distinguish a legitimate -1 result
	 from a ptrace failure.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to (e.g.,
     for registers stored in a non-standard layout).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4644
/* Store one register.  Writes register REGNO from REGCACHE to the
   current thread word-by-word with PTRACE_POKEUSER.  Silently does
   nothing for registers past the usrregs table, ones the low target
   says cannot be stored, or ones with no regmap offset.  A vanished
   process (ESRCH) is ignored; other ptrace failures error out unless
   the low target marks the register as not storable.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words,
     and zero the buffer so padding bytes are well-defined.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target massage the bytes into ptrace layout if it
     needs to; otherwise take them straight from the regcache.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4699
4700/* Fetch all registers, or just one, from the child process.
4701 If REGNO is -1, do this for all registers, skipping any that are
4702 assumed to have been retrieved by regsets_fetch_inferior_registers,
4703 unless ALL is non-zero.
4704 Otherwise, REGNO specifies which register (so we can save time). */
4705static void
3aee8918
PA
4706usr_fetch_inferior_registers (const struct regs_info *regs_info,
4707 struct regcache *regcache, int regno, int all)
1faeff08 4708{
3aee8918
PA
4709 struct usrregs_info *usr = regs_info->usrregs;
4710
1faeff08
MR
4711 if (regno == -1)
4712 {
3aee8918
PA
4713 for (regno = 0; regno < usr->num_regs; regno++)
4714 if (all || !linux_register_in_regsets (regs_info, regno))
4715 fetch_register (usr, regcache, regno);
1faeff08
MR
4716 }
4717 else
3aee8918 4718 fetch_register (usr, regcache, regno);
1faeff08
MR
4719}
4720
4721/* Store our register values back into the inferior.
4722 If REGNO is -1, do this for all registers, skipping any that are
4723 assumed to have been saved by regsets_store_inferior_registers,
4724 unless ALL is non-zero.
4725 Otherwise, REGNO specifies which register (so we can save time). */
4726static void
3aee8918
PA
4727usr_store_inferior_registers (const struct regs_info *regs_info,
4728 struct regcache *regcache, int regno, int all)
1faeff08 4729{
3aee8918
PA
4730 struct usrregs_info *usr = regs_info->usrregs;
4731
1faeff08
MR
4732 if (regno == -1)
4733 {
3aee8918
PA
4734 for (regno = 0; regno < usr->num_regs; regno++)
4735 if (all || !linux_register_in_regsets (regs_info, regno))
4736 store_register (usr, regcache, regno);
1faeff08
MR
4737 }
4738 else
3aee8918 4739 store_register (usr, regcache, regno);
1faeff08
MR
4740}
4741
4742#else /* !HAVE_LINUX_USRREGS */
4743
3aee8918
PA
4744#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4745#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4746
58caa3dc 4747#endif
1faeff08
MR
4748
4749
4750void
4751linux_fetch_registers (struct regcache *regcache, int regno)
4752{
4753 int use_regsets;
4754 int all = 0;
3aee8918 4755 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
4756
4757 if (regno == -1)
4758 {
3aee8918
PA
4759 if (the_low_target.fetch_register != NULL
4760 && regs_info->usrregs != NULL)
4761 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
c14dfd32
PA
4762 (*the_low_target.fetch_register) (regcache, regno);
4763
3aee8918
PA
4764 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4765 if (regs_info->usrregs != NULL)
4766 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
4767 }
4768 else
4769 {
c14dfd32
PA
4770 if (the_low_target.fetch_register != NULL
4771 && (*the_low_target.fetch_register) (regcache, regno))
4772 return;
4773
3aee8918 4774 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 4775 if (use_regsets)
3aee8918
PA
4776 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4777 regcache);
4778 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4779 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 4780 }
58caa3dc
DJ
4781}
4782
4783void
442ea881 4784linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 4785{
1faeff08
MR
4786 int use_regsets;
4787 int all = 0;
3aee8918 4788 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
4789
4790 if (regno == -1)
4791 {
3aee8918
PA
4792 all = regsets_store_inferior_registers (regs_info->regsets_info,
4793 regcache);
4794 if (regs_info->usrregs != NULL)
4795 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
4796 }
4797 else
4798 {
3aee8918 4799 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 4800 if (use_regsets)
3aee8918
PA
4801 all = regsets_store_inferior_registers (regs_info->regsets_info,
4802 regcache);
4803 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4804 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 4805 }
58caa3dc
DJ
4806}
4807
da6d8c04 4808
da6d8c04
DJ
4809/* Copy LEN bytes from inferior's memory starting at MEMADDR
4810 to debugger memory starting at MYADDR. */
4811
c3e735a6 4812static int
f450004a 4813linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04 4814{
0bfdf32f 4815 int pid = lwpid_of (current_thread);
4934b29e
MR
4816 register PTRACE_XFER_TYPE *buffer;
4817 register CORE_ADDR addr;
4818 register int count;
4819 char filename[64];
da6d8c04 4820 register int i;
4934b29e 4821 int ret;
fd462a61 4822 int fd;
fd462a61
DJ
4823
4824 /* Try using /proc. Don't bother for one word. */
4825 if (len >= 3 * sizeof (long))
4826 {
4934b29e
MR
4827 int bytes;
4828
fd462a61
DJ
4829 /* We could keep this file open and cache it - possibly one per
4830 thread. That requires some juggling, but is even faster. */
95954743 4831 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
4832 fd = open (filename, O_RDONLY | O_LARGEFILE);
4833 if (fd == -1)
4834 goto no_proc;
4835
4836 /* If pread64 is available, use it. It's faster if the kernel
4837 supports it (only one syscall), and it's 64-bit safe even on
4838 32-bit platforms (for instance, SPARC debugging a SPARC64
4839 application). */
4840#ifdef HAVE_PREAD64
4934b29e 4841 bytes = pread64 (fd, myaddr, len, memaddr);
fd462a61 4842#else
4934b29e
MR
4843 bytes = -1;
4844 if (lseek (fd, memaddr, SEEK_SET) != -1)
4845 bytes = read (fd, myaddr, len);
fd462a61 4846#endif
fd462a61
DJ
4847
4848 close (fd);
4934b29e
MR
4849 if (bytes == len)
4850 return 0;
4851
4852 /* Some data was read, we'll try to get the rest with ptrace. */
4853 if (bytes > 0)
4854 {
4855 memaddr += bytes;
4856 myaddr += bytes;
4857 len -= bytes;
4858 }
fd462a61 4859 }
da6d8c04 4860
fd462a61 4861 no_proc:
4934b29e
MR
4862 /* Round starting address down to longword boundary. */
4863 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4864 /* Round ending address up; get number of longwords that makes. */
4865 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4866 / sizeof (PTRACE_XFER_TYPE));
4867 /* Allocate buffer of that many longwords. */
4868 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4869
da6d8c04 4870 /* Read all the longwords */
4934b29e 4871 errno = 0;
da6d8c04
DJ
4872 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4873 {
14ce3065
DE
4874 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4875 about coercing an 8 byte integer to a 4 byte pointer. */
4876 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
4877 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4878 (PTRACE_TYPE_ARG4) 0);
c3e735a6 4879 if (errno)
4934b29e 4880 break;
da6d8c04 4881 }
4934b29e 4882 ret = errno;
da6d8c04
DJ
4883
4884 /* Copy appropriate bytes out of the buffer. */
8d409d16
MR
4885 if (i > 0)
4886 {
4887 i *= sizeof (PTRACE_XFER_TYPE);
4888 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4889 memcpy (myaddr,
4890 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4891 i < len ? i : len);
4892 }
c3e735a6 4893
4934b29e 4894 return ret;
da6d8c04
DJ
4895}
4896
93ae6fdc
PA
4897/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4898 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 4899 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 4900
ce3a066d 4901static int
f450004a 4902linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4903{
4904 register int i;
4905 /* Round starting address down to longword boundary. */
4906 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4907 /* Round ending address up; get number of longwords that makes. */
4908 register int count
493e2a69
MS
4909 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4910 / sizeof (PTRACE_XFER_TYPE);
4911
da6d8c04 4912 /* Allocate buffer of that many longwords. */
493e2a69
MS
4913 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4914 alloca (count * sizeof (PTRACE_XFER_TYPE));
4915
0bfdf32f 4916 int pid = lwpid_of (current_thread);
da6d8c04 4917
f0ae6fc3
PA
4918 if (len == 0)
4919 {
4920 /* Zero length write always succeeds. */
4921 return 0;
4922 }
4923
0d62e5e8
DJ
4924 if (debug_threads)
4925 {
58d6951d
DJ
4926 /* Dump up to four bytes. */
4927 unsigned int val = * (unsigned int *) myaddr;
4928 if (len == 1)
4929 val = val & 0xff;
4930 else if (len == 2)
4931 val = val & 0xffff;
4932 else if (len == 3)
4933 val = val & 0xffffff;
87ce2a04
DE
4934 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4935 val, (long)memaddr);
0d62e5e8
DJ
4936 }
4937
da6d8c04
DJ
4938 /* Fill start and end extra bytes of buffer with existing memory data. */
4939
93ae6fdc 4940 errno = 0;
14ce3065
DE
4941 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4942 about coercing an 8 byte integer to a 4 byte pointer. */
4943 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
4944 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4945 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4946 if (errno)
4947 return errno;
da6d8c04
DJ
4948
4949 if (count > 1)
4950 {
93ae6fdc 4951 errno = 0;
da6d8c04 4952 buffer[count - 1]
95954743 4953 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4954 /* Coerce to a uintptr_t first to avoid potential gcc warning
4955 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4956 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 4957 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 4958 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
4959 if (errno)
4960 return errno;
da6d8c04
DJ
4961 }
4962
93ae6fdc 4963 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4964
493e2a69
MS
4965 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4966 myaddr, len);
da6d8c04
DJ
4967
4968 /* Write the entire buffer. */
4969
4970 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4971 {
4972 errno = 0;
14ce3065
DE
4973 ptrace (PTRACE_POKETEXT, pid,
4974 /* Coerce to a uintptr_t first to avoid potential gcc warning
4975 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
4976 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4977 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
4978 if (errno)
4979 return errno;
4980 }
4981
4982 return 0;
4983}
2f2893d9
DJ
4984
/* Target hook: hook up libthread_db support for the current process,
   if it is not already initialized.  No-op unless built with
   USE_THREAD_DB.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *process = current_process ();

  /* Already initialized for this process?  */
  if (process->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5000
/* Target hook: interrupt the inferior, as if the user pressed ^C on
   the controlling terminal, by signalling its process group.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* A negative pid to kill(2) addresses the whole process group.  */
  kill (-signal_pid, SIGINT);
}
5010
aa691b87
RM
5011/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5012 to debugger memory starting at MYADDR. */
5013
5014static int
f450004a 5015linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5016{
5017 char filename[PATH_MAX];
5018 int fd, n;
0bfdf32f 5019 int pid = lwpid_of (current_thread);
aa691b87 5020
6cebaf6e 5021 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5022
5023 fd = open (filename, O_RDONLY);
5024 if (fd < 0)
5025 return -1;
5026
5027 if (offset != (CORE_ADDR) 0
5028 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5029 n = -1;
5030 else
5031 n = read (fd, myaddr, len);
5032
5033 close (fd);
5034
5035 return n;
5036}
5037
d993e290
PA
5038/* These breakpoint and watchpoint related wrapper functions simply
5039 pass on the function call if the target has registered a
5040 corresponding function. */
e013ee27
OF
5041
5042static int
802e8e6d
PA
5043linux_supports_z_point_type (char z_type)
5044{
5045 return (the_low_target.supports_z_point_type != NULL
5046 && the_low_target.supports_z_point_type (z_type));
5047}
5048
5049static int
5050linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5051 int size, struct raw_breakpoint *bp)
e013ee27 5052{
d993e290 5053 if (the_low_target.insert_point != NULL)
802e8e6d 5054 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5055 else
5056 /* Unsupported (see target.h). */
5057 return 1;
5058}
5059
5060static int
802e8e6d
PA
5061linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5062 int size, struct raw_breakpoint *bp)
e013ee27 5063{
d993e290 5064 if (the_low_target.remove_point != NULL)
802e8e6d 5065 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5066 else
5067 /* Unsupported (see target.h). */
5068 return 1;
5069}
5070
3e572f71
PA
5071/* Implement the to_stopped_by_sw_breakpoint target_ops
5072 method. */
5073
5074static int
5075linux_stopped_by_sw_breakpoint (void)
5076{
5077 struct lwp_info *lwp = get_thread_lwp (current_thread);
5078
5079 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5080}
5081
5082/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5083 method. */
5084
5085static int
5086linux_supports_stopped_by_sw_breakpoint (void)
5087{
5088 return USE_SIGTRAP_SIGINFO;
5089}
5090
5091/* Implement the to_stopped_by_hw_breakpoint target_ops
5092 method. */
5093
5094static int
5095linux_stopped_by_hw_breakpoint (void)
5096{
5097 struct lwp_info *lwp = get_thread_lwp (current_thread);
5098
5099 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5100}
5101
5102/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5103 method. */
5104
5105static int
5106linux_supports_stopped_by_hw_breakpoint (void)
5107{
5108 return USE_SIGTRAP_SIGINFO;
5109}
5110
e013ee27
OF
5111static int
5112linux_stopped_by_watchpoint (void)
5113{
0bfdf32f 5114 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5115
15c66dd6 5116 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5117}
5118
5119static CORE_ADDR
5120linux_stopped_data_address (void)
5121{
0bfdf32f 5122 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5123
5124 return lwp->stopped_data_address;
e013ee27
OF
5125}
5126
db0dfaa0
LM
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success, 0 if the addresses could
   not be read.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text_addr, text_end_addr, data_addr;
  int pid = lwpid_of (get_thread_lwp (current_thread));

  errno = 0;

  text_addr = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		      (PTRACE_TYPE_ARG4) 0);
  text_end_addr = ptrace (PTRACE_PEEKUSER, pid,
			  (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
			  (PTRACE_TYPE_ARG4) 0);
  data_addr = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		      (PTRACE_TYPE_ARG4) 0);

  if (errno != 0)
    return 0;

  /* Both text and data offsets produced at compile-time (and so used
     by gdb) are relative to the beginning of the program, with the
     data segment immediately following the text segment.  However,
     the actual runtime layout in memory may put the data somewhere
     else, so when we send gdb a data base-address, we use the real
     data base address and subtract the compile-time data base-address
     from it (which is just the length of the text segment).  BSS
     immediately follows data in both cases.  */
  *text_p = text_addr;
  *data_p = data_addr - (text_end_addr - text_addr);

  return 1;
}
#endif
5173
07e059b5
VP
5174static int
5175linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5176 unsigned char *readbuf, unsigned const char *writebuf,
5177 CORE_ADDR offset, int len)
07e059b5 5178{
d26e3629 5179 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5180}
5181
d0722149
DE
5182/* Convert a native/host siginfo object, into/from the siginfo in the
5183 layout of the inferiors' architecture. */
5184
5185static void
a5362b9a 5186siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5187{
5188 int done = 0;
5189
5190 if (the_low_target.siginfo_fixup != NULL)
5191 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5192
5193 /* If there was no callback, or the callback didn't do anything,
5194 then just do a straight memcpy. */
5195 if (!done)
5196 {
5197 if (direction == 1)
a5362b9a 5198 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5199 else
a5362b9a 5200 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5201 }
5202}
5203
4aa995e1
PA
5204static int
5205linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5206 unsigned const char *writebuf, CORE_ADDR offset, int len)
5207{
d0722149 5208 int pid;
a5362b9a
TS
5209 siginfo_t siginfo;
5210 char inf_siginfo[sizeof (siginfo_t)];
4aa995e1 5211
0bfdf32f 5212 if (current_thread == NULL)
4aa995e1
PA
5213 return -1;
5214
0bfdf32f 5215 pid = lwpid_of (current_thread);
4aa995e1
PA
5216
5217 if (debug_threads)
87ce2a04
DE
5218 debug_printf ("%s siginfo for lwp %d.\n",
5219 readbuf != NULL ? "Reading" : "Writing",
5220 pid);
4aa995e1 5221
0adea5f7 5222 if (offset >= sizeof (siginfo))
4aa995e1
PA
5223 return -1;
5224
b8e1b30e 5225 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5226 return -1;
5227
d0722149
DE
5228 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5229 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5230 inferior with a 64-bit GDBSERVER should look the same as debugging it
5231 with a 32-bit GDBSERVER, we need to convert it. */
5232 siginfo_fixup (&siginfo, inf_siginfo, 0);
5233
4aa995e1
PA
5234 if (offset + len > sizeof (siginfo))
5235 len = sizeof (siginfo) - offset;
5236
5237 if (readbuf != NULL)
d0722149 5238 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
5239 else
5240 {
d0722149
DE
5241 memcpy (inf_siginfo + offset, writebuf, len);
5242
5243 /* Convert back to ptrace layout before flushing it out. */
5244 siginfo_fixup (&siginfo, inf_siginfo, 1);
5245
b8e1b30e 5246 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
5247 return -1;
5248 }
5249
5250 return len;
5251}
5252
bd99dc85
PA
5253/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5254 so we notice when children change state; as the handler for the
5255 sigsuspend in my_waitpid. */
5256
5257static void
5258sigchld_handler (int signo)
5259{
5260 int old_errno = errno;
5261
5262 if (debug_threads)
e581f2b4
PA
5263 {
5264 do
5265 {
5266 /* fprintf is not async-signal-safe, so call write
5267 directly. */
5268 if (write (2, "sigchld_handler\n",
5269 sizeof ("sigchld_handler\n") - 1) < 0)
5270 break; /* just ignore */
5271 } while (0);
5272 }
bd99dc85
PA
5273
5274 if (target_is_async_p ())
5275 async_file_mark (); /* trigger a linux_wait */
5276
5277 errno = old_errno;
5278}
5279
/* Target hook: non-stop mode is always available on GNU/Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5285
5286static int
5287linux_async (int enable)
5288{
7089dca4 5289 int previous = target_is_async_p ();
bd99dc85 5290
8336d594 5291 if (debug_threads)
87ce2a04
DE
5292 debug_printf ("linux_async (%d), previous=%d\n",
5293 enable, previous);
8336d594 5294
bd99dc85
PA
5295 if (previous != enable)
5296 {
5297 sigset_t mask;
5298 sigemptyset (&mask);
5299 sigaddset (&mask, SIGCHLD);
5300
5301 sigprocmask (SIG_BLOCK, &mask, NULL);
5302
5303 if (enable)
5304 {
5305 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
5306 {
5307 linux_event_pipe[0] = -1;
5308 linux_event_pipe[1] = -1;
5309 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5310
5311 warning ("creating event pipe failed.");
5312 return previous;
5313 }
bd99dc85
PA
5314
5315 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5316 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5317
5318 /* Register the event loop handler. */
5319 add_file_handler (linux_event_pipe[0],
5320 handle_target_event, NULL);
5321
5322 /* Always trigger a linux_wait. */
5323 async_file_mark ();
5324 }
5325 else
5326 {
5327 delete_file_handler (linux_event_pipe[0]);
5328
5329 close (linux_event_pipe[0]);
5330 close (linux_event_pipe[1]);
5331 linux_event_pipe[0] = -1;
5332 linux_event_pipe[1] = -1;
5333 }
5334
5335 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5336 }
5337
5338 return previous;
5339}
5340
/* Target hook: enter or leave non-stop mode by (un)registering from
   the event loop.  Returns 0 on success, -1 if the async state did
   not end up as requested.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
5352
cf8fd78b
PA
/* Target hook: multi-process debugging is always available on
   GNU/Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
5358
03583c20
UW
/* Target hook: address-space-randomization can be disabled only when
   personality(2) support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 5368
d1feda86
YQ
/* Target hook: the in-process agent is always supported on
   GNU/Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5374
c2d6af84
PA
5375static int
5376linux_supports_range_stepping (void)
5377{
5378 if (*the_low_target.supports_range_stepping == NULL)
5379 return 0;
5380
5381 return (*the_low_target.supports_range_stepping) ();
5382}
5383
efcbbd14
UW
5384/* Enumerate spufs IDs for process PID. */
5385static int
5386spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5387{
5388 int pos = 0;
5389 int written = 0;
5390 char path[128];
5391 DIR *dir;
5392 struct dirent *entry;
5393
5394 sprintf (path, "/proc/%ld/fd", pid);
5395 dir = opendir (path);
5396 if (!dir)
5397 return -1;
5398
5399 rewinddir (dir);
5400 while ((entry = readdir (dir)) != NULL)
5401 {
5402 struct stat st;
5403 struct statfs stfs;
5404 int fd;
5405
5406 fd = atoi (entry->d_name);
5407 if (!fd)
5408 continue;
5409
5410 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5411 if (stat (path, &st) != 0)
5412 continue;
5413 if (!S_ISDIR (st.st_mode))
5414 continue;
5415
5416 if (statfs (path, &stfs) != 0)
5417 continue;
5418 if (stfs.f_type != SPUFS_MAGIC)
5419 continue;
5420
5421 if (pos >= offset && pos + 4 <= offset + len)
5422 {
5423 *(unsigned int *)(buf + pos - offset) = fd;
5424 written += 4;
5425 }
5426 pos += 4;
5427 }
5428
5429 closedir (dir);
5430 return written;
5431}
5432
5433/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5434 object type, using the /proc file system. */
5435static int
5436linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5437 unsigned const char *writebuf,
5438 CORE_ADDR offset, int len)
5439{
0bfdf32f 5440 long pid = lwpid_of (current_thread);
efcbbd14
UW
5441 char buf[128];
5442 int fd = 0;
5443 int ret = 0;
5444
5445 if (!writebuf && !readbuf)
5446 return -1;
5447
5448 if (!*annex)
5449 {
5450 if (!readbuf)
5451 return -1;
5452 else
5453 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5454 }
5455
5456 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5457 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5458 if (fd <= 0)
5459 return -1;
5460
5461 if (offset != 0
5462 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5463 {
5464 close (fd);
5465 return 0;
5466 }
5467
5468 if (writebuf)
5469 ret = write (fd, writebuf, (size_t) len);
5470 else
5471 ret = read (fd, readbuf, (size_t) len);
5472
5473 close (fd);
5474 return ret;
5475}
5476
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Read LEN bytes at OFFSET of the load map of either the executable
   (ANNEX "exec") or the dynamic linker (ANNEX "interp") into MYADDR,
   as obtained from the kernel via ptrace.  Returns the number of
   bytes copied, or -1 on error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int request = -1;
  struct target_loadmap *map = NULL;
  unsigned int total_len, ncopy;

  if (strcmp (annex, "exec") == 0)
    request = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    request = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, request, &map) != 0)
    return -1;

  if (map == NULL)
    return -1;

  total_len = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * map->nsegs;

  if (offset < 0 || offset > total_len)
    return -1;

  ncopy = total_len - offset < len ? total_len - offset : len;
  memcpy (myaddr, (char *) map + offset, ncopy);
  return ncopy;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5554
1570b33e
L
5555static void
5556linux_process_qsupported (const char *query)
5557{
5558 if (the_low_target.process_qsupported != NULL)
5559 the_low_target.process_qsupported (query);
5560}
5561
219f2f23
PA
5562static int
5563linux_supports_tracepoints (void)
5564{
5565 if (*the_low_target.supports_tracepoints == NULL)
5566 return 0;
5567
5568 return (*the_low_target.supports_tracepoints) ();
5569}
5570
5571static CORE_ADDR
5572linux_read_pc (struct regcache *regcache)
5573{
5574 if (the_low_target.get_pc == NULL)
5575 return 0;
5576
5577 return (*the_low_target.get_pc) (regcache);
5578}
5579
5580static void
5581linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5582{
5583 gdb_assert (the_low_target.set_pc != NULL);
5584
5585 (*the_low_target.set_pc) (regcache, pc);
5586}
5587
8336d594
PA
5588static int
5589linux_thread_stopped (struct thread_info *thread)
5590{
5591 return get_thread_lwp (thread)->stopped;
5592}
5593
5594/* This exposes stop-all-threads functionality to other modules. */
5595
5596static void
7984d532 5597linux_pause_all (int freeze)
8336d594 5598{
7984d532
PA
5599 stop_all_lwps (freeze, NULL);
5600}
5601
5602/* This exposes unstop-all-threads functionality to other gdbserver
5603 modules. */
5604
5605static void
5606linux_unpause_all (int unfreeze)
5607{
5608 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5609}
5610
90d74c30
PA
5611static int
5612linux_prepare_to_access_memory (void)
5613{
5614 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5615 running LWP. */
5616 if (non_stop)
5617 linux_pause_all (1);
5618 return 0;
5619}
5620
5621static void
0146f85b 5622linux_done_accessing_memory (void)
90d74c30
PA
5623{
5624 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5625 running LWP. */
5626 if (non_stop)
5627 linux_unpause_all (1);
5628}
5629
fa593d66
PA
5630static int
5631linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5632 CORE_ADDR collector,
5633 CORE_ADDR lockaddr,
5634 ULONGEST orig_size,
5635 CORE_ADDR *jump_entry,
405f8e94
SS
5636 CORE_ADDR *trampoline,
5637 ULONGEST *trampoline_size,
fa593d66
PA
5638 unsigned char *jjump_pad_insn,
5639 ULONGEST *jjump_pad_insn_size,
5640 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
5641 CORE_ADDR *adjusted_insn_addr_end,
5642 char *err)
fa593d66
PA
5643{
5644 return (*the_low_target.install_fast_tracepoint_jump_pad)
5645 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
5646 jump_entry, trampoline, trampoline_size,
5647 jjump_pad_insn, jjump_pad_insn_size,
5648 adjusted_insn_addr, adjusted_insn_addr_end,
5649 err);
fa593d66
PA
5650}
5651
6a271cae
PA
5652static struct emit_ops *
5653linux_emit_ops (void)
5654{
5655 if (the_low_target.emit_ops != NULL)
5656 return (*the_low_target.emit_ops) ();
5657 else
5658 return NULL;
5659}
5660
405f8e94
SS
5661static int
5662linux_get_min_fast_tracepoint_insn_len (void)
5663{
5664 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5665}
5666
2268b414
JK
5667/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5668
5669static int
5670get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5671 CORE_ADDR *phdr_memaddr, int *num_phdr)
5672{
5673 char filename[PATH_MAX];
5674 int fd;
5675 const int auxv_size = is_elf64
5676 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5677 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5678
5679 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5680
5681 fd = open (filename, O_RDONLY);
5682 if (fd < 0)
5683 return 1;
5684
5685 *phdr_memaddr = 0;
5686 *num_phdr = 0;
5687 while (read (fd, buf, auxv_size) == auxv_size
5688 && (*phdr_memaddr == 0 || *num_phdr == 0))
5689 {
5690 if (is_elf64)
5691 {
5692 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5693
5694 switch (aux->a_type)
5695 {
5696 case AT_PHDR:
5697 *phdr_memaddr = aux->a_un.a_val;
5698 break;
5699 case AT_PHNUM:
5700 *num_phdr = aux->a_un.a_val;
5701 break;
5702 }
5703 }
5704 else
5705 {
5706 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5707
5708 switch (aux->a_type)
5709 {
5710 case AT_PHDR:
5711 *phdr_memaddr = aux->a_un.a_val;
5712 break;
5713 case AT_PHNUM:
5714 *num_phdr = aux->a_un.a_val;
5715 break;
5716 }
5717 }
5718 }
5719
5720 close (fd);
5721
5722 if (*phdr_memaddr == 0 || *num_phdr == 0)
5723 {
5724 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5725 "phdr_memaddr = %ld, phdr_num = %d",
5726 (long) *phdr_memaddr, *num_phdr);
5727 return 2;
5728 }
5729
5730 return 0;
5731}
5732
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Works for both "regular" and PIE executables: the load-time relocation
   is recovered from the PT_PHDR entry and applied to the PT_DYNAMIC
   virtual address.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  /* Bound the alloca below; a sane executable has far fewer program
     headers than this.  */
  gdb_assert (num_phdr < 100);	/* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  /* Read the whole program-header table from the inferior in one go.  */
  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  -1 (wrapped to all-ones in the unsigned
     CORE_ADDR) is used as the "not found yet" sentinel.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
5806
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.

   The dynamic section is walked entry by entry until DT_NULL or a
   failed read terminates the scan.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  /* On MIPS, DT_MIPS_RLD_MAP holds a pointer to the r_debug
	     address; read it through this union to avoid aliasing
	     issues.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  /* Remember the first DT_DEBUG value seen; keep scanning in
	     case a DT_MIPS_RLD_MAP entry follows.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
5886
5887/* Read one pointer from MEMADDR in the inferior. */
5888
5889static int
5890read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5891{
485f1ee4
PA
5892 int ret;
5893
5894 /* Go through a union so this works on either big or little endian
5895 hosts, when the inferior's pointer size is smaller than the size
5896 of CORE_ADDR. It is assumed the inferior's endianness is the
5897 same of the superior's. */
5898 union
5899 {
5900 CORE_ADDR core_addr;
5901 unsigned int ui;
5902 unsigned char uc;
5903 } addr;
5904
5905 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5906 if (ret == 0)
5907 {
5908 if (ptr_size == sizeof (CORE_ADDR))
5909 *ptr = addr.core_addr;
5910 else if (ptr_size == sizeof (unsigned int))
5911 *ptr = addr.ui;
5912 else
5913 gdb_assert_not_reached ("unhandled pointer size");
5914 }
5915 return ret;
2268b414
JK
5916}
5917
/* Field offsets within the SVR4 dynamic linker's r_debug and link_map
   structures, for one particular inferior word size (32- or 64-bit).  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5941
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "start=ADDR" and/or "prev=ADDR" key/value pairs
   selecting where to resume walking the link map.  The reply is an XML
   <library-list-svr4> document copied into READBUF at OFFSET, at most
   LEN bytes.  Returns the number of bytes copied, -1 on error, -2 if
   a write was requested (unsupported).  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset. */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the 32- or 64-bit layout from the executable's ELF class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse semicolon-separated KEY=HEXADDR pairs from ANNEX.  Unknown
     keys are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;	/* NOTE(review): shadows the LEN parameter -- harmless
		   here but worth renaming.  */

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No start address given: locate the head of the link map via
     r_debug (cached in PRIV once found).  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link_map chain; any failed read of a link_map field
     terminates the walk.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Consistency check: the entry's back-pointer must match where
	 we came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;
		  /* NOTE(review): this DOCUMENT_LEN shadows the outer
		     one; it is only the saved offset of P.  */

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Serve the requested window [OFFSET, OFFSET+LEN) of the document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
6154
9accd112
MM
6155#ifdef HAVE_LINUX_BTRACE
6156
969c39fb 6157/* See to_enable_btrace target method. */
9accd112
MM
6158
6159static struct btrace_target_info *
f4abbc16 6160linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
9accd112
MM
6161{
6162 struct btrace_target_info *tinfo;
6163
f4abbc16 6164 tinfo = linux_enable_btrace (ptid, conf);
3aee8918 6165
d68e53f4 6166 if (tinfo != NULL && tinfo->ptr_bits == 0)
3aee8918
PA
6167 {
6168 struct thread_info *thread = find_thread_ptid (ptid);
6169 struct regcache *regcache = get_thread_regcache (thread, 0);
6170
6171 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6172 }
9accd112
MM
6173
6174 return tinfo;
6175}
6176
969c39fb 6177/* See to_disable_btrace target method. */
9accd112 6178
969c39fb
MM
6179static int
6180linux_low_disable_btrace (struct btrace_target_info *tinfo)
6181{
6182 enum btrace_error err;
6183
6184 err = linux_disable_btrace (tinfo);
6185 return (err == BTRACE_ERR_NONE ? 0 : -1);
6186}
6187
/* See to_read_btrace target method.

   Read branch trace data of kind TYPE for TINFO and render it as XML
   into BUFFER.  On error an "E.<message>" string is written into
   BUFFER instead and -1 is returned.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      /* Release any partially collected data before bailing out.  */
      btrace_data_fini (&btrace);
      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      /* NOTE(review): this writes an error string into BUFFER but
	 falls through to the success return (0) below -- verify this
	 is the intended protocol behavior.  */
      buffer_grow_str0 (buffer, "E.No Trace.");
      break;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      /* Emit one <block> element per recorded branch-trace block.  */
      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unknown Trace Format.");

      btrace_data_fini (&btrace);
      return -1;
    }

  btrace_data_fini (&btrace);
  return 0;
}
f4abbc16
MM
6242
/* See to_btrace_conf target method.

   Render the branch-trace configuration of TINFO as a
   <btrace-conf> XML document into BUFFER.  Always returns 0; an
   unavailable configuration simply yields an empty element.  */

static int
linux_low_btrace_conf (const struct btrace_target_info *tinfo,
		       struct buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  /* Nothing to describe.  */
	  break;

	case BTRACE_FORMAT_BTS:
	  /* Emitted as separate prints so future fields can be added
	     between the opening tag and the close.  */
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
6273#endif /* HAVE_LINUX_BTRACE */
6274
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
6282
ce3a066d
DJ
/* The Linux implementation of the target vector.  The initializers
   are positional: their order must match the field order of struct
   target_ops (declared in target.h, not visible here) -- keep in sync
   when adding entries.  NULL entries mark operations this target does
   not implement.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on uClinux/no-MMU targets that
     expose the text/data load addresses via ptrace.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	/* Unimplemented slot -- presumably get_tib_address (Windows
	   only); confirm against target.h.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};
6368
0d62e5e8
DJ
/* Ignore the LinuxThreads/NPTL cancellation signal so gdbserver is not
   killed by it.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6378
3aee8918
PA
6379#ifdef HAVE_LINUX_REGSETS
6380void
6381initialize_regsets_info (struct regsets_info *info)
6382{
6383 for (info->num_regsets = 0;
6384 info->regsets[info->num_regsets].size >= 0;
6385 info->num_regsets++)
6386 ;
3aee8918
PA
6387}
6388#endif
6389
da6d8c04
DJ
/* Entry point called once at gdbserver startup: install the Linux
   target vector, breakpoint data, signal handling, and the
   architecture-specific pieces.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Publish this architecture's breakpoint instruction so the common
     code can plant software breakpoints.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  /* Install the SIGCHLD handler; SA_RESTART keeps interrupted
     syscalls restarting automatically.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}