/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
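
/* Illustrative example of the fallback macro above: W_STOPCODE
   composes the same value waitpid reports for a signal stop.  On most
   Linux targets SIGSTOP is 19, so W_STOPCODE (SIGSTOP) is
   (19 << 8) | 0x7f == 0x137f, and the standard accessors round-trip:

     int wstat = W_STOPCODE (SIGSTOP);
     assert (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP);

   dequeue_one_deferred_signal below relies on exactly this round trip
   to rebuild a wait status for a deferred signal.  */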

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;
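
/* Illustration of the keying (assumed example numbers): for a process
   whose tgid is 1234 with a second thread whose LWP ID is 1235, the
   process entry is keyed by 1234, while the two lwp/thread entries are
   built as ptid_build (1234, 1234, 0) and ptid_build (1234, 1235, 0),
   matching how add_lwp/add_thread are called throughout this file.  */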

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
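
/* Usage sketch (illustrative): when linux_wait_for_lwp reaps a stop
   for a pid it does not know about yet, it stashes it with

     add_to_pid_list (&stopped_pids, ret, *wstatp);

   and handle_extended_wait later claims it for the new clone child:

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... wait for the child's initial stop explicitly ...  */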

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file);
}
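
/* Usage sketch (illustrative; not a call site in this file): an
   arch-specific backend deciding between 32- and 64-bit register
   layouts for an attached inferior might do

     if (linux_pid_exe_is_elf_64_file (pid))
       ... select the 64-bit target description ...

   Note that this inspects the ELF header via /proc/PID/exe, so it
   answers for the main executable, not the current mode of any one
   thread.  */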

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
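
/* Usage sketch (illustrative): reap a stop event from any child,
   including clone threads, without blocking:

     int status;
     int pid = my_waitpid (-1, &status, __WALL | WNOHANG);

   Here a return of 0 means no child had an event pending, while -1
   with errno set indicates a real waitpid failure.  */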

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */
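
/* Illustrative note on the encoding: the kernel reports an extended
   ptrace event as a SIGTRAP stop whose waitpid status carries the
   PTRACE_EVENT_* code in bits 16 and up; so WSTOPSIG (wstat) is
   SIGTRAP, and the (wstat >> 16) below recovers PTRACE_EVENT_CLONE
   for a clone event.  */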

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
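
/* Worked example (illustrative numbers): on i386, decr_pc_after_break
   is 1 because the int3 breakpoint instruction (0xcc) is one byte
   long.  If a breakpoint planted at 0x8048400 is hit under
   PTRACE_CONT, the kernel reports $eip == 0x8048401, and get_stop_pc
   below hands back 0x8048400, the address GDB knows the breakpoint
   by.  */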

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      struct buffer buffer;

      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
             lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
                 pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum target_signal signo = TARGET_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == TARGET_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s hasn't stopped: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had stopped with extended "
                 "status: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = target_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, but it is in nopass state\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == TARGET_SIGNAL_TRAP || signo == TARGET_SIGNAL_INT))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, "
                 "but we don't know if we should pass it.  Default to not.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s has pending signal %s: delivering it.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Sending SIGCONT to %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1133static int
1134linux_detach (int pid)
1135{
1136 struct process_info *process;
1137
1138 process = find_process_pid (pid);
1139 if (process == NULL)
1140 return -1;
1141
f9e39928
PA
1142 /* Stop all threads before detaching. First, ptrace requires that
1143 the thread is stopped to sucessfully detach. Second, thread_db
1144 may need to uninstall thread event breakpoints from memory, which
1145 only works with a stopped process anyway. */
7984d532 1146 stop_all_lwps (0, NULL);
f9e39928 1147
ca5c370d 1148#ifdef USE_THREAD_DB
8336d594 1149 thread_db_detach (process);
ca5c370d
PA
1150#endif
1151
fa593d66
PA
1152 /* Stabilize threads (move out of jump pads). */
1153 stabilize_threads ();
1154
95954743 1155 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1156
1157 the_target->mourn (process);
f9e39928
PA
1158
1159 /* Since we presently can only stop all lwps of all processes, we
1160 need to unstop lwps of other processes. */
7984d532 1161 unstop_all_lwps (0, NULL);
f9e39928
PA
1162 return 0;
1163}
1164
1165/* Remove all LWPs that belong to process PROC from the lwp list. */
1166
1167static int
1168delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1169{
1170 struct lwp_info *lwp = (struct lwp_info *) entry;
1171 struct process_info *process = proc;
1172
1173 if (pid_of (lwp) == pid_of (process))
1174 delete_lwp (lwp);
1175
dd6953e1 1176 return 0;
6ad8ae5c
DJ
1177}
1178

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
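
/* Illustrative usage (assumed example numbers): both of these find
   the same entry, because same_lwp falls back to the pid field when
   the given ptid carries no lwp component:

     find_lwp_pid (pid_to_ptid (1234));
     find_lwp_pid (ptid_build (1234, 1234, 0));  */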

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      if (ptid_is_pid (ptid)
          && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
        {
          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
          continue;
        }

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }
DJ
1888
1889 /* If we were waiting for this particular child to do something...
1890 well, it did something. */
bd99dc85 1891 if (requested_child != NULL)
d50171e4
PA
1892 {
1893 int lwpid = lwpid_of (event_child);
1894
1895 /* Cancel the step-over operation --- the thread that
1896 started it is gone. */
1897 if (finish_step_over (event_child))
7984d532 1898 unstop_all_lwps (1, event_child);
d50171e4
PA
1899 delete_lwp (event_child);
1900 return lwpid;
1901 }
1902
1903 delete_lwp (event_child);
89be2091
DJ
1904
1905 /* Wait for a more interesting event. */
1906 continue;
1907 }
1908
a6dbe5df
PA
1909 if (event_child->must_set_ptrace_flags)
1910 {
1e7fc18c 1911 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1912 event_child->must_set_ptrace_flags = 0;
1913 }
1914
bd99dc85
PA
1915 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1916 && *wstat >> 16 != 0)
24a09b5f 1917 {
bd99dc85 1918 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1919 continue;
1920 }
1921
d50171e4
PA
1922 if (WIFSTOPPED (*wstat)
1923 && WSTOPSIG (*wstat) == SIGSTOP
1924 && event_child->stop_expected)
1925 {
1926 int should_stop;
1927
1928 if (debug_threads)
1929 fprintf (stderr, "Expected stop.\n");
1930 event_child->stop_expected = 0;
1931
8336d594 1932 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1933 || stopping_threads);
1934
1935 if (!should_stop)
1936 {
1937 linux_resume_one_lwp (event_child,
1938 event_child->stepping, 0, NULL);
1939 continue;
1940 }
1941 }
1942
bd99dc85 1943 return lwpid_of (event_child);
611cb4a5 1944 }
0d62e5e8 1945
611cb4a5
DJ
1946 /* NOTREACHED */
1947 return 0;
1948}
1949
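/* Editor's aside: a compact reminder of how the raw wait statuses
   examined above decompose.  These are the standard <sys/wait.h>
   macros; W_STOPCODE is the glibc spelling, with the usual fallback
   of ((sig) << 8 | 0x7f) on systems that lack it.  */

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
wstat_demo (void)
{
  int w = W_STOPCODE (SIGSTOP);	/* Synthesize "stopped by SIGSTOP".  */

  assert (WIFSTOPPED (w) && WSTOPSIG (w) == SIGSTOP);
  assert (!WIFEXITED (w) && !WIFSIGNALED (w));
}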
6bf5e0ba
PA
 1950/* Count the LWPs that have had events.  */
1951
1952static int
1953count_events_callback (struct inferior_list_entry *entry, void *data)
1954{
1955 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1956 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1957 int *count = data;
1958
1959 gdb_assert (count != NULL);
1960
1961 /* Count only resumed LWPs that have a SIGTRAP event pending that
1962 should be reported to GDB. */
8336d594
PA
1963 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1964 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1965 && lp->status_pending_p
1966 && WIFSTOPPED (lp->status_pending)
1967 && WSTOPSIG (lp->status_pending) == SIGTRAP
1968 && !breakpoint_inserted_here (lp->stop_pc))
1969 (*count)++;
1970
1971 return 0;
1972}
1973
1974/* Select the LWP (if any) that is currently being single-stepped. */
1975
1976static int
1977select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1978{
1979 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1980 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1981
8336d594
PA
1982 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1983 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1984 && lp->status_pending_p)
1985 return 1;
1986 else
1987 return 0;
1988}
1989
1990/* Select the Nth LWP that has had a SIGTRAP event that should be
1991 reported to GDB. */
1992
1993static int
1994select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1995{
1996 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1997 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1998 int *selector = data;
1999
2000 gdb_assert (selector != NULL);
2001
2002 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
2003 if (thread->last_resume_kind != resume_stop
2004 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
2005 && lp->status_pending_p
2006 && WIFSTOPPED (lp->status_pending)
2007 && WSTOPSIG (lp->status_pending) == SIGTRAP
2008 && !breakpoint_inserted_here (lp->stop_pc))
2009 if ((*selector)-- == 0)
2010 return 1;
2011
2012 return 0;
2013}
2014
2015static int
2016cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2017{
2018 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 2019 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
2020 struct lwp_info *event_lp = data;
2021
2022 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2023 if (lp == event_lp)
2024 return 0;
2025
2026 /* If a LWP other than the LWP that we're reporting an event for has
2027 hit a GDB breakpoint (as opposed to some random trap signal),
2028 then just arrange for it to hit it again later. We don't keep
2029 the SIGTRAP status and don't forward the SIGTRAP signal to the
 2030	     LWP.  We will handle the current event; eventually we will resume
2031 all LWPs, and this one will get its breakpoint trap again.
2032
2033 If we do not do this, then we run the risk that the user will
2034 delete or disable the breakpoint, but the LWP will have already
2035 tripped on it. */
2036
8336d594
PA
2037 if (thread->last_resume_kind != resume_stop
2038 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
2039 && lp->status_pending_p
2040 && WIFSTOPPED (lp->status_pending)
2041 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
2042 && !lp->stepping
2043 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
2044 && cancel_breakpoint (lp))
2045 /* Throw away the SIGTRAP. */
2046 lp->status_pending_p = 0;
2047
2048 return 0;
2049}
2050
7984d532
PA
2051static void
2052linux_cancel_breakpoints (void)
2053{
2054 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2055}
2056
6bf5e0ba
PA
2057/* Select one LWP out of those that have events pending. */
2058
2059static void
2060select_event_lwp (struct lwp_info **orig_lp)
2061{
2062 int num_events = 0;
2063 int random_selector;
2064 struct lwp_info *event_lp;
2065
2066 /* Give preference to any LWP that is being single-stepped. */
2067 event_lp
2068 = (struct lwp_info *) find_inferior (&all_lwps,
2069 select_singlestep_lwp_callback, NULL);
2070 if (event_lp != NULL)
2071 {
2072 if (debug_threads)
2073 fprintf (stderr,
2074 "SEL: Select single-step %s\n",
2075 target_pid_to_str (ptid_of (event_lp)));
2076 }
2077 else
2078 {
2079 /* No single-stepping LWP. Select one at random, out of those
2080 which have had SIGTRAP events. */
2081
2082 /* First see how many SIGTRAP events we have. */
2083 find_inferior (&all_lwps, count_events_callback, &num_events);
2084
2085 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2086 random_selector = (int)
2087 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2088
2089 if (debug_threads && num_events > 1)
2090 fprintf (stderr,
2091 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2092 num_events, random_selector);
2093
2094 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2095 select_event_lwp_callback,
2096 &random_selector);
2097 }
2098
2099 if (event_lp != NULL)
2100 {
2101 /* Switch the event LWP. */
2102 *orig_lp = event_lp;
2103 }
2104}
2105
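/* Editor's aside: the selector arithmetic above, stand-alone.  Scaling
   rand () by N / (RAND_MAX + 1.0) yields an index in [0, N) and
   spreads the bias far more evenly than the naive rand () % N:  */

#include <stdlib.h>

static int
pick_uniform (int n)
{
  /* rand () / (RAND_MAX + 1.0) lies in [0, 1), so the product lies in
     [0, n) and truncates to 0 .. n-1.  */
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}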
7984d532
PA
2106/* Decrement the suspend count of an LWP. */
2107
2108static int
2109unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2110{
2111 struct lwp_info *lwp = (struct lwp_info *) entry;
2112
2113 /* Ignore EXCEPT. */
2114 if (lwp == except)
2115 return 0;
2116
2117 lwp->suspended--;
2118
2119 gdb_assert (lwp->suspended >= 0);
2120 return 0;
2121}
2122
 2123/* Decrement the suspend count of all LWPs, except EXCEPT, if
 2124   non-NULL.  */
2125
2126static void
2127unsuspend_all_lwps (struct lwp_info *except)
2128{
2129 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2130}
2131
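/* Editor's aside: the suspend count is a balanced counter -- every
   increment made while stopping (see suspend_and_send_sigstop_callback
   below) must eventually be matched by unsuspend_one_lwp.  A
   hypothetical audit in the same callback style (illustrative only):  */

static int
find_suspended_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* A non-zero return makes find_inferior stop and return ENTRY.  */
  return lwp->suspended != 0;
}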
fa593d66
PA
2132static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2133static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2134 void *data);
2135static int lwp_running (struct inferior_list_entry *entry, void *data);
2136static ptid_t linux_wait_1 (ptid_t ptid,
2137 struct target_waitstatus *ourstatus,
2138 int target_options);
2139
2140/* Stabilize threads (move out of jump pads).
2141
2142 If a thread is midway collecting a fast tracepoint, we need to
2143 finish the collection and move it out of the jump pad before
2144 reporting the signal.
2145
2146 This avoids recursion while collecting (when a signal arrives
2147 midway, and the signal handler itself collects), which would trash
2148 the trace buffer. In case the user set a breakpoint in a signal
2149 handler, this avoids the backtrace showing the jump pad, etc..
2150 Most importantly, there are certain things we can't do safely if
2151 threads are stopped in a jump pad (or in its callee's). For
2152 example:
2153
2154 - starting a new trace run. A thread still collecting the
2155 previous run, could trash the trace buffer when resumed. The trace
2156 buffer control structures would have been reset but the thread had
 2157   no way to tell.  The thread could even be midway through memcpy'ing to the
2158 buffer, which would mean that when resumed, it would clobber the
2159 trace buffer that had been set for a new run.
2160
2161 - we can't rewrite/reuse the jump pads for new tracepoints
2162 safely. Say you do tstart while a thread is stopped midway while
2163 collecting. When the thread is later resumed, it finishes the
2164 collection, and returns to the jump pad, to execute the original
2165 instruction that was under the tracepoint jump at the time the
2166 older run had been started. If the jump pad had been rewritten
2167 since for something else in the new run, the thread would now
2168 execute the wrong / random instructions. */
2169
2170static void
2171linux_stabilize_threads (void)
2172{
2173 struct thread_info *save_inferior;
2174 struct lwp_info *lwp_stuck;
2175
2176 lwp_stuck
2177 = (struct lwp_info *) find_inferior (&all_lwps,
2178 stuck_in_jump_pad_callback, NULL);
2179 if (lwp_stuck != NULL)
2180 {
b4d51a55
PA
2181 if (debug_threads)
2182 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2183 lwpid_of (lwp_stuck));
fa593d66
PA
2184 return;
2185 }
2186
2187 save_inferior = current_inferior;
2188
2189 stabilizing_threads = 1;
2190
2191 /* Kick 'em all. */
2192 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2193
2194 /* Loop until all are stopped out of the jump pads. */
2195 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2196 {
2197 struct target_waitstatus ourstatus;
2198 struct lwp_info *lwp;
fa593d66
PA
2199 int wstat;
2200
 2201      /* Note that we go through the full wait event loop.  While
2202 moving threads out of jump pad, we need to be able to step
2203 over internal breakpoints and such. */
32fcada3 2204 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2205
2206 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2207 {
2208 lwp = get_thread_lwp (current_inferior);
2209
2210 /* Lock it. */
2211 lwp->suspended++;
2212
2213 if (ourstatus.value.sig != TARGET_SIGNAL_0
2214 || current_inferior->last_resume_kind == resume_stop)
2215 {
2216 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2217 enqueue_one_deferred_signal (lwp, &wstat);
2218 }
2219 }
2220 }
2221
2222 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2223
2224 stabilizing_threads = 0;
2225
2226 current_inferior = save_inferior;
2227
b4d51a55 2228 if (debug_threads)
fa593d66 2229 {
b4d51a55
PA
2230 lwp_stuck
2231 = (struct lwp_info *) find_inferior (&all_lwps,
2232 stuck_in_jump_pad_callback, NULL);
2233 if (lwp_stuck != NULL)
fa593d66
PA
2234 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2235 lwpid_of (lwp_stuck));
2236 }
2237}
2238
0d62e5e8 2239/* Wait for process, returns status. */
da6d8c04 2240
95954743
PA
2241static ptid_t
2242linux_wait_1 (ptid_t ptid,
2243 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2244{
e5f1222d 2245 int w;
fc7238bb 2246 struct lwp_info *event_child;
bd99dc85 2247 int options;
bd99dc85 2248 int pid;
6bf5e0ba
PA
2249 int step_over_finished;
2250 int bp_explains_trap;
2251 int maybe_internal_trap;
2252 int report_to_gdb;
219f2f23 2253 int trace_event;
bd99dc85
PA
2254
2255 /* Translate generic target options into linux options. */
2256 options = __WALL;
2257 if (target_options & TARGET_WNOHANG)
2258 options |= WNOHANG;
0d62e5e8
DJ
2259
2260retry:
fa593d66
PA
2261 bp_explains_trap = 0;
2262 trace_event = 0;
bd99dc85
PA
2263 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2264
0d62e5e8
DJ
2265 /* If we were only supposed to resume one thread, only wait for
2266 that thread - if it's still alive. If it died, however - which
2267 can happen if we're coming from the thread death case below -
2268 then we need to make sure we restart the other threads. We could
2269 pick a thread at random or restart all; restarting all is less
2270 arbitrary. */
95954743
PA
2271 if (!non_stop
2272 && !ptid_equal (cont_thread, null_ptid)
2273 && !ptid_equal (cont_thread, minus_one_ptid))
0d62e5e8 2274 {
fc7238bb
PA
2275 struct thread_info *thread;
2276
bd99dc85
PA
2277 thread = (struct thread_info *) find_inferior_id (&all_threads,
2278 cont_thread);
0d62e5e8
DJ
2279
2280 /* No stepping, no signal - unless one is pending already, of course. */
bd99dc85 2281 if (thread == NULL)
64386c31
DJ
2282 {
2283 struct thread_resume resume_info;
95954743 2284 resume_info.thread = minus_one_ptid;
bd99dc85
PA
2285 resume_info.kind = resume_continue;
2286 resume_info.sig = 0;
2bd7c093 2287 linux_resume (&resume_info, 1);
64386c31 2288 }
bd99dc85 2289 else
95954743 2290 ptid = cont_thread;
0d62e5e8 2291 }
da6d8c04 2292
6bf5e0ba
PA
2293 if (ptid_equal (step_over_bkpt, null_ptid))
2294 pid = linux_wait_for_event (ptid, &w, options);
2295 else
2296 {
2297 if (debug_threads)
2298 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2299 target_pid_to_str (step_over_bkpt));
2300 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2301 }
2302
bd99dc85 2303 if (pid == 0) /* only if TARGET_WNOHANG */
95954743 2304 return null_ptid;
bd99dc85 2305
6bf5e0ba 2306 event_child = get_thread_lwp (current_inferior);
da6d8c04 2307
0d62e5e8
DJ
2308 /* If we are waiting for a particular child, and it exited,
2309 linux_wait_for_event will return its exit status. Similarly if
2310 the last child exited. If this is not the last child, however,
2311 do not report it as exited until there is a 'thread exited' response
2312 available in the remote protocol. Instead, just wait for another event.
2313 This should be safe, because if the thread crashed we will already
2314 have reported the termination signal to GDB; that should stop any
2315 in-progress stepping operations, etc.
2316
2317 Report the exit status of the last thread to exit. This matches
2318 LinuxThreads' behavior. */
2319
95954743 2320 if (last_thread_of_process_p (current_inferior))
da6d8c04 2321 {
bd99dc85 2322 if (WIFEXITED (w) || WIFSIGNALED (w))
0d62e5e8 2323 {
bd99dc85
PA
2324 if (WIFEXITED (w))
2325 {
2326 ourstatus->kind = TARGET_WAITKIND_EXITED;
2327 ourstatus->value.integer = WEXITSTATUS (w);
2328
2329 if (debug_threads)
493e2a69
MS
2330 fprintf (stderr,
2331 "\nChild exited with retcode = %x \n",
2332 WEXITSTATUS (w));
bd99dc85
PA
2333 }
2334 else
2335 {
2336 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2337 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2338
2339 if (debug_threads)
493e2a69
MS
2340 fprintf (stderr,
2341 "\nChild terminated with signal = %x \n",
2342 WTERMSIG (w));
bd99dc85
PA
2343
2344 }
5b1c542e 2345
3e4c1235 2346 return ptid_of (event_child);
0d62e5e8 2347 }
da6d8c04 2348 }
0d62e5e8 2349 else
da6d8c04 2350 {
0d62e5e8
DJ
2351 if (!WIFSTOPPED (w))
2352 goto retry;
da6d8c04
DJ
2353 }
2354
6bf5e0ba
PA
2355 /* If this event was not handled before, and is not a SIGTRAP, we
2356 report it. SIGILL and SIGSEGV are also treated as traps in case
2357 a breakpoint is inserted at the current PC. If this target does
2358 not support internal breakpoints at all, we also report the
2359 SIGTRAP without further processing; it's of no concern to us. */
2360 maybe_internal_trap
2361 = (supports_breakpoints ()
2362 && (WSTOPSIG (w) == SIGTRAP
2363 || ((WSTOPSIG (w) == SIGILL
2364 || WSTOPSIG (w) == SIGSEGV)
2365 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2366
2367 if (maybe_internal_trap)
2368 {
2369 /* Handle anything that requires bookkeeping before deciding to
2370 report the event or continue waiting. */
2371
2372 /* First check if we can explain the SIGTRAP with an internal
2373 breakpoint, or if we should possibly report the event to GDB.
2374 Do this before anything that may remove or insert a
2375 breakpoint. */
2376 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2377
2378 /* We have a SIGTRAP, possibly a step-over dance has just
2379 finished. If so, tweak the state machine accordingly,
2380 reinsert breakpoints and delete any reinsert (software
2381 single-step) breakpoints. */
2382 step_over_finished = finish_step_over (event_child);
2383
2384 /* Now invoke the callbacks of any internal breakpoints there. */
2385 check_breakpoints (event_child->stop_pc);
2386
219f2f23
PA
2387 /* Handle tracepoint data collecting. This may overflow the
2388 trace buffer, and cause a tracing stop, removing
2389 breakpoints. */
2390 trace_event = handle_tracepoints (event_child);
2391
6bf5e0ba
PA
2392 if (bp_explains_trap)
2393 {
2394 /* If we stepped or ran into an internal breakpoint, we've
2395 already handled it. So next time we resume (from this
2396 PC), we should step over it. */
2397 if (debug_threads)
2398 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2399
8b07ae33
PA
2400 if (breakpoint_here (event_child->stop_pc))
2401 event_child->need_step_over = 1;
6bf5e0ba
PA
2402 }
2403 }
2404 else
2405 {
2406 /* We have some other signal, possibly a step-over dance was in
2407 progress, and it should be cancelled too. */
2408 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2409 }
2410
2411 /* We have all the data we need. Either report the event to GDB, or
2412 resume threads and keep waiting for more. */
2413
2414 /* If we're collecting a fast tracepoint, finish the collection and
2415 move out of the jump pad before delivering a signal. See
2416 linux_stabilize_threads. */
2417
2418 if (WIFSTOPPED (w)
2419 && WSTOPSIG (w) != SIGTRAP
2420 && supports_fast_tracepoints ()
58b4daa5 2421 && agent_loaded_p ())
fa593d66
PA
2422 {
2423 if (debug_threads)
2424 fprintf (stderr,
2425 "Got signal %d for LWP %ld. Check if we need "
2426 "to defer or adjust it.\n",
2427 WSTOPSIG (w), lwpid_of (event_child));
2428
2429 /* Allow debugging the jump pad itself. */
2430 if (current_inferior->last_resume_kind != resume_step
2431 && maybe_move_out_of_jump_pad (event_child, &w))
2432 {
2433 enqueue_one_deferred_signal (event_child, &w);
2434
2435 if (debug_threads)
2436 fprintf (stderr,
2437 "Signal %d for LWP %ld deferred (in jump pad)\n",
2438 WSTOPSIG (w), lwpid_of (event_child));
2439
2440 linux_resume_one_lwp (event_child, 0, 0, NULL);
2441 goto retry;
2442 }
2443 }
219f2f23 2444
fa593d66
PA
2445 if (event_child->collecting_fast_tracepoint)
2446 {
2447 if (debug_threads)
2448 fprintf (stderr, "\
2449LWP %ld was trying to move out of the jump pad (%d). \
2450Check if we're already there.\n",
2451 lwpid_of (event_child),
2452 event_child->collecting_fast_tracepoint);
2453
2454 trace_event = 1;
2455
2456 event_child->collecting_fast_tracepoint
2457 = linux_fast_tracepoint_collecting (event_child, NULL);
2458
2459 if (event_child->collecting_fast_tracepoint != 1)
2460 {
2461 /* No longer need this breakpoint. */
2462 if (event_child->exit_jump_pad_bkpt != NULL)
2463 {
2464 if (debug_threads)
2465 fprintf (stderr,
2466 "No longer need exit-jump-pad bkpt; removing it."
2467 "stopping all threads momentarily.\n");
2468
2469 /* Other running threads could hit this breakpoint.
2470 We don't handle moribund locations like GDB does,
2471 instead we always pause all threads when removing
2472 breakpoints, so that any step-over or
2473 decr_pc_after_break adjustment is always taken
2474 care of while the breakpoint is still
2475 inserted. */
2476 stop_all_lwps (1, event_child);
2477 cancel_breakpoints ();
2478
2479 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2480 event_child->exit_jump_pad_bkpt = NULL;
2481
2482 unstop_all_lwps (1, event_child);
2483
2484 gdb_assert (event_child->suspended >= 0);
2485 }
2486 }
2487
2488 if (event_child->collecting_fast_tracepoint == 0)
2489 {
2490 if (debug_threads)
2491 fprintf (stderr,
2492 "fast tracepoint finished "
2493 "collecting successfully.\n");
2494
2495 /* We may have a deferred signal to report. */
2496 if (dequeue_one_deferred_signal (event_child, &w))
2497 {
2498 if (debug_threads)
2499 fprintf (stderr, "dequeued one signal.\n");
2500 }
3c11dd79 2501 else
fa593d66 2502 {
3c11dd79
PA
2503 if (debug_threads)
2504 fprintf (stderr, "no deferred signals.\n");
fa593d66
PA
2505
2506 if (stabilizing_threads)
2507 {
2508 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2509 ourstatus->value.sig = TARGET_SIGNAL_0;
2510 return ptid_of (event_child);
2511 }
2512 }
2513 }
6bf5e0ba
PA
2514 }
2515
e471f25b
PA
2516 /* Check whether GDB would be interested in this event. */
2517
2518 /* If GDB is not interested in this signal, don't stop other
2519 threads, and don't report it to GDB. Just resume the inferior
2520 right away. We do this for threading-related signals as well as
2521 any that GDB specifically requested we ignore. But never ignore
2522 SIGSTOP if we sent it ourselves, and do not ignore signals when
2523 stepping - they may require special handling to skip the signal
2524 handler. */
2525 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2526 thread library? */
2527 if (WIFSTOPPED (w)
2528 && current_inferior->last_resume_kind != resume_step
2529 && (
1a981360 2530#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
e471f25b
PA
2531 (current_process ()->private->thread_db != NULL
2532 && (WSTOPSIG (w) == __SIGRTMIN
2533 || WSTOPSIG (w) == __SIGRTMIN + 1))
2534 ||
2535#endif
2536 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2537 && !(WSTOPSIG (w) == SIGSTOP
2538 && current_inferior->last_resume_kind == resume_stop))))
2539 {
2540 siginfo_t info, *info_p;
2541
2542 if (debug_threads)
2543 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2544 WSTOPSIG (w), lwpid_of (event_child));
2545
2546 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2547 info_p = &info;
2548 else
2549 info_p = NULL;
2550 linux_resume_one_lwp (event_child, event_child->stepping,
2551 WSTOPSIG (w), info_p);
2552 goto retry;
2553 }
2554
2555 /* If GDB wanted this thread to single step, we always want to
2556 report the SIGTRAP, and let GDB handle it. Watchpoints should
2557 always be reported. So should signals we can't explain. A
2558 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
 2559     not support Z0 breakpoints.  If we do, we'll be able to handle
 2560     GDB breakpoints on top of internal breakpoints, by handling the
 2561     internal breakpoint and still reporting the event to GDB.  If we
 2562     don't, we're out of luck; GDB won't see the breakpoint hit.  */
6bf5e0ba 2563 report_to_gdb = (!maybe_internal_trap
8336d594 2564 || current_inferior->last_resume_kind == resume_step
6bf5e0ba 2565 || event_child->stopped_by_watchpoint
493e2a69
MS
2566 || (!step_over_finished
2567 && !bp_explains_trap && !trace_event)
9f3a5c85
LM
2568 || (gdb_breakpoint_here (event_child->stop_pc)
2569 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
6bf5e0ba
PA
2570
2571 /* We found no reason GDB would want us to stop. We either hit one
2572 of our own breakpoints, or finished an internal step GDB
2573 shouldn't know about. */
2574 if (!report_to_gdb)
2575 {
2576 if (debug_threads)
2577 {
2578 if (bp_explains_trap)
2579 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2580 if (step_over_finished)
2581 fprintf (stderr, "Step-over finished.\n");
219f2f23
PA
2582 if (trace_event)
2583 fprintf (stderr, "Tracepoint event.\n");
6bf5e0ba
PA
2584 }
2585
2586 /* We're not reporting this breakpoint to GDB, so apply the
2587 decr_pc_after_break adjustment to the inferior's regcache
2588 ourselves. */
2589
2590 if (the_low_target.set_pc != NULL)
2591 {
2592 struct regcache *regcache
2593 = get_thread_regcache (get_lwp_thread (event_child), 1);
2594 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2595 }
2596
7984d532
PA
2597 /* We may have finished stepping over a breakpoint. If so,
2598 we've stopped and suspended all LWPs momentarily except the
2599 stepping one. This is where we resume them all again. We're
2600 going to keep waiting, so use proceed, which handles stepping
2601 over the next breakpoint. */
6bf5e0ba
PA
2602 if (debug_threads)
2603 fprintf (stderr, "proceeding all threads.\n");
7984d532
PA
2604
2605 if (step_over_finished)
2606 unsuspend_all_lwps (event_child);
2607
6bf5e0ba
PA
2608 proceed_all_lwps ();
2609 goto retry;
2610 }
2611
2612 if (debug_threads)
2613 {
8336d594 2614 if (current_inferior->last_resume_kind == resume_step)
6bf5e0ba
PA
2615 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2616 if (event_child->stopped_by_watchpoint)
2617 fprintf (stderr, "Stopped by watchpoint.\n");
8b07ae33
PA
2618 if (gdb_breakpoint_here (event_child->stop_pc))
2619 fprintf (stderr, "Stopped by GDB breakpoint.\n");
6bf5e0ba
PA
2620 if (debug_threads)
2621 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2622 }
2623
2624 /* Alright, we're going to report a stop. */
2625
fa593d66 2626 if (!non_stop && !stabilizing_threads)
6bf5e0ba
PA
2627 {
2628 /* In all-stop, stop all threads. */
7984d532 2629 stop_all_lwps (0, NULL);
6bf5e0ba
PA
2630
2631 /* If we're not waiting for a specific LWP, choose an event LWP
2632 from among those that have had events. Giving equal priority
2633 to all LWPs that have had events helps prevent
2634 starvation. */
2635 if (ptid_equal (ptid, minus_one_ptid))
2636 {
2637 event_child->status_pending_p = 1;
2638 event_child->status_pending = w;
2639
2640 select_event_lwp (&event_child);
2641
2642 event_child->status_pending_p = 0;
2643 w = event_child->status_pending;
2644 }
2645
2646 /* Now that we've selected our final event LWP, cancel any
2647 breakpoints in other LWPs that have hit a GDB breakpoint.
2648 See the comment in cancel_breakpoints_callback to find out
2649 why. */
2650 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
fa593d66 2651
c03e6ccc
YQ
 2652      /* If we were doing a step-over, all other threads but the stepping one
2653 had been paused in start_step_over, with their suspend counts
2654 incremented. We don't want to do a full unstop/unpause, because we're
2655 in all-stop mode (so we want threads stopped), but we still need to
2656 unsuspend the other threads, to decrement their `suspended' count
2657 back. */
2658 if (step_over_finished)
2659 unsuspend_all_lwps (event_child);
2660
fa593d66
PA
2661 /* Stabilize threads (move out of jump pads). */
2662 stabilize_threads ();
6bf5e0ba
PA
2663 }
2664 else
2665 {
2666 /* If we just finished a step-over, then all threads had been
2667 momentarily paused. In all-stop, that's fine, we want
2668 threads stopped by now anyway. In non-stop, we need to
2669 re-resume threads that GDB wanted to be running. */
2670 if (step_over_finished)
7984d532 2671 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
2672 }
2673
5b1c542e 2674 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2675
8336d594
PA
2676 if (current_inferior->last_resume_kind == resume_stop
2677 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
2678 {
2679 /* A thread that has been requested to stop by GDB with vCont;t,
2680 and it stopped cleanly, so report as SIG0. The use of
2681 SIGSTOP is an implementation detail. */
2682 ourstatus->value.sig = TARGET_SIGNAL_0;
2683 }
8336d594
PA
2684 else if (current_inferior->last_resume_kind == resume_stop
2685 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
2686 {
2687 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2688	 but it stopped for other reasons.  */
bd99dc85
PA
2689 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2690 }
2691 else
2692 {
2693 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2694 }
2695
d50171e4
PA
2696 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2697
bd99dc85 2698 if (debug_threads)
95954743 2699 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
6bf5e0ba 2700 target_pid_to_str (ptid_of (event_child)),
bd99dc85
PA
2701 ourstatus->kind,
2702 ourstatus->value.sig);
2703
6bf5e0ba 2704 return ptid_of (event_child);
bd99dc85
PA
2705}
2706
2707/* Get rid of any pending event in the pipe. */
2708static void
2709async_file_flush (void)
2710{
2711 int ret;
2712 char buf;
2713
2714 do
2715 ret = read (linux_event_pipe[0], &buf, 1);
2716 while (ret >= 0 || (ret == -1 && errno == EINTR));
2717}
2718
2719/* Put something in the pipe, so the event loop wakes up. */
2720static void
2721async_file_mark (void)
2722{
2723 int ret;
2724
2725 async_file_flush ();
2726
2727 do
2728 ret = write (linux_event_pipe[1], "+", 1);
2729 while (ret == 0 || (ret == -1 && errno == EINTR));
2730
2731 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2732 be awakened anyway. */
2733}
2734
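/* Editor's aside: the self-pipe trick above relies on linux_event_pipe
   having been created non-blocking, so the flush loop ends with EAGAIN
   instead of blocking, and the mark never wedges on a full pipe.  A
   sketch of that setup (hypothetical helper; the real initialization
   lives elsewhere):  */

#include <fcntl.h>
#include <unistd.h>

static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  fcntl (fds[0], F_SETFL, O_NONBLOCK);	/* Read end: drained in a loop.  */
  fcntl (fds[1], F_SETFL, O_NONBLOCK);	/* Write end: never blocks.  */
  return 0;
}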
95954743
PA
2735static ptid_t
2736linux_wait (ptid_t ptid,
2737 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 2738{
95954743 2739 ptid_t event_ptid;
bd99dc85
PA
2740
2741 if (debug_threads)
95954743 2742 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
2743
2744 /* Flush the async file first. */
2745 if (target_is_async_p ())
2746 async_file_flush ();
2747
95954743 2748 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
2749
2750 /* If at least one stop was reported, there may be more. A single
2751 SIGCHLD can signal more than one child stop. */
2752 if (target_is_async_p ()
2753 && (target_options & TARGET_WNOHANG) != 0
95954743 2754 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
2755 async_file_mark ();
2756
2757 return event_ptid;
da6d8c04
DJ
2758}
2759
c5f62d5f 2760/* Send a signal to an LWP. */
fd500816
DJ
2761
2762static int
a1928bad 2763kill_lwp (unsigned long lwpid, int signo)
fd500816 2764{
c5f62d5f
DE
2765 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2766 fails, then we are not using nptl threads and we should be using kill. */
fd500816 2767
c5f62d5f
DE
2768#ifdef __NR_tkill
2769 {
2770 static int tkill_failed;
fd500816 2771
c5f62d5f
DE
2772 if (!tkill_failed)
2773 {
2774 int ret;
2775
2776 errno = 0;
2777 ret = syscall (__NR_tkill, lwpid, signo);
2778 if (errno != ENOSYS)
2779 return ret;
2780 tkill_failed = 1;
2781 }
2782 }
fd500816
DJ
2783#endif
2784
2785 return kill (lwpid, signo);
2786}
2787
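/* Editor's aside: the distinction kill_lwp is papering over.  kill ()
   posts a process-directed signal that any thread of the group may
   dequeue, while tkill () posts a thread-directed signal to exactly
   one kernel thread -- which is what stopping a specific LWP under
   ptrace requires.  Illustrative only:  */

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static void
signal_routing_demo (pid_t tgid, pid_t lwpid)
{
  kill (tgid, SIGSTOP);			/* Some thread of TGID gets it.  */
#ifdef __NR_tkill
  syscall (__NR_tkill, lwpid, SIGSTOP);	/* Exactly LWPID gets it.  */
#endif
}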
964e4306
PA
2788void
2789linux_stop_lwp (struct lwp_info *lwp)
2790{
2791 send_sigstop (lwp);
2792}
2793
0d62e5e8 2794static void
02fc4de7 2795send_sigstop (struct lwp_info *lwp)
0d62e5e8 2796{
bd99dc85 2797 int pid;
0d62e5e8 2798
bd99dc85
PA
2799 pid = lwpid_of (lwp);
2800
0d62e5e8
DJ
2801 /* If we already have a pending stop signal for this process, don't
2802 send another. */
54a0b537 2803 if (lwp->stop_expected)
0d62e5e8 2804 {
ae13219e 2805 if (debug_threads)
bd99dc85 2806 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2807
0d62e5e8
DJ
2808 return;
2809 }
2810
2811 if (debug_threads)
bd99dc85 2812 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2813
d50171e4 2814 lwp->stop_expected = 1;
bd99dc85 2815 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2816}
2817
7984d532
PA
2818static int
2819send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2820{
2821 struct lwp_info *lwp = (struct lwp_info *) entry;
2822
7984d532
PA
2823 /* Ignore EXCEPT. */
2824 if (lwp == except)
2825 return 0;
2826
02fc4de7 2827 if (lwp->stopped)
7984d532 2828 return 0;
02fc4de7
PA
2829
2830 send_sigstop (lwp);
7984d532
PA
2831 return 0;
2832}
2833
2834/* Increment the suspend count of an LWP, and stop it, if not stopped
2835 yet. */
2836static int
2837suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2838 void *except)
2839{
2840 struct lwp_info *lwp = (struct lwp_info *) entry;
2841
2842 /* Ignore EXCEPT. */
2843 if (lwp == except)
2844 return 0;
2845
2846 lwp->suspended++;
2847
2848 return send_sigstop_callback (entry, except);
02fc4de7
PA
2849}
2850
95954743
PA
2851static void
2852mark_lwp_dead (struct lwp_info *lwp, int wstat)
2853{
2854 /* It's dead, really. */
2855 lwp->dead = 1;
2856
2857 /* Store the exit status for later. */
2858 lwp->status_pending_p = 1;
2859 lwp->status_pending = wstat;
2860
95954743
PA
2861 /* Prevent trying to stop it. */
2862 lwp->stopped = 1;
2863
2864 /* No further stops are expected from a dead lwp. */
2865 lwp->stop_expected = 0;
2866}
2867
0d62e5e8
DJ
2868static void
2869wait_for_sigstop (struct inferior_list_entry *entry)
2870{
54a0b537 2871 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2872 struct thread_info *saved_inferior;
a1928bad 2873 int wstat;
95954743
PA
2874 ptid_t saved_tid;
2875 ptid_t ptid;
d50171e4 2876 int pid;
0d62e5e8 2877
54a0b537 2878 if (lwp->stopped)
d50171e4
PA
2879 {
2880 if (debug_threads)
2881 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2882 lwpid_of (lwp));
2883 return;
2884 }
0d62e5e8
DJ
2885
2886 saved_inferior = current_inferior;
bd99dc85
PA
2887 if (saved_inferior != NULL)
2888 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2889 else
95954743 2890 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2891
95954743 2892 ptid = lwp->head.id;
bd99dc85 2893
d50171e4
PA
2894 if (debug_threads)
2895 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2896
2897 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2898
2899 /* If we stopped with a non-SIGSTOP signal, save it for later
2900 and record the pending SIGSTOP. If the process exited, just
2901 return. */
d50171e4 2902 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2903 {
2904 if (debug_threads)
d50171e4
PA
2905 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2906 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2907
d50171e4 2908 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2909 {
2910 if (debug_threads)
d50171e4
PA
2911 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2912 lwpid_of (lwp), wstat);
2913
c35fafde
PA
2914 lwp->status_pending_p = 1;
2915 lwp->status_pending = wstat;
2916 }
0d62e5e8 2917 }
d50171e4 2918 else
95954743
PA
2919 {
2920 if (debug_threads)
d50171e4 2921 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2922
d50171e4
PA
2923 lwp = find_lwp_pid (pid_to_ptid (pid));
2924 if (lwp)
2925 {
2926 /* Leave this status pending for the next time we're able to
2927 report it. In the mean time, we'll report this lwp as
2928 dead to GDB, so GDB doesn't try to read registers and
2929 memory from it. This can only happen if this was the
2930 last thread of the process; otherwise, PID is removed
2931 from the thread tables before linux_wait_for_event
2932 returns. */
2933 mark_lwp_dead (lwp, wstat);
2934 }
95954743 2935 }
0d62e5e8 2936
bd99dc85 2937 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2938 current_inferior = saved_inferior;
2939 else
2940 {
2941 if (debug_threads)
2942 fprintf (stderr, "Previously current thread died.\n");
2943
bd99dc85
PA
2944 if (non_stop)
2945 {
2946 /* We can't change the current inferior behind GDB's back,
2947 otherwise, a subsequent command may apply to the wrong
2948 process. */
2949 current_inferior = NULL;
2950 }
2951 else
2952 {
2953 /* Set a valid thread as current. */
2954 set_desired_inferior (0);
2955 }
0d62e5e8
DJ
2956 }
2957}
2958
fa593d66
PA
2959/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2960 move it out, because we need to report the stop event to GDB. For
2961 example, if the user puts a breakpoint in the jump pad, it's
2962 because she wants to debug it. */
2963
2964static int
2965stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2966{
2967 struct lwp_info *lwp = (struct lwp_info *) entry;
2968 struct thread_info *thread = get_lwp_thread (lwp);
2969
2970 gdb_assert (lwp->suspended == 0);
2971 gdb_assert (lwp->stopped);
2972
2973 /* Allow debugging the jump pad, gdb_collect, etc.. */
2974 return (supports_fast_tracepoints ()
58b4daa5 2975 && agent_loaded_p ()
fa593d66
PA
2976 && (gdb_breakpoint_here (lwp->stop_pc)
2977 || lwp->stopped_by_watchpoint
2978 || thread->last_resume_kind == resume_step)
2979 && linux_fast_tracepoint_collecting (lwp, NULL));
2980}
2981
2982static void
2983move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2984{
2985 struct lwp_info *lwp = (struct lwp_info *) entry;
2986 struct thread_info *thread = get_lwp_thread (lwp);
2987 int *wstat;
2988
2989 gdb_assert (lwp->suspended == 0);
2990 gdb_assert (lwp->stopped);
2991
2992 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2993
2994 /* Allow debugging the jump pad, gdb_collect, etc. */
2995 if (!gdb_breakpoint_here (lwp->stop_pc)
2996 && !lwp->stopped_by_watchpoint
2997 && thread->last_resume_kind != resume_step
2998 && maybe_move_out_of_jump_pad (lwp, wstat))
2999 {
3000 if (debug_threads)
3001 fprintf (stderr,
3002 "LWP %ld needs stabilizing (in jump pad)\n",
3003 lwpid_of (lwp));
3004
3005 if (wstat)
3006 {
3007 lwp->status_pending_p = 0;
3008 enqueue_one_deferred_signal (lwp, wstat);
3009
3010 if (debug_threads)
3011 fprintf (stderr,
3012 "Signal %d for LWP %ld deferred "
3013 "(in jump pad)\n",
3014 WSTOPSIG (*wstat), lwpid_of (lwp));
3015 }
3016
3017 linux_resume_one_lwp (lwp, 0, 0, NULL);
3018 }
3019 else
3020 lwp->suspended++;
3021}
3022
3023static int
3024lwp_running (struct inferior_list_entry *entry, void *data)
3025{
3026 struct lwp_info *lwp = (struct lwp_info *) entry;
3027
3028 if (lwp->dead)
3029 return 0;
3030 if (lwp->stopped)
3031 return 0;
3032 return 1;
3033}
3034
7984d532
PA
3035/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3036 If SUSPEND, then also increase the suspend count of every LWP,
3037 except EXCEPT. */
3038
0d62e5e8 3039static void
7984d532 3040stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
3041{
3042 stopping_threads = 1;
7984d532
PA
3043
3044 if (suspend)
3045 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3046 else
3047 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 3048 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
3049 stopping_threads = 0;
3050}
3051
da6d8c04
DJ
3052/* Resume execution of the inferior process.
3053 If STEP is nonzero, single-step it.
3054 If SIGNAL is nonzero, give it that signal. */
3055
ce3a066d 3056static void
2acc282a 3057linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 3058 int step, int signal, siginfo_t *info)
da6d8c04 3059{
0d62e5e8 3060 struct thread_info *saved_inferior;
fa593d66 3061 int fast_tp_collecting;
0d62e5e8 3062
54a0b537 3063 if (lwp->stopped == 0)
0d62e5e8
DJ
3064 return;
3065
fa593d66
PA
3066 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3067
3068 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3069
219f2f23
PA
3070 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3071 user used the "jump" command, or "set $pc = foo"). */
3072 if (lwp->stop_pc != get_pc (lwp))
3073 {
3074 /* Collecting 'while-stepping' actions doesn't make sense
3075 anymore. */
3076 release_while_stepping_state_list (get_lwp_thread (lwp));
3077 }
3078
0d62e5e8
DJ
3079 /* If we have pending signals or status, and a new signal, enqueue the
3080 signal. Also enqueue the signal if we are waiting to reinsert a
3081 breakpoint; it will be picked up again below. */
3082 if (signal != 0
fa593d66
PA
3083 && (lwp->status_pending_p
3084 || lwp->pending_signals != NULL
3085 || lwp->bp_reinsert != 0
3086 || fast_tp_collecting))
0d62e5e8
DJ
3087 {
3088 struct pending_signals *p_sig;
bca929d3 3089 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3090 p_sig->prev = lwp->pending_signals;
0d62e5e8 3091 p_sig->signal = signal;
32ca6d61
DJ
3092 if (info == NULL)
3093 memset (&p_sig->info, 0, sizeof (siginfo_t));
3094 else
3095 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3096 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3097 }
3098
d50171e4
PA
3099 if (lwp->status_pending_p)
3100 {
3101 if (debug_threads)
3102 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3103 " has pending status\n",
3104 lwpid_of (lwp), step ? "step" : "continue", signal,
3105 lwp->stop_expected ? "expected" : "not expected");
3106 return;
3107 }
0d62e5e8
DJ
3108
3109 saved_inferior = current_inferior;
54a0b537 3110 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
3111
3112 if (debug_threads)
1b3f6016 3113 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 3114 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 3115 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3116
3117 /* This bit needs some thinking about. If we get a signal that
3118 we must report while a single-step reinsert is still pending,
3119 we often end up resuming the thread. It might be better to
3120 (ew) allow a stack of pending events; then we could be sure that
3121 the reinsert happened right away and not lose any signals.
3122
3123 Making this stack would also shrink the window in which breakpoints are
54a0b537 3124 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3125 complete correctness, so it won't solve that problem. It may be
3126 worthwhile just to solve this one, however. */
54a0b537 3127 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3128 {
3129 if (debug_threads)
d50171e4
PA
3130 fprintf (stderr, " pending reinsert at 0x%s\n",
3131 paddress (lwp->bp_reinsert));
3132
3133 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3134 {
fa593d66
PA
3135 if (fast_tp_collecting == 0)
3136 {
3137 if (step == 0)
3138 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3139 if (lwp->suspended)
3140 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3141 lwp->suspended);
3142 }
d50171e4
PA
3143
3144 step = 1;
3145 }
0d62e5e8
DJ
3146
3147 /* Postpone any pending signal. It was enqueued above. */
3148 signal = 0;
3149 }
3150
fa593d66
PA
3151 if (fast_tp_collecting == 1)
3152 {
3153 if (debug_threads)
3154 fprintf (stderr, "\
3155lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3156 lwpid_of (lwp));
3157
3158 /* Postpone any pending signal. It was enqueued above. */
3159 signal = 0;
3160 }
3161 else if (fast_tp_collecting == 2)
3162 {
3163 if (debug_threads)
3164 fprintf (stderr, "\
3165lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3166 lwpid_of (lwp));
3167
3168 if (can_hardware_single_step ())
3169 step = 1;
3170 else
3171 fatal ("moving out of jump pad single-stepping"
3172 " not implemented on this target");
3173
3174 /* Postpone any pending signal. It was enqueued above. */
3175 signal = 0;
3176 }
3177
219f2f23
PA
3178 /* If we have while-stepping actions in this thread set it stepping.
3179 If we have a signal to deliver, it may or may not be set to
3180 SIG_IGN, we don't know. Assume so, and allow collecting
3181 while-stepping into a signal handler. A possible smart thing to
3182 do would be to set an internal breakpoint at the signal return
3183 address, continue, and carry on catching this while-stepping
3184 action only when that breakpoint is hit. A future
3185 enhancement. */
3186 if (get_lwp_thread (lwp)->while_stepping != NULL
3187 && can_hardware_single_step ())
3188 {
3189 if (debug_threads)
3190 fprintf (stderr,
3191 "lwp %ld has a while-stepping action -> forcing step.\n",
3192 lwpid_of (lwp));
3193 step = 1;
3194 }
3195
aa691b87 3196 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 3197 {
442ea881
PA
3198 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3199 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 3200 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
3201 }
3202
fa593d66
PA
3203 /* If we have pending signals, consume one unless we are trying to
3204 reinsert a breakpoint or we're trying to finish a fast tracepoint
3205 collect. */
3206 if (lwp->pending_signals != NULL
3207 && lwp->bp_reinsert == 0
3208 && fast_tp_collecting == 0)
0d62e5e8
DJ
3209 {
3210 struct pending_signals **p_sig;
3211
54a0b537 3212 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3213 while ((*p_sig)->prev != NULL)
3214 p_sig = &(*p_sig)->prev;
3215
3216 signal = (*p_sig)->signal;
32ca6d61 3217 if ((*p_sig)->info.si_signo != 0)
bd99dc85 3218 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 3219
0d62e5e8
DJ
3220 free (*p_sig);
3221 *p_sig = NULL;
3222 }
3223
aa5ca48f
DE
3224 if (the_low_target.prepare_to_resume != NULL)
3225 the_low_target.prepare_to_resume (lwp);
3226
0d62e5e8 3227 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 3228 get_lwp_thread (lwp));
da6d8c04 3229 errno = 0;
54a0b537 3230 lwp->stopped = 0;
c3adc08c 3231 lwp->stopped_by_watchpoint = 0;
54a0b537 3232 lwp->stepping = step;
14ce3065
DE
3233 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3234 /* Coerce to a uintptr_t first to avoid potential gcc warning
3235 of coercing an 8 byte integer to a 4 byte pointer. */
3236 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
3237
3238 current_inferior = saved_inferior;
da6d8c04 3239 if (errno)
3221518c
UW
3240 {
3241 /* ESRCH from ptrace either means that the thread was already
3242 running (an error) or that it is gone (a race condition). If
3243 it's gone, we will get a notification the next time we wait,
3244 so we can ignore the error. We could differentiate these
3245 two, but it's tricky without waiting; the thread still exists
3246 as a zombie, so sending it signal 0 would succeed. So just
3247 ignore ESRCH. */
3248 if (errno == ESRCH)
3249 return;
3250
3251 perror_with_name ("ptrace");
3252 }
da6d8c04
DJ
3253}
3254
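/* Editor's aside: stripped of all bookkeeping, the resume primitive
   above reduces to one ptrace call; the fourth argument is the signal
   (0 for none) that the kernel delivers to the LWP on its way out of
   the stop.  A minimal sketch (hypothetical helper):  */

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_lwp_raw (pid_t lwpid, int step, int sig)
{
  /* Coerce via uintptr_t, as above, to avoid int-to-pointer warnings.  */
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid,
		 (void *) 0, (void *) (uintptr_t) sig);
}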
2bd7c093
PA
3255struct thread_resume_array
3256{
3257 struct thread_resume *resume;
3258 size_t n;
3259};
64386c31
DJ
3260
3261/* This function is called once per thread. We look up the thread
5544ad89
DJ
3262 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3263 resume request.
3264
3265 This algorithm is O(threads * resume elements), but resume elements
3266 is small (and will remain small at least until GDB supports thread
3267 suspension). */
2bd7c093
PA
3268static int
3269linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3270{
54a0b537 3271 struct lwp_info *lwp;
64386c31 3272 struct thread_info *thread;
5544ad89 3273 int ndx;
2bd7c093 3274 struct thread_resume_array *r;
64386c31
DJ
3275
3276 thread = (struct thread_info *) entry;
54a0b537 3277 lwp = get_thread_lwp (thread);
2bd7c093 3278 r = arg;
64386c31 3279
2bd7c093 3280 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3281 {
3282 ptid_t ptid = r->resume[ndx].thread;
3283 if (ptid_equal (ptid, minus_one_ptid)
3284 || ptid_equal (ptid, entry->id)
3285 || (ptid_is_pid (ptid)
3286 && (ptid_get_pid (ptid) == pid_of (lwp)))
3287 || (ptid_get_lwp (ptid) == -1
3288 && (ptid_get_pid (ptid) == pid_of (lwp))))
3289 {
d50171e4 3290 if (r->resume[ndx].kind == resume_stop
8336d594 3291 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3292 {
3293 if (debug_threads)
3294 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3295 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3296 ? "stopped"
3297 : "stopping",
3298 lwpid_of (lwp));
3299
3300 continue;
3301 }
3302
95954743 3303 lwp->resume = &r->resume[ndx];
8336d594 3304 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3305
3306 /* If we had a deferred signal to report, dequeue one now.
3307 This can happen if LWP gets more than one signal while
3308 trying to get out of a jump pad. */
3309 if (lwp->stopped
3310 && !lwp->status_pending_p
3311 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3312 {
3313 lwp->status_pending_p = 1;
3314
3315 if (debug_threads)
3316 fprintf (stderr,
3317 "Dequeueing deferred signal %d for LWP %ld, "
3318 "leaving status pending.\n",
3319 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3320 }
3321
95954743
PA
3322 return 0;
3323 }
3324 }
2bd7c093
PA
3325
3326 /* No resume action for this thread. */
3327 lwp->resume = NULL;
64386c31 3328
2bd7c093 3329 return 0;
5544ad89
DJ
3330}
3331
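/* Editor's aside: the matching rules above folded into one predicate
   (hypothetical helper, reusing the ptid accessors already used in
   this file).  A resume request applies to a thread when it is the -1
   wildcard, names the thread exactly, or names the thread's whole
   process (a bare pid, or an lwp field of -1):  */

static int
resume_request_matches (ptid_t request, ptid_t thread)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread)
	  || ((ptid_is_pid (request) || ptid_get_lwp (request) == -1)
	      && ptid_get_pid (request) == ptid_get_pid (thread)));
}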
5544ad89 3332
bd99dc85
PA
3333/* Set *FLAG_P if this lwp has an interesting status pending. */
3334static int
3335resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3336{
bd99dc85 3337 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3338
bd99dc85
PA
3339 /* LWPs which will not be resumed are not interesting, because
3340 we might not wait for them next time through linux_wait. */
2bd7c093 3341 if (lwp->resume == NULL)
bd99dc85 3342 return 0;
64386c31 3343
bd99dc85 3344 if (lwp->status_pending_p)
d50171e4
PA
3345 * (int *) flag_p = 1;
3346
3347 return 0;
3348}
3349
3350/* Return 1 if this lwp that GDB wants running is stopped at an
3351 internal breakpoint that we need to step over. It assumes that any
3352 required STOP_PC adjustment has already been propagated to the
3353 inferior's regcache. */
3354
3355static int
3356need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3357{
3358 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3359 struct thread_info *thread;
d50171e4
PA
3360 struct thread_info *saved_inferior;
3361 CORE_ADDR pc;
3362
3363 /* LWPs which will not be resumed are not interesting, because we
3364 might not wait for them next time through linux_wait. */
3365
3366 if (!lwp->stopped)
3367 {
3368 if (debug_threads)
3369 fprintf (stderr,
3370 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3371 lwpid_of (lwp));
3372 return 0;
3373 }
3374
8336d594
PA
3375 thread = get_lwp_thread (lwp);
3376
3377 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3378 {
3379 if (debug_threads)
3380 fprintf (stderr,
3381 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3382 lwpid_of (lwp));
3383 return 0;
3384 }
3385
7984d532
PA
3386 gdb_assert (lwp->suspended >= 0);
3387
3388 if (lwp->suspended)
3389 {
3390 if (debug_threads)
3391 fprintf (stderr,
3392 "Need step over [LWP %ld]? Ignoring, suspended\n",
3393 lwpid_of (lwp));
3394 return 0;
3395 }
3396
d50171e4
PA
3397 if (!lwp->need_step_over)
3398 {
3399 if (debug_threads)
3400 fprintf (stderr,
3401 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3402 }
5544ad89 3403
bd99dc85 3404 if (lwp->status_pending_p)
d50171e4
PA
3405 {
3406 if (debug_threads)
3407 fprintf (stderr,
3408 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3409 lwpid_of (lwp));
3410 return 0;
3411 }
3412
3413 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3414 or we have. */
3415 pc = get_pc (lwp);
3416
3417 /* If the PC has changed since we stopped, then don't do anything,
3418 and let the breakpoint/tracepoint be hit. This happens if, for
3419 instance, GDB handled the decr_pc_after_break subtraction itself,
3420 GDB is OOL stepping this thread, or the user has issued a "jump"
3421 command, or poked thread's registers herself. */
3422 if (pc != lwp->stop_pc)
3423 {
3424 if (debug_threads)
3425 fprintf (stderr,
3426 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3427 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3428 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3429
3430 lwp->need_step_over = 0;
3431 return 0;
3432 }
3433
3434 saved_inferior = current_inferior;
8336d594 3435 current_inferior = thread;
d50171e4 3436
8b07ae33 3437 /* We can only step over breakpoints we know about. */
fa593d66 3438 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3439 {
8b07ae33 3440 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
3441 though. If the condition is being evaluated on the target's side
 3442	 and it evaluates to false, step over this breakpoint as well.  */
3443 if (gdb_breakpoint_here (pc)
3444 && gdb_condition_true_at_breakpoint (pc))
8b07ae33
PA
3445 {
3446 if (debug_threads)
3447 fprintf (stderr,
3448 "Need step over [LWP %ld]? yes, but found"
3449 " GDB breakpoint at 0x%s; skipping step over\n",
3450 lwpid_of (lwp), paddress (pc));
d50171e4 3451
8b07ae33
PA
3452 current_inferior = saved_inferior;
3453 return 0;
3454 }
3455 else
3456 {
3457 if (debug_threads)
3458 fprintf (stderr,
493e2a69
MS
3459 "Need step over [LWP %ld]? yes, "
3460 "found breakpoint at 0x%s\n",
8b07ae33 3461 lwpid_of (lwp), paddress (pc));
d50171e4 3462
8b07ae33
PA
3463 /* We've found an lwp that needs stepping over --- return 1 so
3464 that find_inferior stops looking. */
3465 current_inferior = saved_inferior;
3466
3467 /* If the step over is cancelled, this is set again. */
3468 lwp->need_step_over = 0;
3469 return 1;
3470 }
d50171e4
PA
3471 }
3472
3473 current_inferior = saved_inferior;
3474
3475 if (debug_threads)
3476 fprintf (stderr,
3477 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3478 lwpid_of (lwp), paddress (pc));
c6ecbae5 3479
bd99dc85 3480 return 0;
5544ad89
DJ
3481}
3482
d50171e4
PA
 3483/* Start a step-over operation on LWP.  When LWP is stopped at a
3484 breakpoint, to make progress, we need to remove the breakpoint out
3485 of the way. If we let other threads run while we do that, they may
3486 pass by the breakpoint location and miss hitting it. To avoid
3487 that, a step-over momentarily stops all threads while LWP is
3488 single-stepped while the breakpoint is temporarily uninserted from
3489 the inferior. When the single-step finishes, we reinsert the
3490 breakpoint, and let all threads that are supposed to be running,
3491 run again.
3492
3493 On targets that don't support hardware single-step, we don't
3494 currently support full software single-stepping. Instead, we only
3495 support stepping over the thread event breakpoint, by asking the
3496 low target where to place a reinsert breakpoint. Since this
3497 routine assumes the breakpoint being stepped over is a thread event
3498 breakpoint, it usually assumes the return address of the current
3499 function is a good enough place to set the reinsert breakpoint. */
3500
3501static int
3502start_step_over (struct lwp_info *lwp)
3503{
3504 struct thread_info *saved_inferior;
3505 CORE_ADDR pc;
3506 int step;
3507
3508 if (debug_threads)
3509 fprintf (stderr,
3510 "Starting step-over on LWP %ld. Stopping all threads\n",
3511 lwpid_of (lwp));
3512
7984d532
PA
3513 stop_all_lwps (1, lwp);
3514 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3515
3516 if (debug_threads)
3517 fprintf (stderr, "Done stopping all threads for step-over.\n");
3518
3519 /* Note, we should always reach here with an already adjusted PC,
3520 either by GDB (if we're resuming due to GDB's request), or by our
3521 caller, if we just finished handling an internal breakpoint GDB
3522 shouldn't care about. */
3523 pc = get_pc (lwp);
3524
3525 saved_inferior = current_inferior;
3526 current_inferior = get_lwp_thread (lwp);
3527
3528 lwp->bp_reinsert = pc;
3529 uninsert_breakpoints_at (pc);
fa593d66 3530 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3531
3532 if (can_hardware_single_step ())
3533 {
3534 step = 1;
3535 }
3536 else
3537 {
3538 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3539 set_reinsert_breakpoint (raddr);
3540 step = 0;
3541 }
3542
3543 current_inferior = saved_inferior;
3544
3545 linux_resume_one_lwp (lwp, step, 0, NULL);
3546
3547 /* Require next event from this LWP. */
3548 step_over_bkpt = lwp->head.id;
3549 return 1;
3550}
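
/* For illustration, the overall step-over dance as a caller sees it
   (a minimal sketch, assuming hardware single-step; the control flow
   shown here is a simplification of what linux_wait's helpers do, not
   a verbatim call sequence):

     if (need_step_over_p (entry, NULL))
       start_step_over (lwp);        // all other LWPs stay stopped
     // ... later, when the single-step SIGTRAP for LWP arrives ...
     if (finish_step_over (lwp))     // breakpoint goes back in
       proceed_all_lwps ();          // everyone runs again

   The key invariant is that between start_step_over and
   finish_step_over only LWP runs, so no other thread can sail past
   the uninserted breakpoint.  */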

/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on targets without hardware single-step.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	delete_reinsert_breakpoints ();

      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we base this decision on whether *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
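
/* For illustration, what a vCont-style request boils down to by the
   time it reaches linux_resume (a minimal sketch; the pid/lwpid
   values are made up for the example):

     struct thread_resume r[2];

     r[0].thread = ptid_build (1234, 1235, 0);  // one specific thread
     r[0].kind = resume_step;                   // vCont;s
     r[0].sig = 0;

     r[1].thread = minus_one_ptid;              // everyone else
     r[1].kind = resume_continue;               // vCont;c
     r[1].sig = 0;

     linux_resume (r, 2);

   linux_set_resume_request matches each LWP against the array in
   order, so the specific-thread entry takes precedence over the
   wildcard.  */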

/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, "   client wants LWP %ld to remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  if (lwp == except)
    return 0;

  lwp->suspended--;
  gdb_assert (lwp->suspended >= 0);

  return proceed_one_lwp (entry, except);
}

/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  find_inferior (&all_lwps, proceed_one_lwp, NULL);
}

/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
	fprintf (stderr,
		 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
	fprintf (stderr,
		 "unstopping all lwps\n");
    }

  if (unsuspend)
    find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
  else
    find_inferior (&all_lwps, proceed_one_lwp, except);
}


#ifdef HAVE_LINUX_REGSETS

#define use_linux_regsets 1

static int
regsets_fetch_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

static int
regsets_store_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset ++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid, nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

#else /* !HAVE_LINUX_REGSETS */

#define use_linux_regsets 0
#define regsets_fetch_inferior_registers(regcache) 1
#define regsets_store_inferior_registers(regcache) 1

#endif

/* Return 1 if register REGNO is supported by one of the regset ptrace
   calls or 0 if it has to be transferred individually.  */

static int
linux_register_in_regsets (int regno)
{
  unsigned char mask = 1 << (regno % 8);
  size_t index = regno / 8;

  return (use_linux_regsets
	  && (the_low_target.regset_bitmap == NULL
	      || (the_low_target.regset_bitmap[index] & mask) != 0));
}

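/* A worked instance of the bitmap lookup above (register number made
   up for the example): for regno == 10, index is 10 / 8 == 1 and mask
   is 1 << (10 % 8) == 0x04, so regno 10 is regset-supported iff bit 2
   of regset_bitmap[1] is set.  A NULL bitmap means every register
   lives in some regset.  */
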
#ifdef HAVE_LINUX_USRREGS

int
register_addr (int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= the_low_target.num_regs)
    error ("Invalid register number %d.", regnum);

  addr = the_low_target.regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}

/* Store one register.  */
static void
store_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
	      (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}

/* Fetch all registers, or just one, from the child process.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been retrieved by regsets_fetch_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
{
  if (regno == -1)
    {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
	if (all || !linux_register_in_regsets (regno))
	  fetch_register (regcache, regno);
    }
  else
    fetch_register (regcache, regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been saved by regsets_store_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
{
  if (regno == -1)
    {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
	if (all || !linux_register_in_regsets (regno))
	  store_register (regcache, regno);
    }
  else
    store_register (regcache, regno);
}

#else /* !HAVE_LINUX_USRREGS */

#define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regcache, regno, all) do {} while (0)

#endif


void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;

  if (regno == -1)
    {
      if (the_low_target.fetch_register != NULL)
	for (regno = 0; regno < the_low_target.num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      all = regsets_fetch_inferior_registers (regcache);
      usr_fetch_inferior_registers (regcache, -1, all);
    }
  else
    {
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regcache);
      if (!use_regsets || all)
	usr_fetch_inferior_registers (regcache, regno, 1);
    }
}

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regcache);
      usr_store_inferior_registers (regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regcache);
      if (!use_regsets || all)
	usr_store_inferior_registers (regcache, regno, 1);
    }
}
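
/* To make the fallback chain above concrete (a made-up case): fetching
   regno 10 first tries the low target's fetch_register hook; if that
   declines, and linux_register_in_regsets (10) says the register lives
   in a regset, we try the regset path; only if the regset path reports
   "incomplete" (non-zero return, e.g. every regset got EIO and was
   disabled) do we fall back to one-word-at-a-time PTRACE_PEEKUSER via
   usr_fetch_inferior_registers.  */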


/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords.  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}

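/* A worked instance of the rounding arithmetic above (numbers made up
   for the example, with sizeof (PTRACE_XFER_TYPE) == 8): for memaddr
   == 0x1003 and len == 5, addr rounds down to 0x1000, count is
   ((0x1008 - 0x1000) + 7) / 8 == 1, so one PTRACE_PEEKTEXT fetches the
   word at 0x1000 and the memcpy skips the first 0x1003 & 7 == 3 bytes
   of the buffer.  */
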
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
    alloca (count * sizeof (PTRACE_XFER_TYPE));

  int pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
	val = val & 0xff;
      else if (len == 2)
	val = val & 0xffff;
      else if (len == 3)
	val = val & 0xffffff;
      fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
	       val, (long)memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_ARG3_TYPE) (uintptr_t) addr,
	      (PTRACE_ARG4_TYPE) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
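
/* The peeks of buffer[0] and buffer[count - 1] above implement a
   read-modify-write: ptrace can only poke whole words, so writing,
   say, a single breakpoint byte into the middle of a word must
   preserve the word's other bytes.  A minimal sketch of that kind of
   caller (the address and the x86-style 0xcc opcode are made up for
   the example):

     static const unsigned char bp = 0xcc;
     linux_write_memory (0x400812, &bp, 1);   // clobbers 1 byte only

   Without the surrounding peeks, the rest of the containing word
   would be overwritten with whatever garbage the alloca'd buffer
   happened to hold.  */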

/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  */
static int linux_supports_tracefork_flag;

static void
linux_enable_event_reporting (int pid)
{
  if (!linux_supports_tracefork_flag)
    return;

  ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
}

/* Helper functions for linux_test_for_tracefork, called via clone ().  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}

#define STACK_SIZE 4096

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
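
/* The "status >> 16" test above relies on the kernel's encoding of
   extended ptrace events: an event stop looks like a SIGTRAP stop
   with the event number stored in bits 16 and up of the wait status.
   A minimal decoding sketch (PTRACE_EVENT_CLONE shown as one more
   value of the same encoding):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       {
	 int event = status >> 16;

	 if (event == PTRACE_EVENT_FORK)
	   ;  // new child; its pid comes via PTRACE_GETEVENTMSG
	 else if (event == PTRACE_EVENT_CLONE)
	   ;  // new thread; retrieved the same way
       }
*/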


static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct lwp_info *lwp;
      int lwpid;

      lwp = get_thread_lwp (current_inferior);
      lwpid = lwpid_of (lwp);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}

/* These breakpoint and watchpoint related wrapper functions simply
   pass on the function call if the target has registered a
   corresponding function.  */

static int
linux_insert_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.insert_point != NULL)
    return the_low_target.insert_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_remove_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.remove_point != NULL)
    return the_low_target.remove_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_by_watchpoint;
}

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_data_address;
}

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
#endif

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  */

static void
siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
{
  int done = 0;

  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
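
/* A worked instance of the clamping above (numbers made up, assuming
   sizeof (siginfo_t) == 128): a request for offset == 120, len == 16
   is clamped to len == 8 and returns 8, so the client learns the
   object is exhausted; offset == 128 or more fails outright with -1.
   Partial reads and writes are therefore well-defined at the tail of
   the siginfo object.  */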

/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies us when children change state; and it serves as the
   handler for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

static int
linux_supports_agent (void)
{
  return 1;
}

/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}

static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}

static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}

static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}

5250
2268b414
JK
5251/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5252
5253static int
5254get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5255 CORE_ADDR *phdr_memaddr, int *num_phdr)
5256{
5257 char filename[PATH_MAX];
5258 int fd;
5259 const int auxv_size = is_elf64
5260 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5261 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5262
5263 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5264
5265 fd = open (filename, O_RDONLY);
5266 if (fd < 0)
5267 return 1;
5268
5269 *phdr_memaddr = 0;
5270 *num_phdr = 0;
5271 while (read (fd, buf, auxv_size) == auxv_size
5272 && (*phdr_memaddr == 0 || *num_phdr == 0))
5273 {
5274 if (is_elf64)
5275 {
5276 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5277
5278 switch (aux->a_type)
5279 {
5280 case AT_PHDR:
5281 *phdr_memaddr = aux->a_un.a_val;
5282 break;
5283 case AT_PHNUM:
5284 *num_phdr = aux->a_un.a_val;
5285 break;
5286 }
5287 }
5288 else
5289 {
5290 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5291
5292 switch (aux->a_type)
5293 {
5294 case AT_PHDR:
5295 *phdr_memaddr = aux->a_un.a_val;
5296 break;
5297 case AT_PHNUM:
5298 *num_phdr = aux->a_un.a_val;
5299 break;
5300 }
5301 }
5302 }
5303
5304 close (fd);
5305
5306 if (*phdr_memaddr == 0 || *num_phdr == 0)
5307 {
5308 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5309 "phdr_memaddr = %ld, phdr_num = %d",
5310 (long) *phdr_memaddr, *num_phdr);
5311 return 2;
5312 }
5313
5314 return 0;
5315}
5316
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check. */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones. */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
        Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
        Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

        if (p->p_type == PT_PHDR)
          relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.
         Fortunately, all real-world executables, including PIE
         executables, always have PT_PHDR present.  PT_PHDR is absent
         in some shared libraries and in fpc (Free Pascal 2.4)
         binaries, but neither of those needs or provides DT_DEBUG
         anyway (fpc binaries are statically linked).

         Therefore, if DT_DEBUG exists, PT_PHDR exists as well.

         GDB could also find RELOCATION from AT_ENTRY - e_entry. */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
        {
          Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
      else
        {
          Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

          if (p->p_type == PT_DYNAMIC)
            return p->p_vaddr + relocation;
        }
    }

  return 0;
}

/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized. */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two. */

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return (CORE_ADDR) -1;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
        {
          Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;

          if (dyn->d_tag == DT_DEBUG)
            return dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }
      else
        {
          Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;

          if (dyn->d_tag == DT_DEBUG)
            return dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }

      dynamic_memaddr += dyn_size;
    }

  return (CORE_ADDR) -1;
}

/* Read one pointer from MEMADDR in the inferior. */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's. */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
        *ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
        *ptr = addr.ui;
      else
        gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version. */
    int r_version_offset;

    /* Offset and size of r_debug.r_map. */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map. */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map. */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map. */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map. */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map. */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply. */

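/* For illustration, a non-empty reply might look like the following
   (the addresses here are made up):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib/libc.so.6" lm="0x7ffff7ff94c8"
                l_addr="0x7ffff7a00000" l_ld="0x7ffff7bcd000"/>
     </library-list-svr4>

   The generated document contains no extra whitespace; the
   indentation above is only for readability.  */
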
static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,  /* r_version offset. */
      4,  /* r_debug.r_map offset. */
      0,  /* l_addr offset in link_map. */
      4,  /* l_name offset in link_map. */
      8,  /* l_ld offset in link_map. */
      12, /* l_next offset in link_map. */
      16  /* l_prev offset in link_map. */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,  /* r_version offset. */
      8,  /* r_debug.r_map offset. */
      0,  /* l_addr offset in link_map. */
      8,  /* l_name offset in link_map. */
      16, /* l_ld offset in link_map. */
      24, /* l_next offset in link_map. */
      32  /* l_prev offset in link_map. */
    };
  const struct link_map_offsets *lmo;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;

  if (priv->r_debug == 0)
    priv->r_debug = get_r_debug (pid, is_elf64);

  if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
    {
      document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
    }
  else
    {
      int allocated = 1024;
      char *p;
      const int ptr_size = is_elf64 ? 8 : 4;
      CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
      int r_version, header_done = 0;

      document = xmalloc (allocated);
      strcpy (document, "<library-list-svr4 version=\"1.0\"");
      p = document + strlen (document);

      r_version = 0;
      if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                             (unsigned char *) &r_version,
                             sizeof (r_version)) != 0
          || r_version != 1)
        {
          warning ("unexpected r_debug version %d", r_version);
          goto done;
        }

      if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                        &lm_addr, ptr_size) != 0)
        {
          warning ("unable to read r_map from 0x%lx",
                   (long) priv->r_debug + lmo->r_map_offset);
          goto done;
        }

      lm_prev = 0;
      while (read_one_ptr (lm_addr + lmo->l_name_offset,
                           &l_name, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_addr_offset,
                              &l_addr, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_ld_offset,
                              &l_ld, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_prev_offset,
                              &l_prev, ptr_size) == 0
             && read_one_ptr (lm_addr + lmo->l_next_offset,
                              &l_next, ptr_size) == 0)
        {
          unsigned char libname[PATH_MAX];

          if (lm_prev != l_prev)
            {
              warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                       (long) lm_prev, (long) l_prev);
              break;
            }

          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters. */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below. */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'. */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage. */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
          else if (lm_prev == 0)
            {
              sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
              p = p + strlen (p);
            }

          if (l_next == 0)
            break;

          lm_prev = lm_addr;
          lm_addr = l_next;
        }
    done:
      if (!header_done)
        {
          /* Empty list; terminate `<library-list-svr4'. */
          strcpy (p, "/>");
        }
      else
        strcpy (p, "</library-list-svr4>");
    }

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

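/* The Linux target vector.  The initializers below must appear in
   exactly the order the corresponding members are declared in struct
   target_ops (see target.h).  */
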
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
};

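/* Ignore the signal that LinuxThreads is assumed to use for thread
   cancellation, so that it is not fatal to gdbserver itself.  */
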
static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is. */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

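/* One-time initialization of the Linux low target, run at gdbserver
   startup.  */
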
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}