]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
gdb:
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
0b302171 2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
d26e3629 21#include "linux-osdata.h"
58b4daa5 22#include "agent.h"
da6d8c04 23
58caa3dc 24#include <sys/wait.h>
da6d8c04
DJ
25#include <stdio.h>
26#include <sys/param.h>
da6d8c04 27#include <sys/ptrace.h>
af96c192 28#include "linux-ptrace.h"
e3deef73 29#include "linux-procfs.h"
da6d8c04
DJ
30#include <signal.h>
31#include <sys/ioctl.h>
32#include <fcntl.h>
d07c63e7 33#include <string.h>
0a30fbc4
DJ
34#include <stdlib.h>
35#include <unistd.h>
fa6a77dc 36#include <errno.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
efcbbd14
UW
43#include <sys/stat.h>
44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
957f3f49
DE
46#ifndef ELFMAG0
47/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51#include <elf.h>
52#endif
efcbbd14
UW
53
54#ifndef SPUFS_MAGIC
55#define SPUFS_MAGIC 0x23c9b64e
56#endif
da6d8c04 57
03583c20
UW
58#ifdef HAVE_PERSONALITY
59# include <sys/personality.h>
60# if !HAVE_DECL_ADDR_NO_RANDOMIZE
61# define ADDR_NO_RANDOMIZE 0x0040000
62# endif
63#endif
64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
68
ec8ebe72
DE
69#ifndef W_STOPCODE
70#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71#endif
72
1a981360
PA
73/* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75#ifndef __SIGRTMIN
76#define __SIGRTMIN 32
77#endif
78
42c81e2a
DJ
79#ifdef __UCLIBC__
80#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81#define HAS_NOMMU
82#endif
83#endif
84
8365dcf5
TJB
85#ifndef HAVE_ELF32_AUXV_T
86/* Copied from glibc's elf.h. */
87typedef struct
88{
89 uint32_t a_type; /* Entry type */
90 union
91 {
92 uint32_t a_val; /* Integer value */
93 /* We use to have pointer elements added here. We cannot do that,
94 though, since it does not work when using 32-bit definitions
95 on 64-bit platforms and vice versa. */
96 } a_un;
97} Elf32_auxv_t;
98#endif
99
100#ifndef HAVE_ELF64_AUXV_T
101/* Copied from glibc's elf.h. */
102typedef struct
103{
104 uint64_t a_type; /* Entry type */
105 union
106 {
107 uint64_t a_val; /* Integer value */
108 /* We use to have pointer elements added here. We cannot do that,
109 though, since it does not work when using 32-bit definitions
110 on 64-bit platforms and vice versa. */
111 } a_un;
112} Elf64_auxv_t;
113#endif
114
24a09b5f
DJ
115/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
116 representation of the thread ID.
611cb4a5 117
54a0b537 118 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
119 the same as the LWP ID.
120
121 ``all_processes'' is keyed by the "overall process ID", which
122 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 123
54a0b537 124struct inferior_list all_lwps;
0d62e5e8 125
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the chain; new entries are pushed at the front whenever we
   see a stop for a process we do not know about yet.  */
struct simple_pid_list *stopped_pids;
142
143/* Trivial list manipulation functions to keep track of a list of new
144 stopped processes. */
145
146static void
147add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
148{
149 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
150
151 new_pid->pid = pid;
152 new_pid->status = status;
153 new_pid->next = *listp;
154 *listp = new_pid;
155}
156
157static int
158pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
159{
160 struct simple_pid_list **p;
161
162 for (p = listp; *p != NULL; p = &(*p)->next)
163 if ((*p)->pid == pid)
164 {
165 struct simple_pid_list *next = (*p)->next;
166
167 *statusp = (*p)->status;
168 xfree (*p);
169 *p = next;
170 return 1;
171 }
172 return 0;
173}
24a09b5f 174
0d62e5e8
DJ
175/* FIXME this is a bit of a hack, and could be removed. */
176int stopping_threads;
177
178/* FIXME make into a target method? */
24a09b5f 179int using_threads = 1;
24a09b5f 180
fa593d66
PA
181/* True if we're presently stabilizing threads (moving them out of
182 jump pads). */
183static int stabilizing_threads;
184
95954743
PA
185/* This flag is true iff we've just created or attached to our first
186 inferior but it has not stopped yet. As soon as it does, we need
187 to call the low target's arch_setup callback. Doing this only on
188 the first inferior avoids reinializing the architecture on every
189 inferior, and avoids messing with the register caches of the
190 already running inferiors. NOTE: this assumes all inferiors under
191 control of gdbserver have the same architecture. */
d61ddec4
UW
192static int new_inferior;
193
2acc282a 194static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 195 int step, int signal, siginfo_t *info);
2bd7c093 196static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
197static void stop_all_lwps (int suspend, struct lwp_info *except);
198static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 199static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 200static void *add_lwp (ptid_t ptid);
c35fafde 201static int linux_stopped_by_watchpoint (void);
95954743 202static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 203static void proceed_all_lwps (void);
d50171e4
PA
204static int finish_step_over (struct lwp_info *lwp);
205static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
206static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 207static void linux_enable_event_reporting (int pid);
d50171e4
PA
208
/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback: targets that must
   software single-step provide that callback instead, so its absence
   is what signals hardware single-step support.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}
217
/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation (the two capabilities are provided
   together by the low target).  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}
0d62e5e8 226
fa593d66
PA
/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior; it only reflects whether the low target provides the
   jump-pad installation hook.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
236
0d62e5e8
DJ
237struct pending_signals
238{
239 int signal;
32ca6d61 240 siginfo_t info;
0d62e5e8
DJ
241 struct pending_signals *prev;
242};
611cb4a5 243
14ce3065
DE
244#define PTRACE_ARG3_TYPE void *
245#define PTRACE_ARG4_TYPE void *
c6ecbae5 246#define PTRACE_XFER_TYPE long
da6d8c04 247
58caa3dc 248#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
249static char *disabled_regsets;
250static int num_regsets;
58caa3dc
DJ
251#endif
252
bd99dc85
PA
253/* The read/write ends of the pipe registered as waitable file in the
254 event loop. */
255static int linux_event_pipe[2] = { -1, -1 };
256
257/* True if we're currently in async mode. */
258#define target_is_async_p() (linux_event_pipe[0] != -1)
259
02fc4de7 260static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
261static void wait_for_sigstop (struct inferior_list_entry *entry);
262
d0722149
DE
/* Examine HEADER.  If it carries the ELF magic, store the ELF machine
   number in *MACHINE and return 1 when the file class is 64-bit, 0
   otherwise.  For a non-ELF header, *MACHINE is set to EM_NONE and -1
   is returned.  NOTE(review): -1 is itself non-zero, so callers must
   not treat this result as a plain boolean — confirm against callers
   before changing.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* ELFMAG/SELFMAG cover exactly the EI_MAG0..EI_MAG3 bytes.  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) == 0)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return -1;
}
280
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* A file too short to hold a full ELF header cannot be a 64-bit
     ELF binary; a short read is treated the same as "not ELF".  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }

  close (fd);
  return elf_64_header_p (&header, machine);
}
304
be07f1a2
PA
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file (see elf_64_file_p for the exact
   return-value convention).  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[MAXPATHLEN];

  /* Inspect the binary behind the /proc symlink to the executable.  */
  snprintf (file, sizeof (file), "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
316
bd99dc85
PA
/* Remove LWP from all bookkeeping lists and release its storage.  The
   associated thread entry is removed as well.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  /* Free the target back end's per-LWP data before the LWP itself.  */
  free (lwp->arch_private);
  free (lwp);
}
325
95954743
PA
/* Add a process to the common process list, and set its private
   data.  PID is the overall process (thread group) ID; ATTACHED is
   non-zero when we attached to an already-running process rather
   than spawning it ourselves.  Returns the new process entry.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  The
     new_inferior flag makes the arch_setup callback run on the first
     stop (see the comment at its definition).  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Let the target back end allocate its per-process data, if any.  */
  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
346
07d4f67e
DE
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  The emulation
   alternates WNOHANG waits between the plain and __WCLONE wait sets,
   blocking in sigsuspend between full rounds when the caller did not
   ask for WNOHANG.  errno is preserved from the decisive waitpid
   call.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      /* Poll both wait sets in turn; WNOHANG keeps each poll from
	 blocking so we can alternate.  */
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
421
bd99dc85
PA
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* Ptrace extended events live in the high 16 bits of the wait
     status.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      /* Remember the non-SIGSTOP signal so it is reported
		 once the all-stop round finishes.  */
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
500
d50171e4
PA
501/* Return the PC as read from the regcache of LWP, without any
502 adjustment. */
503
504static CORE_ADDR
505get_pc (struct lwp_info *lwp)
506{
507 struct thread_info *saved_inferior;
508 struct regcache *regcache;
509 CORE_ADDR pc;
510
511 if (the_low_target.get_pc == NULL)
512 return 0;
513
514 saved_inferior = current_inferior;
515 current_inferior = get_lwp_thread (lwp);
516
517 regcache = get_thread_regcache (current_inferior, 1);
518 pc = (*the_low_target.get_pc) (regcache);
519
520 if (debug_threads)
521 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
522
523 current_inferior = saved_inferior;
524 return pc;
525}
526
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Only back up over the breakpoint when this was a plain software
     breakpoint trap: not a single-step, not a watchpoint stop, and
     not a ptrace extended event (high status bits set).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
ce3a066d 570
0d62e5e8 571static void *
95954743 572add_lwp (ptid_t ptid)
611cb4a5 573{
54a0b537 574 struct lwp_info *lwp;
0d62e5e8 575
54a0b537
PA
576 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
577 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 578
95954743 579 lwp->head.id = ptid;
0d62e5e8 580
aa5ca48f
DE
581 if (the_low_target.new_thread != NULL)
582 lwp->arch_private = the_low_target.new_thread ();
583
54a0b537 584 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 585
54a0b537 586 return lwp;
0d62e5e8 587}
611cb4a5 588
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  The child is made a
   ptrace tracee and put in its own process group before exec.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  /* Optionally disable address space randomization for the child,
     restoring our own personality afterwards.  */
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No MMU means no fork; vfork shares the address space until exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    /* Errors ignored.  */;
	}

      execv (program, allargs);
      /* Fall back to a PATH search only when the file was not found.  */
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* PTRACE_SETOPTIONS must wait until the first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
680
/* Attach to an inferior process.  LWPID is the thread to attach to;
   INITIAL is non-zero when this is the first (process-level) attach,
   in which case failure is a hard error rather than a warning.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.
	     NOTE(review): "%ld" is paired with an unsigned long
	     argument here and below — confirm and consider "%lu".  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
795
95954743
PA
/* Attach to LWPID, which belongs to the current inferior's process.
   Failure to attach only produces a warning (non-initial attach).  */
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
801
e3deef73
LM
802/* Attach to PID. If PID is the tgid, attach to it and all
803 of its threads. */
804
0d62e5e8 805int
a1928bad 806linux_attach (unsigned long pid)
0d62e5e8 807{
e3deef73
LM
808 /* Attach to PID. We will check for other threads
809 soon. */
95954743 810 linux_attach_lwp_1 (pid, 1);
95954743 811 linux_add_process (pid, 1);
0d62e5e8 812
bd99dc85
PA
813 if (!non_stop)
814 {
8336d594
PA
815 struct thread_info *thread;
816
817 /* Don't ignore the initial SIGSTOP if we just attached to this
818 process. It will be collected by wait shortly. */
819 thread = find_thread_ptid (ptid_build (pid, pid, 0));
820 thread->last_resume_kind = resume_stop;
bd99dc85 821 }
0d62e5e8 822
e3deef73
LM
823 if (linux_proc_get_tgid (pid) == pid)
824 {
825 DIR *dir;
826 char pathname[128];
827
828 sprintf (pathname, "/proc/%ld/task", pid);
829
830 dir = opendir (pathname);
831
832 if (!dir)
833 {
834 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
835 fflush (stderr);
836 }
837 else
838 {
839 /* At this point we attached to the tgid. Scan the task for
840 existing threads. */
841 unsigned long lwp;
842 int new_threads_found;
843 int iterations = 0;
844 struct dirent *dp;
845
846 while (iterations < 2)
847 {
848 new_threads_found = 0;
849 /* Add all the other threads. While we go through the
850 threads, new threads may be spawned. Cycle through
851 the list of threads until we have done two iterations without
852 finding new threads. */
853 while ((dp = readdir (dir)) != NULL)
854 {
855 /* Fetch one lwp. */
856 lwp = strtoul (dp->d_name, NULL, 10);
857
858 /* Is this a new thread? */
859 if (lwp
860 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
861 {
862 linux_attach_lwp_1 (lwp, 0);
863 new_threads_found++;
864
865 if (debug_threads)
866 fprintf (stderr, "\
867Found and attached to new lwp %ld\n", lwp);
868 }
869 }
870
871 if (!new_threads_found)
872 iterations++;
873 else
874 iterations = 0;
875
876 rewinddir (dir);
877 }
878 closedir (dir);
879 }
880 }
881
95954743
PA
882 return 0;
883}
884
/* Argument block for second_thread_of_pid_p: counts the threads seen
   that belong to a given process.  */
struct counter
{
  /* The process whose threads are being counted.  */
  int pid;
  /* Number of matching threads seen so far.  */
  int count;
};
890
891static int
892second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
893{
894 struct counter *counter = args;
895
896 if (ptid_get_pid (entry->id) == counter->pid)
897 {
898 if (++counter->count > 1)
899 return 1;
900 }
d61ddec4 901
da6d8c04
DJ
902 return 0;
903}
904
95954743
PA
905static int
906last_thread_of_process_p (struct thread_info *thread)
907{
908 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
909 int pid = ptid_get_pid (ptid);
910 struct counter counter = { pid , 0 };
da6d8c04 911
95954743
PA
912 return (find_inferior (&all_threads,
913 second_thread_of_pid_p, &counter) == NULL);
914}
915
da84f473
PA
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL as a belt-and-
   suspenders fallback (see below).  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}
953
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  ARGS points to the pid of the process whose
   threads should be killed.  Always returns 0 so the walk visits
   every thread.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Skip threads belonging to other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
991
95954743
PA
992static int
993linux_kill (int pid)
0d62e5e8 994{
95954743 995 struct process_info *process;
54a0b537 996 struct lwp_info *lwp;
fd500816 997 int wstat;
95954743 998 int lwpid;
fd500816 999
95954743
PA
1000 process = find_process_pid (pid);
1001 if (process == NULL)
1002 return -1;
9d606399 1003
f9e39928
PA
1004 /* If we're killing a running inferior, make sure it is stopped
1005 first, as PTRACE_KILL will not work otherwise. */
7984d532 1006 stop_all_lwps (0, NULL);
f9e39928 1007
da84f473 1008 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 1009
54a0b537 1010 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1011 thread in the list, so do so now. */
95954743 1012 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 1013
784867a5 1014 if (lwp == NULL)
fd500816 1015 {
784867a5
JK
1016 if (debug_threads)
1017 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
1018 lwpid_of (lwp), pid);
1019 }
1020 else
1021 {
1022 if (debug_threads)
1023 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
1024 lwpid_of (lwp), pid);
fd500816 1025
784867a5
JK
1026 do
1027 {
da84f473 1028 linux_kill_one_lwp (lwp);
784867a5
JK
1029
1030 /* Make sure it died. The loop is most likely unnecessary. */
1031 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
1032 } while (lwpid > 0 && WIFSTOPPED (wstat));
1033 }
2d717e4f 1034
8336d594 1035 the_target->mourn (process);
f9e39928
PA
1036
1037 /* Since we presently can only stop all lwps of all processes, we
1038 need to unstop lwps of other processes. */
7984d532 1039 unstop_all_lwps (0, NULL);
95954743 1040 return 0;
0d62e5e8
DJ
1041}
1042
9b224c5e
PA
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 (no signal to deliver) in all the filtered cases below.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum target_signal signo = TARGET_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == TARGET_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s hasn't stopped: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  The high 16 bits
     carry the ptrace event code in that case.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had stopped with extended "
                 "status: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = target_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's pass/nopass signal tables when we have them.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, but it is in nopass state\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == TARGET_SIGNAL_TRAP || signo == TARGET_SIGNAL_INT))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, "
                 "but we don't know if we should pass it.  Default to not.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s has pending signal %s: delivering it.\n",
                 target_pid_to_str (ptid_of (lp)),
                 target_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1126
95954743
PA
/* Callback for `find_inferior'.  Detach from one lwp of the process
   whose pid ARGS points at: clear any pending SIGSTOP, flush its
   register cache, then PTRACE_DETACH passing on any pending signal.
   The lwp is deleted from our lists afterwards.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  /* Ignore lwps of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Sending SIGCONT to %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1168
95954743
PA
/* Detach from all lwps of process PID.  Returns 0 on success, -1 if
   the process is not known.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1200
/* Remove all LWPs that belong to process PROC from the lwp list.
   Callback for `find_inferior'; always returns 0 so the whole list is
   visited.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *candidate = (struct lwp_info *) entry;
  struct process_info *owner = proc;

  /* Skip lwps owned by a different process.  */
  if (pid_of (candidate) != pid_of (owner))
    return 0;

  delete_lwp (candidate);
  return 0;
}
1214
/* Clean up after PROCESS has exited or been killed: let thread_db
   mourn, delete all of the process's lwps, free the per-process
   private data (including its arch-specific part), and remove the
   process from the process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
1234
/* Wait (reap) process PID until it has fully exited.  Loops over
   waitpid until either the process reports an exit/termination
   status, or waitpid fails with ECHILD (no such child left).

   Fixed: the previous version tested WIFEXITED/WIFSIGNALED on STATUS
   even when my_waitpid had just returned -1, in which case STATUS was
   never written -- reading an uninitialized variable.  Only inspect
   STATUS after a successful wait.  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1246
6ad8ae5c 1247/* Return nonzero if the given thread is still alive. */
0d62e5e8 1248static int
95954743 1249linux_thread_alive (ptid_t ptid)
0d62e5e8 1250{
95954743
PA
1251 struct lwp_info *lwp = find_lwp_pid (ptid);
1252
1253 /* We assume we always know if a thread exits. If a whole process
1254 exited but we still haven't been able to report it to GDB, we'll
1255 hold on to the last lwp of the dead process. */
1256 if (lwp != NULL)
1257 return !lwp->dead;
0d62e5e8
DJ
1258 else
1259 return 0;
1260}
1261
/* Return 1 if this lwp has an interesting status pending.  ARG is the
   ptid filter: minus_one_ptid matches everything, otherwise only lwps
   of the same process are considered.  Used as a `find_inferior'
   callback.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
1286
95954743
PA
1287static int
1288same_lwp (struct inferior_list_entry *entry, void *data)
1289{
1290 ptid_t ptid = *(ptid_t *) data;
1291 int lwp;
1292
1293 if (ptid_get_lwp (ptid) != 0)
1294 lwp = ptid_get_lwp (ptid);
1295 else
1296 lwp = ptid_get_pid (ptid);
1297
1298 if (ptid_get_lwp (entry->id) == lwp)
1299 return 1;
1300
1301 return 0;
1302}
1303
1304struct lwp_info *
1305find_lwp_pid (ptid_t ptid)
1306{
1307 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1308}
1309
/* Wait for an event from the lwp(s) matching PTID (minus_one_ptid
   means any child).  The raw wait status is stored through WSTATP and
   OPTIONS is passed on to waitpid (with __WALL forced on).  Returns
   the lwp the event belongs to, or NULL when WNOHANG was set and
   nothing was pending.  Also records per-lwp stop state (watchpoint
   data, stop pc) as a side effect.  */
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  /* __WALL: wait for both cloned and non-cloned children.  */
  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* NOTE(review): signals 32/33 are skipped in this debug trace --
     presumably the threading library's internal signals; confirm.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target callbacks read from current_inferior, so
	     temporarily switch it to CHILD's thread.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
611cb4a5 1441
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; the lwp must not remain suspended.  */
  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1487
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On success, *STATUS is filled in by
   fast_tracepoint_collecting.  Returns 0 when the target provides no
   way to identify the thread (no get_thread_area hook, or it
   fails).  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1509
/* Check whether LWP, stopped inside the in-process agent's jump pad,
   needs to be moved out of it before the stop can be reported.
   Returns 1 when the caller should arrange for the lwp to run until
   it exits the jump pad; 0 when it is safe to report the stop as-is.
   WSTAT may be NULL, or point at the lwp's wait status.

   The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  /* Several helpers below read current_inferior; switch it to LWP's
     thread and restore it before every return.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
		 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}
1636
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  The signal (and its siginfo, fetched via ptrace)
   is taken from the wait status *WSTAT.  Non-RT signals already in
   the queue are not queued a second time.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  /* Push a new entry on the head of the (singly linked, via ->prev)
     pending list.  */
  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1695
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  The oldest entry (tail of the ->prev chain) is
   removed; its stop code is stored through *WSTAT and its siginfo is
   re-installed in the lwp via ptrace.  Returns 1 if a signal was
   dequeued, 0 if the list was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest entry so signals are reported FIFO.  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
1739
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  Returns 1 if a breakpoint was
   found (and the PC pushed back), 0 otherwise.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}
1788
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  NOTE(review): presumably holds a null ptid when no
   step-over is in progress -- confirm against the code that sets it.  */
ptid_t step_over_bkpt;
1792
/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;
  ptid_t wait_ptid;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      /* NOTE(review): REQUESTED_CHILD is dereferenced below without a
	 NULL check; callers presumably only pass ptids of known lwps
	 -- confirm.  */
      requested_child = find_lwp_pid (ptid);

      /* A pending status on a thread still collecting a fast
	 tracepoint must be deferred and the thread resumed until it
	 leaves the jump pad.  */
      if (!stopping_threads
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (wait_ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      /* Event from an lwp of a process other than the tgid asked for:
	 stash it as pending (or mark the lwp dead) and keep waiting.  */
      if (ptid_is_pid (ptid)
	  && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
	{
	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	  continue;
	}

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (1, event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* Set the ptrace event-reporting options the first time we see
	 the lwp stopped, if that was deferred.  */
      if (event_child->must_set_ptrace_flags)
	{
	  linux_enable_event_reporting (lwpid_of (event_child));
	  event_child->must_set_ptrace_flags = 0;
	}

      /* SIGTRAP with a nonzero high half is an extended ptrace event
	 (fork/clone/exec), not a real trap.  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (current_inferior->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      /* The SIGSTOP was ours; swallow it and keep going.  */
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1985
/* Count the LWP's that have had events.  `find_inferior' callback;
   DATA points at the int counter to increment.  Always returns 0 so
   the whole list is visited.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}
2009
2010/* Select the LWP (if any) that is currently being single-stepped. */
2011
2012static int
2013select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2014{
2015 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 2016 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 2017
8336d594
PA
2018 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2019 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2020 && lp->status_pending_p)
2021 return 1;
2022 else
2023 return 0;
2024}
2025
2026/* Select the Nth LWP that has had a SIGTRAP event that should be
2027 reported to GDB. */
2028
2029static int
2030select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2031{
2032 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 2033 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
2034 int *selector = data;
2035
2036 gdb_assert (selector != NULL);
2037
2038 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
2039 if (thread->last_resume_kind != resume_stop
2040 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
2041 && lp->status_pending_p
2042 && WIFSTOPPED (lp->status_pending)
2043 && WSTOPSIG (lp->status_pending) == SIGTRAP
2044 && !breakpoint_inserted_here (lp->stop_pc))
2045 if ((*selector)-- == 0)
2046 return 1;
2047
2048 return 0;
2049}
2050
/* Callback for find_inferior: if LWP ENTRY (other than the event LWP
   passed in DATA) is stopped at a GDB breakpoint, cancel the hit so
   the LWP will simply re-trip the breakpoint when resumed.  Always
   returns 0 so every LWP is visited.  */

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}
2086
7984d532
PA
/* Cancel pending GDB-breakpoint hits in all LWPs; see
   cancel_breakpoints_callback for the rationale.  */

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}
2092
6bf5e0ba
PA
2093/* Select one LWP out of those that have events pending. */
2094
2095static void
2096select_event_lwp (struct lwp_info **orig_lp)
2097{
2098 int num_events = 0;
2099 int random_selector;
2100 struct lwp_info *event_lp;
2101
2102 /* Give preference to any LWP that is being single-stepped. */
2103 event_lp
2104 = (struct lwp_info *) find_inferior (&all_lwps,
2105 select_singlestep_lwp_callback, NULL);
2106 if (event_lp != NULL)
2107 {
2108 if (debug_threads)
2109 fprintf (stderr,
2110 "SEL: Select single-step %s\n",
2111 target_pid_to_str (ptid_of (event_lp)));
2112 }
2113 else
2114 {
2115 /* No single-stepping LWP. Select one at random, out of those
2116 which have had SIGTRAP events. */
2117
2118 /* First see how many SIGTRAP events we have. */
2119 find_inferior (&all_lwps, count_events_callback, &num_events);
2120
2121 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2122 random_selector = (int)
2123 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2124
2125 if (debug_threads && num_events > 1)
2126 fprintf (stderr,
2127 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2128 num_events, random_selector);
2129
2130 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2131 select_event_lwp_callback,
2132 &random_selector);
2133 }
2134
2135 if (event_lp != NULL)
2136 {
2137 /* Switch the event LWP. */
2138 *orig_lp = event_lp;
2139 }
2140}
2141
7984d532
PA
2142/* Decrement the suspend count of an LWP. */
2143
2144static int
2145unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2146{
2147 struct lwp_info *lwp = (struct lwp_info *) entry;
2148
2149 /* Ignore EXCEPT. */
2150 if (lwp == except)
2151 return 0;
2152
2153 lwp->suspended--;
2154
2155 gdb_assert (lwp->suspended >= 0);
2156 return 0;
2157}
2158
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}
2167
fa593d66
PA
2168static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2169static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2170 void *data);
2171static int lwp_running (struct inferior_list_entry *entry, void *data);
2172static ptid_t linux_wait_1 (ptid_t ptid,
2173 struct target_waitstatus *ourstatus,
2174 int target_options);
2175
2176/* Stabilize threads (move out of jump pads).
2177
2178 If a thread is midway collecting a fast tracepoint, we need to
2179 finish the collection and move it out of the jump pad before
2180 reporting the signal.
2181
2182 This avoids recursion while collecting (when a signal arrives
2183 midway, and the signal handler itself collects), which would trash
2184 the trace buffer. In case the user set a breakpoint in a signal
2185 handler, this avoids the backtrace showing the jump pad, etc..
2186 Most importantly, there are certain things we can't do safely if
2187 threads are stopped in a jump pad (or in its callee's). For
2188 example:
2189
2190 - starting a new trace run. A thread still collecting the
2191 previous run, could trash the trace buffer when resumed. The trace
2192 buffer control structures would have been reset but the thread had
2193 no way to tell. The thread could even midway memcpy'ing to the
2194 buffer, which would mean that when resumed, it would clobber the
2195 trace buffer that had been set for a new run.
2196
2197 - we can't rewrite/reuse the jump pads for new tracepoints
2198 safely. Say you do tstart while a thread is stopped midway while
2199 collecting. When the thread is later resumed, it finishes the
2200 collection, and returns to the jump pad, to execute the original
2201 instruction that was under the tracepoint jump at the time the
2202 older run had been started. If the jump pad had been rewritten
2203 since for something else in the new run, the thread would now
2204 execute the wrong / random instructions. */
2205
/* Move all threads out of the fast-tracepoint jump pads (see the
   rationale in the comment above).  Bails out early if some LWP is
   "stuck" in a pad for a reason GDB must hear about (breakpoint in
   the pad, etc.); otherwise kicks every LWP out and pumps the event
   loop until none is still running inside a pad.  Each stopped LWP is
   suspended while we work and unsuspended at the end; any interesting
   signal seen meanwhile is queued for later reporting.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
					 stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      /* Can't stabilize: that LWP's stop must be reported as-is.  */
      if (debug_threads)
	fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_inferior);

	  /* Lock it.  */
	  lwp->suspended++;

	  /* A real (non-SIGSTOP-ack) signal arrived while
	     stabilizing; defer it so it can be reported once the
	     threads are stable.  */
	  if (ourstatus.value.sig != TARGET_SIGNAL_0
	      || current_inferior->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspends done in the loop above.  */
  find_inferior (&all_lwps, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_inferior = save_inferior;

  if (debug_threads)
    {
      lwp_stuck
	= (struct lwp_info *) find_inferior (&all_lwps,
					     stuck_in_jump_pad_callback, NULL);
      if (lwp_stuck != NULL)
	fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
    }
}
2274
0d62e5e8 2275/* Wait for process, returns status. */
da6d8c04 2276
95954743
PA
/* Core of the target's wait method: pull one event out of the
   kernel, do all bookkeeping (extended events were already handled by
   the caller's loop), and decide whether to report a stop to GDB or
   to silently resume and keep waiting.  Fills in *OURSTATUS and
   returns the ptid of the event thread, or null_ptid if TARGET_WNOHANG
   was requested and nothing is pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  bp_explains_trap = 0;
  trace_event = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While stepping over a breakpoint, only events from the stepping
     LWP are of interest; block until one arrives.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild exited with retcode = %x \n",
			 WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild terminated with signal = %x \n",
			 WTERMSIG (w));
	    }

	  return ptid_of (event_child);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "Got signal %d for LWP %ld.  Check if we need "
		 "to defer or adjust it.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Allow debugging the jump pad itself.  */
      if (current_inferior->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred (in jump pad)\n",
		     WSTOPSIG (w), lwpid_of (event_child));

	  linux_resume_one_lwp (event_child, 0, 0, NULL);
	  goto retry;
	}
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
	fprintf (stderr, "\
LWP %ld was trying to move out of the jump pad (%d). \
Check if we're already there.\n",
		 lwpid_of (event_child),
		 event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint != 1)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "No longer need exit-jump-pad bkpt; removing it."
			 "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);
	      cancel_breakpoints ();

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint == 0)
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "fast tracepoint finished "
		     "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		fprintf (stderr, "dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		fprintf (stderr, "no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = TARGET_SIGNAL_0;
		  return ptid_of (event_child);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Pass the original siginfo along when re-delivering, if we can
	 fetch it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     not support Z0 breakpoints.  If we do, we're be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished
		       && !bp_explains_trap && !trace_event)
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      /* NOTE(review): this nested debug_threads check is redundant
	 (already inside an if (debug_threads) block); kept as in the
	 original.  */
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop && !stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);

      /* If we were going a step-over, all other threads but the stepping one
	 had been paused in start_step_over, with their suspend counts
	 incremented.  We don't want to do a full unstop/unpause, because we're
	 in all-stop mode (so we want threads stopped), but we still need to
	 unsuspend the other threads, to decrement their `suspended' count
	 back.  */
      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      /* Stabilize threads (move out of jump pads).  */
      stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return ptid_of (event_child);
}
2742
/* Get rid of any pending event in the pipe.  Reads until the pipe is
   drained (read fails with something other than EINTR); assumes the
   read end is non-blocking — TODO confirm against where
   linux_event_pipe is created.  */
static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    ret = read (linux_event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}
2754
/* Put something in the pipe, so the event loop wakes up.  The pipe is
   first drained so at most one wakeup byte is ever pending.  */
static void
async_file_mark (void)
{
  int ret;

  async_file_flush ();

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
2770
95954743
PA
/* Target wait method: wrapper around linux_wait_1 that also keeps the
   async event pipe in sync.  Returns the ptid of the thread the event
   in *OURSTATUS belongs to, or null_ptid when TARGET_WNOHANG was set
   and nothing was pending.  */

static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
2795
c5f62d5f 2796/* Send a signal to an LWP. */
fd500816
DJ
2797
2798static int
a1928bad 2799kill_lwp (unsigned long lwpid, int signo)
fd500816 2800{
c5f62d5f
DE
2801 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2802 fails, then we are not using nptl threads and we should be using kill. */
fd500816 2803
c5f62d5f
DE
2804#ifdef __NR_tkill
2805 {
2806 static int tkill_failed;
fd500816 2807
c5f62d5f
DE
2808 if (!tkill_failed)
2809 {
2810 int ret;
2811
2812 errno = 0;
2813 ret = syscall (__NR_tkill, lwpid, signo);
2814 if (errno != ENOSYS)
2815 return ret;
2816 tkill_failed = 1;
2817 }
2818 }
fd500816
DJ
2819#endif
2820
2821 return kill (lwpid, signo);
2822}
2823
964e4306
PA
/* Request LWP to stop, by sending it a SIGSTOP (exported entry point
   wrapping send_sigstop).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2829
/* Send a SIGSTOP to LWP, unless one we sent is already pending
   (lwp->stop_expected); the flag is set before the signal goes out so
   the subsequent stop is recognized as ours.  */
static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (lwp);

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);

      return;
    }

  if (debug_threads)
    fprintf (stderr, "Sending sigstop to lwp %d\n", pid);

  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}
2853
7984d532
PA
2854static int
2855send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2856{
2857 struct lwp_info *lwp = (struct lwp_info *) entry;
2858
7984d532
PA
2859 /* Ignore EXCEPT. */
2860 if (lwp == except)
2861 return 0;
2862
02fc4de7 2863 if (lwp->stopped)
7984d532 2864 return 0;
02fc4de7
PA
2865
2866 send_sigstop (lwp);
7984d532
PA
2867 return 0;
2868}
2869
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  Callback for find_inferior; EXCEPT is skipped entirely (its
   suspend count is not touched either).  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
				   void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended++;

  return send_sigstop_callback (entry, except);
}
2886
95954743
PA
/* Record that LWP has died with wait status WSTAT: flag it dead, keep
   the exit status pending for later reporting, and clear the flags
   that would otherwise make us try to stop it or wait for a stop.  */
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* It's dead, really.  */
  lwp->dead = 1;

  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
2903
0d62e5e8
DJ
/* for_each_inferior callback: block until the LWP in ENTRY reports a
   stop (normally the SIGSTOP we sent).  A stop with some other signal
   is stashed as a pending status; an exit marks the LWP dead.  Takes
   care to restore current_inferior, or pick a sane replacement if the
   previously current thread died meanwhile.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look the LWP up: linux_wait_for_event may have deleted the
	 original entry if this was not the last thread of the
	 process.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2994
fa593d66
PA
2995/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2996 move it out, because we need to report the stop event to GDB. For
2997 example, if the user puts a breakpoint in the jump pad, it's
2998 because she wants to debug it. */
2999
3000static int
3001stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3002{
3003 struct lwp_info *lwp = (struct lwp_info *) entry;
3004 struct thread_info *thread = get_lwp_thread (lwp);
3005
3006 gdb_assert (lwp->suspended == 0);
3007 gdb_assert (lwp->stopped);
3008
3009 /* Allow debugging the jump pad, gdb_collect, etc.. */
3010 return (supports_fast_tracepoints ()
58b4daa5 3011 && agent_loaded_p ()
fa593d66
PA
3012 && (gdb_breakpoint_here (lwp->stop_pc)
3013 || lwp->stopped_by_watchpoint
3014 || thread->last_resume_kind == resume_step)
3015 && linux_fast_tracepoint_collecting (lwp, NULL));
3016}
3017
3018static void
3019move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3020{
3021 struct lwp_info *lwp = (struct lwp_info *) entry;
3022 struct thread_info *thread = get_lwp_thread (lwp);
3023 int *wstat;
3024
3025 gdb_assert (lwp->suspended == 0);
3026 gdb_assert (lwp->stopped);
3027
3028 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3029
3030 /* Allow debugging the jump pad, gdb_collect, etc. */
3031 if (!gdb_breakpoint_here (lwp->stop_pc)
3032 && !lwp->stopped_by_watchpoint
3033 && thread->last_resume_kind != resume_step
3034 && maybe_move_out_of_jump_pad (lwp, wstat))
3035 {
3036 if (debug_threads)
3037 fprintf (stderr,
3038 "LWP %ld needs stabilizing (in jump pad)\n",
3039 lwpid_of (lwp));
3040
3041 if (wstat)
3042 {
3043 lwp->status_pending_p = 0;
3044 enqueue_one_deferred_signal (lwp, wstat);
3045
3046 if (debug_threads)
3047 fprintf (stderr,
3048 "Signal %d for LWP %ld deferred "
3049 "(in jump pad)\n",
3050 WSTOPSIG (*wstat), lwpid_of (lwp));
3051 }
3052
3053 linux_resume_one_lwp (lwp, 0, 0, NULL);
3054 }
3055 else
3056 lwp->suspended++;
3057}
3058
3059static int
3060lwp_running (struct inferior_list_entry *entry, void *data)
3061{
3062 struct lwp_info *lwp = (struct lwp_info *) entry;
3063
3064 if (lwp->dead)
3065 return 0;
3066 if (lwp->stopped)
3067 return 0;
3068 return 1;
3069}
3070
7984d532
PA
3071/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3072 If SUSPEND, then also increase the suspend count of every LWP,
3073 except EXCEPT. */
3074
0d62e5e8 3075static void
7984d532 3076stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
3077{
3078 stopping_threads = 1;
7984d532
PA
3079
3080 if (suspend)
3081 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3082 else
3083 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 3084 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
3085 stopping_threads = 0;
3086}
3087
da6d8c04
DJ
3088/* Resume execution of the inferior process.
3089 If STEP is nonzero, single-step it.
3090 If SIGNAL is nonzero, give it that signal. */
3091
ce3a066d 3092static void
2acc282a 3093linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 3094 int step, int signal, siginfo_t *info)
da6d8c04 3095{
0d62e5e8 3096 struct thread_info *saved_inferior;
fa593d66 3097 int fast_tp_collecting;
0d62e5e8 3098
54a0b537 3099 if (lwp->stopped == 0)
0d62e5e8
DJ
3100 return;
3101
fa593d66
PA
3102 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3103
3104 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3105
219f2f23
PA
3106 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3107 user used the "jump" command, or "set $pc = foo"). */
3108 if (lwp->stop_pc != get_pc (lwp))
3109 {
3110 /* Collecting 'while-stepping' actions doesn't make sense
3111 anymore. */
3112 release_while_stepping_state_list (get_lwp_thread (lwp));
3113 }
3114
0d62e5e8
DJ
3115 /* If we have pending signals or status, and a new signal, enqueue the
3116 signal. Also enqueue the signal if we are waiting to reinsert a
3117 breakpoint; it will be picked up again below. */
3118 if (signal != 0
fa593d66
PA
3119 && (lwp->status_pending_p
3120 || lwp->pending_signals != NULL
3121 || lwp->bp_reinsert != 0
3122 || fast_tp_collecting))
0d62e5e8
DJ
3123 {
3124 struct pending_signals *p_sig;
bca929d3 3125 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3126 p_sig->prev = lwp->pending_signals;
0d62e5e8 3127 p_sig->signal = signal;
32ca6d61
DJ
3128 if (info == NULL)
3129 memset (&p_sig->info, 0, sizeof (siginfo_t));
3130 else
3131 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3132 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3133 }
3134
d50171e4
PA
3135 if (lwp->status_pending_p)
3136 {
3137 if (debug_threads)
3138 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3139 " has pending status\n",
3140 lwpid_of (lwp), step ? "step" : "continue", signal,
3141 lwp->stop_expected ? "expected" : "not expected");
3142 return;
3143 }
0d62e5e8
DJ
3144
3145 saved_inferior = current_inferior;
54a0b537 3146 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
3147
3148 if (debug_threads)
1b3f6016 3149 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 3150 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 3151 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3152
3153 /* This bit needs some thinking about. If we get a signal that
3154 we must report while a single-step reinsert is still pending,
3155 we often end up resuming the thread. It might be better to
3156 (ew) allow a stack of pending events; then we could be sure that
3157 the reinsert happened right away and not lose any signals.
3158
3159 Making this stack would also shrink the window in which breakpoints are
54a0b537 3160 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3161 complete correctness, so it won't solve that problem. It may be
3162 worthwhile just to solve this one, however. */
54a0b537 3163 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3164 {
3165 if (debug_threads)
d50171e4
PA
3166 fprintf (stderr, " pending reinsert at 0x%s\n",
3167 paddress (lwp->bp_reinsert));
3168
3169 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3170 {
fa593d66
PA
3171 if (fast_tp_collecting == 0)
3172 {
3173 if (step == 0)
3174 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3175 if (lwp->suspended)
3176 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3177 lwp->suspended);
3178 }
d50171e4
PA
3179
3180 step = 1;
3181 }
0d62e5e8
DJ
3182
3183 /* Postpone any pending signal. It was enqueued above. */
3184 signal = 0;
3185 }
3186
fa593d66
PA
3187 if (fast_tp_collecting == 1)
3188 {
3189 if (debug_threads)
3190 fprintf (stderr, "\
3191lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3192 lwpid_of (lwp));
3193
3194 /* Postpone any pending signal. It was enqueued above. */
3195 signal = 0;
3196 }
3197 else if (fast_tp_collecting == 2)
3198 {
3199 if (debug_threads)
3200 fprintf (stderr, "\
3201lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3202 lwpid_of (lwp));
3203
3204 if (can_hardware_single_step ())
3205 step = 1;
3206 else
3207 fatal ("moving out of jump pad single-stepping"
3208 " not implemented on this target");
3209
3210 /* Postpone any pending signal. It was enqueued above. */
3211 signal = 0;
3212 }
3213
219f2f23
PA
3214 /* If we have while-stepping actions in this thread set it stepping.
3215 If we have a signal to deliver, it may or may not be set to
3216 SIG_IGN, we don't know. Assume so, and allow collecting
3217 while-stepping into a signal handler. A possible smart thing to
3218 do would be to set an internal breakpoint at the signal return
3219 address, continue, and carry on catching this while-stepping
3220 action only when that breakpoint is hit. A future
3221 enhancement. */
3222 if (get_lwp_thread (lwp)->while_stepping != NULL
3223 && can_hardware_single_step ())
3224 {
3225 if (debug_threads)
3226 fprintf (stderr,
3227 "lwp %ld has a while-stepping action -> forcing step.\n",
3228 lwpid_of (lwp));
3229 step = 1;
3230 }
3231
aa691b87 3232 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 3233 {
442ea881
PA
3234 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3235 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 3236 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
3237 }
3238
fa593d66
PA
3239 /* If we have pending signals, consume one unless we are trying to
3240 reinsert a breakpoint or we're trying to finish a fast tracepoint
3241 collect. */
3242 if (lwp->pending_signals != NULL
3243 && lwp->bp_reinsert == 0
3244 && fast_tp_collecting == 0)
0d62e5e8
DJ
3245 {
3246 struct pending_signals **p_sig;
3247
54a0b537 3248 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3249 while ((*p_sig)->prev != NULL)
3250 p_sig = &(*p_sig)->prev;
3251
3252 signal = (*p_sig)->signal;
32ca6d61 3253 if ((*p_sig)->info.si_signo != 0)
bd99dc85 3254 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 3255
0d62e5e8
DJ
3256 free (*p_sig);
3257 *p_sig = NULL;
3258 }
3259
aa5ca48f
DE
3260 if (the_low_target.prepare_to_resume != NULL)
3261 the_low_target.prepare_to_resume (lwp);
3262
0d62e5e8 3263 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 3264 get_lwp_thread (lwp));
da6d8c04 3265 errno = 0;
54a0b537 3266 lwp->stopped = 0;
c3adc08c 3267 lwp->stopped_by_watchpoint = 0;
54a0b537 3268 lwp->stepping = step;
14ce3065
DE
3269 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3270 /* Coerce to a uintptr_t first to avoid potential gcc warning
3271 of coercing an 8 byte integer to a 4 byte pointer. */
3272 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
3273
3274 current_inferior = saved_inferior;
da6d8c04 3275 if (errno)
3221518c
UW
3276 {
3277 /* ESRCH from ptrace either means that the thread was already
3278 running (an error) or that it is gone (a race condition). If
3279 it's gone, we will get a notification the next time we wait,
3280 so we can ignore the error. We could differentiate these
3281 two, but it's tricky without waiting; the thread still exists
3282 as a zombie, so sending it signal 0 would succeed. So just
3283 ignore ESRCH. */
3284 if (errno == ESRCH)
3285 return;
3286
3287 perror_with_name ("ptrace");
3288 }
da6d8c04
DJ
3289}
3290
2bd7c093
PA
3291struct thread_resume_array
3292{
3293 struct thread_resume *resume;
3294 size_t n;
3295};
64386c31
DJ
3296
3297/* This function is called once per thread. We look up the thread
5544ad89
DJ
3298 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3299 resume request.
3300
3301 This algorithm is O(threads * resume elements), but resume elements
3302 is small (and will remain small at least until GDB supports thread
3303 suspension). */
2bd7c093
PA
3304static int
3305linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3306{
54a0b537 3307 struct lwp_info *lwp;
64386c31 3308 struct thread_info *thread;
5544ad89 3309 int ndx;
2bd7c093 3310 struct thread_resume_array *r;
64386c31
DJ
3311
3312 thread = (struct thread_info *) entry;
54a0b537 3313 lwp = get_thread_lwp (thread);
2bd7c093 3314 r = arg;
64386c31 3315
2bd7c093 3316 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3317 {
3318 ptid_t ptid = r->resume[ndx].thread;
3319 if (ptid_equal (ptid, minus_one_ptid)
3320 || ptid_equal (ptid, entry->id)
3321 || (ptid_is_pid (ptid)
3322 && (ptid_get_pid (ptid) == pid_of (lwp)))
3323 || (ptid_get_lwp (ptid) == -1
3324 && (ptid_get_pid (ptid) == pid_of (lwp))))
3325 {
d50171e4 3326 if (r->resume[ndx].kind == resume_stop
8336d594 3327 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3328 {
3329 if (debug_threads)
3330 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3331 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3332 ? "stopped"
3333 : "stopping",
3334 lwpid_of (lwp));
3335
3336 continue;
3337 }
3338
95954743 3339 lwp->resume = &r->resume[ndx];
8336d594 3340 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3341
3342 /* If we had a deferred signal to report, dequeue one now.
3343 This can happen if LWP gets more than one signal while
3344 trying to get out of a jump pad. */
3345 if (lwp->stopped
3346 && !lwp->status_pending_p
3347 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3348 {
3349 lwp->status_pending_p = 1;
3350
3351 if (debug_threads)
3352 fprintf (stderr,
3353 "Dequeueing deferred signal %d for LWP %ld, "
3354 "leaving status pending.\n",
3355 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3356 }
3357
95954743
PA
3358 return 0;
3359 }
3360 }
2bd7c093
PA
3361
3362 /* No resume action for this thread. */
3363 lwp->resume = NULL;
64386c31 3364
2bd7c093 3365 return 0;
5544ad89
DJ
3366}
3367
5544ad89 3368
bd99dc85
PA
3369/* Set *FLAG_P if this lwp has an interesting status pending. */
3370static int
3371resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3372{
bd99dc85 3373 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3374
bd99dc85
PA
3375 /* LWPs which will not be resumed are not interesting, because
3376 we might not wait for them next time through linux_wait. */
2bd7c093 3377 if (lwp->resume == NULL)
bd99dc85 3378 return 0;
64386c31 3379
bd99dc85 3380 if (lwp->status_pending_p)
d50171e4
PA
3381 * (int *) flag_p = 1;
3382
3383 return 0;
3384}
3385
3386/* Return 1 if this lwp that GDB wants running is stopped at an
3387 internal breakpoint that we need to step over. It assumes that any
3388 required STOP_PC adjustment has already been propagated to the
3389 inferior's regcache. */
3390
3391static int
3392need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3393{
3394 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3395 struct thread_info *thread;
d50171e4
PA
3396 struct thread_info *saved_inferior;
3397 CORE_ADDR pc;
3398
3399 /* LWPs which will not be resumed are not interesting, because we
3400 might not wait for them next time through linux_wait. */
3401
3402 if (!lwp->stopped)
3403 {
3404 if (debug_threads)
3405 fprintf (stderr,
3406 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3407 lwpid_of (lwp));
3408 return 0;
3409 }
3410
8336d594
PA
3411 thread = get_lwp_thread (lwp);
3412
3413 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3414 {
3415 if (debug_threads)
3416 fprintf (stderr,
3417 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3418 lwpid_of (lwp));
3419 return 0;
3420 }
3421
7984d532
PA
3422 gdb_assert (lwp->suspended >= 0);
3423
3424 if (lwp->suspended)
3425 {
3426 if (debug_threads)
3427 fprintf (stderr,
3428 "Need step over [LWP %ld]? Ignoring, suspended\n",
3429 lwpid_of (lwp));
3430 return 0;
3431 }
3432
d50171e4
PA
3433 if (!lwp->need_step_over)
3434 {
3435 if (debug_threads)
3436 fprintf (stderr,
3437 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3438 }
5544ad89 3439
bd99dc85 3440 if (lwp->status_pending_p)
d50171e4
PA
3441 {
3442 if (debug_threads)
3443 fprintf (stderr,
3444 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3445 lwpid_of (lwp));
3446 return 0;
3447 }
3448
3449 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3450 or we have. */
3451 pc = get_pc (lwp);
3452
3453 /* If the PC has changed since we stopped, then don't do anything,
3454 and let the breakpoint/tracepoint be hit. This happens if, for
3455 instance, GDB handled the decr_pc_after_break subtraction itself,
3456 GDB is OOL stepping this thread, or the user has issued a "jump"
3457 command, or poked thread's registers herself. */
3458 if (pc != lwp->stop_pc)
3459 {
3460 if (debug_threads)
3461 fprintf (stderr,
3462 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3463 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3464 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3465
3466 lwp->need_step_over = 0;
3467 return 0;
3468 }
3469
3470 saved_inferior = current_inferior;
8336d594 3471 current_inferior = thread;
d50171e4 3472
8b07ae33 3473 /* We can only step over breakpoints we know about. */
fa593d66 3474 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3475 {
8b07ae33 3476 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
3477 though. If the condition is being evaluated on the target's side
3478 and it evaluate to false, step over this breakpoint as well. */
3479 if (gdb_breakpoint_here (pc)
3480 && gdb_condition_true_at_breakpoint (pc))
8b07ae33
PA
3481 {
3482 if (debug_threads)
3483 fprintf (stderr,
3484 "Need step over [LWP %ld]? yes, but found"
3485 " GDB breakpoint at 0x%s; skipping step over\n",
3486 lwpid_of (lwp), paddress (pc));
d50171e4 3487
8b07ae33
PA
3488 current_inferior = saved_inferior;
3489 return 0;
3490 }
3491 else
3492 {
3493 if (debug_threads)
3494 fprintf (stderr,
493e2a69
MS
3495 "Need step over [LWP %ld]? yes, "
3496 "found breakpoint at 0x%s\n",
8b07ae33 3497 lwpid_of (lwp), paddress (pc));
d50171e4 3498
8b07ae33
PA
3499 /* We've found an lwp that needs stepping over --- return 1 so
3500 that find_inferior stops looking. */
3501 current_inferior = saved_inferior;
3502
3503 /* If the step over is cancelled, this is set again. */
3504 lwp->need_step_over = 0;
3505 return 1;
3506 }
d50171e4
PA
3507 }
3508
3509 current_inferior = saved_inferior;
3510
3511 if (debug_threads)
3512 fprintf (stderr,
3513 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3514 lwpid_of (lwp), paddress (pc));
c6ecbae5 3515
bd99dc85 3516 return 0;
5544ad89
DJ
3517}
3518
d50171e4
PA
3519/* Start a step-over operation on LWP. When LWP stopped at a
3520 breakpoint, to make progress, we need to remove the breakpoint out
3521 of the way. If we let other threads run while we do that, they may
3522 pass by the breakpoint location and miss hitting it. To avoid
3523 that, a step-over momentarily stops all threads while LWP is
3524 single-stepped while the breakpoint is temporarily uninserted from
3525 the inferior. When the single-step finishes, we reinsert the
3526 breakpoint, and let all threads that are supposed to be running,
3527 run again.
3528
3529 On targets that don't support hardware single-step, we don't
3530 currently support full software single-stepping. Instead, we only
3531 support stepping over the thread event breakpoint, by asking the
3532 low target where to place a reinsert breakpoint. Since this
3533 routine assumes the breakpoint being stepped over is a thread event
3534 breakpoint, it usually assumes the return address of the current
3535 function is a good enough place to set the reinsert breakpoint. */
3536
3537static int
3538start_step_over (struct lwp_info *lwp)
3539{
3540 struct thread_info *saved_inferior;
3541 CORE_ADDR pc;
3542 int step;
3543
3544 if (debug_threads)
3545 fprintf (stderr,
3546 "Starting step-over on LWP %ld. Stopping all threads\n",
3547 lwpid_of (lwp));
3548
7984d532
PA
3549 stop_all_lwps (1, lwp);
3550 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3551
3552 if (debug_threads)
3553 fprintf (stderr, "Done stopping all threads for step-over.\n");
3554
3555 /* Note, we should always reach here with an already adjusted PC,
3556 either by GDB (if we're resuming due to GDB's request), or by our
3557 caller, if we just finished handling an internal breakpoint GDB
3558 shouldn't care about. */
3559 pc = get_pc (lwp);
3560
3561 saved_inferior = current_inferior;
3562 current_inferior = get_lwp_thread (lwp);
3563
3564 lwp->bp_reinsert = pc;
3565 uninsert_breakpoints_at (pc);
fa593d66 3566 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3567
3568 if (can_hardware_single_step ())
3569 {
3570 step = 1;
3571 }
3572 else
3573 {
3574 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3575 set_reinsert_breakpoint (raddr);
3576 step = 0;
3577 }
3578
3579 current_inferior = saved_inferior;
3580
3581 linux_resume_one_lwp (lwp, step, 0, NULL);
3582
3583 /* Require next event from this LWP. */
3584 step_over_bkpt = lwp->head.id;
3585 return 1;
3586}
3587
3588/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3589 start_step_over, if still there, and delete any reinsert
3590 breakpoints we've set, on non hardware single-step targets. */
3591
3592static int
3593finish_step_over (struct lwp_info *lwp)
3594{
3595 if (lwp->bp_reinsert != 0)
3596 {
3597 if (debug_threads)
3598 fprintf (stderr, "Finished step over.\n");
3599
3600 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3601 may be no breakpoint to reinsert there by now. */
3602 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 3603 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
3604
3605 lwp->bp_reinsert = 0;
3606
3607 /* Delete any software-single-step reinsert breakpoints. No
3608 longer needed. We don't have to worry about other threads
3609 hitting this trap, and later not being able to explain it,
3610 because we were stepping over a breakpoint, and we hold all
3611 threads but LWP stopped while doing that. */
3612 if (!can_hardware_single_step ())
3613 delete_reinsert_breakpoints ();
3614
3615 step_over_bkpt = null_ptid;
3616 return 1;
3617 }
3618 else
3619 return 0;
3620}
3621
5544ad89
DJ
3622/* This function is called once per thread. We check the thread's resume
3623 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 3624 stopped; and what signal, if any, it should be sent.
5544ad89 3625
bd99dc85
PA
3626 For threads which we aren't explicitly told otherwise, we preserve
3627 the stepping flag; this is used for stepping over gdbserver-placed
3628 breakpoints.
3629
3630 If pending_flags was set in any thread, we queue any needed
3631 signals, since we won't actually resume. We already have a pending
3632 event to report, so we don't need to preserve any step requests;
3633 they should be re-issued if necessary. */
3634
3635static int
3636linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 3637{
54a0b537 3638 struct lwp_info *lwp;
5544ad89 3639 struct thread_info *thread;
bd99dc85 3640 int step;
d50171e4
PA
3641 int leave_all_stopped = * (int *) arg;
3642 int leave_pending;
5544ad89
DJ
3643
3644 thread = (struct thread_info *) entry;
54a0b537 3645 lwp = get_thread_lwp (thread);
5544ad89 3646
2bd7c093 3647 if (lwp->resume == NULL)
bd99dc85 3648 return 0;
5544ad89 3649
bd99dc85 3650 if (lwp->resume->kind == resume_stop)
5544ad89 3651 {
bd99dc85 3652 if (debug_threads)
d50171e4 3653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
3654
3655 if (!lwp->stopped)
3656 {
3657 if (debug_threads)
d50171e4 3658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 3659
d50171e4
PA
3660 /* Stop the thread, and wait for the event asynchronously,
3661 through the event loop. */
02fc4de7 3662 send_sigstop (lwp);
bd99dc85
PA
3663 }
3664 else
3665 {
3666 if (debug_threads)
d50171e4
PA
3667 fprintf (stderr, "already stopped LWP %ld\n",
3668 lwpid_of (lwp));
3669
3670 /* The LWP may have been stopped in an internal event that
3671 was not meant to be notified back to GDB (e.g., gdbserver
3672 breakpoint), so we should be reporting a stop event in
3673 this case too. */
3674
3675 /* If the thread already has a pending SIGSTOP, this is a
3676 no-op. Otherwise, something later will presumably resume
3677 the thread and this will cause it to cancel any pending
3678 operation, due to last_resume_kind == resume_stop. If
3679 the thread already has a pending status to report, we
3680 will still report it the next time we wait - see
3681 status_pending_p_callback. */
1a981360
PA
3682
3683 /* If we already have a pending signal to report, then
3684 there's no need to queue a SIGSTOP, as this means we're
3685 midway through moving the LWP out of the jumppad, and we
3686 will report the pending signal as soon as that is
3687 finished. */
3688 if (lwp->pending_signals_to_report == NULL)
3689 send_sigstop (lwp);
bd99dc85 3690 }
32ca6d61 3691
bd99dc85
PA
3692 /* For stop requests, we're done. */
3693 lwp->resume = NULL;
fc7238bb 3694 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3695 return 0;
5544ad89
DJ
3696 }
3697
bd99dc85
PA
3698 /* If this thread which is about to be resumed has a pending status,
3699 then don't resume any threads - we can just report the pending
3700 status. Make sure to queue any signals that would otherwise be
3701 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
3702 thread has a pending status. If there's a thread that needs the
3703 step-over-breakpoint dance, then don't resume any other thread
3704 but that particular one. */
3705 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 3706
d50171e4 3707 if (!leave_pending)
bd99dc85
PA
3708 {
3709 if (debug_threads)
3710 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 3711
d50171e4 3712 step = (lwp->resume->kind == resume_step);
2acc282a 3713 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
3714 }
3715 else
3716 {
3717 if (debug_threads)
3718 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 3719
bd99dc85
PA
3720 /* If we have a new signal, enqueue the signal. */
3721 if (lwp->resume->sig != 0)
3722 {
3723 struct pending_signals *p_sig;
3724 p_sig = xmalloc (sizeof (*p_sig));
3725 p_sig->prev = lwp->pending_signals;
3726 p_sig->signal = lwp->resume->sig;
3727 memset (&p_sig->info, 0, sizeof (siginfo_t));
3728
3729 /* If this is the same signal we were previously stopped by,
3730 make sure to queue its siginfo. We can ignore the return
3731 value of ptrace; if it fails, we'll skip
3732 PTRACE_SETSIGINFO. */
3733 if (WIFSTOPPED (lwp->last_status)
3734 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3735 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3736
3737 lwp->pending_signals = p_sig;
3738 }
3739 }
5544ad89 3740
fc7238bb 3741 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3742 lwp->resume = NULL;
5544ad89 3743 return 0;
0d62e5e8
DJ
3744}
3745
3746static void
2bd7c093 3747linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 3748{
2bd7c093 3749 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
3750 struct lwp_info *need_step_over = NULL;
3751 int any_pending;
3752 int leave_all_stopped;
c6ecbae5 3753
2bd7c093 3754 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 3755
d50171e4
PA
3756 /* If there is a thread which would otherwise be resumed, which has
3757 a pending status, then don't resume any threads - we can just
3758 report the pending status. Make sure to queue any signals that
3759 would otherwise be sent. In non-stop mode, we'll apply this
3760 logic to each thread individually. We consume all pending events
3761 before considering to start a step-over (in all-stop). */
3762 any_pending = 0;
bd99dc85 3763 if (!non_stop)
d50171e4
PA
3764 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3765
3766 /* If there is a thread which would otherwise be resumed, which is
3767 stopped at a breakpoint that needs stepping over, then don't
3768 resume any threads - have it step over the breakpoint with all
3769 other threads stopped, then resume all threads again. Make sure
3770 to queue any signals that would otherwise be delivered or
3771 queued. */
3772 if (!any_pending && supports_breakpoints ())
3773 need_step_over
3774 = (struct lwp_info *) find_inferior (&all_lwps,
3775 need_step_over_p, NULL);
3776
3777 leave_all_stopped = (need_step_over != NULL || any_pending);
3778
3779 if (debug_threads)
3780 {
3781 if (need_step_over != NULL)
3782 fprintf (stderr, "Not resuming all, need step over\n");
3783 else if (any_pending)
3784 fprintf (stderr,
3785 "Not resuming, all-stop and found "
3786 "an LWP with pending status\n");
3787 else
3788 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3789 }
3790
3791 /* Even if we're leaving threads stopped, queue all signals we'd
3792 otherwise deliver. */
3793 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3794
3795 if (need_step_over)
3796 start_step_over (need_step_over);
3797}
3798
3799/* This function is called once per thread. We check the thread's
3800 last resume request, which will tell us whether to resume, step, or
3801 leave the thread stopped. Any signal the client requested to be
3802 delivered has already been enqueued at this point.
3803
3804 If any thread that GDB wants running is stopped at an internal
3805 breakpoint that needs stepping over, we start a step-over operation
3806 on that particular thread, and leave all others stopped. */
3807
7984d532
PA
3808static int
3809proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 3810{
7984d532 3811 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3812 struct thread_info *thread;
d50171e4
PA
3813 int step;
3814
7984d532
PA
3815 if (lwp == except)
3816 return 0;
d50171e4
PA
3817
3818 if (debug_threads)
3819 fprintf (stderr,
3820 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3821
3822 if (!lwp->stopped)
3823 {
3824 if (debug_threads)
3825 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
7984d532 3826 return 0;
d50171e4
PA
3827 }
3828
8336d594
PA
3829 thread = get_lwp_thread (lwp);
3830
02fc4de7
PA
3831 if (thread->last_resume_kind == resume_stop
3832 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
3833 {
3834 if (debug_threads)
02fc4de7
PA
3835 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3836 lwpid_of (lwp));
7984d532 3837 return 0;
d50171e4
PA
3838 }
3839
3840 if (lwp->status_pending_p)
3841 {
3842 if (debug_threads)
3843 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3844 lwpid_of (lwp));
7984d532 3845 return 0;
d50171e4
PA
3846 }
3847
7984d532
PA
3848 gdb_assert (lwp->suspended >= 0);
3849
d50171e4
PA
3850 if (lwp->suspended)
3851 {
3852 if (debug_threads)
3853 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
7984d532 3854 return 0;
d50171e4
PA
3855 }
3856
1a981360
PA
3857 if (thread->last_resume_kind == resume_stop
3858 && lwp->pending_signals_to_report == NULL
3859 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
3860 {
3861 /* We haven't reported this LWP as stopped yet (otherwise, the
3862 last_status.kind check above would catch it, and we wouldn't
3863 reach here. This LWP may have been momentarily paused by a
3864 stop_all_lwps call while handling for example, another LWP's
3865 step-over. In that case, the pending expected SIGSTOP signal
3866 that was queued at vCont;t handling time will have already
3867 been consumed by wait_for_sigstop, and so we need to requeue
3868 another one here. Note that if the LWP already has a SIGSTOP
3869 pending, this is a no-op. */
3870
3871 if (debug_threads)
3872 fprintf (stderr,
3873 "Client wants LWP %ld to stop. "
3874 "Making sure it has a SIGSTOP pending\n",
3875 lwpid_of (lwp));
3876
3877 send_sigstop (lwp);
3878 }
3879
8336d594 3880 step = thread->last_resume_kind == resume_step;
d50171e4 3881 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
3882 return 0;
3883}
3884
3885static int
3886unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3887{
3888 struct lwp_info *lwp = (struct lwp_info *) entry;
3889
3890 if (lwp == except)
3891 return 0;
3892
3893 lwp->suspended--;
3894 gdb_assert (lwp->suspended >= 0);
3895
3896 return proceed_one_lwp (entry, except);
d50171e4
PA
3897}
3898
3899/* When we finish a step-over, set threads running again. If there's
3900 another thread that may need a step-over, now's the time to start
3901 it. Eventually, we'll move all threads past their breakpoints. */
3902
3903static void
3904proceed_all_lwps (void)
3905{
3906 struct lwp_info *need_step_over;
3907
3908 /* If there is a thread which would otherwise be resumed, which is
3909 stopped at a breakpoint that needs stepping over, then don't
3910 resume any threads - have it step over the breakpoint with all
3911 other threads stopped, then resume all threads again. */
3912
3913 if (supports_breakpoints ())
3914 {
3915 need_step_over
3916 = (struct lwp_info *) find_inferior (&all_lwps,
3917 need_step_over_p, NULL);
3918
3919 if (need_step_over != NULL)
3920 {
3921 if (debug_threads)
3922 fprintf (stderr, "proceed_all_lwps: found "
3923 "thread %ld needing a step-over\n",
3924 lwpid_of (need_step_over));
3925
3926 start_step_over (need_step_over);
3927 return;
3928 }
3929 }
5544ad89 3930
d50171e4
PA
3931 if (debug_threads)
3932 fprintf (stderr, "Proceeding, no step-over needed\n");
3933
7984d532 3934 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3935}
3936
3937/* Stopped LWPs that the client wanted to be running, that don't have
3938 pending statuses, are set to run again, except for EXCEPT, if not
3939 NULL. This undoes a stop_all_lwps call. */
3940
3941static void
7984d532 3942unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3943{
5544ad89
DJ
3944 if (debug_threads)
3945 {
d50171e4
PA
3946 if (except)
3947 fprintf (stderr,
3948 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3949 else
d50171e4
PA
3950 fprintf (stderr,
3951 "unstopping all lwps\n");
5544ad89
DJ
3952 }
3953
7984d532
PA
3954 if (unsuspend)
3955 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3956 else
3957 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3958}
3959
58caa3dc
DJ
3960
3961#ifdef HAVE_LINUX_REGSETS
3962
1faeff08
MR
3963#define use_linux_regsets 1
3964
58caa3dc 3965static int
442ea881 3966regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3967{
3968 struct regset_info *regset;
e9d25b98 3969 int saw_general_regs = 0;
95954743 3970 int pid;
1570b33e 3971 struct iovec iov;
58caa3dc
DJ
3972
3973 regset = target_regsets;
3974
95954743 3975 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3976 while (regset->size >= 0)
3977 {
1570b33e
L
3978 void *buf, *data;
3979 int nt_type, res;
58caa3dc 3980
52fa2412 3981 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3982 {
3983 regset ++;
3984 continue;
3985 }
3986
bca929d3 3987 buf = xmalloc (regset->size);
1570b33e
L
3988
3989 nt_type = regset->nt_type;
3990 if (nt_type)
3991 {
3992 iov.iov_base = buf;
3993 iov.iov_len = regset->size;
3994 data = (void *) &iov;
3995 }
3996 else
3997 data = buf;
3998
dfb64f85 3999#ifndef __sparc__
1570b33e 4000 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 4001#else
1570b33e 4002 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4003#endif
58caa3dc
DJ
4004 if (res < 0)
4005 {
4006 if (errno == EIO)
4007 {
52fa2412
UW
4008 /* If we get EIO on a regset, do not try it again for
4009 this process. */
4010 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 4011 free (buf);
52fa2412 4012 continue;
58caa3dc
DJ
4013 }
4014 else
4015 {
0d62e5e8 4016 char s[256];
95954743
PA
4017 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4018 pid);
0d62e5e8 4019 perror (s);
58caa3dc
DJ
4020 }
4021 }
e9d25b98
DJ
4022 else if (regset->type == GENERAL_REGS)
4023 saw_general_regs = 1;
442ea881 4024 regset->store_function (regcache, buf);
58caa3dc 4025 regset ++;
fdeb2a12 4026 free (buf);
58caa3dc 4027 }
e9d25b98
DJ
4028 if (saw_general_regs)
4029 return 0;
4030 else
4031 return 1;
58caa3dc
DJ
4032}
4033
4034static int
442ea881 4035regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
4036{
4037 struct regset_info *regset;
e9d25b98 4038 int saw_general_regs = 0;
95954743 4039 int pid;
1570b33e 4040 struct iovec iov;
58caa3dc
DJ
4041
4042 regset = target_regsets;
4043
95954743 4044 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
4045 while (regset->size >= 0)
4046 {
1570b33e
L
4047 void *buf, *data;
4048 int nt_type, res;
58caa3dc 4049
52fa2412 4050 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
4051 {
4052 regset ++;
4053 continue;
4054 }
4055
bca929d3 4056 buf = xmalloc (regset->size);
545587ee
DJ
4057
4058 /* First fill the buffer with the current register set contents,
4059 in case there are any items in the kernel's regset that are
4060 not in gdbserver's regcache. */
1570b33e
L
4061
4062 nt_type = regset->nt_type;
4063 if (nt_type)
4064 {
4065 iov.iov_base = buf;
4066 iov.iov_len = regset->size;
4067 data = (void *) &iov;
4068 }
4069 else
4070 data = buf;
4071
dfb64f85 4072#ifndef __sparc__
1570b33e 4073 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 4074#else
689cc2ae 4075 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 4076#endif
545587ee
DJ
4077
4078 if (res == 0)
4079 {
4080 /* Then overlay our cached registers on that. */
442ea881 4081 regset->fill_function (regcache, buf);
545587ee
DJ
4082
4083 /* Only now do we write the register set. */
dfb64f85 4084#ifndef __sparc__
1570b33e 4085 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 4086#else
1570b33e 4087 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 4088#endif
545587ee
DJ
4089 }
4090
58caa3dc
DJ
4091 if (res < 0)
4092 {
4093 if (errno == EIO)
4094 {
52fa2412
UW
4095 /* If we get EIO on a regset, do not try it again for
4096 this process. */
4097 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 4098 free (buf);
52fa2412 4099 continue;
58caa3dc 4100 }
3221518c
UW
4101 else if (errno == ESRCH)
4102 {
1b3f6016
PA
4103 /* At this point, ESRCH should mean the process is
4104 already gone, in which case we simply ignore attempts
4105 to change its registers. See also the related
4106 comment in linux_resume_one_lwp. */
fdeb2a12 4107 free (buf);
3221518c
UW
4108 return 0;
4109 }
58caa3dc
DJ
4110 else
4111 {
ce3a066d 4112 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
4113 }
4114 }
e9d25b98
DJ
4115 else if (regset->type == GENERAL_REGS)
4116 saw_general_regs = 1;
58caa3dc 4117 regset ++;
09ec9b38 4118 free (buf);
58caa3dc 4119 }
e9d25b98
DJ
4120 if (saw_general_regs)
4121 return 0;
4122 else
4123 return 1;
58caa3dc
DJ
4124}
4125
1faeff08 4126#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4127
1faeff08
MR
4128#define use_linux_regsets 0
4129#define regsets_fetch_inferior_registers(regcache) 1
4130#define regsets_store_inferior_registers(regcache) 1
58caa3dc 4131
58caa3dc 4132#endif
1faeff08
MR
4133
4134/* Return 1 if register REGNO is supported by one of the regset ptrace
4135 calls or 0 if it has to be transferred individually. */
4136
4137static int
4138linux_register_in_regsets (int regno)
4139{
4140 unsigned char mask = 1 << (regno % 8);
4141 size_t index = regno / 8;
4142
4143 return (use_linux_regsets
4144 && (the_low_target.regset_bitmap == NULL
4145 || (the_low_target.regset_bitmap[index] & mask) != 0));
4146}
4147
58caa3dc 4148#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4149
4150int
4151register_addr (int regnum)
4152{
4153 int addr;
4154
4155 if (regnum < 0 || regnum >= the_low_target.num_regs)
4156 error ("Invalid register number %d.", regnum);
4157
4158 addr = the_low_target.regmap[regnum];
4159
4160 return addr;
4161}
4162
4163/* Fetch one register. */
4164static void
4165fetch_register (struct regcache *regcache, int regno)
4166{
4167 CORE_ADDR regaddr;
4168 int i, size;
4169 char *buf;
4170 int pid;
4171
4172 if (regno >= the_low_target.num_regs)
4173 return;
4174 if ((*the_low_target.cannot_fetch_register) (regno))
4175 return;
4176
4177 regaddr = register_addr (regno);
4178 if (regaddr == -1)
4179 return;
4180
4181 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4182 & -sizeof (PTRACE_XFER_TYPE));
4183 buf = alloca (size);
4184
4185 pid = lwpid_of (get_thread_lwp (current_inferior));
4186 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4187 {
4188 errno = 0;
4189 *(PTRACE_XFER_TYPE *) (buf + i) =
4190 ptrace (PTRACE_PEEKUSER, pid,
4191 /* Coerce to a uintptr_t first to avoid potential gcc warning
4192 of coercing an 8 byte integer to a 4 byte pointer. */
4193 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4194 regaddr += sizeof (PTRACE_XFER_TYPE);
4195 if (errno != 0)
4196 error ("reading register %d: %s", regno, strerror (errno));
4197 }
4198
4199 if (the_low_target.supply_ptrace_register)
4200 the_low_target.supply_ptrace_register (regcache, regno, buf);
4201 else
4202 supply_register (regcache, regno, buf);
4203}
4204
4205/* Store one register. */
4206static void
4207store_register (struct regcache *regcache, int regno)
4208{
4209 CORE_ADDR regaddr;
4210 int i, size;
4211 char *buf;
4212 int pid;
4213
4214 if (regno >= the_low_target.num_regs)
4215 return;
4216 if ((*the_low_target.cannot_store_register) (regno))
4217 return;
4218
4219 regaddr = register_addr (regno);
4220 if (regaddr == -1)
4221 return;
4222
4223 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4224 & -sizeof (PTRACE_XFER_TYPE));
4225 buf = alloca (size);
4226 memset (buf, 0, size);
4227
4228 if (the_low_target.collect_ptrace_register)
4229 the_low_target.collect_ptrace_register (regcache, regno, buf);
4230 else
4231 collect_register (regcache, regno, buf);
4232
4233 pid = lwpid_of (get_thread_lwp (current_inferior));
4234 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4235 {
4236 errno = 0;
4237 ptrace (PTRACE_POKEUSER, pid,
4238 /* Coerce to a uintptr_t first to avoid potential gcc warning
4239 about coercing an 8 byte integer to a 4 byte pointer. */
4240 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4241 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4242 if (errno != 0)
4243 {
4244 /* At this point, ESRCH should mean the process is
4245 already gone, in which case we simply ignore attempts
4246 to change its registers. See also the related
4247 comment in linux_resume_one_lwp. */
4248 if (errno == ESRCH)
4249 return;
4250
4251 if ((*the_low_target.cannot_store_register) (regno) == 0)
4252 error ("writing register %d: %s", regno, strerror (errno));
4253 }
4254 regaddr += sizeof (PTRACE_XFER_TYPE);
4255 }
4256}
4257
4258/* Fetch all registers, or just one, from the child process.
4259 If REGNO is -1, do this for all registers, skipping any that are
4260 assumed to have been retrieved by regsets_fetch_inferior_registers,
4261 unless ALL is non-zero.
4262 Otherwise, REGNO specifies which register (so we can save time). */
4263static void
4264usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4265{
4266 if (regno == -1)
4267 {
4268 for (regno = 0; regno < the_low_target.num_regs; regno++)
4269 if (all || !linux_register_in_regsets (regno))
4270 fetch_register (regcache, regno);
4271 }
4272 else
4273 fetch_register (regcache, regno);
4274}
4275
4276/* Store our register values back into the inferior.
4277 If REGNO is -1, do this for all registers, skipping any that are
4278 assumed to have been saved by regsets_store_inferior_registers,
4279 unless ALL is non-zero.
4280 Otherwise, REGNO specifies which register (so we can save time). */
4281static void
4282usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4283{
4284 if (regno == -1)
4285 {
4286 for (regno = 0; regno < the_low_target.num_regs; regno++)
4287 if (all || !linux_register_in_regsets (regno))
4288 store_register (regcache, regno);
4289 }
4290 else
4291 store_register (regcache, regno);
4292}
4293
4294#else /* !HAVE_LINUX_USRREGS */
4295
4296#define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4297#define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4298
58caa3dc 4299#endif
1faeff08
MR
4300
4301
4302void
4303linux_fetch_registers (struct regcache *regcache, int regno)
4304{
4305 int use_regsets;
4306 int all = 0;
4307
4308 if (regno == -1)
4309 {
c14dfd32
PA
4310 if (the_low_target.fetch_register != NULL)
4311 for (regno = 0; regno < the_low_target.num_regs; regno++)
4312 (*the_low_target.fetch_register) (regcache, regno);
4313
1faeff08 4314 all = regsets_fetch_inferior_registers (regcache);
c14dfd32 4315 usr_fetch_inferior_registers (regcache, -1, all);
1faeff08
MR
4316 }
4317 else
4318 {
c14dfd32
PA
4319 if (the_low_target.fetch_register != NULL
4320 && (*the_low_target.fetch_register) (regcache, regno))
4321 return;
4322
1faeff08
MR
4323 use_regsets = linux_register_in_regsets (regno);
4324 if (use_regsets)
4325 all = regsets_fetch_inferior_registers (regcache);
4326 if (!use_regsets || all)
4327 usr_fetch_inferior_registers (regcache, regno, 1);
4328 }
58caa3dc
DJ
4329}
4330
4331void
442ea881 4332linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 4333{
1faeff08
MR
4334 int use_regsets;
4335 int all = 0;
4336
4337 if (regno == -1)
4338 {
4339 all = regsets_store_inferior_registers (regcache);
4340 usr_store_inferior_registers (regcache, regno, all);
4341 }
4342 else
4343 {
4344 use_regsets = linux_register_in_regsets (regno);
4345 if (use_regsets)
4346 all = regsets_store_inferior_registers (regcache);
4347 if (!use_regsets || all)
4348 usr_store_inferior_registers (regcache, regno, 1);
4349 }
58caa3dc
DJ
4350}
4351
da6d8c04 4352
da6d8c04
DJ
4353/* Copy LEN bytes from inferior's memory starting at MEMADDR
4354 to debugger memory starting at MYADDR. */
4355
c3e735a6 4356static int
f450004a 4357linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
4358{
4359 register int i;
4360 /* Round starting address down to longword boundary. */
4361 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4362 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
4363 register int count
4364 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
4365 / sizeof (PTRACE_XFER_TYPE);
4366 /* Allocate buffer of that many longwords. */
aa691b87 4367 register PTRACE_XFER_TYPE *buffer
da6d8c04 4368 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
4369 int fd;
4370 char filename[64];
95954743 4371 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
4372
4373 /* Try using /proc. Don't bother for one word. */
4374 if (len >= 3 * sizeof (long))
4375 {
4376 /* We could keep this file open and cache it - possibly one per
4377 thread. That requires some juggling, but is even faster. */
95954743 4378 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
4379 fd = open (filename, O_RDONLY | O_LARGEFILE);
4380 if (fd == -1)
4381 goto no_proc;
4382
4383 /* If pread64 is available, use it. It's faster if the kernel
4384 supports it (only one syscall), and it's 64-bit safe even on
4385 32-bit platforms (for instance, SPARC debugging a SPARC64
4386 application). */
4387#ifdef HAVE_PREAD64
4388 if (pread64 (fd, myaddr, len, memaddr) != len)
4389#else
1de1badb 4390 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
4391#endif
4392 {
4393 close (fd);
4394 goto no_proc;
4395 }
4396
4397 close (fd);
4398 return 0;
4399 }
da6d8c04 4400
fd462a61 4401 no_proc:
da6d8c04
DJ
4402 /* Read all the longwords */
4403 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4404 {
c3e735a6 4405 errno = 0;
14ce3065
DE
4406 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4407 about coercing an 8 byte integer to a 4 byte pointer. */
4408 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4409 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
4410 if (errno)
4411 return errno;
da6d8c04
DJ
4412 }
4413
4414 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
4415 memcpy (myaddr,
4416 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4417 len);
c3e735a6
DJ
4418
4419 return 0;
da6d8c04
DJ
4420}
4421
93ae6fdc
PA
4422/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4423 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
4424 returns the value of errno. */
4425
ce3a066d 4426static int
f450004a 4427linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4428{
4429 register int i;
4430 /* Round starting address down to longword boundary. */
4431 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4432 /* Round ending address up; get number of longwords that makes. */
4433 register int count
493e2a69
MS
4434 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4435 / sizeof (PTRACE_XFER_TYPE);
4436
da6d8c04 4437 /* Allocate buffer of that many longwords. */
493e2a69
MS
4438 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4439 alloca (count * sizeof (PTRACE_XFER_TYPE));
4440
95954743 4441 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 4442
0d62e5e8
DJ
4443 if (debug_threads)
4444 {
58d6951d
DJ
4445 /* Dump up to four bytes. */
4446 unsigned int val = * (unsigned int *) myaddr;
4447 if (len == 1)
4448 val = val & 0xff;
4449 else if (len == 2)
4450 val = val & 0xffff;
4451 else if (len == 3)
4452 val = val & 0xffffff;
4453 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4454 val, (long)memaddr);
0d62e5e8
DJ
4455 }
4456
da6d8c04
DJ
4457 /* Fill start and end extra bytes of buffer with existing memory data. */
4458
93ae6fdc 4459 errno = 0;
14ce3065
DE
4460 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4461 about coercing an 8 byte integer to a 4 byte pointer. */
4462 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4463 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
4464 if (errno)
4465 return errno;
da6d8c04
DJ
4466
4467 if (count > 1)
4468 {
93ae6fdc 4469 errno = 0;
da6d8c04 4470 buffer[count - 1]
95954743 4471 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4472 /* Coerce to a uintptr_t first to avoid potential gcc warning
4473 about coercing an 8 byte integer to a 4 byte pointer. */
4474 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4475 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 4476 0);
93ae6fdc
PA
4477 if (errno)
4478 return errno;
da6d8c04
DJ
4479 }
4480
93ae6fdc 4481 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4482
493e2a69
MS
4483 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4484 myaddr, len);
da6d8c04
DJ
4485
4486 /* Write the entire buffer. */
4487
4488 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4489 {
4490 errno = 0;
14ce3065
DE
4491 ptrace (PTRACE_POKETEXT, pid,
4492 /* Coerce to a uintptr_t first to avoid potential gcc warning
4493 about coercing an 8 byte integer to a 4 byte pointer. */
4494 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4495 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
4496 if (errno)
4497 return errno;
4498 }
4499
4500 return 0;
4501}
2f2893d9 4502
6076632b 4503/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
4504static int linux_supports_tracefork_flag;
4505
1e7fc18c
PA
4506static void
4507linux_enable_event_reporting (int pid)
4508{
4509 if (!linux_supports_tracefork_flag)
4510 return;
4511
4512 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4513}
4514
51c2684e 4515/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 4516
51c2684e
DJ
4517static int
4518linux_tracefork_grandchild (void *arg)
4519{
4520 _exit (0);
4521}
4522
7407e2de
AS
4523#define STACK_SIZE 4096
4524
51c2684e
DJ
4525static int
4526linux_tracefork_child (void *arg)
24a09b5f
DJ
4527{
4528 ptrace (PTRACE_TRACEME, 0, 0, 0);
4529 kill (getpid (), SIGSTOP);
e4b7f41c
JK
4530
4531#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4532
4533 if (fork () == 0)
4534 linux_tracefork_grandchild (NULL);
4535
4536#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4537
7407e2de
AS
4538#ifdef __ia64__
4539 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4540 CLONE_VM | SIGCHLD, NULL);
4541#else
a1f2ce7d 4542 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
7407e2de
AS
4543 CLONE_VM | SIGCHLD, NULL);
4544#endif
e4b7f41c
JK
4545
4546#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4547
24a09b5f
DJ
4548 _exit (0);
4549}
4550
24a09b5f
DJ
4551/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4552 sure that we can enable the option, and that it had the desired
4553 effect. */
4554
4555static void
4556linux_test_for_tracefork (void)
4557{
4558 int child_pid, ret, status;
4559 long second_pid;
e4b7f41c 4560#if defined(__UCLIBC__) && defined(HAS_NOMMU)
bca929d3 4561 char *stack = xmalloc (STACK_SIZE * 4);
e4b7f41c 4562#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
4563
4564 linux_supports_tracefork_flag = 0;
4565
e4b7f41c
JK
4566#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4567
4568 child_pid = fork ();
4569 if (child_pid == 0)
4570 linux_tracefork_child (NULL);
4571
4572#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4573
51c2684e 4574 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
7407e2de
AS
4575#ifdef __ia64__
4576 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4577 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c 4578#else /* !__ia64__ */
7407e2de
AS
4579 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4580 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c
JK
4581#endif /* !__ia64__ */
4582
4583#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4584
24a09b5f 4585 if (child_pid == -1)
51c2684e 4586 perror_with_name ("clone");
24a09b5f
DJ
4587
4588 ret = my_waitpid (child_pid, &status, 0);
4589 if (ret == -1)
4590 perror_with_name ("waitpid");
4591 else if (ret != child_pid)
4592 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4593 if (! WIFSTOPPED (status))
4594 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4595
14ce3065
DE
4596 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4597 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
24a09b5f
DJ
4598 if (ret != 0)
4599 {
4600 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4601 if (ret != 0)
4602 {
4603 warning ("linux_test_for_tracefork: failed to kill child");
4604 return;
4605 }
4606
4607 ret = my_waitpid (child_pid, &status, 0);
4608 if (ret != child_pid)
4609 warning ("linux_test_for_tracefork: failed to wait for killed child");
4610 else if (!WIFSIGNALED (status))
4611 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4612 "killed child", status);
4613
4614 return;
4615 }
4616
4617 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4618 if (ret != 0)
4619 warning ("linux_test_for_tracefork: failed to resume child");
4620
4621 ret = my_waitpid (child_pid, &status, 0);
4622
4623 if (ret == child_pid && WIFSTOPPED (status)
4624 && status >> 16 == PTRACE_EVENT_FORK)
4625 {
4626 second_pid = 0;
4627 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4628 if (ret == 0 && second_pid != 0)
4629 {
4630 int second_status;
4631
4632 linux_supports_tracefork_flag = 1;
4633 my_waitpid (second_pid, &second_status, 0);
4634 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4635 if (ret != 0)
4636 warning ("linux_test_for_tracefork: failed to kill second child");
4637 my_waitpid (second_pid, &status, 0);
4638 }
4639 }
4640 else
4641 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4642 "(%d, status 0x%x)", ret, status);
4643
4644 do
4645 {
4646 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4647 if (ret != 0)
4648 warning ("linux_test_for_tracefork: failed to kill child");
4649 my_waitpid (child_pid, &status, 0);
4650 }
4651 while (WIFSTOPPED (status));
51c2684e 4652
e4b7f41c 4653#if defined(__UCLIBC__) && defined(HAS_NOMMU)
51c2684e 4654 free (stack);
e4b7f41c 4655#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
4656}
4657
4658
2f2893d9
DJ
4659static void
4660linux_look_up_symbols (void)
4661{
0d62e5e8 4662#ifdef USE_THREAD_DB
95954743
PA
4663 struct process_info *proc = current_process ();
4664
cdbfd419 4665 if (proc->private->thread_db != NULL)
0d62e5e8
DJ
4666 return;
4667
6076632b
DE
4668 /* If the kernel supports tracing forks then it also supports tracing
4669 clones, and then we don't need to use the magic thread event breakpoint
4670 to learn about threads. */
cdbfd419 4671 thread_db_init (!linux_supports_tracefork_flag);
0d62e5e8
DJ
4672#endif
4673}
4674
e5379b03 4675static void
ef57601b 4676linux_request_interrupt (void)
e5379b03 4677{
a1928bad 4678 extern unsigned long signal_pid;
e5379b03 4679
95954743
PA
4680 if (!ptid_equal (cont_thread, null_ptid)
4681 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 4682 {
54a0b537 4683 struct lwp_info *lwp;
bd99dc85 4684 int lwpid;
e5379b03 4685
54a0b537 4686 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
4687 lwpid = lwpid_of (lwp);
4688 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
4689 }
4690 else
ef57601b 4691 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
4692}
4693
aa691b87
RM
4694/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4695 to debugger memory starting at MYADDR. */
4696
4697static int
f450004a 4698linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4699{
4700 char filename[PATH_MAX];
4701 int fd, n;
95954743 4702 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 4703
6cebaf6e 4704 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4705
4706 fd = open (filename, O_RDONLY);
4707 if (fd < 0)
4708 return -1;
4709
4710 if (offset != (CORE_ADDR) 0
4711 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4712 n = -1;
4713 else
4714 n = read (fd, myaddr, len);
4715
4716 close (fd);
4717
4718 return n;
4719}
4720
d993e290
PA
4721/* These breakpoint and watchpoint related wrapper functions simply
4722 pass on the function call if the target has registered a
4723 corresponding function. */
e013ee27
OF
4724
4725static int
d993e290 4726linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 4727{
d993e290
PA
4728 if (the_low_target.insert_point != NULL)
4729 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
4730 else
4731 /* Unsupported (see target.h). */
4732 return 1;
4733}
4734
4735static int
d993e290 4736linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 4737{
d993e290
PA
4738 if (the_low_target.remove_point != NULL)
4739 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
4740 else
4741 /* Unsupported (see target.h). */
4742 return 1;
4743}
4744
4745static int
4746linux_stopped_by_watchpoint (void)
4747{
c3adc08c
PA
4748 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4749
4750 return lwp->stopped_by_watchpoint;
e013ee27
OF
4751}
4752
4753static CORE_ADDR
4754linux_stopped_data_address (void)
4755{
c3adc08c
PA
4756 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4757
4758 return lwp->stopped_data_address;
e013ee27
OF
4759}
4760
42c81e2a 4761#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
4762#if defined(__mcoldfire__)
4763/* These should really be defined in the kernel's ptrace.h header. */
4764#define PT_TEXT_ADDR 49*4
4765#define PT_DATA_ADDR 50*4
4766#define PT_TEXT_END_ADDR 51*4
eb826dc6
MF
4767#elif defined(BFIN)
4768#define PT_TEXT_ADDR 220
4769#define PT_TEXT_END_ADDR 224
4770#define PT_DATA_ADDR 228
58dbd541
YQ
4771#elif defined(__TMS320C6X__)
4772#define PT_TEXT_ADDR (0x10000*4)
4773#define PT_DATA_ADDR (0x10004*4)
4774#define PT_TEXT_END_ADDR (0x10008*4)
52fb6437
NS
4775#endif
4776
4777/* Under uClinux, programs are loaded at non-zero offsets, which we need
4778 to tell gdb about. */
4779
4780static int
4781linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4782{
4783#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4784 unsigned long text, text_end, data;
bd99dc85 4785 int pid = lwpid_of (get_thread_lwp (current_inferior));
52fb6437
NS
4786
4787 errno = 0;
4788
4789 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4790 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4791 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4792
4793 if (errno == 0)
4794 {
4795 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
4796 used by gdb) are relative to the beginning of the program,
4797 with the data segment immediately following the text segment.
4798 However, the actual runtime layout in memory may put the data
4799 somewhere else, so when we send gdb a data base-address, we
4800 use the real data base address and subtract the compile-time
4801 data base-address from it (which is just the length of the
4802 text segment). BSS immediately follows data in both
4803 cases. */
52fb6437
NS
4804 *text_p = text;
4805 *data_p = data - (text_end - text);
1b3f6016 4806
52fb6437
NS
4807 return 1;
4808 }
4809#endif
4810 return 0;
4811}
4812#endif
4813
07e059b5
VP
4814static int
4815linux_qxfer_osdata (const char *annex,
1b3f6016
PA
4816 unsigned char *readbuf, unsigned const char *writebuf,
4817 CORE_ADDR offset, int len)
07e059b5 4818{
d26e3629 4819 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4820}
4821
d0722149
DE
4822/* Convert a native/host siginfo object, into/from the siginfo in the
4823 layout of the inferiors' architecture. */
4824
4825static void
a5362b9a 4826siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
4827{
4828 int done = 0;
4829
4830 if (the_low_target.siginfo_fixup != NULL)
4831 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4832
4833 /* If there was no callback, or the callback didn't do anything,
4834 then just do a straight memcpy. */
4835 if (!done)
4836 {
4837 if (direction == 1)
a5362b9a 4838 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 4839 else
a5362b9a 4840 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
4841 }
4842}
4843
/* Implement the qXfer:siginfo:read / qXfer:siginfo:write target
   object: transfer LEN bytes at OFFSET of the current LWP's pending
   siginfo, into READBUF or out of WRITEBUF (exactly one is non-NULL).
   Returns the number of bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  /* The siginfo in the *inferior's* layout, which may differ from the
     native layout when a 64-bit gdbserver debugs a 32-bit inferior.  */
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write we first fetch the current siginfo, so that a
     partial write only modifies the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4892
bd99dc85
PA
4893/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4894 so we notice when children change state; as the handler for the
4895 sigsuspend in my_waitpid. */
4896
4897static void
4898sigchld_handler (int signo)
4899{
4900 int old_errno = errno;
4901
4902 if (debug_threads)
e581f2b4
PA
4903 {
4904 do
4905 {
4906 /* fprintf is not async-signal-safe, so call write
4907 directly. */
4908 if (write (2, "sigchld_handler\n",
4909 sizeof ("sigchld_handler\n") - 1) < 0)
4910 break; /* just ignore */
4911 } while (0);
4912 }
bd99dc85
PA
4913
4914 if (target_is_async_p ())
4915 async_file_mark (); /* trigger a linux_wait */
4916
4917 errno = old_errno;
4918}
4919
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4925
/* Enable (ENABLE != 0) or disable async (non-stop) mode by creating
   or tearing down the event pipe that wakes the event loop when
   SIGCHLD arrives.  Returns the previous enablement state.  */

static int
linux_async (int enable)
{
  /* The pipe being open is the "async is enabled" flag.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes, so sigchld_handler never
	 observes a half-constructed (or half-destroyed) pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Both ends are non-blocking: the handler must never stall
	     writing, and the event loop drains without blocking.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4973
/* Switch non-stop mode on (NONSTOP != 0) or off.  Always succeeds
   (returns 0).  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4981
/* Debugging multiple processes at once is always supported on Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4987
/* Disabling address-space randomization relies on personality(2),
   which is only usable when configure detected it.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 4997
/* The in-process agent is always supported on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5003
efcbbd14
UW
5004/* Enumerate spufs IDs for process PID. */
5005static int
5006spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5007{
5008 int pos = 0;
5009 int written = 0;
5010 char path[128];
5011 DIR *dir;
5012 struct dirent *entry;
5013
5014 sprintf (path, "/proc/%ld/fd", pid);
5015 dir = opendir (path);
5016 if (!dir)
5017 return -1;
5018
5019 rewinddir (dir);
5020 while ((entry = readdir (dir)) != NULL)
5021 {
5022 struct stat st;
5023 struct statfs stfs;
5024 int fd;
5025
5026 fd = atoi (entry->d_name);
5027 if (!fd)
5028 continue;
5029
5030 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5031 if (stat (path, &st) != 0)
5032 continue;
5033 if (!S_ISDIR (st.st_mode))
5034 continue;
5035
5036 if (statfs (path, &stfs) != 0)
5037 continue;
5038 if (stfs.f_type != SPUFS_MAGIC)
5039 continue;
5040
5041 if (pos >= offset && pos + 4 <= offset + len)
5042 {
5043 *(unsigned int *)(buf + pos - offset) = fd;
5044 written += 4;
5045 }
5046 pos += 4;
5047 }
5048
5049 closedir (dir);
5050 return written;
5051}
5052
5053/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5054 object type, using the /proc file system. */
5055static int
5056linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5057 unsigned const char *writebuf,
5058 CORE_ADDR offset, int len)
5059{
5060 long pid = lwpid_of (get_thread_lwp (current_inferior));
5061 char buf[128];
5062 int fd = 0;
5063 int ret = 0;
5064
5065 if (!writebuf && !readbuf)
5066 return -1;
5067
5068 if (!*annex)
5069 {
5070 if (!readbuf)
5071 return -1;
5072 else
5073 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5074 }
5075
5076 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5077 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5078 if (fd <= 0)
5079 return -1;
5080
5081 if (offset != 0
5082 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5083 {
5084 close (fd);
5085 return 0;
5086 }
5087
5088 if (writebuf)
5089 ret = write (fd, writebuf, (size_t) len);
5090 else
5091 ret = read (fd, readbuf, (size_t) len);
5092
5093 close (fd);
5094 return ret;
5095}
5096
/* FDPIC/DSBT load-map support: on no-MMU targets the kernel exposes
   the executable's segment mapping via a ptrace request; the whole
   facility exists only when the kernel headers define one of the two
   request codes.  */
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Copy up to LEN bytes of the load map selected by ANNEX ("exec" or
   "interp"), starting at OFFSET, into MYADDR.  Returns the number of
   bytes copied, or -1 on error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills DATA with a pointer into gdbserver's own address
     space — presumably a kernel-maintained copy; confirm against the
     arch ptrace documentation.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is unsigned, so "offset < 0" can never be
     true; kept byte-identical anyway.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5174
/* Pass the qSupported QUERY on to the architecture-specific handler,
   if the target provides one.  */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}
5181
219f2f23
PA
5182static int
5183linux_supports_tracepoints (void)
5184{
5185 if (*the_low_target.supports_tracepoints == NULL)
5186 return 0;
5187
5188 return (*the_low_target.supports_tracepoints) ();
5189}
5190
5191static CORE_ADDR
5192linux_read_pc (struct regcache *regcache)
5193{
5194 if (the_low_target.get_pc == NULL)
5195 return 0;
5196
5197 return (*the_low_target.get_pc) (regcache);
5198}
5199
5200static void
5201linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5202{
5203 gdb_assert (the_low_target.set_pc != NULL);
5204
5205 (*the_low_target.set_pc) (regcache, pc);
5206}
5207
8336d594
PA
5208static int
5209linux_thread_stopped (struct thread_info *thread)
5210{
5211 return get_thread_lwp (thread)->stopped;
5212}
5213
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps unchanged.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
5221
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps unchanged.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
5230
/* Called before memory accesses; in non-stop mode all LWPs must be
   stopped first.  Always succeeds (returns 0).  */

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}
5240
/* Counterpart of linux_prepare_to_access_memory: resume the LWPs that
   were paused for the access.  */

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
5249
/* Build the jump pad for a fast tracepoint.  This is a pure
   pass-through to the architecture hook; see the target_ops
   documentation for the meaning of the in/out parameters.  Assumes
   the hook is non-NULL — presumably callers check
   supports_fast_tracepoints first; confirm against callers.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
5271
6a271cae
PA
5272static struct emit_ops *
5273linux_emit_ops (void)
5274{
5275 if (the_low_target.emit_ops != NULL)
5276 return (*the_low_target.emit_ops) ();
5277 else
5278 return NULL;
5279}
5280
/* Return the minimum instruction length a fast tracepoint can be set
   on.  NOTE(review): no NULL check on the hook, unlike the other
   wrappers — presumably only reached when fast tracepoints are
   supported; confirm against callers.  */

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
5286
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for the AT_PHDR and AT_PHNUM entries, using
   64- or 32-bit auxv record layout according to IS_ELF64.  Returns 1
   if the auxv file cannot be opened, 2 if either entry is missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];   /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Read one auxv record at a time, stopping early once both values
     have been found.  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
5352
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Locates the program headers through the auxv, computes the load
   relocation from the PT_PHDR entry, then returns the relocated
   address of the PT_DYNAMIC segment.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);	/* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
5426
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the .dynamic section entry by entry until DT_NULL (or a read
     failure) terminates it.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
	  /* Union so the raw bytes read from the inferior can be
	     reinterpreted as the pointed-to value.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  /* DT_MIPS_RLD_MAP holds the address of a pointer to the
	     r_debug structure, hence the extra dereference.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }

	  /* Remember DT_DEBUG but keep scanning in case a
	     DT_MIPS_RLD_MAP entry follows.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
5502
5503/* Read one pointer from MEMADDR in the inferior. */
5504
5505static int
5506read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5507{
485f1ee4
PA
5508 int ret;
5509
5510 /* Go through a union so this works on either big or little endian
5511 hosts, when the inferior's pointer size is smaller than the size
5512 of CORE_ADDR. It is assumed the inferior's endianness is the
5513 same of the superior's. */
5514 union
5515 {
5516 CORE_ADDR core_addr;
5517 unsigned int ui;
5518 unsigned char uc;
5519 } addr;
5520
5521 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5522 if (ret == 0)
5523 {
5524 if (ptr_size == sizeof (CORE_ADDR))
5525 *ptr = addr.core_addr;
5526 else if (ptr_size == sizeof (unsigned int))
5527 *ptr = addr.ui;
5528 else
5529 gdb_assert_not_reached ("unhandled pointer size");
5530 }
5531 return ret;
2268b414
JK
5532}
5533
/* Byte offsets of the r_debug and link_map fields for the inferior's
   word size; instantiated as 32- and 64-bit tables in
   linux_qxfer_libraries_svr4.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5557
/* Construct qXfer:libraries-svr4:read reply: walk the inferior's
   r_debug link-map chain and render it as a <library-list-svr4> XML
   document, returning LEN bytes of it starting at OFFSET.  Returns
   the byte count copied into READBUF, -1 on error, -2 for an
   unsupported write.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Hard-coded glibc layouts for 32- and 64-bit inferiors.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Determine the inferior's word size from its executable image.
     NOTE(review): a failed elf_64_file_p presumably yields 0 and thus
     the 32-bit layout — confirm elf_64_file_p's error convention.  */
  pid = lwpid_of (get_thread_lwp (current_inferior));
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;

  /* &_r_debug is cached per process; 0 means "not looked up yet".  */
  if (priv->r_debug == 0)
    priv->r_debug = get_r_debug (pid, is_elf64);

  if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
    {
      document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
    }
  else
    {
      int allocated = 1024;
      char *p;
      const int ptr_size = is_elf64 ? 8 : 4;
      CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
      int r_version, header_done = 0;

      document = xmalloc (allocated);
      strcpy (document, "<library-list-svr4 version=\"1.0\"");
      p = document + strlen (document);

      r_version = 0;
      if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
			     (unsigned char *) &r_version,
			     sizeof (r_version)) != 0
	  || r_version != 1)
	{
	  warning ("unexpected r_debug version %d", r_version);
	  goto done;
	}

      if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
			&lm_addr, ptr_size) != 0)
	{
	  warning ("unable to read r_map from 0x%lx",
		   (long) priv->r_debug + lmo->r_map_offset);
	  goto done;
	}

      /* Walk the doubly-linked link_map list; a failed read of any
	 field of the current entry terminates the walk.  */
      lm_prev = 0;
      while (read_one_ptr (lm_addr + lmo->l_name_offset,
			   &l_name, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_addr_offset,
			      &l_addr, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_ld_offset,
			      &l_ld, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_prev_offset,
			      &l_prev, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_next_offset,
			      &l_next, ptr_size) == 0)
	{
	  unsigned char libname[PATH_MAX];

	  /* Consistency check: the entry's back pointer must match
	     where we came from.  */
	  if (lm_prev != l_prev)
	    {
	      warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		       (long) lm_prev, (long) l_prev);
	      break;
	    }

	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	  else if (lm_prev == 0)
	    {
	      /* First entry with no name is the main executable.  */
	      sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	      p = p + strlen (p);
	    }

	  if (l_next == 0)
	    break;

	  lm_prev = lm_addr;
	  lm_addr = l_next;
	}
    done:
      if (!header_done)
	{
	  /* Empty list; terminate `<library-list-svr4'.  */
	  strcpy (p, "/>");
	}
      else
	strcpy (p, "</library-list-svr4>");
    }

  /* Serve the requested OFFSET/LEN window out of the document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
5733
/* The Linux implementation of the target_ops vector.  Entries are
   positional initializers and MUST stay in the exact order of the
   struct target_ops declaration (see target.h) — do not reorder.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* read_offsets is only meaningful on no-MMU uClibc targets.  */
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NOTE(review): unimplemented slot here (between thread_stopped and
     pause_all) — confirm which target_ops member it corresponds to.  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
};
5799
0d62e5e8
DJ
5800static void
5801linux_init_signals ()
5802{
5803 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5804 to find what the cancel signal actually is. */
1a981360 5805#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 5806 signal (__SIGRTMIN+1, SIG_IGN);
60c3d7b0 5807#endif
0d62e5e8
DJ
5808}
5809
/* One-time initialization of the Linux low-level target: install the
   target vector, breakpoint data, signal dispositions, and the
   SIGCHLD handler.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe which ptrace fork/clone event options this kernel has.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the target's regsets (terminated by a negative size) and
     allocate the parallel "disabled" flag array.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART so slow syscalls are restarted after SIGCHLD; the
     handler itself marks the async event pipe.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}