]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
gdb/
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
7b6bb8da 3 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
d26e3629 22#include "linux-osdata.h"
da6d8c04 23
58caa3dc 24#include <sys/wait.h>
da6d8c04
DJ
25#include <stdio.h>
26#include <sys/param.h>
da6d8c04 27#include <sys/ptrace.h>
af96c192 28#include "linux-ptrace.h"
e3deef73 29#include "linux-procfs.h"
da6d8c04
DJ
30#include <signal.h>
31#include <sys/ioctl.h>
32#include <fcntl.h>
d07c63e7 33#include <string.h>
0a30fbc4
DJ
34#include <stdlib.h>
35#include <unistd.h>
fa6a77dc 36#include <errno.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
efcbbd14
UW
43#include <sys/stat.h>
44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
957f3f49
DE
46#ifndef ELFMAG0
47/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51#include <elf.h>
52#endif
efcbbd14
UW
53
54#ifndef SPUFS_MAGIC
55#define SPUFS_MAGIC 0x23c9b64e
56#endif
da6d8c04 57
03583c20
UW
58#ifdef HAVE_PERSONALITY
59# include <sys/personality.h>
60# if !HAVE_DECL_ADDR_NO_RANDOMIZE
61# define ADDR_NO_RANDOMIZE 0x0040000
62# endif
63#endif
64
fd462a61
DJ
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
68
ec8ebe72
DE
69#ifndef W_STOPCODE
70#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71#endif
72
1a981360
PA
73/* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75#ifndef __SIGRTMIN
76#define __SIGRTMIN 32
77#endif
78
42c81e2a
DJ
79#ifdef __UCLIBC__
80#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81#define HAS_NOMMU
82#endif
83#endif
84
24a09b5f
DJ
85/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
86 representation of the thread ID.
611cb4a5 87
54a0b537 88 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
89 the same as the LWP ID.
90
91 ``all_processes'' is keyed by the "overall process ID", which
92 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 93
54a0b537 94struct inferior_list all_lwps;
0d62e5e8 95
24a09b5f
DJ
96/* A list of all unknown processes which receive stop signals. Some other
97 process will presumably claim each of these as forked children
98 momentarily. */
99
100struct inferior_list stopped_pids;
101
0d62e5e8
DJ
102/* FIXME this is a bit of a hack, and could be removed. */
103int stopping_threads;
104
105/* FIXME make into a target method? */
24a09b5f 106int using_threads = 1;
24a09b5f 107
fa593d66
PA
108/* True if we're presently stabilizing threads (moving them out of
109 jump pads). */
110static int stabilizing_threads;
111
95954743
PA
112/* This flag is true iff we've just created or attached to our first
113 inferior but it has not stopped yet. As soon as it does, we need
114 to call the low target's arch_setup callback. Doing this only on
115 the first inferior avoids reinializing the architecture on every
116 inferior, and avoids messing with the register caches of the
117 already running inferiors. NOTE: this assumes all inferiors under
118 control of gdbserver have the same architecture. */
d61ddec4
UW
119static int new_inferior;
120
2acc282a 121static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 122 int step, int signal, siginfo_t *info);
2bd7c093 123static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
124static void stop_all_lwps (int suspend, struct lwp_info *except);
125static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 126static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 127static void *add_lwp (ptid_t ptid);
c35fafde 128static int linux_stopped_by_watchpoint (void);
95954743 129static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 130static void proceed_all_lwps (void);
d50171e4
PA
131static int finish_step_over (struct lwp_info *lwp);
132static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
133static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 134static void linux_enable_event_reporting (int pid);
d50171e4
PA
135
136/* True if the low target can hardware single-step. Such targets
137 don't need a BREAKPOINT_REINSERT_ADDR callback. */
138
139static int
140can_hardware_single_step (void)
141{
142 return (the_low_target.breakpoint_reinsert_addr == NULL);
143}
144
145/* True if the low target supports memory breakpoints. If so, we'll
146 have a GET_PC implementation. */
147
148static int
149supports_breakpoints (void)
150{
151 return (the_low_target.get_pc != NULL);
152}
0d62e5e8 153
fa593d66
PA
154/* Returns true if this target can support fast tracepoints. This
155 does not mean that the in-process agent has been loaded in the
156 inferior. */
157
158static int
159supports_fast_tracepoints (void)
160{
161 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
162}
163
0d62e5e8
DJ
164struct pending_signals
165{
166 int signal;
32ca6d61 167 siginfo_t info;
0d62e5e8
DJ
168 struct pending_signals *prev;
169};
611cb4a5 170
14ce3065
DE
171#define PTRACE_ARG3_TYPE void *
172#define PTRACE_ARG4_TYPE void *
c6ecbae5 173#define PTRACE_XFER_TYPE long
da6d8c04 174
58caa3dc 175#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
176static char *disabled_regsets;
177static int num_regsets;
58caa3dc
DJ
178#endif
179
bd99dc85
PA
180/* The read/write ends of the pipe registered as waitable file in the
181 event loop. */
182static int linux_event_pipe[2] = { -1, -1 };
183
184/* True if we're currently in async mode. */
185#define target_is_async_p() (linux_event_pipe[0] != -1)
186
02fc4de7 187static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
188static void wait_for_sigstop (struct inferior_list_entry *entry);
189
d0722149
DE
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *proc_name, *exec_name;
  ssize_t len;

  proc_name = xmalloc (MAXPATHLEN);
  exec_name = xmalloc (MAXPATHLEN);
  memset (exec_name, 0, MAXPATHLEN);

  sprintf (proc_name, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate.  Read at most MAXPATHLEN - 1
     bytes and terminate explicitly, so a link exactly MAXPATHLEN
     bytes long can no longer leave the buffer unterminated.  */
  len = readlink (proc_name, exec_name, MAXPATHLEN - 1);
  if (len > 0)
    {
      exec_name[len] = '\0';
      free (proc_name);
      return exec_name;
    }
  else
    {
      /* On failure, fall back to returning the /proc path itself.  */
      free (exec_name);
      return proc_name;
    }
}
215
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  const unsigned char *ident = header->e_ident;

  /* Require the four ELF magic bytes, then check the class field.  */
  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    return 0;

  return ident[EI_CLASS] == ELFCLASS64;
}
227
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd, result;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A file too short to hold an ELF header cannot be 64-bit ELF;
     otherwise decide from the header itself.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    result = 0;
  else
    result = elf_64_header_p (&header);

  close (fd);
  return result;
}
251
bd99dc85
PA
252static void
253delete_lwp (struct lwp_info *lwp)
254{
255 remove_thread (get_lwp_thread (lwp));
256 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 257 free (lwp->arch_private);
bd99dc85
PA
258 free (lwp);
259}
260
95954743
PA
/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* If this is the very first process under our control, arrange for
     the low target's arch_setup callback to run once it stops.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Give the low target a chance to attach its own per-process
     state.  */
  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
281
07d4f67e
DE
282/* Wrapper function for waitpid which handles EINTR, and emulates
283 __WALL for systems where that is not available. */
284
285static int
286my_waitpid (int pid, int *status, int flags)
287{
288 int ret, out_errno;
289
290 if (debug_threads)
291 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
292
293 if (flags & __WALL)
294 {
295 sigset_t block_mask, org_mask, wake_mask;
296 int wnohang;
297
298 wnohang = (flags & WNOHANG) != 0;
299 flags &= ~(__WALL | __WCLONE);
300 flags |= WNOHANG;
301
302 /* Block all signals while here. This avoids knowing about
303 LinuxThread's signals. */
304 sigfillset (&block_mask);
305 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
306
307 /* ... except during the sigsuspend below. */
308 sigemptyset (&wake_mask);
309
310 while (1)
311 {
312 /* Since all signals are blocked, there's no need to check
313 for EINTR here. */
314 ret = waitpid (pid, status, flags);
315 out_errno = errno;
316
317 if (ret == -1 && out_errno != ECHILD)
318 break;
319 else if (ret > 0)
320 break;
321
322 if (flags & __WCLONE)
323 {
324 /* We've tried both flavors now. If WNOHANG is set,
325 there's nothing else to do, just bail out. */
326 if (wnohang)
327 break;
328
329 if (debug_threads)
330 fprintf (stderr, "blocking\n");
331
332 /* Block waiting for signals. */
333 sigsuspend (&wake_mask);
334 }
335
336 flags ^= __WCLONE;
337 }
338
339 sigprocmask (SIG_SETMASK, &org_mask, NULL);
340 }
341 else
342 {
343 do
344 ret = waitpid (pid, status, flags);
345 while (ret == -1 && errno == EINTR);
346 out_errno = errno;
347 }
348
349 if (debug_threads)
350 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
351 pid, flags, status ? *status : -1, ret);
352
353 errno = out_errno;
354 return ret;
355}
356
bd99dc85
PA
357/* Handle a GNU/Linux extended wait response. If we see a clone
358 event, we need to add the new LWP to our list (and not report the
359 trap to higher layers). */
0d62e5e8 360
24a09b5f 361static void
54a0b537 362handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
363{
364 int event = wstat >> 16;
54a0b537 365 struct lwp_info *new_lwp;
24a09b5f
DJ
366
367 if (event == PTRACE_EVENT_CLONE)
368 {
95954743 369 ptid_t ptid;
24a09b5f 370 unsigned long new_pid;
836acd6d 371 int ret, status = W_STOPCODE (SIGSTOP);
24a09b5f 372
bd99dc85 373 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
24a09b5f
DJ
374
375 /* If we haven't already seen the new PID stop, wait for it now. */
376 if (! pull_pid_from_list (&stopped_pids, new_pid))
377 {
378 /* The new child has a pending SIGSTOP. We can't affect it until it
379 hits the SIGSTOP, but we're already attached. */
380
97438e3f 381 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
382
383 if (ret == -1)
384 perror_with_name ("waiting for new child");
385 else if (ret != new_pid)
386 warning ("wait returned unexpected PID %d", ret);
da5898ce 387 else if (!WIFSTOPPED (status))
24a09b5f
DJ
388 warning ("wait returned unexpected status 0x%x", status);
389 }
390
1e7fc18c 391 linux_enable_event_reporting (new_pid);
24a09b5f 392
95954743
PA
393 ptid = ptid_build (pid_of (event_child), new_pid, 0);
394 new_lwp = (struct lwp_info *) add_lwp (ptid);
395 add_thread (ptid, new_lwp);
24a09b5f 396
e27d73f6
DE
397 /* Either we're going to immediately resume the new thread
398 or leave it stopped. linux_resume_one_lwp is a nop if it
399 thinks the thread is currently running, so set this first
400 before calling linux_resume_one_lwp. */
401 new_lwp->stopped = 1;
402
da5898ce
DJ
403 /* Normally we will get the pending SIGSTOP. But in some cases
404 we might get another signal delivered to the group first.
f21cc1a2 405 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
406 if (WSTOPSIG (status) == SIGSTOP)
407 {
d50171e4
PA
408 if (stopping_threads)
409 new_lwp->stop_pc = get_stop_pc (new_lwp);
410 else
e27d73f6 411 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 412 }
24a09b5f 413 else
da5898ce 414 {
54a0b537 415 new_lwp->stop_expected = 1;
d50171e4 416
da5898ce
DJ
417 if (stopping_threads)
418 {
d50171e4 419 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
420 new_lwp->status_pending_p = 1;
421 new_lwp->status_pending = status;
da5898ce
DJ
422 }
423 else
424 /* Pass the signal on. This is what GDB does - except
425 shouldn't we really report it instead? */
e27d73f6 426 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 427 }
24a09b5f
DJ
428
429 /* Always resume the current thread. If we are stopping
430 threads, it will have a pending SIGSTOP; we may as well
431 collect it now. */
2acc282a 432 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
433 }
434}
435
d50171e4
PA
436/* Return the PC as read from the regcache of LWP, without any
437 adjustment. */
438
439static CORE_ADDR
440get_pc (struct lwp_info *lwp)
441{
442 struct thread_info *saved_inferior;
443 struct regcache *regcache;
444 CORE_ADDR pc;
445
446 if (the_low_target.get_pc == NULL)
447 return 0;
448
449 saved_inferior = current_inferior;
450 current_inferior = get_lwp_thread (lwp);
451
452 regcache = get_thread_regcache (current_inferior, 1);
453 pc = (*the_low_target.get_pc) (regcache);
454
455 if (debug_threads)
456 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
457
458 current_inferior = saved_inferior;
459 return pc;
460}
461
462/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
463 The SIGTRAP could mean several things.
464
465 On i386, where decr_pc_after_break is non-zero:
466 If we were single-stepping this process using PTRACE_SINGLESTEP,
467 we will get only the one SIGTRAP (even if the instruction we
468 stepped over was a breakpoint). The value of $eip will be the
469 next instruction.
470 If we continue the process using PTRACE_CONT, we will get a
471 SIGTRAP when we hit a breakpoint. The value of $eip will be
472 the instruction after the breakpoint (i.e. needs to be
473 decremented). If we report the SIGTRAP to GDB, we must also
474 report the undecremented PC. If we cancel the SIGTRAP, we
475 must resume at the decremented PC.
476
477 (Presumably, not yet tested) On a non-decr_pc_after_break machine
478 with hardware or kernel single-step:
479 If we single-step over a breakpoint instruction, our PC will
480 point at the following instruction. If we continue and hit a
481 breakpoint instruction, our PC will point at the breakpoint
482 instruction. */
483
484static CORE_ADDR
d50171e4 485get_stop_pc (struct lwp_info *lwp)
0d62e5e8 486{
d50171e4
PA
487 CORE_ADDR stop_pc;
488
489 if (the_low_target.get_pc == NULL)
490 return 0;
0d62e5e8 491
d50171e4
PA
492 stop_pc = get_pc (lwp);
493
bdabb078
PA
494 if (WSTOPSIG (lwp->last_status) == SIGTRAP
495 && !lwp->stepping
496 && !lwp->stopped_by_watchpoint
497 && lwp->last_status >> 16 == 0)
47c0c975
DE
498 stop_pc -= the_low_target.decr_pc_after_break;
499
500 if (debug_threads)
501 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
502
503 return stop_pc;
0d62e5e8 504}
ce3a066d 505
0d62e5e8 506static void *
95954743 507add_lwp (ptid_t ptid)
611cb4a5 508{
54a0b537 509 struct lwp_info *lwp;
0d62e5e8 510
54a0b537
PA
511 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
512 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 513
95954743 514 lwp->head.id = ptid;
0d62e5e8 515
aa5ca48f
DE
516 if (the_low_target.new_thread != NULL)
517 lwp->arch_private = the_low_target.new_thread ();
518
54a0b537 519 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 520
54a0b537 521 return lwp;
0d62e5e8 522}
611cb4a5 523
da6d8c04
DJ
524/* Start an inferior process and returns its pid.
525 ALLARGS is a vector of program-name and args. */
526
ce3a066d
DJ
527static int
528linux_create_inferior (char *program, char **allargs)
da6d8c04 529{
03583c20
UW
530#ifdef HAVE_PERSONALITY
531 int personality_orig = 0, personality_set = 0;
532#endif
a6dbe5df 533 struct lwp_info *new_lwp;
da6d8c04 534 int pid;
95954743 535 ptid_t ptid;
da6d8c04 536
03583c20
UW
537#ifdef HAVE_PERSONALITY
538 if (disable_randomization)
539 {
540 errno = 0;
541 personality_orig = personality (0xffffffff);
542 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
543 {
544 personality_set = 1;
545 personality (personality_orig | ADDR_NO_RANDOMIZE);
546 }
547 if (errno != 0 || (personality_set
548 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
549 warning ("Error disabling address space randomization: %s",
550 strerror (errno));
551 }
552#endif
553
42c81e2a 554#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
555 pid = vfork ();
556#else
da6d8c04 557 pid = fork ();
52fb6437 558#endif
da6d8c04
DJ
559 if (pid < 0)
560 perror_with_name ("fork");
561
562 if (pid == 0)
563 {
564 ptrace (PTRACE_TRACEME, 0, 0, 0);
565
1a981360 566#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 567 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 568#endif
0d62e5e8 569
a9fa9f7d
DJ
570 setpgid (0, 0);
571
2b876972
DJ
572 execv (program, allargs);
573 if (errno == ENOENT)
574 execvp (program, allargs);
da6d8c04
DJ
575
576 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 577 strerror (errno));
da6d8c04
DJ
578 fflush (stderr);
579 _exit (0177);
580 }
581
03583c20
UW
582#ifdef HAVE_PERSONALITY
583 if (personality_set)
584 {
585 errno = 0;
586 personality (personality_orig);
587 if (errno != 0)
588 warning ("Error restoring address space randomization: %s",
589 strerror (errno));
590 }
591#endif
592
95954743
PA
593 linux_add_process (pid, 0);
594
595 ptid = ptid_build (pid, pid, 0);
596 new_lwp = add_lwp (ptid);
597 add_thread (ptid, new_lwp);
a6dbe5df 598 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 599
a9fa9f7d 600 return pid;
da6d8c04
DJ
601}
602
603/* Attach to an inferior process. */
604
95954743
PA
605static void
606linux_attach_lwp_1 (unsigned long lwpid, int initial)
da6d8c04 607{
95954743 608 ptid_t ptid;
54a0b537 609 struct lwp_info *new_lwp;
611cb4a5 610
95954743 611 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
da6d8c04 612 {
95954743 613 if (!initial)
2d717e4f
DJ
614 {
615 /* If we fail to attach to an LWP, just warn. */
95954743 616 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
2d717e4f
DJ
617 strerror (errno), errno);
618 fflush (stderr);
619 return;
620 }
621 else
622 /* If we fail to attach to a process, report an error. */
95954743 623 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
43d5792c 624 strerror (errno), errno);
da6d8c04
DJ
625 }
626
95954743 627 if (initial)
e3deef73
LM
628 /* If lwp is the tgid, we handle adding existing threads later.
629 Otherwise we just add lwp without bothering about any other
630 threads. */
95954743
PA
631 ptid = ptid_build (lwpid, lwpid, 0);
632 else
633 {
634 /* Note that extracting the pid from the current inferior is
635 safe, since we're always called in the context of the same
636 process as this new thread. */
637 int pid = pid_of (get_thread_lwp (current_inferior));
638 ptid = ptid_build (pid, lwpid, 0);
639 }
24a09b5f 640
95954743
PA
641 new_lwp = (struct lwp_info *) add_lwp (ptid);
642 add_thread (ptid, new_lwp);
0d62e5e8 643
a6dbe5df
PA
644 /* We need to wait for SIGSTOP before being able to make the next
645 ptrace call on this LWP. */
646 new_lwp->must_set_ptrace_flags = 1;
647
0d62e5e8 648 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
649 brings it to a halt.
650
651 There are several cases to consider here:
652
653 1) gdbserver has already attached to the process and is being notified
1b3f6016 654 of a new thread that is being created.
d50171e4
PA
655 In this case we should ignore that SIGSTOP and resume the
656 process. This is handled below by setting stop_expected = 1,
8336d594 657 and the fact that add_thread sets last_resume_kind ==
d50171e4 658 resume_continue.
0e21c1ec
DE
659
660 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
661 to it via attach_inferior.
662 In this case we want the process thread to stop.
d50171e4
PA
663 This is handled by having linux_attach set last_resume_kind ==
664 resume_stop after we return.
e3deef73
LM
665
666 If the pid we are attaching to is also the tgid, we attach to and
667 stop all the existing threads. Otherwise, we attach to pid and
668 ignore any other threads in the same group as this pid.
0e21c1ec
DE
669
670 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
671 existing threads.
672 In this case we want the thread to stop.
673 FIXME: This case is currently not properly handled.
674 We should wait for the SIGSTOP but don't. Things work apparently
675 because enough time passes between when we ptrace (ATTACH) and when
676 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
677
678 On the other hand, if we are currently trying to stop all threads, we
679 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 680 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
681 end of the list, and so the new thread has not yet reached
682 wait_for_sigstop (but will). */
d50171e4 683 new_lwp->stop_expected = 1;
0d62e5e8
DJ
684}
685
95954743
PA
/* Public entry point: attach to one additional (non-initial) LWP.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
691
e3deef73
LM
692/* Attach to PID. If PID is the tgid, attach to it and all
693 of its threads. */
694
0d62e5e8 695int
a1928bad 696linux_attach (unsigned long pid)
0d62e5e8 697{
e3deef73
LM
698 /* Attach to PID. We will check for other threads
699 soon. */
95954743 700 linux_attach_lwp_1 (pid, 1);
95954743 701 linux_add_process (pid, 1);
0d62e5e8 702
bd99dc85
PA
703 if (!non_stop)
704 {
8336d594
PA
705 struct thread_info *thread;
706
707 /* Don't ignore the initial SIGSTOP if we just attached to this
708 process. It will be collected by wait shortly. */
709 thread = find_thread_ptid (ptid_build (pid, pid, 0));
710 thread->last_resume_kind = resume_stop;
bd99dc85 711 }
0d62e5e8 712
e3deef73
LM
713 if (linux_proc_get_tgid (pid) == pid)
714 {
715 DIR *dir;
716 char pathname[128];
717
718 sprintf (pathname, "/proc/%ld/task", pid);
719
720 dir = opendir (pathname);
721
722 if (!dir)
723 {
724 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
725 fflush (stderr);
726 }
727 else
728 {
729 /* At this point we attached to the tgid. Scan the task for
730 existing threads. */
731 unsigned long lwp;
732 int new_threads_found;
733 int iterations = 0;
734 struct dirent *dp;
735
736 while (iterations < 2)
737 {
738 new_threads_found = 0;
739 /* Add all the other threads. While we go through the
740 threads, new threads may be spawned. Cycle through
741 the list of threads until we have done two iterations without
742 finding new threads. */
743 while ((dp = readdir (dir)) != NULL)
744 {
745 /* Fetch one lwp. */
746 lwp = strtoul (dp->d_name, NULL, 10);
747
748 /* Is this a new thread? */
749 if (lwp
750 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
751 {
752 linux_attach_lwp_1 (lwp, 0);
753 new_threads_found++;
754
755 if (debug_threads)
756 fprintf (stderr, "\
757Found and attached to new lwp %ld\n", lwp);
758 }
759 }
760
761 if (!new_threads_found)
762 iterations++;
763 else
764 iterations = 0;
765
766 rewinddir (dir);
767 }
768 closedir (dir);
769 }
770 }
771
95954743
PA
772 return 0;
773}
774
/* Scratch state for counting threads belonging to one process.  */
struct counter
{
  int pid;	/* Process whose threads are being counted.  */
  int count;	/* Number of matching threads seen so far.  */
};
780
781static int
782second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
783{
784 struct counter *counter = args;
785
786 if (ptid_get_pid (entry->id) == counter->pid)
787 {
788 if (++counter->count > 1)
789 return 1;
790 }
d61ddec4 791
da6d8c04
DJ
792 return 0;
793}
794
95954743
PA
795static int
796last_thread_of_process_p (struct thread_info *thread)
797{
798 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
799 int pid = ptid_get_pid (ptid);
800 struct counter counter = { pid , 0 };
da6d8c04 801
95954743
PA
802 return (find_inferior (&all_threads,
803 second_thread_of_pid_p, &counter) == NULL);
804}
805
806/* Kill the inferior lwp. */
807
808static int
809linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
da6d8c04 810{
0d62e5e8 811 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 812 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 813 int wstat;
95954743
PA
814 int pid = * (int *) args;
815
816 if (ptid_get_pid (entry->id) != pid)
817 return 0;
0d62e5e8 818
fd500816
DJ
819 /* We avoid killing the first thread here, because of a Linux kernel (at
820 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
821 the children get a chance to be reaped, it will remain a zombie
822 forever. */
95954743 823
12b42a12 824 if (lwpid_of (lwp) == pid)
95954743
PA
825 {
826 if (debug_threads)
827 fprintf (stderr, "lkop: is last of process %s\n",
828 target_pid_to_str (entry->id));
829 return 0;
830 }
fd500816 831
0d62e5e8
DJ
832 do
833 {
bd99dc85 834 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
0d62e5e8
DJ
835
836 /* Make sure it died. The loop is most likely unnecessary. */
95954743 837 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
bd99dc85 838 } while (pid > 0 && WIFSTOPPED (wstat));
95954743
PA
839
840 return 0;
da6d8c04
DJ
841}
842
95954743
PA
843static int
844linux_kill (int pid)
0d62e5e8 845{
95954743 846 struct process_info *process;
54a0b537 847 struct lwp_info *lwp;
fd500816 848 int wstat;
95954743 849 int lwpid;
fd500816 850
95954743
PA
851 process = find_process_pid (pid);
852 if (process == NULL)
853 return -1;
9d606399 854
f9e39928
PA
855 /* If we're killing a running inferior, make sure it is stopped
856 first, as PTRACE_KILL will not work otherwise. */
7984d532 857 stop_all_lwps (0, NULL);
f9e39928 858
95954743 859 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
fd500816 860
54a0b537 861 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 862 thread in the list, so do so now. */
95954743 863 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 864
784867a5 865 if (lwp == NULL)
fd500816 866 {
784867a5
JK
867 if (debug_threads)
868 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
869 lwpid_of (lwp), pid);
870 }
871 else
872 {
873 if (debug_threads)
874 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
875 lwpid_of (lwp), pid);
fd500816 876
784867a5
JK
877 do
878 {
879 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
880
881 /* Make sure it died. The loop is most likely unnecessary. */
882 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
883 } while (lwpid > 0 && WIFSTOPPED (wstat));
884 }
2d717e4f 885
8336d594 886 the_target->mourn (process);
f9e39928
PA
887
888 /* Since we presently can only stop all lwps of all processes, we
889 need to unstop lwps of other processes. */
7984d532 890 unstop_all_lwps (0, NULL);
95954743 891 return 0;
0d62e5e8
DJ
892}
893
95954743
PA
894static int
895linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
896{
897 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 898 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
899 int pid = * (int *) args;
900
901 if (ptid_get_pid (entry->id) != pid)
902 return 0;
6ad8ae5c 903
ae13219e
DJ
904 /* If this process is stopped but is expecting a SIGSTOP, then make
905 sure we take care of that now. This isn't absolutely guaranteed
906 to collect the SIGSTOP, but is fairly likely to. */
54a0b537 907 if (lwp->stop_expected)
ae13219e 908 {
bd99dc85 909 int wstat;
ae13219e 910 /* Clear stop_expected, so that the SIGSTOP will be reported. */
54a0b537 911 lwp->stop_expected = 0;
f9e39928 912 linux_resume_one_lwp (lwp, 0, 0, NULL);
95954743 913 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
ae13219e
DJ
914 }
915
916 /* Flush any pending changes to the process's registers. */
917 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 918 get_lwp_thread (lwp));
ae13219e
DJ
919
920 /* Finally, let it resume. */
bd99dc85
PA
921 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
922
923 delete_lwp (lwp);
95954743 924 return 0;
6ad8ae5c
DJ
925}
926
95954743
PA
927static int
928linux_detach (int pid)
929{
930 struct process_info *process;
931
932 process = find_process_pid (pid);
933 if (process == NULL)
934 return -1;
935
f9e39928
PA
936 /* Stop all threads before detaching. First, ptrace requires that
937 the thread is stopped to sucessfully detach. Second, thread_db
938 may need to uninstall thread event breakpoints from memory, which
939 only works with a stopped process anyway. */
7984d532 940 stop_all_lwps (0, NULL);
f9e39928 941
ca5c370d 942#ifdef USE_THREAD_DB
8336d594 943 thread_db_detach (process);
ca5c370d
PA
944#endif
945
fa593d66
PA
946 /* Stabilize threads (move out of jump pads). */
947 stabilize_threads ();
948
95954743 949 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
950
951 the_target->mourn (process);
f9e39928
PA
952
953 /* Since we presently can only stop all lwps of all processes, we
954 need to unstop lwps of other processes. */
7984d532 955 unstop_all_lwps (0, NULL);
f9e39928
PA
956 return 0;
957}
958
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  /* Delete only the LWPs of the process being mourned; keep
     returning 0 so the walk visits every entry.  */
  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
972
8336d594
PA
973static void
974linux_mourn (struct process_info *process)
975{
976 struct process_info_private *priv;
977
978#ifdef USE_THREAD_DB
979 thread_db_mourn (process);
980#endif
981
f9e39928
PA
982 find_inferior (&all_lwps, delete_lwp_callback, process);
983
8336d594
PA
984 /* Freeing all private data. */
985 priv = process->private;
986 free (priv->arch_private);
987 free (priv);
988 process->private = NULL;
505106cd
PA
989
990 remove_process (process);
8336d594
PA
991}
992
/* Wait until process PID has fully exited (or been killed), reaping
   it so no zombie is left behind.  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually filled it in; the
       old code read it even after a -1 return (uninitialized).  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1004
6ad8ae5c 1005/* Return nonzero if the given thread is still alive. */
0d62e5e8 1006static int
95954743 1007linux_thread_alive (ptid_t ptid)
0d62e5e8 1008{
95954743
PA
1009 struct lwp_info *lwp = find_lwp_pid (ptid);
1010
1011 /* We assume we always know if a thread exits. If a whole process
1012 exited but we still haven't been able to report it to GDB, we'll
1013 hold on to the last lwp of the dead process. */
1014 if (lwp != NULL)
1015 return !lwp->dead;
0d62e5e8
DJ
1016 else
1017 return 0;
1018}
1019
6bf5e0ba 1020/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1021static int
d50171e4 1022status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1023{
54a0b537 1024 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 1025 ptid_t ptid = * (ptid_t *) arg;
7984d532 1026 struct thread_info *thread;
95954743
PA
1027
1028 /* Check if we're only interested in events from a specific process
1029 or its lwps. */
1030 if (!ptid_equal (minus_one_ptid, ptid)
1031 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1032 return 0;
0d62e5e8 1033
d50171e4
PA
1034 thread = get_lwp_thread (lwp);
1035
1036 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1037 report any status pending the LWP may have. */
8336d594 1038 if (thread->last_resume_kind == resume_stop
7984d532 1039 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4 1040 return 0;
0d62e5e8 1041
d50171e4 1042 return lwp->status_pending_p;
0d62e5e8
DJ
1043}
1044
95954743
PA
1045static int
1046same_lwp (struct inferior_list_entry *entry, void *data)
1047{
1048 ptid_t ptid = *(ptid_t *) data;
1049 int lwp;
1050
1051 if (ptid_get_lwp (ptid) != 0)
1052 lwp = ptid_get_lwp (ptid);
1053 else
1054 lwp = ptid_get_pid (ptid);
1055
1056 if (ptid_get_lwp (entry->id) == lwp)
1057 return 1;
1058
1059 return 0;
1060}
1061
1062struct lwp_info *
1063find_lwp_pid (ptid_t ptid)
1064{
1065 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1066}
1067
bd99dc85 1068static struct lwp_info *
95954743 1069linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
611cb4a5 1070{
0d62e5e8 1071 int ret;
95954743 1072 int to_wait_for = -1;
bd99dc85 1073 struct lwp_info *child = NULL;
0d62e5e8 1074
bd99dc85 1075 if (debug_threads)
95954743
PA
1076 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1077
1078 if (ptid_equal (ptid, minus_one_ptid))
1079 to_wait_for = -1; /* any child */
1080 else
1081 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
0d62e5e8 1082
bd99dc85 1083 options |= __WALL;
0d62e5e8 1084
bd99dc85 1085retry:
0d62e5e8 1086
bd99dc85
PA
1087 ret = my_waitpid (to_wait_for, wstatp, options);
1088 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1089 return NULL;
1090 else if (ret == -1)
1091 perror_with_name ("waitpid");
0d62e5e8
DJ
1092
1093 if (debug_threads
1094 && (!WIFSTOPPED (*wstatp)
1095 || (WSTOPSIG (*wstatp) != 32
1096 && WSTOPSIG (*wstatp) != 33)))
1097 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1098
95954743 1099 child = find_lwp_pid (pid_to_ptid (ret));
0d62e5e8 1100
24a09b5f
DJ
1101 /* If we didn't find a process, one of two things presumably happened:
1102 - A process we started and then detached from has exited. Ignore it.
1103 - A process we are controlling has forked and the new child's stop
1104 was reported to us by the kernel. Save its PID. */
bd99dc85 1105 if (child == NULL && WIFSTOPPED (*wstatp))
24a09b5f
DJ
1106 {
1107 add_pid_to_list (&stopped_pids, ret);
1108 goto retry;
1109 }
bd99dc85 1110 else if (child == NULL)
24a09b5f
DJ
1111 goto retry;
1112
bd99dc85 1113 child->stopped = 1;
0d62e5e8 1114
bd99dc85 1115 child->last_status = *wstatp;
32ca6d61 1116
d61ddec4
UW
1117 /* Architecture-specific setup after inferior is running.
1118 This needs to happen after we have attached to the inferior
1119 and it is stopped for the first time, but before we access
1120 any inferior registers. */
1121 if (new_inferior)
1122 {
1123 the_low_target.arch_setup ();
52fa2412
UW
1124#ifdef HAVE_LINUX_REGSETS
1125 memset (disabled_regsets, 0, num_regsets);
1126#endif
d61ddec4
UW
1127 new_inferior = 0;
1128 }
1129
c3adc08c
PA
1130 /* Fetch the possibly triggered data watchpoint info and store it in
1131 CHILD.
1132
1133 On some archs, like x86, that use debug registers to set
1134 watchpoints, it's possible that the way to know which watched
1135 address trapped, is to check the register that is used to select
1136 which address to watch. Problem is, between setting the
1137 watchpoint and reading back which data address trapped, the user
1138 may change the set of watchpoints, and, as a consequence, GDB
1139 changes the debug registers in the inferior. To avoid reading
1140 back a stale stopped-data-address when that happens, we cache in
1141 LP the fact that a watchpoint trapped, and the corresponding data
1142 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1143 changes the debug registers meanwhile, we have the cached data we
1144 can rely on. */
1145
1146 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1147 {
1148 if (the_low_target.stopped_by_watchpoint == NULL)
1149 {
1150 child->stopped_by_watchpoint = 0;
1151 }
1152 else
1153 {
1154 struct thread_info *saved_inferior;
1155
1156 saved_inferior = current_inferior;
1157 current_inferior = get_lwp_thread (child);
1158
1159 child->stopped_by_watchpoint
1160 = the_low_target.stopped_by_watchpoint ();
1161
1162 if (child->stopped_by_watchpoint)
1163 {
1164 if (the_low_target.stopped_data_address != NULL)
1165 child->stopped_data_address
1166 = the_low_target.stopped_data_address ();
1167 else
1168 child->stopped_data_address = 0;
1169 }
1170
1171 current_inferior = saved_inferior;
1172 }
1173 }
1174
d50171e4
PA
1175 /* Store the STOP_PC, with adjustment applied. This depends on the
1176 architecture being defined already (so that CHILD has a valid
1177 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1178 not). */
1179 if (WIFSTOPPED (*wstatp))
1180 child->stop_pc = get_stop_pc (child);
1181
0d62e5e8 1182 if (debug_threads
47c0c975
DE
1183 && WIFSTOPPED (*wstatp)
1184 && the_low_target.get_pc != NULL)
0d62e5e8 1185 {
896c7fbb 1186 struct thread_info *saved_inferior = current_inferior;
bce522a2 1187 struct regcache *regcache;
47c0c975
DE
1188 CORE_ADDR pc;
1189
d50171e4 1190 current_inferior = get_lwp_thread (child);
bce522a2 1191 regcache = get_thread_regcache (current_inferior, 1);
442ea881 1192 pc = (*the_low_target.get_pc) (regcache);
47c0c975 1193 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
896c7fbb 1194 current_inferior = saved_inferior;
0d62e5e8 1195 }
bd99dc85
PA
1196
1197 return child;
0d62e5e8 1198}
611cb4a5 1199
219f2f23
PA
1200/* This function should only be called if the LWP got a SIGTRAP.
1201
1202 Handle any tracepoint steps or hits. Return true if a tracepoint
1203 event was handled, 0 otherwise. */
1204
1205static int
1206handle_tracepoints (struct lwp_info *lwp)
1207{
1208 struct thread_info *tinfo = get_lwp_thread (lwp);
1209 int tpoint_related_event = 0;
1210
7984d532
PA
1211 /* If this tracepoint hit causes a tracing stop, we'll immediately
1212 uninsert tracepoints. To do this, we temporarily pause all
1213 threads, unpatch away, and then unpause threads. We need to make
1214 sure the unpausing doesn't resume LWP too. */
1215 lwp->suspended++;
1216
219f2f23
PA
1217 /* And we need to be sure that any all-threads-stopping doesn't try
1218 to move threads out of the jump pads, as it could deadlock the
1219 inferior (LWP could be in the jump pad, maybe even holding the
1220 lock.) */
1221
1222 /* Do any necessary step collect actions. */
1223 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1224
fa593d66
PA
1225 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1226
219f2f23
PA
1227 /* See if we just hit a tracepoint and do its main collect
1228 actions. */
1229 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1230
7984d532
PA
1231 lwp->suspended--;
1232
1233 gdb_assert (lwp->suspended == 0);
fa593d66 1234 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1235
219f2f23
PA
1236 if (tpoint_related_event)
1237 {
1238 if (debug_threads)
1239 fprintf (stderr, "got a tracepoint event\n");
1240 return 1;
1241 }
1242
1243 return 0;
1244}
1245
fa593d66
PA
1246/* Convenience wrapper. Returns true if LWP is presently collecting a
1247 fast tracepoint. */
1248
1249static int
1250linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1251 struct fast_tpoint_collect_status *status)
1252{
1253 CORE_ADDR thread_area;
1254
1255 if (the_low_target.get_thread_area == NULL)
1256 return 0;
1257
1258 /* Get the thread area address. This is used to recognize which
1259 thread is which when tracing with the in-process agent library.
1260 We don't read anything from the address, and treat it as opaque;
1261 it's the address itself that we assume is unique per-thread. */
1262 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1263 return 0;
1264
1265 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1266}
1267
1268/* The reason we resume in the caller, is because we want to be able
1269 to pass lwp->status_pending as WSTAT, and we need to clear
1270 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1271 refuses to resume. */
1272
1273static int
1274maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1275{
1276 struct thread_info *saved_inferior;
1277
1278 saved_inferior = current_inferior;
1279 current_inferior = get_lwp_thread (lwp);
1280
1281 if ((wstat == NULL
1282 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1283 && supports_fast_tracepoints ()
1284 && in_process_agent_loaded ())
1285 {
1286 struct fast_tpoint_collect_status status;
1287 int r;
1288
1289 if (debug_threads)
1290 fprintf (stderr, "\
1291Checking whether LWP %ld needs to move out of the jump pad.\n",
1292 lwpid_of (lwp));
1293
1294 r = linux_fast_tracepoint_collecting (lwp, &status);
1295
1296 if (wstat == NULL
1297 || (WSTOPSIG (*wstat) != SIGILL
1298 && WSTOPSIG (*wstat) != SIGFPE
1299 && WSTOPSIG (*wstat) != SIGSEGV
1300 && WSTOPSIG (*wstat) != SIGBUS))
1301 {
1302 lwp->collecting_fast_tracepoint = r;
1303
1304 if (r != 0)
1305 {
1306 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1307 {
1308 /* Haven't executed the original instruction yet.
1309 Set breakpoint there, and wait till it's hit,
1310 then single-step until exiting the jump pad. */
1311 lwp->exit_jump_pad_bkpt
1312 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1313 }
1314
1315 if (debug_threads)
1316 fprintf (stderr, "\
1317Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1318 lwpid_of (lwp));
0cccb683 1319 current_inferior = saved_inferior;
fa593d66
PA
1320
1321 return 1;
1322 }
1323 }
1324 else
1325 {
1326 /* If we get a synchronous signal while collecting, *and*
1327 while executing the (relocated) original instruction,
1328 reset the PC to point at the tpoint address, before
1329 reporting to GDB. Otherwise, it's an IPA lib bug: just
1330 report the signal to GDB, and pray for the best. */
1331
1332 lwp->collecting_fast_tracepoint = 0;
1333
1334 if (r != 0
1335 && (status.adjusted_insn_addr <= lwp->stop_pc
1336 && lwp->stop_pc < status.adjusted_insn_addr_end))
1337 {
1338 siginfo_t info;
1339 struct regcache *regcache;
1340
1341 /* The si_addr on a few signals references the address
1342 of the faulting instruction. Adjust that as
1343 well. */
1344 if ((WSTOPSIG (*wstat) == SIGILL
1345 || WSTOPSIG (*wstat) == SIGFPE
1346 || WSTOPSIG (*wstat) == SIGBUS
1347 || WSTOPSIG (*wstat) == SIGSEGV)
1348 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1349 /* Final check just to make sure we don't clobber
1350 the siginfo of non-kernel-sent signals. */
1351 && (uintptr_t) info.si_addr == lwp->stop_pc)
1352 {
1353 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1354 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1355 }
1356
1357 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1358 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1359 lwp->stop_pc = status.tpoint_addr;
1360
1361 /* Cancel any fast tracepoint lock this thread was
1362 holding. */
1363 force_unlock_trace_buffer ();
1364 }
1365
1366 if (lwp->exit_jump_pad_bkpt != NULL)
1367 {
1368 if (debug_threads)
1369 fprintf (stderr,
1370 "Cancelling fast exit-jump-pad: removing bkpt. "
1371 "stopping all threads momentarily.\n");
1372
1373 stop_all_lwps (1, lwp);
1374 cancel_breakpoints ();
1375
1376 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1377 lwp->exit_jump_pad_bkpt = NULL;
1378
1379 unstop_all_lwps (1, lwp);
1380
1381 gdb_assert (lwp->suspended >= 0);
1382 }
1383 }
1384 }
1385
1386 if (debug_threads)
1387 fprintf (stderr, "\
1388Checking whether LWP %ld needs to move out of the jump pad...no\n",
1389 lwpid_of (lwp));
0cccb683
YQ
1390
1391 current_inferior = saved_inferior;
fa593d66
PA
1392 return 0;
1393}
1394
1395/* Enqueue one signal in the "signals to report later when out of the
1396 jump pad" list. */
1397
1398static void
1399enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1400{
1401 struct pending_signals *p_sig;
1402
1403 if (debug_threads)
1404 fprintf (stderr, "\
1405Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1406
1407 if (debug_threads)
1408 {
1409 struct pending_signals *sig;
1410
1411 for (sig = lwp->pending_signals_to_report;
1412 sig != NULL;
1413 sig = sig->prev)
1414 fprintf (stderr,
1415 " Already queued %d\n",
1416 sig->signal);
1417
1418 fprintf (stderr, " (no more currently queued signals)\n");
1419 }
1420
1a981360
PA
1421 /* Don't enqueue non-RT signals if they are already in the deferred
1422 queue. (SIGSTOP being the easiest signal to see ending up here
1423 twice) */
1424 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1425 {
1426 struct pending_signals *sig;
1427
1428 for (sig = lwp->pending_signals_to_report;
1429 sig != NULL;
1430 sig = sig->prev)
1431 {
1432 if (sig->signal == WSTOPSIG (*wstat))
1433 {
1434 if (debug_threads)
1435 fprintf (stderr,
1436 "Not requeuing already queued non-RT signal %d"
1437 " for LWP %ld\n",
1438 sig->signal,
1439 lwpid_of (lwp));
1440 return;
1441 }
1442 }
1443 }
1444
fa593d66
PA
1445 p_sig = xmalloc (sizeof (*p_sig));
1446 p_sig->prev = lwp->pending_signals_to_report;
1447 p_sig->signal = WSTOPSIG (*wstat);
1448 memset (&p_sig->info, 0, sizeof (siginfo_t));
1449 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1450
1451 lwp->pending_signals_to_report = p_sig;
1452}
1453
1454/* Dequeue one signal from the "signals to report later when out of
1455 the jump pad" list. */
1456
1457static int
1458dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1459{
1460 if (lwp->pending_signals_to_report != NULL)
1461 {
1462 struct pending_signals **p_sig;
1463
1464 p_sig = &lwp->pending_signals_to_report;
1465 while ((*p_sig)->prev != NULL)
1466 p_sig = &(*p_sig)->prev;
1467
1468 *wstat = W_STOPCODE ((*p_sig)->signal);
1469 if ((*p_sig)->info.si_signo != 0)
1470 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1471 free (*p_sig);
1472 *p_sig = NULL;
1473
1474 if (debug_threads)
1475 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1476 WSTOPSIG (*wstat), lwpid_of (lwp));
1477
1478 if (debug_threads)
1479 {
1480 struct pending_signals *sig;
1481
1482 for (sig = lwp->pending_signals_to_report;
1483 sig != NULL;
1484 sig = sig->prev)
1485 fprintf (stderr,
1486 " Still queued %d\n",
1487 sig->signal);
1488
1489 fprintf (stderr, " (no more queued signals)\n");
1490 }
1491
1492 return 1;
1493 }
1494
1495 return 0;
1496}
1497
d50171e4
PA
1498/* Arrange for a breakpoint to be hit again later. We don't keep the
1499 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1500 will handle the current event, eventually we will resume this LWP,
1501 and this breakpoint will trap again. */
1502
1503static int
1504cancel_breakpoint (struct lwp_info *lwp)
1505{
1506 struct thread_info *saved_inferior;
d50171e4
PA
1507
1508 /* There's nothing to do if we don't support breakpoints. */
1509 if (!supports_breakpoints ())
1510 return 0;
1511
d50171e4
PA
1512 /* breakpoint_at reads from current inferior. */
1513 saved_inferior = current_inferior;
1514 current_inferior = get_lwp_thread (lwp);
1515
1516 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1517 {
1518 if (debug_threads)
1519 fprintf (stderr,
1520 "CB: Push back breakpoint for %s\n",
fc7238bb 1521 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1522
1523 /* Back up the PC if necessary. */
1524 if (the_low_target.decr_pc_after_break)
1525 {
1526 struct regcache *regcache
fc7238bb 1527 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1528 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1529 }
1530
1531 current_inferior = saved_inferior;
1532 return 1;
1533 }
1534 else
1535 {
1536 if (debug_threads)
1537 fprintf (stderr,
1538 "CB: No breakpoint found at %s for [%s]\n",
1539 paddress (lwp->stop_pc),
fc7238bb 1540 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1541 }
1542
1543 current_inferior = saved_inferior;
1544 return 0;
1545}
1546
1547/* When the event-loop is doing a step-over, this points at the thread
1548 being stepped. */
1549ptid_t step_over_bkpt;
1550
bd99dc85
PA
1551/* Wait for an event from child PID. If PID is -1, wait for any
1552 child. Store the stop status through the status pointer WSTAT.
1553 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1554 event was found and OPTIONS contains WNOHANG. Return the PID of
1555 the stopped child otherwise. */
1556
0d62e5e8 1557static int
95954743 1558linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
0d62e5e8 1559{
d50171e4
PA
1560 struct lwp_info *event_child, *requested_child;
1561
d50171e4
PA
1562 event_child = NULL;
1563 requested_child = NULL;
0d62e5e8 1564
95954743 1565 /* Check for a lwp with a pending status. */
bd99dc85 1566
95954743
PA
1567 if (ptid_equal (ptid, minus_one_ptid)
1568 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
0d62e5e8 1569 {
54a0b537 1570 event_child = (struct lwp_info *)
d50171e4 1571 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1572 if (debug_threads && event_child)
bd99dc85 1573 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1574 }
1575 else
1576 {
95954743 1577 requested_child = find_lwp_pid (ptid);
d50171e4 1578
fa593d66
PA
1579 if (!stopping_threads
1580 && requested_child->status_pending_p
1581 && requested_child->collecting_fast_tracepoint)
1582 {
1583 enqueue_one_deferred_signal (requested_child,
1584 &requested_child->status_pending);
1585 requested_child->status_pending_p = 0;
1586 requested_child->status_pending = 0;
1587 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1588 }
1589
1590 if (requested_child->suspended
1591 && requested_child->status_pending_p)
1592 fatal ("requesting an event out of a suspended child?");
1593
d50171e4 1594 if (requested_child->status_pending_p)
bd99dc85 1595 event_child = requested_child;
0d62e5e8 1596 }
611cb4a5 1597
0d62e5e8
DJ
1598 if (event_child != NULL)
1599 {
bd99dc85
PA
1600 if (debug_threads)
1601 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1602 lwpid_of (event_child), event_child->status_pending);
1603 *wstat = event_child->status_pending;
1604 event_child->status_pending_p = 0;
1605 event_child->status_pending = 0;
1606 current_inferior = get_lwp_thread (event_child);
1607 return lwpid_of (event_child);
0d62e5e8
DJ
1608 }
1609
1610 /* We only enter this loop if no process has a pending wait status. Thus
1611 any action taken in response to a wait status inside this loop is
1612 responding as soon as we detect the status, not after any pending
1613 events. */
1614 while (1)
1615 {
6bf5e0ba 1616 event_child = linux_wait_for_lwp (ptid, wstat, options);
0d62e5e8 1617
bd99dc85 1618 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1619 {
1620 if (debug_threads)
1621 fprintf (stderr, "WNOHANG set, no event found\n");
1622 return 0;
1623 }
0d62e5e8
DJ
1624
1625 if (event_child == NULL)
1626 error ("event from unknown child");
611cb4a5 1627
bd99dc85 1628 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1629
89be2091 1630 /* Check for thread exit. */
bd99dc85 1631 if (! WIFSTOPPED (*wstat))
0d62e5e8 1632 {
89be2091 1633 if (debug_threads)
95954743 1634 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1635
1636 /* If the last thread is exiting, just return. */
95954743 1637 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1638 {
1639 if (debug_threads)
95954743
PA
1640 fprintf (stderr, "LWP %ld is last lwp of process\n",
1641 lwpid_of (event_child));
bd99dc85
PA
1642 return lwpid_of (event_child);
1643 }
89be2091 1644
bd99dc85
PA
1645 if (!non_stop)
1646 {
1647 current_inferior = (struct thread_info *) all_threads.head;
1648 if (debug_threads)
1649 fprintf (stderr, "Current inferior is now %ld\n",
1650 lwpid_of (get_thread_lwp (current_inferior)));
1651 }
1652 else
1653 {
1654 current_inferior = NULL;
1655 if (debug_threads)
1656 fprintf (stderr, "Current inferior is now <NULL>\n");
1657 }
89be2091
DJ
1658
1659 /* If we were waiting for this particular child to do something...
1660 well, it did something. */
bd99dc85 1661 if (requested_child != NULL)
d50171e4
PA
1662 {
1663 int lwpid = lwpid_of (event_child);
1664
1665 /* Cancel the step-over operation --- the thread that
1666 started it is gone. */
1667 if (finish_step_over (event_child))
7984d532 1668 unstop_all_lwps (1, event_child);
d50171e4
PA
1669 delete_lwp (event_child);
1670 return lwpid;
1671 }
1672
1673 delete_lwp (event_child);
89be2091
DJ
1674
1675 /* Wait for a more interesting event. */
1676 continue;
1677 }
1678
a6dbe5df
PA
1679 if (event_child->must_set_ptrace_flags)
1680 {
1e7fc18c 1681 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1682 event_child->must_set_ptrace_flags = 0;
1683 }
1684
bd99dc85
PA
1685 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1686 && *wstat >> 16 != 0)
24a09b5f 1687 {
bd99dc85 1688 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1689 continue;
1690 }
1691
d50171e4
PA
1692 if (WIFSTOPPED (*wstat)
1693 && WSTOPSIG (*wstat) == SIGSTOP
1694 && event_child->stop_expected)
1695 {
1696 int should_stop;
1697
1698 if (debug_threads)
1699 fprintf (stderr, "Expected stop.\n");
1700 event_child->stop_expected = 0;
1701
8336d594 1702 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1703 || stopping_threads);
1704
1705 if (!should_stop)
1706 {
1707 linux_resume_one_lwp (event_child,
1708 event_child->stepping, 0, NULL);
1709 continue;
1710 }
1711 }
1712
bd99dc85 1713 return lwpid_of (event_child);
611cb4a5 1714 }
0d62e5e8 1715
611cb4a5
DJ
1716 /* NOTREACHED */
1717 return 0;
1718}
1719
95954743
PA
1720static int
1721linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1722{
1723 ptid_t wait_ptid;
1724
1725 if (ptid_is_pid (ptid))
1726 {
1727 /* A request to wait for a specific tgid. This is not possible
1728 with waitpid, so instead, we wait for any child, and leave
1729 children we're not interested in right now with a pending
1730 status to report later. */
1731 wait_ptid = minus_one_ptid;
1732 }
1733 else
1734 wait_ptid = ptid;
1735
1736 while (1)
1737 {
1738 int event_pid;
1739
1740 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1741
1742 if (event_pid > 0
1743 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1744 {
493e2a69
MS
1745 struct lwp_info *event_child
1746 = find_lwp_pid (pid_to_ptid (event_pid));
95954743
PA
1747
1748 if (! WIFSTOPPED (*wstat))
1749 mark_lwp_dead (event_child, *wstat);
1750 else
1751 {
1752 event_child->status_pending_p = 1;
1753 event_child->status_pending = *wstat;
1754 }
1755 }
1756 else
1757 return event_pid;
1758 }
1759}
1760
6bf5e0ba
PA
1761
1762/* Count the LWP's that have had events. */
1763
1764static int
1765count_events_callback (struct inferior_list_entry *entry, void *data)
1766{
1767 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1768 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1769 int *count = data;
1770
1771 gdb_assert (count != NULL);
1772
1773 /* Count only resumed LWPs that have a SIGTRAP event pending that
1774 should be reported to GDB. */
8336d594
PA
1775 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1776 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1777 && lp->status_pending_p
1778 && WIFSTOPPED (lp->status_pending)
1779 && WSTOPSIG (lp->status_pending) == SIGTRAP
1780 && !breakpoint_inserted_here (lp->stop_pc))
1781 (*count)++;
1782
1783 return 0;
1784}
1785
1786/* Select the LWP (if any) that is currently being single-stepped. */
1787
1788static int
1789select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1790{
1791 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1792 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1793
8336d594
PA
1794 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1795 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1796 && lp->status_pending_p)
1797 return 1;
1798 else
1799 return 0;
1800}
1801
1802/* Select the Nth LWP that has had a SIGTRAP event that should be
1803 reported to GDB. */
1804
1805static int
1806select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1807{
1808 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1809 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1810 int *selector = data;
1811
1812 gdb_assert (selector != NULL);
1813
1814 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1815 if (thread->last_resume_kind != resume_stop
1816 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1817 && lp->status_pending_p
1818 && WIFSTOPPED (lp->status_pending)
1819 && WSTOPSIG (lp->status_pending) == SIGTRAP
1820 && !breakpoint_inserted_here (lp->stop_pc))
1821 if ((*selector)-- == 0)
1822 return 1;
1823
1824 return 0;
1825}
1826
1827static int
1828cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1829{
1830 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1831 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1832 struct lwp_info *event_lp = data;
1833
1834 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1835 if (lp == event_lp)
1836 return 0;
1837
1838 /* If a LWP other than the LWP that we're reporting an event for has
1839 hit a GDB breakpoint (as opposed to some random trap signal),
1840 then just arrange for it to hit it again later. We don't keep
1841 the SIGTRAP status and don't forward the SIGTRAP signal to the
1842 LWP. We will handle the current event, eventually we will resume
1843 all LWPs, and this one will get its breakpoint trap again.
1844
1845 If we do not do this, then we run the risk that the user will
1846 delete or disable the breakpoint, but the LWP will have already
1847 tripped on it. */
1848
8336d594
PA
1849 if (thread->last_resume_kind != resume_stop
1850 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1851 && lp->status_pending_p
1852 && WIFSTOPPED (lp->status_pending)
1853 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1854 && !lp->stepping
1855 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1856 && cancel_breakpoint (lp))
1857 /* Throw away the SIGTRAP. */
1858 lp->status_pending_p = 0;
1859
1860 return 0;
1861}
1862
7984d532
PA
1863static void
1864linux_cancel_breakpoints (void)
1865{
1866 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1867}
1868
6bf5e0ba
PA
1869/* Select one LWP out of those that have events pending. */
1870
1871static void
1872select_event_lwp (struct lwp_info **orig_lp)
1873{
1874 int num_events = 0;
1875 int random_selector;
1876 struct lwp_info *event_lp;
1877
1878 /* Give preference to any LWP that is being single-stepped. */
1879 event_lp
1880 = (struct lwp_info *) find_inferior (&all_lwps,
1881 select_singlestep_lwp_callback, NULL);
1882 if (event_lp != NULL)
1883 {
1884 if (debug_threads)
1885 fprintf (stderr,
1886 "SEL: Select single-step %s\n",
1887 target_pid_to_str (ptid_of (event_lp)));
1888 }
1889 else
1890 {
1891 /* No single-stepping LWP. Select one at random, out of those
1892 which have had SIGTRAP events. */
1893
1894 /* First see how many SIGTRAP events we have. */
1895 find_inferior (&all_lwps, count_events_callback, &num_events);
1896
1897 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1898 random_selector = (int)
1899 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1900
1901 if (debug_threads && num_events > 1)
1902 fprintf (stderr,
1903 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1904 num_events, random_selector);
1905
1906 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1907 select_event_lwp_callback,
1908 &random_selector);
1909 }
1910
1911 if (event_lp != NULL)
1912 {
1913 /* Switch the event LWP. */
1914 *orig_lp = event_lp;
1915 }
1916}
1917
7984d532
PA
1918/* Decrement the suspend count of an LWP. */
1919
1920static int
1921unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1922{
1923 struct lwp_info *lwp = (struct lwp_info *) entry;
1924
1925 /* Ignore EXCEPT. */
1926 if (lwp == except)
1927 return 0;
1928
1929 lwp->suspended--;
1930
1931 gdb_assert (lwp->suspended >= 0);
1932 return 0;
1933}
1934
1935/* Decrement the suspend count of all LWPs, except EXCEPT, if non
1936 NULL. */
1937
1938static void
1939unsuspend_all_lwps (struct lwp_info *except)
1940{
1941 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1942}
1943
fa593d66
PA
1944static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1945static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1946 void *data);
1947static int lwp_running (struct inferior_list_entry *entry, void *data);
1948static ptid_t linux_wait_1 (ptid_t ptid,
1949 struct target_waitstatus *ourstatus,
1950 int target_options);
1951
1952/* Stabilize threads (move out of jump pads).
1953
1954 If a thread is midway collecting a fast tracepoint, we need to
1955 finish the collection and move it out of the jump pad before
1956 reporting the signal.
1957
1958 This avoids recursion while collecting (when a signal arrives
1959 midway, and the signal handler itself collects), which would trash
1960 the trace buffer. In case the user set a breakpoint in a signal
1961 handler, this avoids the backtrace showing the jump pad, etc..
1962 Most importantly, there are certain things we can't do safely if
1963 threads are stopped in a jump pad (or in its callee's). For
1964 example:
1965
1966 - starting a new trace run. A thread still collecting the
1967 previous run, could trash the trace buffer when resumed. The trace
1968 buffer control structures would have been reset but the thread had
1969 no way to tell. The thread could even midway memcpy'ing to the
1970 buffer, which would mean that when resumed, it would clobber the
1971 trace buffer that had been set for a new run.
1972
1973 - we can't rewrite/reuse the jump pads for new tracepoints
1974 safely. Say you do tstart while a thread is stopped midway while
1975 collecting. When the thread is later resumed, it finishes the
1976 collection, and returns to the jump pad, to execute the original
1977 instruction that was under the tracepoint jump at the time the
1978 older run had been started. If the jump pad had been rewritten
1979 since for something else in the new run, the thread would now
1980 execute the wrong / random instructions. */
1981
static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  /* If any LWP is stuck in a jump pad in a way we must report to GDB
     (e.g., the user set a breakpoint inside the pad), we cannot
     stabilize at all; bail out early.  */
  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
					 stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
      return;
    }

  /* linux_wait_1 below changes current_inferior; remember it so we
     can restore it when done.  */
  save_inferior = current_inferior;

  /* Tell the rest of the backend we are stabilizing, so events are
     handled accordingly (see the stabilizing_threads checks in
     linux_wait_1 and linux_resume_one_lwp).  */
  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_inferior);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until we explicitly unsuspend everything below.  */
	  lwp->suspended++;

	  /* A real signal (or an explicit stop request) arrived while
	     stabilizing; defer it so it is (re-)delivered once the
	     thread is resumed for real.  */
	  if (ourstatus.value.sig != TARGET_SIGNAL_0
	      || current_inferior->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspend counts taken in the loop above.  */
  find_inferior (&all_lwps, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_inferior = save_inferior;

  if (debug_threads)
    {
      /* Sanity check: nothing should be stuck in a jump pad now.  */
      lwp_stuck
	= (struct lwp_info *) find_inferior (&all_lwps,
					     stuck_in_jump_pad_callback, NULL);
      if (lwp_stuck != NULL)
	fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
    }
}
2050
0d62e5e8 2051/* Wait for process, returns status. */
da6d8c04 2052
95954743
PA
/* Wait for an event from the inferior(s) matching PTID, handle all
   gdbserver-internal events (internal breakpoints, step-over dances,
   tracepoint collection, deferred signals), and fill OURSTATUS with
   the first event GDB should hear about.  Returns the ptid of the
   thread the event is for, or null_ptid if TARGET_WNOHANG was given
   and nothing was pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  bp_explains_trap = 0;
  trace_event = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, only events from the stepping
     LWP are interesting; wait for it synchronously.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild exited with retcode = %x \n",
			 WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr,
			 "\nChild terminated with signal = %x \n",
			 WTERMSIG (w));

	    }

	  return ptid_of (event_child);
	}
    }
  else
    {
      /* A non-last thread died; nothing to report yet, keep waiting.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "Got signal %d for LWP %ld.  Check if we need "
		 "to defer or adjust it.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Allow debugging the jump pad itself.  */
      if (current_inferior->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred (in jump pad)\n",
		     WSTOPSIG (w), lwpid_of (event_child));

	  linux_resume_one_lwp (event_child, 0, 0, NULL);
	  goto retry;
	}
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
	fprintf (stderr, "\
LWP %ld was trying to move out of the jump pad (%d). \
Check if we're already there.\n",
		 lwpid_of (event_child),
		 event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      /* Values of collecting_fast_tracepoint other than 1 mean the
	 LWP is no longer between jump-pad entry and the
	 exit-jump-pad breakpoint.  */
      if (event_child->collecting_fast_tracepoint != 1)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "No longer need exit-jump-pad bkpt; removing it."
			 "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);
	      cancel_breakpoints ();

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint == 0)
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "fast tracepoint finished "
		     "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		fprintf (stderr, "dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		fprintf (stderr, "no deferred signals.\n");

	      /* While stabilizing, a clean stop is all the caller
		 (linux_stabilize_threads) needs to hear about.  */
	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = TARGET_SIGNAL_0;
		  return ptid_of (event_child);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Re-deliver the signal with its original siginfo, if we can
	 still fetch it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     not support Z0 breakpoints.  If we do, we'll be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished
		       && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop && !stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);

      /* Stabilize threads (move out of jump pads).  */
      stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return ptid_of (event_child);
}
2508
2509/* Get rid of any pending event in the pipe. */
2510static void
2511async_file_flush (void)
2512{
2513 int ret;
2514 char buf;
2515
2516 do
2517 ret = read (linux_event_pipe[0], &buf, 1);
2518 while (ret >= 0 || (ret == -1 && errno == EINTR));
2519}
2520
2521/* Put something in the pipe, so the event loop wakes up. */
2522static void
2523async_file_mark (void)
2524{
2525 int ret;
2526
2527 async_file_flush ();
2528
2529 do
2530 ret = write (linux_event_pipe[1], "+", 1);
2531 while (ret == 0 || (ret == -1 && errno == EINTR));
2532
2533 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2534 be awakened anyway. */
2535}
2536
95954743
PA
2537static ptid_t
2538linux_wait (ptid_t ptid,
2539 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 2540{
95954743 2541 ptid_t event_ptid;
bd99dc85
PA
2542
2543 if (debug_threads)
95954743 2544 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
2545
2546 /* Flush the async file first. */
2547 if (target_is_async_p ())
2548 async_file_flush ();
2549
95954743 2550 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
2551
2552 /* If at least one stop was reported, there may be more. A single
2553 SIGCHLD can signal more than one child stop. */
2554 if (target_is_async_p ()
2555 && (target_options & TARGET_WNOHANG) != 0
95954743 2556 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
2557 async_file_mark ();
2558
2559 return event_ptid;
da6d8c04
DJ
2560}
2561
/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If
     tkill fails with ENOSYS, the kernel predates it; remember that
     and fall back to kill from then on.  */

#ifdef __NR_tkill
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;

      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
2589
964e4306
PA
/* Request that LWP stop, by queueing it a SIGSTOP (no-op if one is
   already pending).  Non-static: presumably the target-independent
   side calls this -- the callers are outside this view.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2595
0d62e5e8 2596static void
02fc4de7 2597send_sigstop (struct lwp_info *lwp)
0d62e5e8 2598{
bd99dc85 2599 int pid;
0d62e5e8 2600
bd99dc85
PA
2601 pid = lwpid_of (lwp);
2602
0d62e5e8
DJ
2603 /* If we already have a pending stop signal for this process, don't
2604 send another. */
54a0b537 2605 if (lwp->stop_expected)
0d62e5e8 2606 {
ae13219e 2607 if (debug_threads)
bd99dc85 2608 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2609
0d62e5e8
DJ
2610 return;
2611 }
2612
2613 if (debug_threads)
bd99dc85 2614 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2615
d50171e4 2616 lwp->stop_expected = 1;
bd99dc85 2617 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2618}
2619
7984d532
PA
2620static int
2621send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2622{
2623 struct lwp_info *lwp = (struct lwp_info *) entry;
2624
7984d532
PA
2625 /* Ignore EXCEPT. */
2626 if (lwp == except)
2627 return 0;
2628
02fc4de7 2629 if (lwp->stopped)
7984d532 2630 return 0;
02fc4de7
PA
2631
2632 send_sigstop (lwp);
7984d532
PA
2633 return 0;
2634}
2635
2636/* Increment the suspend count of an LWP, and stop it, if not stopped
2637 yet. */
2638static int
2639suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2640 void *except)
2641{
2642 struct lwp_info *lwp = (struct lwp_info *) entry;
2643
2644 /* Ignore EXCEPT. */
2645 if (lwp == except)
2646 return 0;
2647
2648 lwp->suspended++;
2649
2650 return send_sigstop_callback (entry, except);
02fc4de7
PA
2651}
2652
95954743
PA
2653static void
2654mark_lwp_dead (struct lwp_info *lwp, int wstat)
2655{
2656 /* It's dead, really. */
2657 lwp->dead = 1;
2658
2659 /* Store the exit status for later. */
2660 lwp->status_pending_p = 1;
2661 lwp->status_pending = wstat;
2662
95954743
PA
2663 /* Prevent trying to stop it. */
2664 lwp->stopped = 1;
2665
2666 /* No further stops are expected from a dead lwp. */
2667 lwp->stop_expected = 0;
2668}
2669
0d62e5e8
DJ
/* for_each_inferior callback used by stop_all_lwps: pull one wait
   event from LWP ENTRY until it reports a stop.  A non-SIGSTOP stop
   is stashed as a pending status for later reporting; an exit marks
   the LWP dead.  Takes care to leave current_inferior valid.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* linux_wait_for_event switches current_inferior; remember what to
     restore (by id, since the thread object may die under us).  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Not the stop we asked for -- keep it pending so it gets
	     reported once things are resumed.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look the LWP up: linux_wait_for_event may have removed it
	 from the tables unless it was the last thread.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2760
fa593d66
PA
2761/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2762 move it out, because we need to report the stop event to GDB. For
2763 example, if the user puts a breakpoint in the jump pad, it's
2764 because she wants to debug it. */
2765
2766static int
2767stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2768{
2769 struct lwp_info *lwp = (struct lwp_info *) entry;
2770 struct thread_info *thread = get_lwp_thread (lwp);
2771
2772 gdb_assert (lwp->suspended == 0);
2773 gdb_assert (lwp->stopped);
2774
2775 /* Allow debugging the jump pad, gdb_collect, etc.. */
2776 return (supports_fast_tracepoints ()
2777 && in_process_agent_loaded ()
2778 && (gdb_breakpoint_here (lwp->stop_pc)
2779 || lwp->stopped_by_watchpoint
2780 || thread->last_resume_kind == resume_step)
2781 && linux_fast_tracepoint_collecting (lwp, NULL));
2782}
2783
/* for_each_inferior callback used by linux_stabilize_threads: if LWP
   ENTRY is stopped inside a fast-tracepoint jump pad and GDB does not
   need to see this stop, defer its pending signal and set it running
   again so it can exit the pad; otherwise pin it by bumping its
   suspend count.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	fprintf (stderr,
		 "LWP %ld needs stabilizing (in jump pad)\n",
		 lwpid_of (lwp));

      if (wstat)
	{
	  /* Clear the pending status BEFORE enqueueing: the deferred
	     queue now owns this signal.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred "
		     "(in jump pad)\n",
		     WSTOPSIG (*wstat), lwpid_of (lwp));
	}

      /* Resume it (no step, no signal) so it runs out of the pad.  */
      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    /* GDB must see this stop; keep the LWP suspended where it is.  */
    lwp->suspended++;
}
2824
2825static int
2826lwp_running (struct inferior_list_entry *entry, void *data)
2827{
2828 struct lwp_info *lwp = (struct lwp_info *) entry;
2829
2830 if (lwp->dead)
2831 return 0;
2832 if (lwp->stopped)
2833 return 0;
2834 return 1;
2835}
2836
7984d532
PA
2837/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2838 If SUSPEND, then also increase the suspend count of every LWP,
2839 except EXCEPT. */
2840
0d62e5e8 2841static void
7984d532 2842stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
2843{
2844 stopping_threads = 1;
7984d532
PA
2845
2846 if (suspend)
2847 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2848 else
2849 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 2850 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2851 stopping_threads = 0;
2852}
2853
da6d8c04
DJ
2854/* Resume execution of the inferior process.
2855 If STEP is nonzero, single-step it.
2856 If SIGNAL is nonzero, give it that signal. */
2857
ce3a066d 2858static void
2acc282a 2859linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2860 int step, int signal, siginfo_t *info)
da6d8c04 2861{
0d62e5e8 2862 struct thread_info *saved_inferior;
fa593d66 2863 int fast_tp_collecting;
0d62e5e8 2864
54a0b537 2865 if (lwp->stopped == 0)
0d62e5e8
DJ
2866 return;
2867
fa593d66
PA
2868 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2869
2870 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2871
219f2f23
PA
2872 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2873 user used the "jump" command, or "set $pc = foo"). */
2874 if (lwp->stop_pc != get_pc (lwp))
2875 {
2876 /* Collecting 'while-stepping' actions doesn't make sense
2877 anymore. */
2878 release_while_stepping_state_list (get_lwp_thread (lwp));
2879 }
2880
0d62e5e8
DJ
2881 /* If we have pending signals or status, and a new signal, enqueue the
2882 signal. Also enqueue the signal if we are waiting to reinsert a
2883 breakpoint; it will be picked up again below. */
2884 if (signal != 0
fa593d66
PA
2885 && (lwp->status_pending_p
2886 || lwp->pending_signals != NULL
2887 || lwp->bp_reinsert != 0
2888 || fast_tp_collecting))
0d62e5e8
DJ
2889 {
2890 struct pending_signals *p_sig;
bca929d3 2891 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2892 p_sig->prev = lwp->pending_signals;
0d62e5e8 2893 p_sig->signal = signal;
32ca6d61
DJ
2894 if (info == NULL)
2895 memset (&p_sig->info, 0, sizeof (siginfo_t));
2896 else
2897 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2898 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2899 }
2900
d50171e4
PA
2901 if (lwp->status_pending_p)
2902 {
2903 if (debug_threads)
2904 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2905 " has pending status\n",
2906 lwpid_of (lwp), step ? "step" : "continue", signal,
2907 lwp->stop_expected ? "expected" : "not expected");
2908 return;
2909 }
0d62e5e8
DJ
2910
2911 saved_inferior = current_inferior;
54a0b537 2912 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2913
2914 if (debug_threads)
1b3f6016 2915 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2916 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2917 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2918
2919 /* This bit needs some thinking about. If we get a signal that
2920 we must report while a single-step reinsert is still pending,
2921 we often end up resuming the thread. It might be better to
2922 (ew) allow a stack of pending events; then we could be sure that
2923 the reinsert happened right away and not lose any signals.
2924
2925 Making this stack would also shrink the window in which breakpoints are
54a0b537 2926 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2927 complete correctness, so it won't solve that problem. It may be
2928 worthwhile just to solve this one, however. */
54a0b537 2929 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2930 {
2931 if (debug_threads)
d50171e4
PA
2932 fprintf (stderr, " pending reinsert at 0x%s\n",
2933 paddress (lwp->bp_reinsert));
2934
2935 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2936 {
fa593d66
PA
2937 if (fast_tp_collecting == 0)
2938 {
2939 if (step == 0)
2940 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2941 if (lwp->suspended)
2942 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2943 lwp->suspended);
2944 }
d50171e4
PA
2945
2946 step = 1;
2947 }
0d62e5e8
DJ
2948
2949 /* Postpone any pending signal. It was enqueued above. */
2950 signal = 0;
2951 }
2952
fa593d66
PA
2953 if (fast_tp_collecting == 1)
2954 {
2955 if (debug_threads)
2956 fprintf (stderr, "\
2957lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2958 lwpid_of (lwp));
2959
2960 /* Postpone any pending signal. It was enqueued above. */
2961 signal = 0;
2962 }
2963 else if (fast_tp_collecting == 2)
2964 {
2965 if (debug_threads)
2966 fprintf (stderr, "\
2967lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2968 lwpid_of (lwp));
2969
2970 if (can_hardware_single_step ())
2971 step = 1;
2972 else
2973 fatal ("moving out of jump pad single-stepping"
2974 " not implemented on this target");
2975
2976 /* Postpone any pending signal. It was enqueued above. */
2977 signal = 0;
2978 }
2979
219f2f23
PA
2980 /* If we have while-stepping actions in this thread set it stepping.
2981 If we have a signal to deliver, it may or may not be set to
2982 SIG_IGN, we don't know. Assume so, and allow collecting
2983 while-stepping into a signal handler. A possible smart thing to
2984 do would be to set an internal breakpoint at the signal return
2985 address, continue, and carry on catching this while-stepping
2986 action only when that breakpoint is hit. A future
2987 enhancement. */
2988 if (get_lwp_thread (lwp)->while_stepping != NULL
2989 && can_hardware_single_step ())
2990 {
2991 if (debug_threads)
2992 fprintf (stderr,
2993 "lwp %ld has a while-stepping action -> forcing step.\n",
2994 lwpid_of (lwp));
2995 step = 1;
2996 }
2997
aa691b87 2998 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2999 {
442ea881
PA
3000 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3001 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 3002 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
3003 }
3004
fa593d66
PA
3005 /* If we have pending signals, consume one unless we are trying to
3006 reinsert a breakpoint or we're trying to finish a fast tracepoint
3007 collect. */
3008 if (lwp->pending_signals != NULL
3009 && lwp->bp_reinsert == 0
3010 && fast_tp_collecting == 0)
0d62e5e8
DJ
3011 {
3012 struct pending_signals **p_sig;
3013
54a0b537 3014 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3015 while ((*p_sig)->prev != NULL)
3016 p_sig = &(*p_sig)->prev;
3017
3018 signal = (*p_sig)->signal;
32ca6d61 3019 if ((*p_sig)->info.si_signo != 0)
bd99dc85 3020 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 3021
0d62e5e8
DJ
3022 free (*p_sig);
3023 *p_sig = NULL;
3024 }
3025
aa5ca48f
DE
3026 if (the_low_target.prepare_to_resume != NULL)
3027 the_low_target.prepare_to_resume (lwp);
3028
0d62e5e8 3029 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 3030 get_lwp_thread (lwp));
da6d8c04 3031 errno = 0;
54a0b537 3032 lwp->stopped = 0;
c3adc08c 3033 lwp->stopped_by_watchpoint = 0;
54a0b537 3034 lwp->stepping = step;
14ce3065
DE
3035 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3036 /* Coerce to a uintptr_t first to avoid potential gcc warning
3037 of coercing an 8 byte integer to a 4 byte pointer. */
3038 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
3039
3040 current_inferior = saved_inferior;
da6d8c04 3041 if (errno)
3221518c
UW
3042 {
3043 /* ESRCH from ptrace either means that the thread was already
3044 running (an error) or that it is gone (a race condition). If
3045 it's gone, we will get a notification the next time we wait,
3046 so we can ignore the error. We could differentiate these
3047 two, but it's tricky without waiting; the thread still exists
3048 as a zombie, so sending it signal 0 would succeed. So just
3049 ignore ESRCH. */
3050 if (errno == ESRCH)
3051 return;
3052
3053 perror_with_name ("ptrace");
3054 }
da6d8c04
DJ
3055}
3056
2bd7c093
PA
3057struct thread_resume_array
3058{
3059 struct thread_resume *resume;
3060 size_t n;
3061};
64386c31
DJ
3062
3063/* This function is called once per thread. We look up the thread
5544ad89
DJ
3064 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3065 resume request.
3066
3067 This algorithm is O(threads * resume elements), but resume elements
3068 is small (and will remain small at least until GDB supports thread
3069 suspension). */
2bd7c093
PA
3070static int
3071linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3072{
54a0b537 3073 struct lwp_info *lwp;
64386c31 3074 struct thread_info *thread;
5544ad89 3075 int ndx;
2bd7c093 3076 struct thread_resume_array *r;
64386c31
DJ
3077
3078 thread = (struct thread_info *) entry;
54a0b537 3079 lwp = get_thread_lwp (thread);
2bd7c093 3080 r = arg;
64386c31 3081
2bd7c093 3082 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3083 {
3084 ptid_t ptid = r->resume[ndx].thread;
3085 if (ptid_equal (ptid, minus_one_ptid)
3086 || ptid_equal (ptid, entry->id)
3087 || (ptid_is_pid (ptid)
3088 && (ptid_get_pid (ptid) == pid_of (lwp)))
3089 || (ptid_get_lwp (ptid) == -1
3090 && (ptid_get_pid (ptid) == pid_of (lwp))))
3091 {
d50171e4 3092 if (r->resume[ndx].kind == resume_stop
8336d594 3093 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3094 {
3095 if (debug_threads)
3096 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3097 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3098 ? "stopped"
3099 : "stopping",
3100 lwpid_of (lwp));
3101
3102 continue;
3103 }
3104
95954743 3105 lwp->resume = &r->resume[ndx];
8336d594 3106 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3107
3108 /* If we had a deferred signal to report, dequeue one now.
3109 This can happen if LWP gets more than one signal while
3110 trying to get out of a jump pad. */
3111 if (lwp->stopped
3112 && !lwp->status_pending_p
3113 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3114 {
3115 lwp->status_pending_p = 1;
3116
3117 if (debug_threads)
3118 fprintf (stderr,
3119 "Dequeueing deferred signal %d for LWP %ld, "
3120 "leaving status pending.\n",
3121 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3122 }
3123
95954743
PA
3124 return 0;
3125 }
3126 }
2bd7c093
PA
3127
3128 /* No resume action for this thread. */
3129 lwp->resume = NULL;
64386c31 3130
2bd7c093 3131 return 0;
5544ad89
DJ
3132}
3133
5544ad89 3134
bd99dc85
PA
3135/* Set *FLAG_P if this lwp has an interesting status pending. */
3136static int
3137resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3138{
bd99dc85 3139 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3140
bd99dc85
PA
3141 /* LWPs which will not be resumed are not interesting, because
3142 we might not wait for them next time through linux_wait. */
2bd7c093 3143 if (lwp->resume == NULL)
bd99dc85 3144 return 0;
64386c31 3145
bd99dc85 3146 if (lwp->status_pending_p)
d50171e4
PA
3147 * (int *) flag_p = 1;
3148
3149 return 0;
3150}
3151
3152/* Return 1 if this lwp that GDB wants running is stopped at an
3153 internal breakpoint that we need to step over. It assumes that any
3154 required STOP_PC adjustment has already been propagated to the
3155 inferior's regcache. */
3156
3157static int
3158need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3159{
3160 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3161 struct thread_info *thread;
d50171e4
PA
3162 struct thread_info *saved_inferior;
3163 CORE_ADDR pc;
3164
3165 /* LWPs which will not be resumed are not interesting, because we
3166 might not wait for them next time through linux_wait. */
3167
3168 if (!lwp->stopped)
3169 {
3170 if (debug_threads)
3171 fprintf (stderr,
3172 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3173 lwpid_of (lwp));
3174 return 0;
3175 }
3176
8336d594
PA
3177 thread = get_lwp_thread (lwp);
3178
3179 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3180 {
3181 if (debug_threads)
3182 fprintf (stderr,
3183 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3184 lwpid_of (lwp));
3185 return 0;
3186 }
3187
7984d532
PA
3188 gdb_assert (lwp->suspended >= 0);
3189
3190 if (lwp->suspended)
3191 {
3192 if (debug_threads)
3193 fprintf (stderr,
3194 "Need step over [LWP %ld]? Ignoring, suspended\n",
3195 lwpid_of (lwp));
3196 return 0;
3197 }
3198
d50171e4
PA
3199 if (!lwp->need_step_over)
3200 {
3201 if (debug_threads)
3202 fprintf (stderr,
3203 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3204 }
5544ad89 3205
bd99dc85 3206 if (lwp->status_pending_p)
d50171e4
PA
3207 {
3208 if (debug_threads)
3209 fprintf (stderr,
3210 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3211 lwpid_of (lwp));
3212 return 0;
3213 }
3214
3215 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3216 or we have. */
3217 pc = get_pc (lwp);
3218
3219 /* If the PC has changed since we stopped, then don't do anything,
3220 and let the breakpoint/tracepoint be hit. This happens if, for
3221 instance, GDB handled the decr_pc_after_break subtraction itself,
3222 GDB is OOL stepping this thread, or the user has issued a "jump"
3223 command, or poked thread's registers herself. */
3224 if (pc != lwp->stop_pc)
3225 {
3226 if (debug_threads)
3227 fprintf (stderr,
3228 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3229 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3230 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3231
3232 lwp->need_step_over = 0;
3233 return 0;
3234 }
3235
3236 saved_inferior = current_inferior;
8336d594 3237 current_inferior = thread;
d50171e4 3238
8b07ae33 3239 /* We can only step over breakpoints we know about. */
fa593d66 3240 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3241 {
8b07ae33
PA
3242 /* Don't step over a breakpoint that GDB expects to hit
3243 though. */
3244 if (gdb_breakpoint_here (pc))
3245 {
3246 if (debug_threads)
3247 fprintf (stderr,
3248 "Need step over [LWP %ld]? yes, but found"
3249 " GDB breakpoint at 0x%s; skipping step over\n",
3250 lwpid_of (lwp), paddress (pc));
d50171e4 3251
8b07ae33
PA
3252 current_inferior = saved_inferior;
3253 return 0;
3254 }
3255 else
3256 {
3257 if (debug_threads)
3258 fprintf (stderr,
493e2a69
MS
3259 "Need step over [LWP %ld]? yes, "
3260 "found breakpoint at 0x%s\n",
8b07ae33 3261 lwpid_of (lwp), paddress (pc));
d50171e4 3262
8b07ae33
PA
3263 /* We've found an lwp that needs stepping over --- return 1 so
3264 that find_inferior stops looking. */
3265 current_inferior = saved_inferior;
3266
3267 /* If the step over is cancelled, this is set again. */
3268 lwp->need_step_over = 0;
3269 return 1;
3270 }
d50171e4
PA
3271 }
3272
3273 current_inferior = saved_inferior;
3274
3275 if (debug_threads)
3276 fprintf (stderr,
3277 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3278 lwpid_of (lwp), paddress (pc));
c6ecbae5 3279
bd99dc85 3280 return 0;
5544ad89
DJ
3281}
3282
d50171e4
PA
3283/* Start a step-over operation on LWP. When LWP stopped at a
3284 breakpoint, to make progress, we need to remove the breakpoint out
3285 of the way. If we let other threads run while we do that, they may
3286 pass by the breakpoint location and miss hitting it. To avoid
3287 that, a step-over momentarily stops all threads while LWP is
3288 single-stepped while the breakpoint is temporarily uninserted from
3289 the inferior. When the single-step finishes, we reinsert the
3290 breakpoint, and let all threads that are supposed to be running,
3291 run again.
3292
3293 On targets that don't support hardware single-step, we don't
3294 currently support full software single-stepping. Instead, we only
3295 support stepping over the thread event breakpoint, by asking the
3296 low target where to place a reinsert breakpoint. Since this
3297 routine assumes the breakpoint being stepped over is a thread event
3298 breakpoint, it usually assumes the return address of the current
3299 function is a good enough place to set the reinsert breakpoint. */
3300
3301static int
3302start_step_over (struct lwp_info *lwp)
3303{
3304 struct thread_info *saved_inferior;
3305 CORE_ADDR pc;
3306 int step;
3307
3308 if (debug_threads)
3309 fprintf (stderr,
3310 "Starting step-over on LWP %ld. Stopping all threads\n",
3311 lwpid_of (lwp));
3312
7984d532
PA
3313 stop_all_lwps (1, lwp);
3314 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3315
3316 if (debug_threads)
3317 fprintf (stderr, "Done stopping all threads for step-over.\n");
3318
3319 /* Note, we should always reach here with an already adjusted PC,
3320 either by GDB (if we're resuming due to GDB's request), or by our
3321 caller, if we just finished handling an internal breakpoint GDB
3322 shouldn't care about. */
3323 pc = get_pc (lwp);
3324
3325 saved_inferior = current_inferior;
3326 current_inferior = get_lwp_thread (lwp);
3327
3328 lwp->bp_reinsert = pc;
3329 uninsert_breakpoints_at (pc);
fa593d66 3330 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3331
3332 if (can_hardware_single_step ())
3333 {
3334 step = 1;
3335 }
3336 else
3337 {
3338 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3339 set_reinsert_breakpoint (raddr);
3340 step = 0;
3341 }
3342
3343 current_inferior = saved_inferior;
3344
3345 linux_resume_one_lwp (lwp, step, 0, NULL);
3346
3347 /* Require next event from this LWP. */
3348 step_over_bkpt = lwp->head.id;
3349 return 1;
3350}
3351
3352/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3353 start_step_over, if still there, and delete any reinsert
3354 breakpoints we've set, on non hardware single-step targets. */
3355
3356static int
3357finish_step_over (struct lwp_info *lwp)
3358{
3359 if (lwp->bp_reinsert != 0)
3360 {
3361 if (debug_threads)
3362 fprintf (stderr, "Finished step over.\n");
3363
3364 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3365 may be no breakpoint to reinsert there by now. */
3366 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 3367 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
3368
3369 lwp->bp_reinsert = 0;
3370
3371 /* Delete any software-single-step reinsert breakpoints. No
3372 longer needed. We don't have to worry about other threads
3373 hitting this trap, and later not being able to explain it,
3374 because we were stepping over a breakpoint, and we hold all
3375 threads but LWP stopped while doing that. */
3376 if (!can_hardware_single_step ())
3377 delete_reinsert_breakpoints ();
3378
3379 step_over_bkpt = null_ptid;
3380 return 1;
3381 }
3382 else
3383 return 0;
3384}
3385
5544ad89
DJ
3386/* This function is called once per thread. We check the thread's resume
3387 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 3388 stopped; and what signal, if any, it should be sent.
5544ad89 3389
bd99dc85
PA
3390 For threads which we aren't explicitly told otherwise, we preserve
3391 the stepping flag; this is used for stepping over gdbserver-placed
3392 breakpoints.
3393
3394 If pending_flags was set in any thread, we queue any needed
3395 signals, since we won't actually resume. We already have a pending
3396 event to report, so we don't need to preserve any step requests;
3397 they should be re-issued if necessary. */
3398
3399static int
3400linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 3401{
54a0b537 3402 struct lwp_info *lwp;
5544ad89 3403 struct thread_info *thread;
bd99dc85 3404 int step;
d50171e4
PA
3405 int leave_all_stopped = * (int *) arg;
3406 int leave_pending;
5544ad89
DJ
3407
3408 thread = (struct thread_info *) entry;
54a0b537 3409 lwp = get_thread_lwp (thread);
5544ad89 3410
2bd7c093 3411 if (lwp->resume == NULL)
bd99dc85 3412 return 0;
5544ad89 3413
bd99dc85 3414 if (lwp->resume->kind == resume_stop)
5544ad89 3415 {
bd99dc85 3416 if (debug_threads)
d50171e4 3417 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
3418
3419 if (!lwp->stopped)
3420 {
3421 if (debug_threads)
d50171e4 3422 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 3423
d50171e4
PA
3424 /* Stop the thread, and wait for the event asynchronously,
3425 through the event loop. */
02fc4de7 3426 send_sigstop (lwp);
bd99dc85
PA
3427 }
3428 else
3429 {
3430 if (debug_threads)
d50171e4
PA
3431 fprintf (stderr, "already stopped LWP %ld\n",
3432 lwpid_of (lwp));
3433
3434 /* The LWP may have been stopped in an internal event that
3435 was not meant to be notified back to GDB (e.g., gdbserver
3436 breakpoint), so we should be reporting a stop event in
3437 this case too. */
3438
3439 /* If the thread already has a pending SIGSTOP, this is a
3440 no-op. Otherwise, something later will presumably resume
3441 the thread and this will cause it to cancel any pending
3442 operation, due to last_resume_kind == resume_stop. If
3443 the thread already has a pending status to report, we
3444 will still report it the next time we wait - see
3445 status_pending_p_callback. */
1a981360
PA
3446
3447 /* If we already have a pending signal to report, then
3448 there's no need to queue a SIGSTOP, as this means we're
3449 midway through moving the LWP out of the jumppad, and we
3450 will report the pending signal as soon as that is
3451 finished. */
3452 if (lwp->pending_signals_to_report == NULL)
3453 send_sigstop (lwp);
bd99dc85 3454 }
32ca6d61 3455
bd99dc85
PA
3456 /* For stop requests, we're done. */
3457 lwp->resume = NULL;
fc7238bb 3458 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3459 return 0;
5544ad89
DJ
3460 }
3461
bd99dc85
PA
3462 /* If this thread which is about to be resumed has a pending status,
3463 then don't resume any threads - we can just report the pending
3464 status. Make sure to queue any signals that would otherwise be
3465 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
3466 thread has a pending status. If there's a thread that needs the
3467 step-over-breakpoint dance, then don't resume any other thread
3468 but that particular one. */
3469 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 3470
d50171e4 3471 if (!leave_pending)
bd99dc85
PA
3472 {
3473 if (debug_threads)
3474 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 3475
d50171e4 3476 step = (lwp->resume->kind == resume_step);
2acc282a 3477 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
3478 }
3479 else
3480 {
3481 if (debug_threads)
3482 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 3483
bd99dc85
PA
3484 /* If we have a new signal, enqueue the signal. */
3485 if (lwp->resume->sig != 0)
3486 {
3487 struct pending_signals *p_sig;
3488 p_sig = xmalloc (sizeof (*p_sig));
3489 p_sig->prev = lwp->pending_signals;
3490 p_sig->signal = lwp->resume->sig;
3491 memset (&p_sig->info, 0, sizeof (siginfo_t));
3492
3493 /* If this is the same signal we were previously stopped by,
3494 make sure to queue its siginfo. We can ignore the return
3495 value of ptrace; if it fails, we'll skip
3496 PTRACE_SETSIGINFO. */
3497 if (WIFSTOPPED (lwp->last_status)
3498 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3499 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3500
3501 lwp->pending_signals = p_sig;
3502 }
3503 }
5544ad89 3504
fc7238bb 3505 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3506 lwp->resume = NULL;
5544ad89 3507 return 0;
0d62e5e8
DJ
3508}
3509
3510static void
2bd7c093 3511linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 3512{
2bd7c093 3513 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
3514 struct lwp_info *need_step_over = NULL;
3515 int any_pending;
3516 int leave_all_stopped;
c6ecbae5 3517
2bd7c093 3518 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 3519
d50171e4
PA
3520 /* If there is a thread which would otherwise be resumed, which has
3521 a pending status, then don't resume any threads - we can just
3522 report the pending status. Make sure to queue any signals that
3523 would otherwise be sent. In non-stop mode, we'll apply this
3524 logic to each thread individually. We consume all pending events
3525 before considering to start a step-over (in all-stop). */
3526 any_pending = 0;
bd99dc85 3527 if (!non_stop)
d50171e4
PA
3528 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3529
3530 /* If there is a thread which would otherwise be resumed, which is
3531 stopped at a breakpoint that needs stepping over, then don't
3532 resume any threads - have it step over the breakpoint with all
3533 other threads stopped, then resume all threads again. Make sure
3534 to queue any signals that would otherwise be delivered or
3535 queued. */
3536 if (!any_pending && supports_breakpoints ())
3537 need_step_over
3538 = (struct lwp_info *) find_inferior (&all_lwps,
3539 need_step_over_p, NULL);
3540
3541 leave_all_stopped = (need_step_over != NULL || any_pending);
3542
3543 if (debug_threads)
3544 {
3545 if (need_step_over != NULL)
3546 fprintf (stderr, "Not resuming all, need step over\n");
3547 else if (any_pending)
3548 fprintf (stderr,
3549 "Not resuming, all-stop and found "
3550 "an LWP with pending status\n");
3551 else
3552 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3553 }
3554
3555 /* Even if we're leaving threads stopped, queue all signals we'd
3556 otherwise deliver. */
3557 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3558
3559 if (need_step_over)
3560 start_step_over (need_step_over);
3561}
3562
3563/* This function is called once per thread. We check the thread's
3564 last resume request, which will tell us whether to resume, step, or
3565 leave the thread stopped. Any signal the client requested to be
3566 delivered has already been enqueued at this point.
3567
3568 If any thread that GDB wants running is stopped at an internal
3569 breakpoint that needs stepping over, we start a step-over operation
3570 on that particular thread, and leave all others stopped. */
3571
7984d532
PA
3572static int
3573proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 3574{
7984d532 3575 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3576 struct thread_info *thread;
d50171e4
PA
3577 int step;
3578
7984d532
PA
3579 if (lwp == except)
3580 return 0;
d50171e4
PA
3581
3582 if (debug_threads)
3583 fprintf (stderr,
3584 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3585
3586 if (!lwp->stopped)
3587 {
3588 if (debug_threads)
3589 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
7984d532 3590 return 0;
d50171e4
PA
3591 }
3592
8336d594
PA
3593 thread = get_lwp_thread (lwp);
3594
02fc4de7
PA
3595 if (thread->last_resume_kind == resume_stop
3596 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
3597 {
3598 if (debug_threads)
02fc4de7
PA
3599 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3600 lwpid_of (lwp));
7984d532 3601 return 0;
d50171e4
PA
3602 }
3603
3604 if (lwp->status_pending_p)
3605 {
3606 if (debug_threads)
3607 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3608 lwpid_of (lwp));
7984d532 3609 return 0;
d50171e4
PA
3610 }
3611
7984d532
PA
3612 gdb_assert (lwp->suspended >= 0);
3613
d50171e4
PA
3614 if (lwp->suspended)
3615 {
3616 if (debug_threads)
3617 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
7984d532 3618 return 0;
d50171e4
PA
3619 }
3620
1a981360
PA
3621 if (thread->last_resume_kind == resume_stop
3622 && lwp->pending_signals_to_report == NULL
3623 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
3624 {
3625 /* We haven't reported this LWP as stopped yet (otherwise, the
3626 last_status.kind check above would catch it, and we wouldn't
3627 reach here. This LWP may have been momentarily paused by a
3628 stop_all_lwps call while handling for example, another LWP's
3629 step-over. In that case, the pending expected SIGSTOP signal
3630 that was queued at vCont;t handling time will have already
3631 been consumed by wait_for_sigstop, and so we need to requeue
3632 another one here. Note that if the LWP already has a SIGSTOP
3633 pending, this is a no-op. */
3634
3635 if (debug_threads)
3636 fprintf (stderr,
3637 "Client wants LWP %ld to stop. "
3638 "Making sure it has a SIGSTOP pending\n",
3639 lwpid_of (lwp));
3640
3641 send_sigstop (lwp);
3642 }
3643
8336d594 3644 step = thread->last_resume_kind == resume_step;
d50171e4 3645 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
3646 return 0;
3647}
3648
3649static int
3650unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3651{
3652 struct lwp_info *lwp = (struct lwp_info *) entry;
3653
3654 if (lwp == except)
3655 return 0;
3656
3657 lwp->suspended--;
3658 gdb_assert (lwp->suspended >= 0);
3659
3660 return proceed_one_lwp (entry, except);
d50171e4
PA
3661}
3662
3663/* When we finish a step-over, set threads running again. If there's
3664 another thread that may need a step-over, now's the time to start
3665 it. Eventually, we'll move all threads past their breakpoints. */
3666
3667static void
3668proceed_all_lwps (void)
3669{
3670 struct lwp_info *need_step_over;
3671
3672 /* If there is a thread which would otherwise be resumed, which is
3673 stopped at a breakpoint that needs stepping over, then don't
3674 resume any threads - have it step over the breakpoint with all
3675 other threads stopped, then resume all threads again. */
3676
3677 if (supports_breakpoints ())
3678 {
3679 need_step_over
3680 = (struct lwp_info *) find_inferior (&all_lwps,
3681 need_step_over_p, NULL);
3682
3683 if (need_step_over != NULL)
3684 {
3685 if (debug_threads)
3686 fprintf (stderr, "proceed_all_lwps: found "
3687 "thread %ld needing a step-over\n",
3688 lwpid_of (need_step_over));
3689
3690 start_step_over (need_step_over);
3691 return;
3692 }
3693 }
5544ad89 3694
d50171e4
PA
3695 if (debug_threads)
3696 fprintf (stderr, "Proceeding, no step-over needed\n");
3697
7984d532 3698 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3699}
3700
3701/* Stopped LWPs that the client wanted to be running, that don't have
3702 pending statuses, are set to run again, except for EXCEPT, if not
3703 NULL. This undoes a stop_all_lwps call. */
3704
3705static void
7984d532 3706unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3707{
5544ad89
DJ
3708 if (debug_threads)
3709 {
d50171e4
PA
3710 if (except)
3711 fprintf (stderr,
3712 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3713 else
d50171e4
PA
3714 fprintf (stderr,
3715 "unstopping all lwps\n");
5544ad89
DJ
3716 }
3717
7984d532
PA
3718 if (unsuspend)
3719 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3720 else
3721 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3722}
3723
3724#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
3725
3726int
0a30fbc4 3727register_addr (int regnum)
da6d8c04
DJ
3728{
3729 int addr;
3730
2ec06d2e 3731 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
3732 error ("Invalid register number %d.", regnum);
3733
2ec06d2e 3734 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
3735
3736 return addr;
3737}
3738
58caa3dc 3739/* Fetch one register. */
da6d8c04 3740static void
442ea881 3741fetch_register (struct regcache *regcache, int regno)
da6d8c04
DJ
3742{
3743 CORE_ADDR regaddr;
48d93c75 3744 int i, size;
0d62e5e8 3745 char *buf;
95954743 3746 int pid;
da6d8c04 3747
2ec06d2e 3748 if (regno >= the_low_target.num_regs)
0a30fbc4 3749 return;
2ec06d2e 3750 if ((*the_low_target.cannot_fetch_register) (regno))
0a30fbc4 3751 return;
da6d8c04 3752
0a30fbc4
DJ
3753 regaddr = register_addr (regno);
3754 if (regaddr == -1)
3755 return;
95954743 3756
1b3f6016 3757 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
50275556 3758 & -sizeof (PTRACE_XFER_TYPE));
48d93c75 3759 buf = alloca (size);
50275556
MR
3760
3761 pid = lwpid_of (get_thread_lwp (current_inferior));
48d93c75 3762 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04
DJ
3763 {
3764 errno = 0;
0d62e5e8 3765 *(PTRACE_XFER_TYPE *) (buf + i) =
14ce3065
DE
3766 ptrace (PTRACE_PEEKUSER, pid,
3767 /* Coerce to a uintptr_t first to avoid potential gcc warning
3768 of coercing an 8 byte integer to a 4 byte pointer. */
3769 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
da6d8c04
DJ
3770 regaddr += sizeof (PTRACE_XFER_TYPE);
3771 if (errno != 0)
f52cd8cd 3772 error ("reading register %d: %s", regno, strerror (errno));
da6d8c04 3773 }
ee1a7ae4
UW
3774
3775 if (the_low_target.supply_ptrace_register)
442ea881 3776 the_low_target.supply_ptrace_register (regcache, regno, buf);
5a1f5858 3777 else
442ea881 3778 supply_register (regcache, regno, buf);
da6d8c04
DJ
3779}
3780
7325beb4
MR
3781/* Store one register. */
3782static void
3783store_register (struct regcache *regcache, int regno)
3784{
3785 CORE_ADDR regaddr;
3786 int i, size;
3787 char *buf;
3788 int pid;
3789
3790 if (regno >= the_low_target.num_regs)
3791 return;
50275556 3792 if ((*the_low_target.cannot_store_register) (regno))
7325beb4
MR
3793 return;
3794
3795 regaddr = register_addr (regno);
3796 if (regaddr == -1)
3797 return;
50275556
MR
3798
3799 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3800 & -sizeof (PTRACE_XFER_TYPE));
7325beb4
MR
3801 buf = alloca (size);
3802 memset (buf, 0, size);
3803
3804 if (the_low_target.collect_ptrace_register)
3805 the_low_target.collect_ptrace_register (regcache, regno, buf);
3806 else
3807 collect_register (regcache, regno, buf);
3808
3809 pid = lwpid_of (get_thread_lwp (current_inferior));
3810 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3811 {
3812 errno = 0;
3813 ptrace (PTRACE_POKEUSER, pid,
3814 /* Coerce to a uintptr_t first to avoid potential gcc warning
3815 about coercing an 8 byte integer to a 4 byte pointer. */
3816 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3817 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3818 if (errno != 0)
3819 {
3820 /* At this point, ESRCH should mean the process is
3821 already gone, in which case we simply ignore attempts
3822 to change its registers. See also the related
3823 comment in linux_resume_one_lwp. */
3824 if (errno == ESRCH)
3825 return;
3826
3827 if ((*the_low_target.cannot_store_register) (regno) == 0)
3828 error ("writing register %d: %s", regno, strerror (errno));
3829 }
3830 regaddr += sizeof (PTRACE_XFER_TYPE);
3831 }
3832}
3833
da6d8c04 3834/* Fetch all registers, or just one, from the child process. */
58caa3dc 3835static void
442ea881 3836usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 3837{
4463ce24 3838 if (regno == -1)
2ec06d2e 3839 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3840 fetch_register (regcache, regno);
da6d8c04 3841 else
442ea881 3842 fetch_register (regcache, regno);
da6d8c04
DJ
3843}
3844
3845/* Store our register values back into the inferior.
3846 If REGNO is -1, do this for all registers.
3847 Otherwise, REGNO specifies which register (so we can save time). */
58caa3dc 3848static void
442ea881 3849usr_store_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 3850{
7325beb4 3851 if (regno == -1)
2ec06d2e 3852 for (regno = 0; regno < the_low_target.num_regs; regno++)
7325beb4
MR
3853 store_register (regcache, regno);
3854 else
3855 store_register (regcache, regno);
da6d8c04 3856}
58caa3dc
DJ
3857#endif /* HAVE_LINUX_USRREGS */
3858
3859
3860
3861#ifdef HAVE_LINUX_REGSETS
3862
3863static int
442ea881 3864regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3865{
3866 struct regset_info *regset;
e9d25b98 3867 int saw_general_regs = 0;
95954743 3868 int pid;
1570b33e 3869 struct iovec iov;
58caa3dc
DJ
3870
3871 regset = target_regsets;
3872
95954743 3873 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3874 while (regset->size >= 0)
3875 {
1570b33e
L
3876 void *buf, *data;
3877 int nt_type, res;
58caa3dc 3878
52fa2412 3879 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3880 {
3881 regset ++;
3882 continue;
3883 }
3884
bca929d3 3885 buf = xmalloc (regset->size);
1570b33e
L
3886
3887 nt_type = regset->nt_type;
3888 if (nt_type)
3889 {
3890 iov.iov_base = buf;
3891 iov.iov_len = regset->size;
3892 data = (void *) &iov;
3893 }
3894 else
3895 data = buf;
3896
dfb64f85 3897#ifndef __sparc__
1570b33e 3898 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3899#else
1570b33e 3900 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3901#endif
58caa3dc
DJ
3902 if (res < 0)
3903 {
3904 if (errno == EIO)
3905 {
52fa2412
UW
3906 /* If we get EIO on a regset, do not try it again for
3907 this process. */
3908 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3909 free (buf);
52fa2412 3910 continue;
58caa3dc
DJ
3911 }
3912 else
3913 {
0d62e5e8 3914 char s[256];
95954743
PA
3915 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3916 pid);
0d62e5e8 3917 perror (s);
58caa3dc
DJ
3918 }
3919 }
e9d25b98
DJ
3920 else if (regset->type == GENERAL_REGS)
3921 saw_general_regs = 1;
442ea881 3922 regset->store_function (regcache, buf);
58caa3dc 3923 regset ++;
fdeb2a12 3924 free (buf);
58caa3dc 3925 }
e9d25b98
DJ
3926 if (saw_general_regs)
3927 return 0;
3928 else
3929 return 1;
58caa3dc
DJ
3930}
3931
3932static int
442ea881 3933regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3934{
3935 struct regset_info *regset;
e9d25b98 3936 int saw_general_regs = 0;
95954743 3937 int pid;
1570b33e 3938 struct iovec iov;
58caa3dc
DJ
3939
3940 regset = target_regsets;
3941
95954743 3942 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3943 while (regset->size >= 0)
3944 {
1570b33e
L
3945 void *buf, *data;
3946 int nt_type, res;
58caa3dc 3947
52fa2412 3948 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3949 {
3950 regset ++;
3951 continue;
3952 }
3953
bca929d3 3954 buf = xmalloc (regset->size);
545587ee
DJ
3955
3956 /* First fill the buffer with the current register set contents,
3957 in case there are any items in the kernel's regset that are
3958 not in gdbserver's regcache. */
1570b33e
L
3959
3960 nt_type = regset->nt_type;
3961 if (nt_type)
3962 {
3963 iov.iov_base = buf;
3964 iov.iov_len = regset->size;
3965 data = (void *) &iov;
3966 }
3967 else
3968 data = buf;
3969
dfb64f85 3970#ifndef __sparc__
1570b33e 3971 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3972#else
1570b33e 3973 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3974#endif
545587ee
DJ
3975
3976 if (res == 0)
3977 {
3978 /* Then overlay our cached registers on that. */
442ea881 3979 regset->fill_function (regcache, buf);
545587ee
DJ
3980
3981 /* Only now do we write the register set. */
dfb64f85 3982#ifndef __sparc__
1570b33e 3983 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3984#else
1570b33e 3985 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3986#endif
545587ee
DJ
3987 }
3988
58caa3dc
DJ
3989 if (res < 0)
3990 {
3991 if (errno == EIO)
3992 {
52fa2412
UW
3993 /* If we get EIO on a regset, do not try it again for
3994 this process. */
3995 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3996 free (buf);
52fa2412 3997 continue;
58caa3dc 3998 }
3221518c
UW
3999 else if (errno == ESRCH)
4000 {
1b3f6016
PA
4001 /* At this point, ESRCH should mean the process is
4002 already gone, in which case we simply ignore attempts
4003 to change its registers. See also the related
4004 comment in linux_resume_one_lwp. */
fdeb2a12 4005 free (buf);
3221518c
UW
4006 return 0;
4007 }
58caa3dc
DJ
4008 else
4009 {
ce3a066d 4010 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
4011 }
4012 }
e9d25b98
DJ
4013 else if (regset->type == GENERAL_REGS)
4014 saw_general_regs = 1;
58caa3dc 4015 regset ++;
09ec9b38 4016 free (buf);
58caa3dc 4017 }
e9d25b98
DJ
4018 if (saw_general_regs)
4019 return 0;
4020 else
4021 return 1;
ce3a066d 4022 return 0;
58caa3dc
DJ
4023}
4024
4025#endif /* HAVE_LINUX_REGSETS */
4026
4027
/* Fetch registers from the inferior into REGCACHE.  REGNO is a single
   register number, or -1 meaning all registers.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Prefer the regset interface; a 0 return means the general
     registers were obtained, so nothing more is needed.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  /* Fall back to one-register-at-a-time PTRACE_PEEKUSER.  */
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
4039
/* Write registers from REGCACHE back to the inferior.  REGNO is a
   single register number, or -1 meaning all registers.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Prefer the regset interface; a 0 return means the general
     registers were written, so nothing more is needed.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  /* Fall back to one-register-at-a-time PTRACE_POKEUSER.  */
  usr_store_inferior_registers (regcache, regno);
#endif
}
4051
da6d8c04 4052
da6d8c04
DJ
4053/* Copy LEN bytes from inferior's memory starting at MEMADDR
4054 to debugger memory starting at MYADDR. */
4055
c3e735a6 4056static int
f450004a 4057linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
4058{
4059 register int i;
4060 /* Round starting address down to longword boundary. */
4061 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4062 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
4063 register int count
4064 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
4065 / sizeof (PTRACE_XFER_TYPE);
4066 /* Allocate buffer of that many longwords. */
aa691b87 4067 register PTRACE_XFER_TYPE *buffer
da6d8c04 4068 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
4069 int fd;
4070 char filename[64];
95954743 4071 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
4072
4073 /* Try using /proc. Don't bother for one word. */
4074 if (len >= 3 * sizeof (long))
4075 {
4076 /* We could keep this file open and cache it - possibly one per
4077 thread. That requires some juggling, but is even faster. */
95954743 4078 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
4079 fd = open (filename, O_RDONLY | O_LARGEFILE);
4080 if (fd == -1)
4081 goto no_proc;
4082
4083 /* If pread64 is available, use it. It's faster if the kernel
4084 supports it (only one syscall), and it's 64-bit safe even on
4085 32-bit platforms (for instance, SPARC debugging a SPARC64
4086 application). */
4087#ifdef HAVE_PREAD64
4088 if (pread64 (fd, myaddr, len, memaddr) != len)
4089#else
1de1badb 4090 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
4091#endif
4092 {
4093 close (fd);
4094 goto no_proc;
4095 }
4096
4097 close (fd);
4098 return 0;
4099 }
da6d8c04 4100
fd462a61 4101 no_proc:
da6d8c04
DJ
4102 /* Read all the longwords */
4103 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4104 {
c3e735a6 4105 errno = 0;
14ce3065
DE
4106 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4107 about coercing an 8 byte integer to a 4 byte pointer. */
4108 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4109 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
4110 if (errno)
4111 return errno;
da6d8c04
DJ
4112 }
4113
4114 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
4115 memcpy (myaddr,
4116 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4117 len);
c3e735a6
DJ
4118
4119 return 0;
da6d8c04
DJ
4120}
4121
93ae6fdc
PA
4122/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4123 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
4124 returns the value of errno. */
4125
ce3a066d 4126static int
f450004a 4127linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4128{
4129 register int i;
4130 /* Round starting address down to longword boundary. */
4131 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4132 /* Round ending address up; get number of longwords that makes. */
4133 register int count
493e2a69
MS
4134 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4135 / sizeof (PTRACE_XFER_TYPE);
4136
da6d8c04 4137 /* Allocate buffer of that many longwords. */
493e2a69
MS
4138 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4139 alloca (count * sizeof (PTRACE_XFER_TYPE));
4140
95954743 4141 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 4142
0d62e5e8
DJ
4143 if (debug_threads)
4144 {
58d6951d
DJ
4145 /* Dump up to four bytes. */
4146 unsigned int val = * (unsigned int *) myaddr;
4147 if (len == 1)
4148 val = val & 0xff;
4149 else if (len == 2)
4150 val = val & 0xffff;
4151 else if (len == 3)
4152 val = val & 0xffffff;
4153 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4154 val, (long)memaddr);
0d62e5e8
DJ
4155 }
4156
da6d8c04
DJ
4157 /* Fill start and end extra bytes of buffer with existing memory data. */
4158
93ae6fdc 4159 errno = 0;
14ce3065
DE
4160 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4161 about coercing an 8 byte integer to a 4 byte pointer. */
4162 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4163 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
4164 if (errno)
4165 return errno;
da6d8c04
DJ
4166
4167 if (count > 1)
4168 {
93ae6fdc 4169 errno = 0;
da6d8c04 4170 buffer[count - 1]
95954743 4171 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4172 /* Coerce to a uintptr_t first to avoid potential gcc warning
4173 about coercing an 8 byte integer to a 4 byte pointer. */
4174 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4175 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 4176 0);
93ae6fdc
PA
4177 if (errno)
4178 return errno;
da6d8c04
DJ
4179 }
4180
93ae6fdc 4181 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4182
493e2a69
MS
4183 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4184 myaddr, len);
da6d8c04
DJ
4185
4186 /* Write the entire buffer. */
4187
4188 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4189 {
4190 errno = 0;
14ce3065
DE
4191 ptrace (PTRACE_POKETEXT, pid,
4192 /* Coerce to a uintptr_t first to avoid potential gcc warning
4193 about coercing an 8 byte integer to a 4 byte pointer. */
4194 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4195 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
4196 if (errno)
4197 return errno;
4198 }
4199
4200 return 0;
4201}
2f2893d9 4202
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  Set by
   linux_test_for_tracefork and consulted before using
   PTRACE_SETOPTIONS.  */
static int linux_supports_tracefork_flag;
4205
1e7fc18c
PA
4206static void
4207linux_enable_event_reporting (int pid)
4208{
4209 if (!linux_supports_tracefork_flag)
4210 return;
4211
4212 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4213}
4214
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* Grandchild body: exit immediately.  Its creation by
   linux_tracefork_child is what should trigger the
   PTRACE_EVENT_FORK being probed for.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
4222
7407e2de
AS
4223#define STACK_SIZE 4096
4224
51c2684e
DJ
/* Child body for the tracefork probe: arrange to be traced, stop so
   the parent can set PTRACE_O_TRACEFORK, then fork a grandchild and
   exit.  ARG is the clone stack base on no-MMU configurations.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* No MMU: fork is unavailable, so clone the grandchild sharing this
     address space.  ia64's __clone2 takes a stack base and size;
     plain clone takes a stack top, hence the + STACK_SIZE.  */
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
4250
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag accordingly.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child SIGSTOPs itself after PTRACE_TRACEME; wait for that.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* The option is not accepted at all; kill and reap the child,
	 leaving the flag clear.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Resume the child; if the option works, its fork will report a
     PTRACE_EVENT_FORK stop.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* The grandchild was reported, so the option truly works.
	     Reap the auto-traced grandchild as well.  */
	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child and drain any remaining stop reports.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
4357
4358
/* Hook run when symbols become available: initialize libthread_db
   support for the current process, once.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
4374
e5379b03 4375static void
ef57601b 4376linux_request_interrupt (void)
e5379b03 4377{
a1928bad 4378 extern unsigned long signal_pid;
e5379b03 4379
95954743
PA
4380 if (!ptid_equal (cont_thread, null_ptid)
4381 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 4382 {
54a0b537 4383 struct lwp_info *lwp;
bd99dc85 4384 int lwpid;
e5379b03 4385
54a0b537 4386 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
4387 lwpid = lwpid_of (lwp);
4388 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
4389 }
4390 else
ef57601b 4391 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
4392}
4393
aa691b87
RM
4394/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4395 to debugger memory starting at MYADDR. */
4396
4397static int
f450004a 4398linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4399{
4400 char filename[PATH_MAX];
4401 int fd, n;
95954743 4402 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 4403
6cebaf6e 4404 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4405
4406 fd = open (filename, O_RDONLY);
4407 if (fd < 0)
4408 return -1;
4409
4410 if (offset != (CORE_ADDR) 0
4411 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4412 n = -1;
4413 else
4414 n = read (fd, myaddr, len);
4415
4416 close (fd);
4417
4418 return n;
4419}
4420
d993e290
PA
4421/* These breakpoint and watchpoint related wrapper functions simply
4422 pass on the function call if the target has registered a
4423 corresponding function. */
e013ee27
OF
4424
4425static int
d993e290 4426linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 4427{
d993e290
PA
4428 if (the_low_target.insert_point != NULL)
4429 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
4430 else
4431 /* Unsupported (see target.h). */
4432 return 1;
4433}
4434
4435static int
d993e290 4436linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 4437{
d993e290
PA
4438 if (the_low_target.remove_point != NULL)
4439 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
4440 else
4441 /* Unsupported (see target.h). */
4442 return 1;
4443}
4444
4445static int
4446linux_stopped_by_watchpoint (void)
4447{
c3adc08c
PA
4448 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4449
4450 return lwp->stopped_by_watchpoint;
e013ee27
OF
4451}
4452
4453static CORE_ADDR
4454linux_stopped_data_address (void)
4455{
c3adc08c
PA
4456 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4457
4458 return lwp->stopped_data_address;
e013ee27
OF
4459}
4460
42c81e2a 4461#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
4462#if defined(__mcoldfire__)
4463/* These should really be defined in the kernel's ptrace.h header. */
4464#define PT_TEXT_ADDR 49*4
4465#define PT_DATA_ADDR 50*4
4466#define PT_TEXT_END_ADDR 51*4
eb826dc6
MF
4467#elif defined(BFIN)
4468#define PT_TEXT_ADDR 220
4469#define PT_TEXT_END_ADDR 224
4470#define PT_DATA_ADDR 228
58dbd541
YQ
4471#elif defined(__TMS320C6X__)
4472#define PT_TEXT_ADDR (0x10000*4)
4473#define PT_DATA_ADDR (0x10004*4)
4474#define PT_TEXT_END_ADDR (0x10008*4)
52fb6437
NS
4475#endif
4476
4477/* Under uClinux, programs are loaded at non-zero offsets, which we need
4478 to tell gdb about. */
4479
/* Report the runtime text/data load offsets of the inferior by
   reading the kernel-maintained PT_* slots via PTRACE_PEEKUSER.
   Returns 1 with *TEXT_P/*DATA_P filled in, or 0 on failure or when
   the platform defines no PT_TEXT_ADDR et al.  */
static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* errno is the only failure indication for PTRACE_PEEKUSER; clear
     it once and check it after all three reads.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
4512#endif
4513
07e059b5
VP
/* qXfer:osdata handler: read operating-system data for ANNEX into
   READBUF via the shared linux-osdata code.  WRITEBUF is accepted for
   interface compatibility but is ignored -- only reads are passed
   through.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
4521
d0722149
DE
4522/* Convert a native/host siginfo object, into/from the siginfo in the
4523 layout of the inferiors' architecture. */
4524
4525static void
4526siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4527{
4528 int done = 0;
4529
4530 if (the_low_target.siginfo_fixup != NULL)
4531 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4532
4533 /* If there was no callback, or the callback didn't do anything,
4534 then just do a straight memcpy. */
4535 if (!done)
4536 {
4537 if (direction == 1)
4538 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4539 else
4540 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4541 }
4542}
4543
4aa995e1
PA
/* qXfer:siginfo handler: read or write a slice [OFFSET, OFFSET+LEN)
   of the current LWP's siginfo.  Exactly one of READBUF/WRITEBUF is
   non-NULL.  Returns the number of bytes transferred, or -1 on
   error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  /* Byte image of the siginfo in the *inferior's* layout.  */
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write we first read the current siginfo, so that a
     partial write only modifies the requested slice.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4592
bd99dc85
PA
4593/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4594 so we notice when children change state; as the handler for the
4595 sigsuspend in my_waitpid. */
4596
4597static void
4598sigchld_handler (int signo)
4599{
4600 int old_errno = errno;
4601
4602 if (debug_threads)
e581f2b4
PA
4603 {
4604 do
4605 {
4606 /* fprintf is not async-signal-safe, so call write
4607 directly. */
4608 if (write (2, "sigchld_handler\n",
4609 sizeof ("sigchld_handler\n") - 1) < 0)
4610 break; /* just ignore */
4611 } while (0);
4612 }
bd99dc85
PA
4613
4614 if (target_is_async_p ())
4615 async_file_mark (); /* trigger a linux_wait */
4616
4617 errno = old_errno;
4618}
4619
/* Non-stop mode is always supported by this target.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
4625
/* Switch target async mode on (ENABLE != 0) or off, registering or
   removing the SIGCHLD event pipe with the event loop.  Returns the
   previous setting.  */
static int
linux_async (int enable)
{
  /* The pipe doubles as the "async is enabled" flag.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while flipping the pipe so sigchld_handler
	 never sees it half-initialized.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4673
/* Enter or leave non-stop mode; this just toggles async event
   reporting accordingly.  Returns 0 (success).  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4681
cf8fd78b
PA
/* Debugging multiple processes at once is always supported.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
4687
03583c20
UW
/* Address-space randomization can be disabled only when the host
   provides personality(2) support (HAVE_PERSONALITY).  */
static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14
UW
4697
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for open
   descriptors that are directories on a spufs filesystem, and packs
   each such fd number as a 4-byte value into BUF, honoring the
   [OFFSET, OFFSET+LEN) window.  Returns the number of bytes written
   to BUF, or -1 if /proc/PID/fd cannot be opened.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;       /* Virtual offset of the next ID.  */
  int written = 0;   /* Bytes actually stored into BUF.  */
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* atoi yields 0 for ".", ".." and non-numeric names; those are
	 skipped here.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Store the ID only when it fits entirely inside the requested
	 window; IDs straddling the window edges are skipped.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4746
4747/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4748 object type, using the /proc file system. */
4749static int
4750linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4751 unsigned const char *writebuf,
4752 CORE_ADDR offset, int len)
4753{
4754 long pid = lwpid_of (get_thread_lwp (current_inferior));
4755 char buf[128];
4756 int fd = 0;
4757 int ret = 0;
4758
4759 if (!writebuf && !readbuf)
4760 return -1;
4761
4762 if (!*annex)
4763 {
4764 if (!readbuf)
4765 return -1;
4766 else
4767 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4768 }
4769
4770 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4771 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4772 if (fd <= 0)
4773 return -1;
4774
4775 if (offset != 0
4776 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4777 {
4778 close (fd);
4779 return 0;
4780 }
4781
4782 if (writebuf)
4783 ret = write (fd, writebuf, (size_t) len);
4784 else
4785 ret = read (fd, readbuf, (size_t) len);
4786
4787 close (fd);
4788 return ret;
4789}
4790
723b724b 4791#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
4792struct target_loadseg
4793{
4794 /* Core address to which the segment is mapped. */
4795 Elf32_Addr addr;
4796 /* VMA recorded in the program header. */
4797 Elf32_Addr p_vaddr;
4798 /* Size of this segment in memory. */
4799 Elf32_Word p_memsz;
4800};
4801
723b724b 4802# if defined PT_GETDSBT
78d85199
YQ
4803struct target_loadmap
4804{
4805 /* Protocol version number, must be zero. */
4806 Elf32_Word version;
4807 /* Pointer to the DSBT table, its size, and the DSBT index. */
4808 unsigned *dsbt_table;
4809 unsigned dsbt_size, dsbt_index;
4810 /* Number of segments in this map. */
4811 Elf32_Word nsegs;
4812 /* The actual memory map. */
4813 struct target_loadseg segs[/*nsegs*/];
4814};
723b724b
MF
4815# define LINUX_LOADMAP PT_GETDSBT
4816# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4817# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4818# else
4819struct target_loadmap
4820{
4821 /* Protocol version number, must be zero. */
4822 Elf32_Half version;
4823 /* Number of segments in this map. */
4824 Elf32_Half nsegs;
4825 /* The actual memory map. */
4826 struct target_loadseg segs[/*nsegs*/];
4827};
4828# define LINUX_LOADMAP PTRACE_GETFDPIC
4829# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4830# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4831# endif
78d85199 4832
78d85199
YQ
/* Read the FDPIC/DSBT load map for ANNEX ("exec" or "interp") of the
   current LWP via the LINUX_LOADMAP ptrace request, copying up to LEN
   bytes starting at OFFSET into MYADDR.  Returns the number of bytes
   copied, or -1 on error.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  int addr = -1;
  /* Filled in by the kernel with a pointer into our address space.  */
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): if CORE_ADDR is unsigned (not visible here), the
     `offset < 0' arm is always false -- confirm and simplify.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b
MF
4865#else
4866# define linux_read_loadmap NULL
4867#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 4868
1570b33e
L
4869static void
4870linux_process_qsupported (const char *query)
4871{
4872 if (the_low_target.process_qsupported != NULL)
4873 the_low_target.process_qsupported (query);
4874}
4875
219f2f23
PA
4876static int
4877linux_supports_tracepoints (void)
4878{
4879 if (*the_low_target.supports_tracepoints == NULL)
4880 return 0;
4881
4882 return (*the_low_target.supports_tracepoints) ();
4883}
4884
4885static CORE_ADDR
4886linux_read_pc (struct regcache *regcache)
4887{
4888 if (the_low_target.get_pc == NULL)
4889 return 0;
4890
4891 return (*the_low_target.get_pc) (regcache);
4892}
4893
/* Write PC into REGCACHE through the low target's set_pc hook, which
   is required to exist here.  */
static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
4901
8336d594
PA
/* Report whether THREAD's LWP is currently stopped.  */
static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
4907
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is passed through to stop_all_lwps.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
4915
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is passed through to unstop_all_lwps.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
4924
90d74c30
PA
/* Pause all LWPs before a memory access in non-stop mode; returns 0
   (success).  */
static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}
4934
/* Undo linux_prepare_to_access_memory: resume the LWPs paused for the
   memory access in non-stop mode.  */
static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
4943
fa593d66
PA
/* Pass-through to the low target's fast-tracepoint jump-pad
   installer; all parameters and the return value are forwarded
   unchanged.  The hook is assumed to be non-NULL (callers gate on
   fast-tracepoint support).  */
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
4965
6a271cae
PA
4966static struct emit_ops *
4967linux_emit_ops (void)
4968{
4969 if (the_low_target.emit_ops != NULL)
4970 return (*the_low_target.emit_ops) ();
4971 else
4972 return NULL;
4973}
4974
405f8e94
SS
4975static int
4976linux_get_min_fast_tracepoint_insn_len (void)
4977{
4978 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
4979}
4980
2268b414
JK
4981/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
4982
4983static int
4984get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
4985 CORE_ADDR *phdr_memaddr, int *num_phdr)
4986{
4987 char filename[PATH_MAX];
4988 int fd;
4989 const int auxv_size = is_elf64
4990 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
4991 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
4992
4993 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4994
4995 fd = open (filename, O_RDONLY);
4996 if (fd < 0)
4997 return 1;
4998
4999 *phdr_memaddr = 0;
5000 *num_phdr = 0;
5001 while (read (fd, buf, auxv_size) == auxv_size
5002 && (*phdr_memaddr == 0 || *num_phdr == 0))
5003 {
5004 if (is_elf64)
5005 {
5006 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5007
5008 switch (aux->a_type)
5009 {
5010 case AT_PHDR:
5011 *phdr_memaddr = aux->a_un.a_val;
5012 break;
5013 case AT_PHNUM:
5014 *num_phdr = aux->a_un.a_val;
5015 break;
5016 }
5017 }
5018 else
5019 {
5020 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5021
5022 switch (aux->a_type)
5023 {
5024 case AT_PHDR:
5025 *phdr_memaddr = aux->a_un.a_val;
5026 break;
5027 case AT_PHNUM:
5028 *num_phdr = aux->a_un.a_val;
5029 break;
5030 }
5031 }
5032 }
5033
5034 close (fd);
5035
5036 if (*phdr_memaddr == 0 || *num_phdr == 0)
5037 {
5038 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5039 "phdr_memaddr = %ld, phdr_num = %d",
5040 (long) *phdr_memaddr, *num_phdr);
5041 return 2;
5042 }
5043
5044 return 0;
5045}
5046
5047/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5048
5049static CORE_ADDR
5050get_dynamic (const int pid, const int is_elf64)
5051{
5052 CORE_ADDR phdr_memaddr, relocation;
5053 int num_phdr, i;
5054 unsigned char *phdr_buf;
5055 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5056
5057 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5058 return 0;
5059
5060 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5061 phdr_buf = alloca (num_phdr * phdr_size);
5062
5063 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5064 return 0;
5065
5066 /* Compute relocation: it is expected to be 0 for "regular" executables,
5067 non-zero for PIE ones. */
5068 relocation = -1;
5069 for (i = 0; relocation == -1 && i < num_phdr; i++)
5070 if (is_elf64)
5071 {
5072 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5073
5074 if (p->p_type == PT_PHDR)
5075 relocation = phdr_memaddr - p->p_vaddr;
5076 }
5077 else
5078 {
5079 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5080
5081 if (p->p_type == PT_PHDR)
5082 relocation = phdr_memaddr - p->p_vaddr;
5083 }
5084
5085 if (relocation == -1)
5086 {
5087 warning ("Unexpected missing PT_PHDR");
5088 return 0;
5089 }
5090
5091 for (i = 0; i < num_phdr; i++)
5092 {
5093 if (is_elf64)
5094 {
5095 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5096
5097 if (p->p_type == PT_DYNAMIC)
5098 return p->p_vaddr + relocation;
5099 }
5100 else
5101 {
5102 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5103
5104 if (p->p_type == PT_DYNAMIC)
5105 return p->p_vaddr + relocation;
5106 }
5107 }
5108
5109 return 0;
5110}
5111
5112/* Return &_r_debug in the inferior, or -1 if not present. Return value
5113 can be 0 if the inferior does not yet have the library list initialized. */
5114
5115static CORE_ADDR
5116get_r_debug (const int pid, const int is_elf64)
5117{
5118 CORE_ADDR dynamic_memaddr;
5119 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5120 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5121
5122 dynamic_memaddr = get_dynamic (pid, is_elf64);
5123 if (dynamic_memaddr == 0)
5124 return (CORE_ADDR) -1;
5125
5126 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5127 {
5128 if (is_elf64)
5129 {
5130 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5131
5132 if (dyn->d_tag == DT_DEBUG)
5133 return dyn->d_un.d_val;
5134
5135 if (dyn->d_tag == DT_NULL)
5136 break;
5137 }
5138 else
5139 {
5140 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5141
5142 if (dyn->d_tag == DT_DEBUG)
5143 return dyn->d_un.d_val;
5144
5145 if (dyn->d_tag == DT_NULL)
5146 break;
5147 }
5148
5149 dynamic_memaddr += dyn_size;
5150 }
5151
5152 return (CORE_ADDR) -1;
5153}
5154
5155/* Read one pointer from MEMADDR in the inferior. */
5156
5157static int
5158read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5159{
5160 *ptr = 0;
5161 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5162}
5163
/* Field offsets within the inferior's SVR4 `struct r_debug' and
   `struct link_map', for the 32-bit and 64-bit ABIs.  */

struct link_map_offsets
  {
    /* Offset of r_debug.r_version.  */
    int r_version_offset;

    /* Offset of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5187
5188/* Construct qXfer:libraries:read reply. */
5189
5190static int
5191linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5192 unsigned const char *writebuf,
5193 CORE_ADDR offset, int len)
5194{
5195 char *document;
5196 unsigned document_len;
5197 struct process_info_private *const priv = current_process ()->private;
5198 char filename[PATH_MAX];
5199 int pid, is_elf64;
5200
5201 static const struct link_map_offsets lmo_32bit_offsets =
5202 {
5203 0, /* r_version offset. */
5204 4, /* r_debug.r_map offset. */
5205 0, /* l_addr offset in link_map. */
5206 4, /* l_name offset in link_map. */
5207 8, /* l_ld offset in link_map. */
5208 12, /* l_next offset in link_map. */
5209 16 /* l_prev offset in link_map. */
5210 };
5211
5212 static const struct link_map_offsets lmo_64bit_offsets =
5213 {
5214 0, /* r_version offset. */
5215 8, /* r_debug.r_map offset. */
5216 0, /* l_addr offset in link_map. */
5217 8, /* l_name offset in link_map. */
5218 16, /* l_ld offset in link_map. */
5219 24, /* l_next offset in link_map. */
5220 32 /* l_prev offset in link_map. */
5221 };
5222 const struct link_map_offsets *lmo;
5223
5224 if (writebuf != NULL)
5225 return -2;
5226 if (readbuf == NULL)
5227 return -1;
5228
5229 pid = lwpid_of (get_thread_lwp (current_inferior));
5230 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5231 is_elf64 = elf_64_file_p (filename);
5232 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5233
5234 if (priv->r_debug == 0)
5235 priv->r_debug = get_r_debug (pid, is_elf64);
5236
5237 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5238 {
5239 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5240 }
5241 else
5242 {
5243 int allocated = 1024;
5244 char *p;
5245 const int ptr_size = is_elf64 ? 8 : 4;
5246 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5247 int r_version, header_done = 0;
5248
5249 document = xmalloc (allocated);
5250 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5251 p = document + strlen (document);
5252
5253 r_version = 0;
5254 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5255 (unsigned char *) &r_version,
5256 sizeof (r_version)) != 0
5257 || r_version != 1)
5258 {
5259 warning ("unexpected r_debug version %d", r_version);
5260 goto done;
5261 }
5262
5263 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5264 &lm_addr, ptr_size) != 0)
5265 {
5266 warning ("unable to read r_map from 0x%lx",
5267 (long) priv->r_debug + lmo->r_map_offset);
5268 goto done;
5269 }
5270
5271 lm_prev = 0;
5272 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5273 &l_name, ptr_size) == 0
5274 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5275 &l_addr, ptr_size) == 0
5276 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5277 &l_ld, ptr_size) == 0
5278 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5279 &l_prev, ptr_size) == 0
5280 && read_one_ptr (lm_addr + lmo->l_next_offset,
5281 &l_next, ptr_size) == 0)
5282 {
5283 unsigned char libname[PATH_MAX];
5284
5285 if (lm_prev != l_prev)
5286 {
5287 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5288 (long) lm_prev, (long) l_prev);
5289 break;
5290 }
5291
5292 /* Not checking for error because reading may stop before
5293 we've got PATH_MAX worth of characters. */
5294 libname[0] = '\0';
5295 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5296 libname[sizeof (libname) - 1] = '\0';
5297 if (libname[0] != '\0')
5298 {
5299 /* 6x the size for xml_escape_text below. */
5300 size_t len = 6 * strlen ((char *) libname);
5301 char *name;
5302
5303 if (!header_done)
5304 {
5305 /* Terminate `<library-list-svr4'. */
5306 *p++ = '>';
5307 header_done = 1;
5308 }
5309
5310 while (allocated < p - document + len + 200)
5311 {
5312 /* Expand to guarantee sufficient storage. */
5313 uintptr_t document_len = p - document;
5314
5315 document = xrealloc (document, 2 * allocated);
5316 allocated *= 2;
5317 p = document + document_len;
5318 }
5319
5320 name = xml_escape_text ((char *) libname);
5321 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5322 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5323 name, (unsigned long) lm_addr,
5324 (unsigned long) l_addr, (unsigned long) l_ld);
5325 free (name);
5326 }
5327 else if (lm_prev == 0)
5328 {
5329 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5330 p = p + strlen (p);
5331 }
5332
5333 if (l_next == 0)
5334 break;
5335
5336 lm_prev = lm_addr;
5337 lm_addr = l_next;
5338 }
5339 done:
5340 strcpy (p, "</library-list-svr4>");
5341 }
5342
5343 document_len = strlen (document);
5344 if (offset < document_len)
5345 document_len -= offset;
5346 else
5347 document_len = 0;
5348 if (len > document_len)
5349 len = document_len;
5350
5351 memcpy (readbuf, document + offset, len);
5352 xfree (document);
5353
5354 return len;
5355}
5356
ce3a066d
DJ
/* The Linux implementation of the target_ops vector.  This is a
   positional initializer: entries must remain in exactly the order of
   the fields of struct target_ops (see target.h); most entries are
   self-describing from the hook's name.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* read_offsets is only meaningful on uClinux/no-MMU targets.  */
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  /* TLS address lookup requires libthread_db.  */
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NOTE(review): unimplemented hook slot here (presumably the
     Windows-only get_tib_address) -- confirm against target.h.  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
};
5421
0d62e5e8
DJ
/* One-time signal setup for the Linux target: ignore the LinuxThreads
   cancel signal so it does not kill gdbserver when attached threads
   receive it.  (Declared with (void): an empty parameter list in a C
   definition leaves the parameters unspecified.)  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
5431
da6d8c04
DJ
/* Entry point for target-side one-time initialization: install the
   Linux target vector and set up low-level state.  Called once at
   gdbserver startup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Tell the core which breakpoint instruction the architecture
     uses, so it can plant breakpoints in inferior memory.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets the port provides (terminated by size < 0) and
     allocate a parallel flags array marking which ones the kernel
     rejects at runtime.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART so syscalls interrupted by SIGCHLD are restarted.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
da6d8c04 5452}