]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Add missing files to previous commit (Allow Python notification of new object-file...
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
7b6bb8da 3 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
d26e3629 22#include "linux-osdata.h"
da6d8c04 23
58caa3dc 24#include <sys/wait.h>
da6d8c04
DJ
25#include <stdio.h>
26#include <sys/param.h>
da6d8c04 27#include <sys/ptrace.h>
af96c192 28#include "linux-ptrace.h"
e3deef73 29#include "linux-procfs.h"
da6d8c04
DJ
30#include <signal.h>
31#include <sys/ioctl.h>
32#include <fcntl.h>
d07c63e7 33#include <string.h>
0a30fbc4
DJ
34#include <stdlib.h>
35#include <unistd.h>
fa6a77dc 36#include <errno.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
efcbbd14
UW
43#include <sys/stat.h>
44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
957f3f49
DE
46#ifndef ELFMAG0
47/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51#include <elf.h>
52#endif
efcbbd14
UW
53
54#ifndef SPUFS_MAGIC
55#define SPUFS_MAGIC 0x23c9b64e
56#endif
da6d8c04 57
fd462a61
DJ
58#ifndef O_LARGEFILE
59#define O_LARGEFILE 0
60#endif
61
ec8ebe72
DE
62#ifndef W_STOPCODE
63#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
64#endif
65
1a981360
PA
66/* This is the kernel's hard limit. Not to be confused with
67 SIGRTMIN. */
68#ifndef __SIGRTMIN
69#define __SIGRTMIN 32
70#endif
71
42c81e2a
DJ
72#ifdef __UCLIBC__
73#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
74#define HAS_NOMMU
75#endif
76#endif
77
24a09b5f
DJ
78/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
79 representation of the thread ID.
611cb4a5 80
54a0b537 81 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
82 the same as the LWP ID.
83
84 ``all_processes'' is keyed by the "overall process ID", which
85 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 86
54a0b537 87struct inferior_list all_lwps;
0d62e5e8 88
24a09b5f
DJ
89/* A list of all unknown processes which receive stop signals. Some other
90 process will presumably claim each of these as forked children
91 momentarily. */
92
93struct inferior_list stopped_pids;
94
0d62e5e8
DJ
95/* FIXME this is a bit of a hack, and could be removed. */
96int stopping_threads;
97
98/* FIXME make into a target method? */
24a09b5f 99int using_threads = 1;
24a09b5f 100
fa593d66
PA
101/* True if we're presently stabilizing threads (moving them out of
102 jump pads). */
103static int stabilizing_threads;
104
95954743
PA
105/* This flag is true iff we've just created or attached to our first
106 inferior but it has not stopped yet. As soon as it does, we need
107 to call the low target's arch_setup callback. Doing this only on
108 the first inferior avoids reinializing the architecture on every
109 inferior, and avoids messing with the register caches of the
110 already running inferiors. NOTE: this assumes all inferiors under
111 control of gdbserver have the same architecture. */
d61ddec4
UW
112static int new_inferior;
113
2acc282a 114static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 115 int step, int signal, siginfo_t *info);
2bd7c093 116static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
117static void stop_all_lwps (int suspend, struct lwp_info *except);
118static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 119static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 120static void *add_lwp (ptid_t ptid);
c35fafde 121static int linux_stopped_by_watchpoint (void);
95954743 122static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 123static void proceed_all_lwps (void);
d50171e4
PA
124static int finish_step_over (struct lwp_info *lwp);
125static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
126static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 127static void linux_enable_event_reporting (int pid);
d50171e4
PA
128
129/* True if the low target can hardware single-step. Such targets
130 don't need a BREAKPOINT_REINSERT_ADDR callback. */
131
132static int
133can_hardware_single_step (void)
134{
135 return (the_low_target.breakpoint_reinsert_addr == NULL);
136}
137
138/* True if the low target supports memory breakpoints. If so, we'll
139 have a GET_PC implementation. */
140
141static int
142supports_breakpoints (void)
143{
144 return (the_low_target.get_pc != NULL);
145}
0d62e5e8 146
fa593d66
PA
147/* Returns true if this target can support fast tracepoints. This
148 does not mean that the in-process agent has been loaded in the
149 inferior. */
150
151static int
152supports_fast_tracepoints (void)
153{
154 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
155}
156
0d62e5e8
DJ
157struct pending_signals
158{
159 int signal;
32ca6d61 160 siginfo_t info;
0d62e5e8
DJ
161 struct pending_signals *prev;
162};
611cb4a5 163
14ce3065
DE
164#define PTRACE_ARG3_TYPE void *
165#define PTRACE_ARG4_TYPE void *
c6ecbae5 166#define PTRACE_XFER_TYPE long
da6d8c04 167
58caa3dc 168#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
169static char *disabled_regsets;
170static int num_regsets;
58caa3dc
DJ
171#endif
172
bd99dc85
PA
173/* The read/write ends of the pipe registered as waitable file in the
174 event loop. */
175static int linux_event_pipe[2] = { -1, -1 };
176
177/* True if we're currently in async mode. */
178#define target_is_async_p() (linux_event_pipe[0] != -1)
179
02fc4de7 180static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
181static void wait_for_sigstop (struct inferior_list_entry *entry);
182
d0722149
DE
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* Read at most MAXPATHLEN - 1 bytes: readlink does not append a
     terminating NUL, so a full-length result would otherwise leave
     NAME2 unterminated despite the memset above.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      /* The symlink resolved; NAME2 holds the executable path.  */
      free (name1);
      return name2;
    }
  else
    {
      /* Fall back to the /proc path itself (e.g. if /proc isn't
	 mounted or the process is gone).  */
      free (name2);
      return name1;
    }
}
208
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* First the four ELF magic bytes must be present...  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    return 0;

  /* ...then the class byte must declare a 64-bit object.  */
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
220
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int result = 0;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* A file too short to hold a full ELF header cannot be a 64-bit
     ELF; in that case RESULT stays 0.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);

  close (fd);
  return result;
}
244
bd99dc85
PA
245static void
246delete_lwp (struct lwp_info *lwp)
247{
248 remove_thread (get_lwp_thread (lwp));
249 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 250 free (lwp->arch_private);
bd99dc85
PA
251 free (lwp);
252}
253
95954743
PA
/* Add a process to the common process list, and set its private
   data.  Returns the new process_info.  ATTACHED is nonzero when we
   attached to a running process rather than spawning it.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  The
     NEW_INFERIOR flag makes the wait code run the low target's
     arch_setup callback once the inferior first stops.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  /* xcalloc zero-fills, so all private fields start out cleared.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Give the low target a chance to allocate per-process state.  */
  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
274
07d4f67e
DE
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  The emulation
   alternates non-blocking waits between plain children and __WCLONE
   children, sleeping in sigsuspend between full rounds.  Returns the
   waitpid result, with errno preserved from the inner call.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Remember whether the caller wanted a non-blocking wait; we
	 always poll with WNOHANG below and block via sigsuspend.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* Stop on a hard error (other than "no such children") or
	     on an actual event.  ret == 0 / ECHILD means "nothing in
	     this flavor yet" — try the other flavor.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Alternate between waiting for plain and clone children.  */
	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* __WALL not requested: a simple EINTR-retry loop suffices.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Re-expose the errno of the decisive waitpid call to the caller
     (the fprintf above may have clobbered it).  */
  errno = out_errno;
  return ret;
}
349
bd99dc85
PA
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  EVENT_CHILD is the LWP that reported the
   event; WSTAT is its wait status, whose high 16 bits carry the
   ptrace extended event code.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* Extended events are encoded above the normal status bits.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the new thread's LWP id from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      /* Register the new LWP under the same process id as its
	 creator.  */
      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    /* An all-stop is in progress; just record where it
	       stopped and leave it stopped.  */
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  /* The SIGSTOP is still on its way; remember to swallow it
	     when it arrives.  */
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      /* Queue the unexpected signal so it is reported later.  */
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
428
d50171e4
PA
429/* Return the PC as read from the regcache of LWP, without any
430 adjustment. */
431
432static CORE_ADDR
433get_pc (struct lwp_info *lwp)
434{
435 struct thread_info *saved_inferior;
436 struct regcache *regcache;
437 CORE_ADDR pc;
438
439 if (the_low_target.get_pc == NULL)
440 return 0;
441
442 saved_inferior = current_inferior;
443 current_inferior = get_lwp_thread (lwp);
444
445 regcache = get_thread_regcache (current_inferior, 1);
446 pc = (*the_low_target.get_pc) (regcache);
447
448 if (debug_threads)
449 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
450
451 current_inferior = saved_inferior;
452 return pc;
453}
454
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Back up over the breakpoint only for a plain breakpoint SIGTRAP:
     not a single-step trap, not a watchpoint hit, and not a ptrace
     extended event (which puts an event code in the high 16 bits of
     the status).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
ce3a066d 498
0d62e5e8 499static void *
95954743 500add_lwp (ptid_t ptid)
611cb4a5 501{
54a0b537 502 struct lwp_info *lwp;
0d62e5e8 503
54a0b537
PA
504 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
505 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 506
95954743 507 lwp->head.id = ptid;
0d62e5e8 508
aa5ca48f
DE
509 if (the_low_target.new_thread != NULL)
510 lwp->arch_private = the_low_target.new_thread ();
511
54a0b537 512 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 513
54a0b537 514 return lwp;
0d62e5e8 515}
611cb4a5 516
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  Forks (vfork on
   no-MMU uClibc), makes the child a ptrace tracee in its own process
   group, and execs PROGRAM.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No MMU: fork is unavailable/expensive, use vfork.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: ask to be traced by our parent.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      /* Undo glibc's internal handler so the inferior starts clean.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group, so Ctrl-C aimed at
	 it doesn't hit gdbserver.  */
      setpgid (0, 0);

      execv (program, allargs);
      /* If PROGRAM had no path, retry with a PATH search.  */
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: register the new process and its initial LWP/thread.  */
  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* ptrace options can only be set once it stops; flag that.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
564
/* Attach to an inferior process.  LWPID is the thread to attach to;
   INITIAL is nonzero when this is the first LWP of a new attachment
   (attach failure is then fatal rather than a warning).  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
647
95954743
PA
/* Public entry point to attach to a single (non-initial) LWP; attach
   failures only warn.  Thin wrapper around linux_attach_lwp_1.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
653
e3deef73
LM
654/* Attach to PID. If PID is the tgid, attach to it and all
655 of its threads. */
656
0d62e5e8 657int
a1928bad 658linux_attach (unsigned long pid)
0d62e5e8 659{
e3deef73
LM
660 /* Attach to PID. We will check for other threads
661 soon. */
95954743 662 linux_attach_lwp_1 (pid, 1);
95954743 663 linux_add_process (pid, 1);
0d62e5e8 664
bd99dc85
PA
665 if (!non_stop)
666 {
8336d594
PA
667 struct thread_info *thread;
668
669 /* Don't ignore the initial SIGSTOP if we just attached to this
670 process. It will be collected by wait shortly. */
671 thread = find_thread_ptid (ptid_build (pid, pid, 0));
672 thread->last_resume_kind = resume_stop;
bd99dc85 673 }
0d62e5e8 674
e3deef73
LM
675 if (linux_proc_get_tgid (pid) == pid)
676 {
677 DIR *dir;
678 char pathname[128];
679
680 sprintf (pathname, "/proc/%ld/task", pid);
681
682 dir = opendir (pathname);
683
684 if (!dir)
685 {
686 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
687 fflush (stderr);
688 }
689 else
690 {
691 /* At this point we attached to the tgid. Scan the task for
692 existing threads. */
693 unsigned long lwp;
694 int new_threads_found;
695 int iterations = 0;
696 struct dirent *dp;
697
698 while (iterations < 2)
699 {
700 new_threads_found = 0;
701 /* Add all the other threads. While we go through the
702 threads, new threads may be spawned. Cycle through
703 the list of threads until we have done two iterations without
704 finding new threads. */
705 while ((dp = readdir (dir)) != NULL)
706 {
707 /* Fetch one lwp. */
708 lwp = strtoul (dp->d_name, NULL, 10);
709
710 /* Is this a new thread? */
711 if (lwp
712 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
713 {
714 linux_attach_lwp_1 (lwp, 0);
715 new_threads_found++;
716
717 if (debug_threads)
718 fprintf (stderr, "\
719Found and attached to new lwp %ld\n", lwp);
720 }
721 }
722
723 if (!new_threads_found)
724 iterations++;
725 else
726 iterations = 0;
727
728 rewinddir (dir);
729 }
730 closedir (dir);
731 }
732 }
733
95954743
PA
734 return 0;
735}
736
737struct counter
738{
739 int pid;
740 int count;
741};
742
743static int
744second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
745{
746 struct counter *counter = args;
747
748 if (ptid_get_pid (entry->id) == counter->pid)
749 {
750 if (++counter->count > 1)
751 return 1;
752 }
d61ddec4 753
da6d8c04
DJ
754 return 0;
755}
756
95954743
PA
757static int
758last_thread_of_process_p (struct thread_info *thread)
759{
760 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
761 int pid = ptid_get_pid (ptid);
762 struct counter counter = { pid , 0 };
da6d8c04 763
95954743
PA
764 return (find_inferior (&all_threads,
765 second_thread_of_pid_p, &counter) == NULL);
766}
767
/* Kill the inferior lwp.  find_inferior callback: ARGS points to the
   pid whose LWPs should be killed; the process leader itself is
   deliberately skipped (see comment below) and killed separately by
   linux_kill.  Always returns 0 so the walk visits every entry.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
804
95954743
PA
/* Kill process PID and all of its LWPs.  Returns 0 on success, -1 if
   no such process is known.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill every non-leader LWP of PID.  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
846
95954743
PA
/* find_inferior callback: detach from one LWP of the process whose
   pid ARGS points to, flushing registers and collecting any pending
   SIGSTOP first.  Always returns 0 so the walk continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
879
95954743
PA
/* Detach from process PID and all of its LWPs.  Returns 0 on
   success, -1 if no such process is known.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach each LWP individually.  */
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
911
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  /* Skip LWPs of other processes.  */
  if (pid_of (lwp) != pid_of (process))
    return 0;

  delete_lwp (lwp);
  return 0;
}
925
8336d594
PA
926static void
927linux_mourn (struct process_info *process)
928{
929 struct process_info_private *priv;
930
931#ifdef USE_THREAD_DB
932 thread_db_mourn (process);
933#endif
934
f9e39928
PA
935 find_inferior (&all_lwps, delete_lwp_callback, process);
936
8336d594
PA
937 /* Freeing all private data. */
938 priv = process->private;
939 free (priv->arch_private);
940 free (priv);
941 process->private = NULL;
505106cd
PA
942
943 remove_process (process);
8336d594
PA
944}
945
/* Wait until process PID has exited or been killed, reaping it, or
   until it is no longer our child (waitpid fails with ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);

    /* Only inspect STATUS when waitpid actually reaped an event; on
       failure (ret == -1) STATUS is indeterminate and testing it
       would be undefined behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
957
6ad8ae5c 958/* Return nonzero if the given thread is still alive. */
0d62e5e8 959static int
95954743 960linux_thread_alive (ptid_t ptid)
0d62e5e8 961{
95954743
PA
962 struct lwp_info *lwp = find_lwp_pid (ptid);
963
964 /* We assume we always know if a thread exits. If a whole process
965 exited but we still haven't been able to report it to GDB, we'll
966 hold on to the last lwp of the dead process. */
967 if (lwp != NULL)
968 return !lwp->dead;
0d62e5e8
DJ
969 else
970 return 0;
971}
972
/* Return 1 if this lwp has an interesting status pending.  ARG is a
   ptid filter: minus_one_ptid matches everything, otherwise only
   LWPs of the same process are considered.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
997
95954743
PA
998static int
999same_lwp (struct inferior_list_entry *entry, void *data)
1000{
1001 ptid_t ptid = *(ptid_t *) data;
1002 int lwp;
1003
1004 if (ptid_get_lwp (ptid) != 0)
1005 lwp = ptid_get_lwp (ptid);
1006 else
1007 lwp = ptid_get_pid (ptid);
1008
1009 if (ptid_get_lwp (entry->id) == lwp)
1010 return 1;
1011
1012 return 0;
1013}
1014
1015struct lwp_info *
1016find_lwp_pid (ptid_t ptid)
1017{
1018 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1019}
1020
bd99dc85 1021static struct lwp_info *
95954743 1022linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
611cb4a5 1023{
0d62e5e8 1024 int ret;
95954743 1025 int to_wait_for = -1;
bd99dc85 1026 struct lwp_info *child = NULL;
0d62e5e8 1027
bd99dc85 1028 if (debug_threads)
95954743
PA
1029 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1030
1031 if (ptid_equal (ptid, minus_one_ptid))
1032 to_wait_for = -1; /* any child */
1033 else
1034 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
0d62e5e8 1035
bd99dc85 1036 options |= __WALL;
0d62e5e8 1037
bd99dc85 1038retry:
0d62e5e8 1039
bd99dc85
PA
1040 ret = my_waitpid (to_wait_for, wstatp, options);
1041 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1042 return NULL;
1043 else if (ret == -1)
1044 perror_with_name ("waitpid");
0d62e5e8
DJ
1045
1046 if (debug_threads
1047 && (!WIFSTOPPED (*wstatp)
1048 || (WSTOPSIG (*wstatp) != 32
1049 && WSTOPSIG (*wstatp) != 33)))
1050 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1051
95954743 1052 child = find_lwp_pid (pid_to_ptid (ret));
0d62e5e8 1053
24a09b5f
DJ
1054 /* If we didn't find a process, one of two things presumably happened:
1055 - A process we started and then detached from has exited. Ignore it.
1056 - A process we are controlling has forked and the new child's stop
1057 was reported to us by the kernel. Save its PID. */
bd99dc85 1058 if (child == NULL && WIFSTOPPED (*wstatp))
24a09b5f
DJ
1059 {
1060 add_pid_to_list (&stopped_pids, ret);
1061 goto retry;
1062 }
bd99dc85 1063 else if (child == NULL)
24a09b5f
DJ
1064 goto retry;
1065
bd99dc85 1066 child->stopped = 1;
0d62e5e8 1067
bd99dc85 1068 child->last_status = *wstatp;
32ca6d61 1069
d61ddec4
UW
1070 /* Architecture-specific setup after inferior is running.
1071 This needs to happen after we have attached to the inferior
1072 and it is stopped for the first time, but before we access
1073 any inferior registers. */
1074 if (new_inferior)
1075 {
1076 the_low_target.arch_setup ();
52fa2412
UW
1077#ifdef HAVE_LINUX_REGSETS
1078 memset (disabled_regsets, 0, num_regsets);
1079#endif
d61ddec4
UW
1080 new_inferior = 0;
1081 }
1082
c3adc08c
PA
1083 /* Fetch the possibly triggered data watchpoint info and store it in
1084 CHILD.
1085
1086 On some archs, like x86, that use debug registers to set
1087 watchpoints, it's possible that the way to know which watched
1088 address trapped, is to check the register that is used to select
1089 which address to watch. Problem is, between setting the
1090 watchpoint and reading back which data address trapped, the user
1091 may change the set of watchpoints, and, as a consequence, GDB
1092 changes the debug registers in the inferior. To avoid reading
1093 back a stale stopped-data-address when that happens, we cache in
1094 LP the fact that a watchpoint trapped, and the corresponding data
1095 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1096 changes the debug registers meanwhile, we have the cached data we
1097 can rely on. */
1098
1099 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1100 {
1101 if (the_low_target.stopped_by_watchpoint == NULL)
1102 {
1103 child->stopped_by_watchpoint = 0;
1104 }
1105 else
1106 {
1107 struct thread_info *saved_inferior;
1108
1109 saved_inferior = current_inferior;
1110 current_inferior = get_lwp_thread (child);
1111
1112 child->stopped_by_watchpoint
1113 = the_low_target.stopped_by_watchpoint ();
1114
1115 if (child->stopped_by_watchpoint)
1116 {
1117 if (the_low_target.stopped_data_address != NULL)
1118 child->stopped_data_address
1119 = the_low_target.stopped_data_address ();
1120 else
1121 child->stopped_data_address = 0;
1122 }
1123
1124 current_inferior = saved_inferior;
1125 }
1126 }
1127
d50171e4
PA
1128 /* Store the STOP_PC, with adjustment applied. This depends on the
1129 architecture being defined already (so that CHILD has a valid
1130 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1131 not). */
1132 if (WIFSTOPPED (*wstatp))
1133 child->stop_pc = get_stop_pc (child);
1134
0d62e5e8 1135 if (debug_threads
47c0c975
DE
1136 && WIFSTOPPED (*wstatp)
1137 && the_low_target.get_pc != NULL)
0d62e5e8 1138 {
896c7fbb 1139 struct thread_info *saved_inferior = current_inferior;
bce522a2 1140 struct regcache *regcache;
47c0c975
DE
1141 CORE_ADDR pc;
1142
d50171e4 1143 current_inferior = get_lwp_thread (child);
bce522a2 1144 regcache = get_thread_regcache (current_inferior, 1);
442ea881 1145 pc = (*the_low_target.get_pc) (regcache);
47c0c975 1146 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
896c7fbb 1147 current_inferior = saved_inferior;
0d62e5e8 1148 }
bd99dc85
PA
1149
1150 return child;
0d62e5e8 1151}
611cb4a5 1152
219f2f23
PA
1153/* This function should only be called if the LWP got a SIGTRAP.
1154
1155 Handle any tracepoint steps or hits. Return true if a tracepoint
1156 event was handled, 0 otherwise. */
1157
1158static int
1159handle_tracepoints (struct lwp_info *lwp)
1160{
1161 struct thread_info *tinfo = get_lwp_thread (lwp);
1162 int tpoint_related_event = 0;
1163
7984d532
PA
1164 /* If this tracepoint hit causes a tracing stop, we'll immediately
1165 uninsert tracepoints. To do this, we temporarily pause all
1166 threads, unpatch away, and then unpause threads. We need to make
1167 sure the unpausing doesn't resume LWP too. */
1168 lwp->suspended++;
1169
219f2f23
PA
1170 /* And we need to be sure that any all-threads-stopping doesn't try
1171 to move threads out of the jump pads, as it could deadlock the
1172 inferior (LWP could be in the jump pad, maybe even holding the
1173 lock.) */
1174
1175 /* Do any necessary step collect actions. */
1176 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1177
fa593d66
PA
1178 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1179
219f2f23
PA
1180 /* See if we just hit a tracepoint and do its main collect
1181 actions. */
1182 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1183
7984d532
PA
1184 lwp->suspended--;
1185
1186 gdb_assert (lwp->suspended == 0);
fa593d66 1187 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1188
219f2f23
PA
1189 if (tpoint_related_event)
1190 {
1191 if (debug_threads)
1192 fprintf (stderr, "got a tracepoint event\n");
1193 return 1;
1194 }
1195
1196 return 0;
1197}
1198
fa593d66
PA
1199/* Convenience wrapper. Returns true if LWP is presently collecting a
1200 fast tracepoint. */
1201
1202static int
1203linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1204 struct fast_tpoint_collect_status *status)
1205{
1206 CORE_ADDR thread_area;
1207
1208 if (the_low_target.get_thread_area == NULL)
1209 return 0;
1210
1211 /* Get the thread area address. This is used to recognize which
1212 thread is which when tracing with the in-process agent library.
1213 We don't read anything from the address, and treat it as opaque;
1214 it's the address itself that we assume is unique per-thread. */
1215 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1216 return 0;
1217
1218 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1219}
1220
1221/* The reason we resume in the caller, is because we want to be able
1222 to pass lwp->status_pending as WSTAT, and we need to clear
1223 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1224 refuses to resume. */
1225
1226static int
1227maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1228{
1229 struct thread_info *saved_inferior;
1230
1231 saved_inferior = current_inferior;
1232 current_inferior = get_lwp_thread (lwp);
1233
1234 if ((wstat == NULL
1235 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1236 && supports_fast_tracepoints ()
1237 && in_process_agent_loaded ())
1238 {
1239 struct fast_tpoint_collect_status status;
1240 int r;
1241
1242 if (debug_threads)
1243 fprintf (stderr, "\
1244Checking whether LWP %ld needs to move out of the jump pad.\n",
1245 lwpid_of (lwp));
1246
1247 r = linux_fast_tracepoint_collecting (lwp, &status);
1248
1249 if (wstat == NULL
1250 || (WSTOPSIG (*wstat) != SIGILL
1251 && WSTOPSIG (*wstat) != SIGFPE
1252 && WSTOPSIG (*wstat) != SIGSEGV
1253 && WSTOPSIG (*wstat) != SIGBUS))
1254 {
1255 lwp->collecting_fast_tracepoint = r;
1256
1257 if (r != 0)
1258 {
1259 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1260 {
1261 /* Haven't executed the original instruction yet.
1262 Set breakpoint there, and wait till it's hit,
1263 then single-step until exiting the jump pad. */
1264 lwp->exit_jump_pad_bkpt
1265 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1266 }
1267
1268 if (debug_threads)
1269 fprintf (stderr, "\
1270Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1271 lwpid_of (lwp));
0cccb683 1272 current_inferior = saved_inferior;
fa593d66
PA
1273
1274 return 1;
1275 }
1276 }
1277 else
1278 {
1279 /* If we get a synchronous signal while collecting, *and*
1280 while executing the (relocated) original instruction,
1281 reset the PC to point at the tpoint address, before
1282 reporting to GDB. Otherwise, it's an IPA lib bug: just
1283 report the signal to GDB, and pray for the best. */
1284
1285 lwp->collecting_fast_tracepoint = 0;
1286
1287 if (r != 0
1288 && (status.adjusted_insn_addr <= lwp->stop_pc
1289 && lwp->stop_pc < status.adjusted_insn_addr_end))
1290 {
1291 siginfo_t info;
1292 struct regcache *regcache;
1293
1294 /* The si_addr on a few signals references the address
1295 of the faulting instruction. Adjust that as
1296 well. */
1297 if ((WSTOPSIG (*wstat) == SIGILL
1298 || WSTOPSIG (*wstat) == SIGFPE
1299 || WSTOPSIG (*wstat) == SIGBUS
1300 || WSTOPSIG (*wstat) == SIGSEGV)
1301 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1302 /* Final check just to make sure we don't clobber
1303 the siginfo of non-kernel-sent signals. */
1304 && (uintptr_t) info.si_addr == lwp->stop_pc)
1305 {
1306 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1307 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1308 }
1309
1310 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1311 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1312 lwp->stop_pc = status.tpoint_addr;
1313
1314 /* Cancel any fast tracepoint lock this thread was
1315 holding. */
1316 force_unlock_trace_buffer ();
1317 }
1318
1319 if (lwp->exit_jump_pad_bkpt != NULL)
1320 {
1321 if (debug_threads)
1322 fprintf (stderr,
1323 "Cancelling fast exit-jump-pad: removing bkpt. "
1324 "stopping all threads momentarily.\n");
1325
1326 stop_all_lwps (1, lwp);
1327 cancel_breakpoints ();
1328
1329 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1330 lwp->exit_jump_pad_bkpt = NULL;
1331
1332 unstop_all_lwps (1, lwp);
1333
1334 gdb_assert (lwp->suspended >= 0);
1335 }
1336 }
1337 }
1338
1339 if (debug_threads)
1340 fprintf (stderr, "\
1341Checking whether LWP %ld needs to move out of the jump pad...no\n",
1342 lwpid_of (lwp));
0cccb683
YQ
1343
1344 current_inferior = saved_inferior;
fa593d66
PA
1345 return 0;
1346}
1347
1348/* Enqueue one signal in the "signals to report later when out of the
1349 jump pad" list. */
1350
1351static void
1352enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1353{
1354 struct pending_signals *p_sig;
1355
1356 if (debug_threads)
1357 fprintf (stderr, "\
1358Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1359
1360 if (debug_threads)
1361 {
1362 struct pending_signals *sig;
1363
1364 for (sig = lwp->pending_signals_to_report;
1365 sig != NULL;
1366 sig = sig->prev)
1367 fprintf (stderr,
1368 " Already queued %d\n",
1369 sig->signal);
1370
1371 fprintf (stderr, " (no more currently queued signals)\n");
1372 }
1373
1a981360
PA
1374 /* Don't enqueue non-RT signals if they are already in the deferred
1375 queue. (SIGSTOP being the easiest signal to see ending up here
1376 twice) */
1377 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1378 {
1379 struct pending_signals *sig;
1380
1381 for (sig = lwp->pending_signals_to_report;
1382 sig != NULL;
1383 sig = sig->prev)
1384 {
1385 if (sig->signal == WSTOPSIG (*wstat))
1386 {
1387 if (debug_threads)
1388 fprintf (stderr,
1389 "Not requeuing already queued non-RT signal %d"
1390 " for LWP %ld\n",
1391 sig->signal,
1392 lwpid_of (lwp));
1393 return;
1394 }
1395 }
1396 }
1397
fa593d66
PA
1398 p_sig = xmalloc (sizeof (*p_sig));
1399 p_sig->prev = lwp->pending_signals_to_report;
1400 p_sig->signal = WSTOPSIG (*wstat);
1401 memset (&p_sig->info, 0, sizeof (siginfo_t));
1402 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1403
1404 lwp->pending_signals_to_report = p_sig;
1405}
1406
1407/* Dequeue one signal from the "signals to report later when out of
1408 the jump pad" list. */
1409
1410static int
1411dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1412{
1413 if (lwp->pending_signals_to_report != NULL)
1414 {
1415 struct pending_signals **p_sig;
1416
1417 p_sig = &lwp->pending_signals_to_report;
1418 while ((*p_sig)->prev != NULL)
1419 p_sig = &(*p_sig)->prev;
1420
1421 *wstat = W_STOPCODE ((*p_sig)->signal);
1422 if ((*p_sig)->info.si_signo != 0)
1423 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1424 free (*p_sig);
1425 *p_sig = NULL;
1426
1427 if (debug_threads)
1428 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1429 WSTOPSIG (*wstat), lwpid_of (lwp));
1430
1431 if (debug_threads)
1432 {
1433 struct pending_signals *sig;
1434
1435 for (sig = lwp->pending_signals_to_report;
1436 sig != NULL;
1437 sig = sig->prev)
1438 fprintf (stderr,
1439 " Still queued %d\n",
1440 sig->signal);
1441
1442 fprintf (stderr, " (no more queued signals)\n");
1443 }
1444
1445 return 1;
1446 }
1447
1448 return 0;
1449}
1450
d50171e4
PA
1451/* Arrange for a breakpoint to be hit again later. We don't keep the
1452 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1453 will handle the current event, eventually we will resume this LWP,
1454 and this breakpoint will trap again. */
1455
1456static int
1457cancel_breakpoint (struct lwp_info *lwp)
1458{
1459 struct thread_info *saved_inferior;
d50171e4
PA
1460
1461 /* There's nothing to do if we don't support breakpoints. */
1462 if (!supports_breakpoints ())
1463 return 0;
1464
d50171e4
PA
1465 /* breakpoint_at reads from current inferior. */
1466 saved_inferior = current_inferior;
1467 current_inferior = get_lwp_thread (lwp);
1468
1469 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1470 {
1471 if (debug_threads)
1472 fprintf (stderr,
1473 "CB: Push back breakpoint for %s\n",
fc7238bb 1474 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1475
1476 /* Back up the PC if necessary. */
1477 if (the_low_target.decr_pc_after_break)
1478 {
1479 struct regcache *regcache
fc7238bb 1480 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1481 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1482 }
1483
1484 current_inferior = saved_inferior;
1485 return 1;
1486 }
1487 else
1488 {
1489 if (debug_threads)
1490 fprintf (stderr,
1491 "CB: No breakpoint found at %s for [%s]\n",
1492 paddress (lwp->stop_pc),
fc7238bb 1493 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1494 }
1495
1496 current_inferior = saved_inferior;
1497 return 0;
1498}
1499
1500/* When the event-loop is doing a step-over, this points at the thread
1501 being stepped. */
1502ptid_t step_over_bkpt;
1503
bd99dc85
PA
1504/* Wait for an event from child PID. If PID is -1, wait for any
1505 child. Store the stop status through the status pointer WSTAT.
1506 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1507 event was found and OPTIONS contains WNOHANG. Return the PID of
1508 the stopped child otherwise. */
1509
0d62e5e8 1510static int
95954743 1511linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
0d62e5e8 1512{
d50171e4
PA
1513 struct lwp_info *event_child, *requested_child;
1514
d50171e4
PA
1515 event_child = NULL;
1516 requested_child = NULL;
0d62e5e8 1517
95954743 1518 /* Check for a lwp with a pending status. */
bd99dc85 1519
95954743
PA
1520 if (ptid_equal (ptid, minus_one_ptid)
1521 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
0d62e5e8 1522 {
54a0b537 1523 event_child = (struct lwp_info *)
d50171e4 1524 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1525 if (debug_threads && event_child)
bd99dc85 1526 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1527 }
1528 else
1529 {
95954743 1530 requested_child = find_lwp_pid (ptid);
d50171e4 1531
fa593d66
PA
1532 if (!stopping_threads
1533 && requested_child->status_pending_p
1534 && requested_child->collecting_fast_tracepoint)
1535 {
1536 enqueue_one_deferred_signal (requested_child,
1537 &requested_child->status_pending);
1538 requested_child->status_pending_p = 0;
1539 requested_child->status_pending = 0;
1540 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1541 }
1542
1543 if (requested_child->suspended
1544 && requested_child->status_pending_p)
1545 fatal ("requesting an event out of a suspended child?");
1546
d50171e4 1547 if (requested_child->status_pending_p)
bd99dc85 1548 event_child = requested_child;
0d62e5e8 1549 }
611cb4a5 1550
0d62e5e8
DJ
1551 if (event_child != NULL)
1552 {
bd99dc85
PA
1553 if (debug_threads)
1554 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1555 lwpid_of (event_child), event_child->status_pending);
1556 *wstat = event_child->status_pending;
1557 event_child->status_pending_p = 0;
1558 event_child->status_pending = 0;
1559 current_inferior = get_lwp_thread (event_child);
1560 return lwpid_of (event_child);
0d62e5e8
DJ
1561 }
1562
1563 /* We only enter this loop if no process has a pending wait status. Thus
1564 any action taken in response to a wait status inside this loop is
1565 responding as soon as we detect the status, not after any pending
1566 events. */
1567 while (1)
1568 {
6bf5e0ba 1569 event_child = linux_wait_for_lwp (ptid, wstat, options);
0d62e5e8 1570
bd99dc85 1571 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1572 {
1573 if (debug_threads)
1574 fprintf (stderr, "WNOHANG set, no event found\n");
1575 return 0;
1576 }
0d62e5e8
DJ
1577
1578 if (event_child == NULL)
1579 error ("event from unknown child");
611cb4a5 1580
bd99dc85 1581 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1582
89be2091 1583 /* Check for thread exit. */
bd99dc85 1584 if (! WIFSTOPPED (*wstat))
0d62e5e8 1585 {
89be2091 1586 if (debug_threads)
95954743 1587 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1588
1589 /* If the last thread is exiting, just return. */
95954743 1590 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1591 {
1592 if (debug_threads)
95954743
PA
1593 fprintf (stderr, "LWP %ld is last lwp of process\n",
1594 lwpid_of (event_child));
bd99dc85
PA
1595 return lwpid_of (event_child);
1596 }
89be2091 1597
bd99dc85
PA
1598 if (!non_stop)
1599 {
1600 current_inferior = (struct thread_info *) all_threads.head;
1601 if (debug_threads)
1602 fprintf (stderr, "Current inferior is now %ld\n",
1603 lwpid_of (get_thread_lwp (current_inferior)));
1604 }
1605 else
1606 {
1607 current_inferior = NULL;
1608 if (debug_threads)
1609 fprintf (stderr, "Current inferior is now <NULL>\n");
1610 }
89be2091
DJ
1611
1612 /* If we were waiting for this particular child to do something...
1613 well, it did something. */
bd99dc85 1614 if (requested_child != NULL)
d50171e4
PA
1615 {
1616 int lwpid = lwpid_of (event_child);
1617
1618 /* Cancel the step-over operation --- the thread that
1619 started it is gone. */
1620 if (finish_step_over (event_child))
7984d532 1621 unstop_all_lwps (1, event_child);
d50171e4
PA
1622 delete_lwp (event_child);
1623 return lwpid;
1624 }
1625
1626 delete_lwp (event_child);
89be2091
DJ
1627
1628 /* Wait for a more interesting event. */
1629 continue;
1630 }
1631
a6dbe5df
PA
1632 if (event_child->must_set_ptrace_flags)
1633 {
1e7fc18c 1634 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1635 event_child->must_set_ptrace_flags = 0;
1636 }
1637
bd99dc85
PA
1638 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1639 && *wstat >> 16 != 0)
24a09b5f 1640 {
bd99dc85 1641 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1642 continue;
1643 }
1644
d50171e4
PA
1645 if (WIFSTOPPED (*wstat)
1646 && WSTOPSIG (*wstat) == SIGSTOP
1647 && event_child->stop_expected)
1648 {
1649 int should_stop;
1650
1651 if (debug_threads)
1652 fprintf (stderr, "Expected stop.\n");
1653 event_child->stop_expected = 0;
1654
8336d594 1655 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1656 || stopping_threads);
1657
1658 if (!should_stop)
1659 {
1660 linux_resume_one_lwp (event_child,
1661 event_child->stepping, 0, NULL);
1662 continue;
1663 }
1664 }
1665
bd99dc85 1666 return lwpid_of (event_child);
611cb4a5 1667 }
0d62e5e8 1668
611cb4a5
DJ
1669 /* NOTREACHED */
1670 return 0;
1671}
1672
95954743
PA
1673static int
1674linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1675{
1676 ptid_t wait_ptid;
1677
1678 if (ptid_is_pid (ptid))
1679 {
1680 /* A request to wait for a specific tgid. This is not possible
1681 with waitpid, so instead, we wait for any child, and leave
1682 children we're not interested in right now with a pending
1683 status to report later. */
1684 wait_ptid = minus_one_ptid;
1685 }
1686 else
1687 wait_ptid = ptid;
1688
1689 while (1)
1690 {
1691 int event_pid;
1692
1693 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1694
1695 if (event_pid > 0
1696 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1697 {
493e2a69
MS
1698 struct lwp_info *event_child
1699 = find_lwp_pid (pid_to_ptid (event_pid));
95954743
PA
1700
1701 if (! WIFSTOPPED (*wstat))
1702 mark_lwp_dead (event_child, *wstat);
1703 else
1704 {
1705 event_child->status_pending_p = 1;
1706 event_child->status_pending = *wstat;
1707 }
1708 }
1709 else
1710 return event_pid;
1711 }
1712}
1713
6bf5e0ba
PA
1714
1715/* Count the LWP's that have had events. */
1716
1717static int
1718count_events_callback (struct inferior_list_entry *entry, void *data)
1719{
1720 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1721 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1722 int *count = data;
1723
1724 gdb_assert (count != NULL);
1725
1726 /* Count only resumed LWPs that have a SIGTRAP event pending that
1727 should be reported to GDB. */
8336d594
PA
1728 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1729 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1730 && lp->status_pending_p
1731 && WIFSTOPPED (lp->status_pending)
1732 && WSTOPSIG (lp->status_pending) == SIGTRAP
1733 && !breakpoint_inserted_here (lp->stop_pc))
1734 (*count)++;
1735
1736 return 0;
1737}
1738
1739/* Select the LWP (if any) that is currently being single-stepped. */
1740
1741static int
1742select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1743{
1744 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1745 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1746
8336d594
PA
1747 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1748 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1749 && lp->status_pending_p)
1750 return 1;
1751 else
1752 return 0;
1753}
1754
1755/* Select the Nth LWP that has had a SIGTRAP event that should be
1756 reported to GDB. */
1757
1758static int
1759select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1760{
1761 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1762 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1763 int *selector = data;
1764
1765 gdb_assert (selector != NULL);
1766
1767 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1768 if (thread->last_resume_kind != resume_stop
1769 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1770 && lp->status_pending_p
1771 && WIFSTOPPED (lp->status_pending)
1772 && WSTOPSIG (lp->status_pending) == SIGTRAP
1773 && !breakpoint_inserted_here (lp->stop_pc))
1774 if ((*selector)-- == 0)
1775 return 1;
1776
1777 return 0;
1778}
1779
1780static int
1781cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1782{
1783 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1784 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1785 struct lwp_info *event_lp = data;
1786
1787 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1788 if (lp == event_lp)
1789 return 0;
1790
1791 /* If a LWP other than the LWP that we're reporting an event for has
1792 hit a GDB breakpoint (as opposed to some random trap signal),
1793 then just arrange for it to hit it again later. We don't keep
1794 the SIGTRAP status and don't forward the SIGTRAP signal to the
1795 LWP. We will handle the current event, eventually we will resume
1796 all LWPs, and this one will get its breakpoint trap again.
1797
1798 If we do not do this, then we run the risk that the user will
1799 delete or disable the breakpoint, but the LWP will have already
1800 tripped on it. */
1801
8336d594
PA
1802 if (thread->last_resume_kind != resume_stop
1803 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1804 && lp->status_pending_p
1805 && WIFSTOPPED (lp->status_pending)
1806 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1807 && !lp->stepping
1808 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1809 && cancel_breakpoint (lp))
1810 /* Throw away the SIGTRAP. */
1811 lp->status_pending_p = 0;
1812
1813 return 0;
1814}
1815
7984d532
PA
1816static void
1817linux_cancel_breakpoints (void)
1818{
1819 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1820}
1821
6bf5e0ba
PA
1822/* Select one LWP out of those that have events pending. */
1823
1824static void
1825select_event_lwp (struct lwp_info **orig_lp)
1826{
1827 int num_events = 0;
1828 int random_selector;
1829 struct lwp_info *event_lp;
1830
1831 /* Give preference to any LWP that is being single-stepped. */
1832 event_lp
1833 = (struct lwp_info *) find_inferior (&all_lwps,
1834 select_singlestep_lwp_callback, NULL);
1835 if (event_lp != NULL)
1836 {
1837 if (debug_threads)
1838 fprintf (stderr,
1839 "SEL: Select single-step %s\n",
1840 target_pid_to_str (ptid_of (event_lp)));
1841 }
1842 else
1843 {
1844 /* No single-stepping LWP. Select one at random, out of those
1845 which have had SIGTRAP events. */
1846
1847 /* First see how many SIGTRAP events we have. */
1848 find_inferior (&all_lwps, count_events_callback, &num_events);
1849
1850 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1851 random_selector = (int)
1852 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1853
1854 if (debug_threads && num_events > 1)
1855 fprintf (stderr,
1856 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1857 num_events, random_selector);
1858
1859 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1860 select_event_lwp_callback,
1861 &random_selector);
1862 }
1863
1864 if (event_lp != NULL)
1865 {
1866 /* Switch the event LWP. */
1867 *orig_lp = event_lp;
1868 }
1869}
1870
7984d532
PA
1871/* Decrement the suspend count of an LWP. */
1872
1873static int
1874unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1875{
1876 struct lwp_info *lwp = (struct lwp_info *) entry;
1877
1878 /* Ignore EXCEPT. */
1879 if (lwp == except)
1880 return 0;
1881
1882 lwp->suspended--;
1883
1884 gdb_assert (lwp->suspended >= 0);
1885 return 0;
1886}
1887
1888/* Decrement the suspend count of all LWPs, except EXCEPT, if non
1889 NULL. */
1890
1891static void
1892unsuspend_all_lwps (struct lwp_info *except)
1893{
1894 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1895}
1896
fa593d66
PA
1897static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1898static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1899 void *data);
1900static int lwp_running (struct inferior_list_entry *entry, void *data);
1901static ptid_t linux_wait_1 (ptid_t ptid,
1902 struct target_waitstatus *ourstatus,
1903 int target_options);
1904
1905/* Stabilize threads (move out of jump pads).
1906
1907 If a thread is midway collecting a fast tracepoint, we need to
1908 finish the collection and move it out of the jump pad before
1909 reporting the signal.
1910
1911 This avoids recursion while collecting (when a signal arrives
1912 midway, and the signal handler itself collects), which would trash
1913 the trace buffer. In case the user set a breakpoint in a signal
1914 handler, this avoids the backtrace showing the jump pad, etc..
1915 Most importantly, there are certain things we can't do safely if
1916 threads are stopped in a jump pad (or in its callee's). For
1917 example:
1918
1919 - starting a new trace run. A thread still collecting the
1920 previous run, could trash the trace buffer when resumed. The trace
1921 buffer control structures would have been reset but the thread had
1922 no way to tell. The thread could even midway memcpy'ing to the
1923 buffer, which would mean that when resumed, it would clobber the
1924 trace buffer that had been set for a new run.
1925
1926 - we can't rewrite/reuse the jump pads for new tracepoints
1927 safely. Say you do tstart while a thread is stopped midway while
1928 collecting. When the thread is later resumed, it finishes the
1929 collection, and returns to the jump pad, to execute the original
1930 instruction that was under the tracepoint jump at the time the
1931 older run had been started. If the jump pad had been rewritten
1932 since for something else in the new run, the thread would now
1933 execute the wrong / random instructions. */
1934
1935static void
1936linux_stabilize_threads (void)
1937{
1938 struct thread_info *save_inferior;
1939 struct lwp_info *lwp_stuck;
1940
1941 lwp_stuck
1942 = (struct lwp_info *) find_inferior (&all_lwps,
1943 stuck_in_jump_pad_callback, NULL);
1944 if (lwp_stuck != NULL)
1945 {
b4d51a55
PA
1946 if (debug_threads)
1947 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
1948 lwpid_of (lwp_stuck));
fa593d66
PA
1949 return;
1950 }
1951
1952 save_inferior = current_inferior;
1953
1954 stabilizing_threads = 1;
1955
1956 /* Kick 'em all. */
1957 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
1958
1959 /* Loop until all are stopped out of the jump pads. */
1960 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
1961 {
1962 struct target_waitstatus ourstatus;
1963 struct lwp_info *lwp;
fa593d66
PA
1964 int wstat;
1965
1966 /* Note that we go through the full wait even loop. While
1967 moving threads out of jump pad, we need to be able to step
1968 over internal breakpoints and such. */
32fcada3 1969 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
1970
1971 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
1972 {
1973 lwp = get_thread_lwp (current_inferior);
1974
1975 /* Lock it. */
1976 lwp->suspended++;
1977
1978 if (ourstatus.value.sig != TARGET_SIGNAL_0
1979 || current_inferior->last_resume_kind == resume_stop)
1980 {
1981 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
1982 enqueue_one_deferred_signal (lwp, &wstat);
1983 }
1984 }
1985 }
1986
1987 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
1988
1989 stabilizing_threads = 0;
1990
1991 current_inferior = save_inferior;
1992
b4d51a55 1993 if (debug_threads)
fa593d66 1994 {
b4d51a55
PA
1995 lwp_stuck
1996 = (struct lwp_info *) find_inferior (&all_lwps,
1997 stuck_in_jump_pad_callback, NULL);
1998 if (lwp_stuck != NULL)
fa593d66
PA
1999 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2000 lwpid_of (lwp_stuck));
2001 }
2002}
2003
0d62e5e8 2004/* Wait for process, returns status. */
da6d8c04 2005
95954743
PA
2006static ptid_t
2007linux_wait_1 (ptid_t ptid,
2008 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2009{
e5f1222d 2010 int w;
fc7238bb 2011 struct lwp_info *event_child;
bd99dc85 2012 int options;
bd99dc85 2013 int pid;
6bf5e0ba
PA
2014 int step_over_finished;
2015 int bp_explains_trap;
2016 int maybe_internal_trap;
2017 int report_to_gdb;
219f2f23 2018 int trace_event;
bd99dc85
PA
2019
2020 /* Translate generic target options into linux options. */
2021 options = __WALL;
2022 if (target_options & TARGET_WNOHANG)
2023 options |= WNOHANG;
0d62e5e8
DJ
2024
2025retry:
fa593d66
PA
2026 bp_explains_trap = 0;
2027 trace_event = 0;
bd99dc85
PA
2028 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2029
0d62e5e8
DJ
2030 /* If we were only supposed to resume one thread, only wait for
2031 that thread - if it's still alive. If it died, however - which
2032 can happen if we're coming from the thread death case below -
2033 then we need to make sure we restart the other threads. We could
2034 pick a thread at random or restart all; restarting all is less
2035 arbitrary. */
95954743
PA
2036 if (!non_stop
2037 && !ptid_equal (cont_thread, null_ptid)
2038 && !ptid_equal (cont_thread, minus_one_ptid))
0d62e5e8 2039 {
fc7238bb
PA
2040 struct thread_info *thread;
2041
bd99dc85
PA
2042 thread = (struct thread_info *) find_inferior_id (&all_threads,
2043 cont_thread);
0d62e5e8
DJ
2044
2045 /* No stepping, no signal - unless one is pending already, of course. */
bd99dc85 2046 if (thread == NULL)
64386c31
DJ
2047 {
2048 struct thread_resume resume_info;
95954743 2049 resume_info.thread = minus_one_ptid;
bd99dc85
PA
2050 resume_info.kind = resume_continue;
2051 resume_info.sig = 0;
2bd7c093 2052 linux_resume (&resume_info, 1);
64386c31 2053 }
bd99dc85 2054 else
95954743 2055 ptid = cont_thread;
0d62e5e8 2056 }
da6d8c04 2057
6bf5e0ba
PA
2058 if (ptid_equal (step_over_bkpt, null_ptid))
2059 pid = linux_wait_for_event (ptid, &w, options);
2060 else
2061 {
2062 if (debug_threads)
2063 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2064 target_pid_to_str (step_over_bkpt));
2065 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2066 }
2067
bd99dc85 2068 if (pid == 0) /* only if TARGET_WNOHANG */
95954743 2069 return null_ptid;
bd99dc85 2070
6bf5e0ba 2071 event_child = get_thread_lwp (current_inferior);
da6d8c04 2072
0d62e5e8
DJ
2073 /* If we are waiting for a particular child, and it exited,
2074 linux_wait_for_event will return its exit status. Similarly if
2075 the last child exited. If this is not the last child, however,
2076 do not report it as exited until there is a 'thread exited' response
2077 available in the remote protocol. Instead, just wait for another event.
2078 This should be safe, because if the thread crashed we will already
2079 have reported the termination signal to GDB; that should stop any
2080 in-progress stepping operations, etc.
2081
2082 Report the exit status of the last thread to exit. This matches
2083 LinuxThreads' behavior. */
2084
95954743 2085 if (last_thread_of_process_p (current_inferior))
da6d8c04 2086 {
bd99dc85 2087 if (WIFEXITED (w) || WIFSIGNALED (w))
0d62e5e8 2088 {
bd99dc85
PA
2089 if (WIFEXITED (w))
2090 {
2091 ourstatus->kind = TARGET_WAITKIND_EXITED;
2092 ourstatus->value.integer = WEXITSTATUS (w);
2093
2094 if (debug_threads)
493e2a69
MS
2095 fprintf (stderr,
2096 "\nChild exited with retcode = %x \n",
2097 WEXITSTATUS (w));
bd99dc85
PA
2098 }
2099 else
2100 {
2101 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2102 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2103
2104 if (debug_threads)
493e2a69
MS
2105 fprintf (stderr,
2106 "\nChild terminated with signal = %x \n",
2107 WTERMSIG (w));
bd99dc85
PA
2108
2109 }
5b1c542e 2110
3e4c1235 2111 return ptid_of (event_child);
0d62e5e8 2112 }
da6d8c04 2113 }
0d62e5e8 2114 else
da6d8c04 2115 {
0d62e5e8
DJ
2116 if (!WIFSTOPPED (w))
2117 goto retry;
da6d8c04
DJ
2118 }
2119
6bf5e0ba
PA
2120 /* If this event was not handled before, and is not a SIGTRAP, we
2121 report it. SIGILL and SIGSEGV are also treated as traps in case
2122 a breakpoint is inserted at the current PC. If this target does
2123 not support internal breakpoints at all, we also report the
2124 SIGTRAP without further processing; it's of no concern to us. */
2125 maybe_internal_trap
2126 = (supports_breakpoints ()
2127 && (WSTOPSIG (w) == SIGTRAP
2128 || ((WSTOPSIG (w) == SIGILL
2129 || WSTOPSIG (w) == SIGSEGV)
2130 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2131
2132 if (maybe_internal_trap)
2133 {
2134 /* Handle anything that requires bookkeeping before deciding to
2135 report the event or continue waiting. */
2136
2137 /* First check if we can explain the SIGTRAP with an internal
2138 breakpoint, or if we should possibly report the event to GDB.
2139 Do this before anything that may remove or insert a
2140 breakpoint. */
2141 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2142
2143 /* We have a SIGTRAP, possibly a step-over dance has just
2144 finished. If so, tweak the state machine accordingly,
2145 reinsert breakpoints and delete any reinsert (software
2146 single-step) breakpoints. */
2147 step_over_finished = finish_step_over (event_child);
2148
2149 /* Now invoke the callbacks of any internal breakpoints there. */
2150 check_breakpoints (event_child->stop_pc);
2151
219f2f23
PA
2152 /* Handle tracepoint data collecting. This may overflow the
2153 trace buffer, and cause a tracing stop, removing
2154 breakpoints. */
2155 trace_event = handle_tracepoints (event_child);
2156
6bf5e0ba
PA
2157 if (bp_explains_trap)
2158 {
2159 /* If we stepped or ran into an internal breakpoint, we've
2160 already handled it. So next time we resume (from this
2161 PC), we should step over it. */
2162 if (debug_threads)
2163 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2164
8b07ae33
PA
2165 if (breakpoint_here (event_child->stop_pc))
2166 event_child->need_step_over = 1;
6bf5e0ba
PA
2167 }
2168 }
2169 else
2170 {
2171 /* We have some other signal, possibly a step-over dance was in
2172 progress, and it should be cancelled too. */
2173 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2174 }
2175
2176 /* We have all the data we need. Either report the event to GDB, or
2177 resume threads and keep waiting for more. */
2178
2179 /* If we're collecting a fast tracepoint, finish the collection and
2180 move out of the jump pad before delivering a signal. See
2181 linux_stabilize_threads. */
2182
2183 if (WIFSTOPPED (w)
2184 && WSTOPSIG (w) != SIGTRAP
2185 && supports_fast_tracepoints ()
2186 && in_process_agent_loaded ())
2187 {
2188 if (debug_threads)
2189 fprintf (stderr,
2190 "Got signal %d for LWP %ld. Check if we need "
2191 "to defer or adjust it.\n",
2192 WSTOPSIG (w), lwpid_of (event_child));
2193
2194 /* Allow debugging the jump pad itself. */
2195 if (current_inferior->last_resume_kind != resume_step
2196 && maybe_move_out_of_jump_pad (event_child, &w))
2197 {
2198 enqueue_one_deferred_signal (event_child, &w);
2199
2200 if (debug_threads)
2201 fprintf (stderr,
2202 "Signal %d for LWP %ld deferred (in jump pad)\n",
2203 WSTOPSIG (w), lwpid_of (event_child));
2204
2205 linux_resume_one_lwp (event_child, 0, 0, NULL);
2206 goto retry;
2207 }
2208 }
219f2f23 2209
fa593d66
PA
2210 if (event_child->collecting_fast_tracepoint)
2211 {
2212 if (debug_threads)
2213 fprintf (stderr, "\
2214LWP %ld was trying to move out of the jump pad (%d). \
2215Check if we're already there.\n",
2216 lwpid_of (event_child),
2217 event_child->collecting_fast_tracepoint);
2218
2219 trace_event = 1;
2220
2221 event_child->collecting_fast_tracepoint
2222 = linux_fast_tracepoint_collecting (event_child, NULL);
2223
2224 if (event_child->collecting_fast_tracepoint != 1)
2225 {
2226 /* No longer need this breakpoint. */
2227 if (event_child->exit_jump_pad_bkpt != NULL)
2228 {
2229 if (debug_threads)
2230 fprintf (stderr,
2231 "No longer need exit-jump-pad bkpt; removing it."
2232 "stopping all threads momentarily.\n");
2233
2234 /* Other running threads could hit this breakpoint.
2235 We don't handle moribund locations like GDB does,
2236 instead we always pause all threads when removing
2237 breakpoints, so that any step-over or
2238 decr_pc_after_break adjustment is always taken
2239 care of while the breakpoint is still
2240 inserted. */
2241 stop_all_lwps (1, event_child);
2242 cancel_breakpoints ();
2243
2244 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2245 event_child->exit_jump_pad_bkpt = NULL;
2246
2247 unstop_all_lwps (1, event_child);
2248
2249 gdb_assert (event_child->suspended >= 0);
2250 }
2251 }
2252
2253 if (event_child->collecting_fast_tracepoint == 0)
2254 {
2255 if (debug_threads)
2256 fprintf (stderr,
2257 "fast tracepoint finished "
2258 "collecting successfully.\n");
2259
2260 /* We may have a deferred signal to report. */
2261 if (dequeue_one_deferred_signal (event_child, &w))
2262 {
2263 if (debug_threads)
2264 fprintf (stderr, "dequeued one signal.\n");
2265 }
3c11dd79 2266 else
fa593d66 2267 {
3c11dd79
PA
2268 if (debug_threads)
2269 fprintf (stderr, "no deferred signals.\n");
fa593d66
PA
2270
2271 if (stabilizing_threads)
2272 {
2273 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2274 ourstatus->value.sig = TARGET_SIGNAL_0;
2275 return ptid_of (event_child);
2276 }
2277 }
2278 }
6bf5e0ba
PA
2279 }
2280
e471f25b
PA
2281 /* Check whether GDB would be interested in this event. */
2282
2283 /* If GDB is not interested in this signal, don't stop other
2284 threads, and don't report it to GDB. Just resume the inferior
2285 right away. We do this for threading-related signals as well as
2286 any that GDB specifically requested we ignore. But never ignore
2287 SIGSTOP if we sent it ourselves, and do not ignore signals when
2288 stepping - they may require special handling to skip the signal
2289 handler. */
2290 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2291 thread library? */
2292 if (WIFSTOPPED (w)
2293 && current_inferior->last_resume_kind != resume_step
2294 && (
1a981360 2295#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
e471f25b
PA
2296 (current_process ()->private->thread_db != NULL
2297 && (WSTOPSIG (w) == __SIGRTMIN
2298 || WSTOPSIG (w) == __SIGRTMIN + 1))
2299 ||
2300#endif
2301 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2302 && !(WSTOPSIG (w) == SIGSTOP
2303 && current_inferior->last_resume_kind == resume_stop))))
2304 {
2305 siginfo_t info, *info_p;
2306
2307 if (debug_threads)
2308 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2309 WSTOPSIG (w), lwpid_of (event_child));
2310
2311 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2312 info_p = &info;
2313 else
2314 info_p = NULL;
2315 linux_resume_one_lwp (event_child, event_child->stepping,
2316 WSTOPSIG (w), info_p);
2317 goto retry;
2318 }
2319
2320 /* If GDB wanted this thread to single step, we always want to
2321 report the SIGTRAP, and let GDB handle it. Watchpoints should
2322 always be reported. So should signals we can't explain. A
2323 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2324 not support Z0 breakpoints. If we do, we're be able to handle
2325 GDB breakpoints on top of internal breakpoints, by handling the
2326 internal breakpoint and still reporting the event to GDB. If we
2327 don't, we're out of luck, GDB won't see the breakpoint hit. */
6bf5e0ba 2328 report_to_gdb = (!maybe_internal_trap
8336d594 2329 || current_inferior->last_resume_kind == resume_step
6bf5e0ba 2330 || event_child->stopped_by_watchpoint
493e2a69
MS
2331 || (!step_over_finished
2332 && !bp_explains_trap && !trace_event)
8b07ae33 2333 || gdb_breakpoint_here (event_child->stop_pc));
6bf5e0ba
PA
2334
2335 /* We found no reason GDB would want us to stop. We either hit one
2336 of our own breakpoints, or finished an internal step GDB
2337 shouldn't know about. */
2338 if (!report_to_gdb)
2339 {
2340 if (debug_threads)
2341 {
2342 if (bp_explains_trap)
2343 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2344 if (step_over_finished)
2345 fprintf (stderr, "Step-over finished.\n");
219f2f23
PA
2346 if (trace_event)
2347 fprintf (stderr, "Tracepoint event.\n");
6bf5e0ba
PA
2348 }
2349
2350 /* We're not reporting this breakpoint to GDB, so apply the
2351 decr_pc_after_break adjustment to the inferior's regcache
2352 ourselves. */
2353
2354 if (the_low_target.set_pc != NULL)
2355 {
2356 struct regcache *regcache
2357 = get_thread_regcache (get_lwp_thread (event_child), 1);
2358 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2359 }
2360
7984d532
PA
2361 /* We may have finished stepping over a breakpoint. If so,
2362 we've stopped and suspended all LWPs momentarily except the
2363 stepping one. This is where we resume them all again. We're
2364 going to keep waiting, so use proceed, which handles stepping
2365 over the next breakpoint. */
6bf5e0ba
PA
2366 if (debug_threads)
2367 fprintf (stderr, "proceeding all threads.\n");
7984d532
PA
2368
2369 if (step_over_finished)
2370 unsuspend_all_lwps (event_child);
2371
6bf5e0ba
PA
2372 proceed_all_lwps ();
2373 goto retry;
2374 }
2375
2376 if (debug_threads)
2377 {
8336d594 2378 if (current_inferior->last_resume_kind == resume_step)
6bf5e0ba
PA
2379 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2380 if (event_child->stopped_by_watchpoint)
2381 fprintf (stderr, "Stopped by watchpoint.\n");
8b07ae33
PA
2382 if (gdb_breakpoint_here (event_child->stop_pc))
2383 fprintf (stderr, "Stopped by GDB breakpoint.\n");
6bf5e0ba
PA
2384 if (debug_threads)
2385 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2386 }
2387
2388 /* Alright, we're going to report a stop. */
2389
fa593d66 2390 if (!non_stop && !stabilizing_threads)
6bf5e0ba
PA
2391 {
2392 /* In all-stop, stop all threads. */
7984d532 2393 stop_all_lwps (0, NULL);
6bf5e0ba
PA
2394
2395 /* If we're not waiting for a specific LWP, choose an event LWP
2396 from among those that have had events. Giving equal priority
2397 to all LWPs that have had events helps prevent
2398 starvation. */
2399 if (ptid_equal (ptid, minus_one_ptid))
2400 {
2401 event_child->status_pending_p = 1;
2402 event_child->status_pending = w;
2403
2404 select_event_lwp (&event_child);
2405
2406 event_child->status_pending_p = 0;
2407 w = event_child->status_pending;
2408 }
2409
2410 /* Now that we've selected our final event LWP, cancel any
2411 breakpoints in other LWPs that have hit a GDB breakpoint.
2412 See the comment in cancel_breakpoints_callback to find out
2413 why. */
2414 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
fa593d66
PA
2415
2416 /* Stabilize threads (move out of jump pads). */
2417 stabilize_threads ();
6bf5e0ba
PA
2418 }
2419 else
2420 {
2421 /* If we just finished a step-over, then all threads had been
2422 momentarily paused. In all-stop, that's fine, we want
2423 threads stopped by now anyway. In non-stop, we need to
2424 re-resume threads that GDB wanted to be running. */
2425 if (step_over_finished)
7984d532 2426 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
2427 }
2428
5b1c542e 2429 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2430
8336d594
PA
2431 if (current_inferior->last_resume_kind == resume_stop
2432 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
2433 {
2434 /* A thread that has been requested to stop by GDB with vCont;t,
2435 and it stopped cleanly, so report as SIG0. The use of
2436 SIGSTOP is an implementation detail. */
2437 ourstatus->value.sig = TARGET_SIGNAL_0;
2438 }
8336d594
PA
2439 else if (current_inferior->last_resume_kind == resume_stop
2440 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
2441 {
2442 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2443 but, it stopped for other reasons. */
bd99dc85
PA
2444 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2445 }
2446 else
2447 {
2448 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2449 }
2450
d50171e4
PA
2451 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2452
bd99dc85 2453 if (debug_threads)
95954743 2454 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
6bf5e0ba 2455 target_pid_to_str (ptid_of (event_child)),
bd99dc85
PA
2456 ourstatus->kind,
2457 ourstatus->value.sig);
2458
6bf5e0ba 2459 return ptid_of (event_child);
bd99dc85
PA
2460}
2461
2462/* Get rid of any pending event in the pipe. */
2463static void
2464async_file_flush (void)
2465{
2466 int ret;
2467 char buf;
2468
2469 do
2470 ret = read (linux_event_pipe[0], &buf, 1);
2471 while (ret >= 0 || (ret == -1 && errno == EINTR));
2472}
2473
2474/* Put something in the pipe, so the event loop wakes up. */
2475static void
2476async_file_mark (void)
2477{
2478 int ret;
2479
2480 async_file_flush ();
2481
2482 do
2483 ret = write (linux_event_pipe[1], "+", 1);
2484 while (ret == 0 || (ret == -1 && errno == EINTR));
2485
2486 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2487 be awakened anyway. */
2488}
2489
95954743
PA
2490static ptid_t
2491linux_wait (ptid_t ptid,
2492 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 2493{
95954743 2494 ptid_t event_ptid;
bd99dc85
PA
2495
2496 if (debug_threads)
95954743 2497 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
2498
2499 /* Flush the async file first. */
2500 if (target_is_async_p ())
2501 async_file_flush ();
2502
95954743 2503 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
2504
2505 /* If at least one stop was reported, there may be more. A single
2506 SIGCHLD can signal more than one child stop. */
2507 if (target_is_async_p ()
2508 && (target_options & TARGET_WNOHANG) != 0
95954743 2509 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
2510 async_file_mark ();
2511
2512 return event_ptid;
da6d8c04
DJ
2513}
2514
c5f62d5f 2515/* Send a signal to an LWP. */
fd500816
DJ
2516
/* Send signal SIGNO to LWP LWPID.  Prefers the thread-directed tkill
   syscall (needed for NPTL, where kill() targets the whole process);
   falls back to kill() once tkill is known to be unavailable.
   Returns the syscall's result (0 on success, -1 with errno set).  */
static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    /* Remember a definitive ENOSYS so we only probe tkill once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
2542
964e4306
PA
/* Public entry point to request that LWP stop: simply queues a
   SIGSTOP for it (see send_sigstop).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2548
0d62e5e8 2549static void
02fc4de7 2550send_sigstop (struct lwp_info *lwp)
0d62e5e8 2551{
bd99dc85 2552 int pid;
0d62e5e8 2553
bd99dc85
PA
2554 pid = lwpid_of (lwp);
2555
0d62e5e8
DJ
2556 /* If we already have a pending stop signal for this process, don't
2557 send another. */
54a0b537 2558 if (lwp->stop_expected)
0d62e5e8 2559 {
ae13219e 2560 if (debug_threads)
bd99dc85 2561 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2562
0d62e5e8
DJ
2563 return;
2564 }
2565
2566 if (debug_threads)
bd99dc85 2567 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2568
d50171e4 2569 lwp->stop_expected = 1;
bd99dc85 2570 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2571}
2572
7984d532
PA
2573static int
2574send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2575{
2576 struct lwp_info *lwp = (struct lwp_info *) entry;
2577
7984d532
PA
2578 /* Ignore EXCEPT. */
2579 if (lwp == except)
2580 return 0;
2581
02fc4de7 2582 if (lwp->stopped)
7984d532 2583 return 0;
02fc4de7
PA
2584
2585 send_sigstop (lwp);
7984d532
PA
2586 return 0;
2587}
2588
2589/* Increment the suspend count of an LWP, and stop it, if not stopped
2590 yet. */
2591static int
2592suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2593 void *except)
2594{
2595 struct lwp_info *lwp = (struct lwp_info *) entry;
2596
2597 /* Ignore EXCEPT. */
2598 if (lwp == except)
2599 return 0;
2600
2601 lwp->suspended++;
2602
2603 return send_sigstop_callback (entry, except);
02fc4de7
PA
2604}
2605
95954743
PA
2606static void
2607mark_lwp_dead (struct lwp_info *lwp, int wstat)
2608{
2609 /* It's dead, really. */
2610 lwp->dead = 1;
2611
2612 /* Store the exit status for later. */
2613 lwp->status_pending_p = 1;
2614 lwp->status_pending = wstat;
2615
95954743
PA
2616 /* Prevent trying to stop it. */
2617 lwp->stopped = 1;
2618
2619 /* No further stops are expected from a dead lwp. */
2620 lwp->stop_expected = 0;
2621}
2622
0d62e5e8
DJ
2623static void
2624wait_for_sigstop (struct inferior_list_entry *entry)
2625{
54a0b537 2626 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2627 struct thread_info *saved_inferior;
a1928bad 2628 int wstat;
95954743
PA
2629 ptid_t saved_tid;
2630 ptid_t ptid;
d50171e4 2631 int pid;
0d62e5e8 2632
54a0b537 2633 if (lwp->stopped)
d50171e4
PA
2634 {
2635 if (debug_threads)
2636 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2637 lwpid_of (lwp));
2638 return;
2639 }
0d62e5e8
DJ
2640
2641 saved_inferior = current_inferior;
bd99dc85
PA
2642 if (saved_inferior != NULL)
2643 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2644 else
95954743 2645 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2646
95954743 2647 ptid = lwp->head.id;
bd99dc85 2648
d50171e4
PA
2649 if (debug_threads)
2650 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2651
2652 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2653
2654 /* If we stopped with a non-SIGSTOP signal, save it for later
2655 and record the pending SIGSTOP. If the process exited, just
2656 return. */
d50171e4 2657 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2658 {
2659 if (debug_threads)
d50171e4
PA
2660 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2661 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2662
d50171e4 2663 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2664 {
2665 if (debug_threads)
d50171e4
PA
2666 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2667 lwpid_of (lwp), wstat);
2668
c35fafde
PA
2669 lwp->status_pending_p = 1;
2670 lwp->status_pending = wstat;
2671 }
0d62e5e8 2672 }
d50171e4 2673 else
95954743
PA
2674 {
2675 if (debug_threads)
d50171e4 2676 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2677
d50171e4
PA
2678 lwp = find_lwp_pid (pid_to_ptid (pid));
2679 if (lwp)
2680 {
2681 /* Leave this status pending for the next time we're able to
2682 report it. In the mean time, we'll report this lwp as
2683 dead to GDB, so GDB doesn't try to read registers and
2684 memory from it. This can only happen if this was the
2685 last thread of the process; otherwise, PID is removed
2686 from the thread tables before linux_wait_for_event
2687 returns. */
2688 mark_lwp_dead (lwp, wstat);
2689 }
95954743 2690 }
0d62e5e8 2691
bd99dc85 2692 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2693 current_inferior = saved_inferior;
2694 else
2695 {
2696 if (debug_threads)
2697 fprintf (stderr, "Previously current thread died.\n");
2698
bd99dc85
PA
2699 if (non_stop)
2700 {
2701 /* We can't change the current inferior behind GDB's back,
2702 otherwise, a subsequent command may apply to the wrong
2703 process. */
2704 current_inferior = NULL;
2705 }
2706 else
2707 {
2708 /* Set a valid thread as current. */
2709 set_desired_inferior (0);
2710 }
0d62e5e8
DJ
2711 }
2712}
2713
fa593d66
PA
2714/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2715 move it out, because we need to report the stop event to GDB. For
2716 example, if the user puts a breakpoint in the jump pad, it's
2717 because she wants to debug it. */
2718
2719static int
2720stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2721{
2722 struct lwp_info *lwp = (struct lwp_info *) entry;
2723 struct thread_info *thread = get_lwp_thread (lwp);
2724
2725 gdb_assert (lwp->suspended == 0);
2726 gdb_assert (lwp->stopped);
2727
2728 /* Allow debugging the jump pad, gdb_collect, etc.. */
2729 return (supports_fast_tracepoints ()
2730 && in_process_agent_loaded ()
2731 && (gdb_breakpoint_here (lwp->stop_pc)
2732 || lwp->stopped_by_watchpoint
2733 || thread->last_resume_kind == resume_step)
2734 && linux_fast_tracepoint_collecting (lwp, NULL));
2735}
2736
2737static void
2738move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2739{
2740 struct lwp_info *lwp = (struct lwp_info *) entry;
2741 struct thread_info *thread = get_lwp_thread (lwp);
2742 int *wstat;
2743
2744 gdb_assert (lwp->suspended == 0);
2745 gdb_assert (lwp->stopped);
2746
2747 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2748
2749 /* Allow debugging the jump pad, gdb_collect, etc. */
2750 if (!gdb_breakpoint_here (lwp->stop_pc)
2751 && !lwp->stopped_by_watchpoint
2752 && thread->last_resume_kind != resume_step
2753 && maybe_move_out_of_jump_pad (lwp, wstat))
2754 {
2755 if (debug_threads)
2756 fprintf (stderr,
2757 "LWP %ld needs stabilizing (in jump pad)\n",
2758 lwpid_of (lwp));
2759
2760 if (wstat)
2761 {
2762 lwp->status_pending_p = 0;
2763 enqueue_one_deferred_signal (lwp, wstat);
2764
2765 if (debug_threads)
2766 fprintf (stderr,
2767 "Signal %d for LWP %ld deferred "
2768 "(in jump pad)\n",
2769 WSTOPSIG (*wstat), lwpid_of (lwp));
2770 }
2771
2772 linux_resume_one_lwp (lwp, 0, 0, NULL);
2773 }
2774 else
2775 lwp->suspended++;
2776}
2777
2778static int
2779lwp_running (struct inferior_list_entry *entry, void *data)
2780{
2781 struct lwp_info *lwp = (struct lwp_info *) entry;
2782
2783 if (lwp->dead)
2784 return 0;
2785 if (lwp->stopped)
2786 return 0;
2787 return 1;
2788}
2789
7984d532
PA
2790/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2791 If SUSPEND, then also increase the suspend count of every LWP,
2792 except EXCEPT. */
2793
0d62e5e8 2794static void
7984d532 2795stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
2796{
2797 stopping_threads = 1;
7984d532
PA
2798
2799 if (suspend)
2800 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2801 else
2802 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 2803 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2804 stopping_threads = 0;
2805}
2806
da6d8c04
DJ
2807/* Resume execution of the inferior process.
2808 If STEP is nonzero, single-step it.
2809 If SIGNAL is nonzero, give it that signal. */
2810
ce3a066d 2811static void
2acc282a 2812linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2813 int step, int signal, siginfo_t *info)
da6d8c04 2814{
0d62e5e8 2815 struct thread_info *saved_inferior;
fa593d66 2816 int fast_tp_collecting;
0d62e5e8 2817
54a0b537 2818 if (lwp->stopped == 0)
0d62e5e8
DJ
2819 return;
2820
fa593d66
PA
2821 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2822
2823 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2824
219f2f23
PA
2825 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2826 user used the "jump" command, or "set $pc = foo"). */
2827 if (lwp->stop_pc != get_pc (lwp))
2828 {
2829 /* Collecting 'while-stepping' actions doesn't make sense
2830 anymore. */
2831 release_while_stepping_state_list (get_lwp_thread (lwp));
2832 }
2833
0d62e5e8
DJ
2834 /* If we have pending signals or status, and a new signal, enqueue the
2835 signal. Also enqueue the signal if we are waiting to reinsert a
2836 breakpoint; it will be picked up again below. */
2837 if (signal != 0
fa593d66
PA
2838 && (lwp->status_pending_p
2839 || lwp->pending_signals != NULL
2840 || lwp->bp_reinsert != 0
2841 || fast_tp_collecting))
0d62e5e8
DJ
2842 {
2843 struct pending_signals *p_sig;
bca929d3 2844 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2845 p_sig->prev = lwp->pending_signals;
0d62e5e8 2846 p_sig->signal = signal;
32ca6d61
DJ
2847 if (info == NULL)
2848 memset (&p_sig->info, 0, sizeof (siginfo_t));
2849 else
2850 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2851 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2852 }
2853
d50171e4
PA
2854 if (lwp->status_pending_p)
2855 {
2856 if (debug_threads)
2857 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2858 " has pending status\n",
2859 lwpid_of (lwp), step ? "step" : "continue", signal,
2860 lwp->stop_expected ? "expected" : "not expected");
2861 return;
2862 }
0d62e5e8
DJ
2863
2864 saved_inferior = current_inferior;
54a0b537 2865 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2866
2867 if (debug_threads)
1b3f6016 2868 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2869 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2870 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2871
2872 /* This bit needs some thinking about. If we get a signal that
2873 we must report while a single-step reinsert is still pending,
2874 we often end up resuming the thread. It might be better to
2875 (ew) allow a stack of pending events; then we could be sure that
2876 the reinsert happened right away and not lose any signals.
2877
2878 Making this stack would also shrink the window in which breakpoints are
54a0b537 2879 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2880 complete correctness, so it won't solve that problem. It may be
2881 worthwhile just to solve this one, however. */
54a0b537 2882 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2883 {
2884 if (debug_threads)
d50171e4
PA
2885 fprintf (stderr, " pending reinsert at 0x%s\n",
2886 paddress (lwp->bp_reinsert));
2887
2888 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2889 {
fa593d66
PA
2890 if (fast_tp_collecting == 0)
2891 {
2892 if (step == 0)
2893 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2894 if (lwp->suspended)
2895 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2896 lwp->suspended);
2897 }
d50171e4
PA
2898
2899 step = 1;
2900 }
0d62e5e8
DJ
2901
2902 /* Postpone any pending signal. It was enqueued above. */
2903 signal = 0;
2904 }
2905
fa593d66
PA
2906 if (fast_tp_collecting == 1)
2907 {
2908 if (debug_threads)
2909 fprintf (stderr, "\
2910lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2911 lwpid_of (lwp));
2912
2913 /* Postpone any pending signal. It was enqueued above. */
2914 signal = 0;
2915 }
2916 else if (fast_tp_collecting == 2)
2917 {
2918 if (debug_threads)
2919 fprintf (stderr, "\
2920lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2921 lwpid_of (lwp));
2922
2923 if (can_hardware_single_step ())
2924 step = 1;
2925 else
2926 fatal ("moving out of jump pad single-stepping"
2927 " not implemented on this target");
2928
2929 /* Postpone any pending signal. It was enqueued above. */
2930 signal = 0;
2931 }
2932
219f2f23
PA
2933 /* If we have while-stepping actions in this thread set it stepping.
2934 If we have a signal to deliver, it may or may not be set to
2935 SIG_IGN, we don't know. Assume so, and allow collecting
2936 while-stepping into a signal handler. A possible smart thing to
2937 do would be to set an internal breakpoint at the signal return
2938 address, continue, and carry on catching this while-stepping
2939 action only when that breakpoint is hit. A future
2940 enhancement. */
2941 if (get_lwp_thread (lwp)->while_stepping != NULL
2942 && can_hardware_single_step ())
2943 {
2944 if (debug_threads)
2945 fprintf (stderr,
2946 "lwp %ld has a while-stepping action -> forcing step.\n",
2947 lwpid_of (lwp));
2948 step = 1;
2949 }
2950
aa691b87 2951 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2952 {
442ea881
PA
2953 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2954 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 2955 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
2956 }
2957
fa593d66
PA
2958 /* If we have pending signals, consume one unless we are trying to
2959 reinsert a breakpoint or we're trying to finish a fast tracepoint
2960 collect. */
2961 if (lwp->pending_signals != NULL
2962 && lwp->bp_reinsert == 0
2963 && fast_tp_collecting == 0)
0d62e5e8
DJ
2964 {
2965 struct pending_signals **p_sig;
2966
54a0b537 2967 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
2968 while ((*p_sig)->prev != NULL)
2969 p_sig = &(*p_sig)->prev;
2970
2971 signal = (*p_sig)->signal;
32ca6d61 2972 if ((*p_sig)->info.si_signo != 0)
bd99dc85 2973 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 2974
0d62e5e8
DJ
2975 free (*p_sig);
2976 *p_sig = NULL;
2977 }
2978
aa5ca48f
DE
2979 if (the_low_target.prepare_to_resume != NULL)
2980 the_low_target.prepare_to_resume (lwp);
2981
0d62e5e8 2982 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 2983 get_lwp_thread (lwp));
da6d8c04 2984 errno = 0;
54a0b537 2985 lwp->stopped = 0;
c3adc08c 2986 lwp->stopped_by_watchpoint = 0;
54a0b537 2987 lwp->stepping = step;
14ce3065
DE
2988 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2989 /* Coerce to a uintptr_t first to avoid potential gcc warning
2990 of coercing an 8 byte integer to a 4 byte pointer. */
2991 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
2992
2993 current_inferior = saved_inferior;
da6d8c04 2994 if (errno)
3221518c
UW
2995 {
2996 /* ESRCH from ptrace either means that the thread was already
2997 running (an error) or that it is gone (a race condition). If
2998 it's gone, we will get a notification the next time we wait,
2999 so we can ignore the error. We could differentiate these
3000 two, but it's tricky without waiting; the thread still exists
3001 as a zombie, so sending it signal 0 would succeed. So just
3002 ignore ESRCH. */
3003 if (errno == ESRCH)
3004 return;
3005
3006 perror_with_name ("ptrace");
3007 }
da6d8c04
DJ
3008}
3009
2bd7c093
PA
3010struct thread_resume_array
3011{
3012 struct thread_resume *resume;
3013 size_t n;
3014};
64386c31
DJ
3015
3016/* This function is called once per thread. We look up the thread
5544ad89
DJ
3017 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3018 resume request.
3019
3020 This algorithm is O(threads * resume elements), but resume elements
3021 is small (and will remain small at least until GDB supports thread
3022 suspension). */
2bd7c093
PA
3023static int
3024linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3025{
54a0b537 3026 struct lwp_info *lwp;
64386c31 3027 struct thread_info *thread;
5544ad89 3028 int ndx;
2bd7c093 3029 struct thread_resume_array *r;
64386c31
DJ
3030
3031 thread = (struct thread_info *) entry;
54a0b537 3032 lwp = get_thread_lwp (thread);
2bd7c093 3033 r = arg;
64386c31 3034
2bd7c093 3035 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3036 {
3037 ptid_t ptid = r->resume[ndx].thread;
3038 if (ptid_equal (ptid, minus_one_ptid)
3039 || ptid_equal (ptid, entry->id)
3040 || (ptid_is_pid (ptid)
3041 && (ptid_get_pid (ptid) == pid_of (lwp)))
3042 || (ptid_get_lwp (ptid) == -1
3043 && (ptid_get_pid (ptid) == pid_of (lwp))))
3044 {
d50171e4 3045 if (r->resume[ndx].kind == resume_stop
8336d594 3046 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3047 {
3048 if (debug_threads)
3049 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3050 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3051 ? "stopped"
3052 : "stopping",
3053 lwpid_of (lwp));
3054
3055 continue;
3056 }
3057
95954743 3058 lwp->resume = &r->resume[ndx];
8336d594 3059 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3060
3061 /* If we had a deferred signal to report, dequeue one now.
3062 This can happen if LWP gets more than one signal while
3063 trying to get out of a jump pad. */
3064 if (lwp->stopped
3065 && !lwp->status_pending_p
3066 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3067 {
3068 lwp->status_pending_p = 1;
3069
3070 if (debug_threads)
3071 fprintf (stderr,
3072 "Dequeueing deferred signal %d for LWP %ld, "
3073 "leaving status pending.\n",
3074 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3075 }
3076
95954743
PA
3077 return 0;
3078 }
3079 }
2bd7c093
PA
3080
3081 /* No resume action for this thread. */
3082 lwp->resume = NULL;
64386c31 3083
2bd7c093 3084 return 0;
5544ad89
DJ
3085}
3086
5544ad89 3087
bd99dc85
PA
3088/* Set *FLAG_P if this lwp has an interesting status pending. */
3089static int
3090resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3091{
bd99dc85 3092 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3093
bd99dc85
PA
3094 /* LWPs which will not be resumed are not interesting, because
3095 we might not wait for them next time through linux_wait. */
2bd7c093 3096 if (lwp->resume == NULL)
bd99dc85 3097 return 0;
64386c31 3098
bd99dc85 3099 if (lwp->status_pending_p)
d50171e4
PA
3100 * (int *) flag_p = 1;
3101
3102 return 0;
3103}
3104
3105/* Return 1 if this lwp that GDB wants running is stopped at an
3106 internal breakpoint that we need to step over. It assumes that any
3107 required STOP_PC adjustment has already been propagated to the
3108 inferior's regcache. */
3109
3110static int
3111need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3112{
3113 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3114 struct thread_info *thread;
d50171e4
PA
3115 struct thread_info *saved_inferior;
3116 CORE_ADDR pc;
3117
3118 /* LWPs which will not be resumed are not interesting, because we
3119 might not wait for them next time through linux_wait. */
3120
3121 if (!lwp->stopped)
3122 {
3123 if (debug_threads)
3124 fprintf (stderr,
3125 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3126 lwpid_of (lwp));
3127 return 0;
3128 }
3129
8336d594
PA
3130 thread = get_lwp_thread (lwp);
3131
3132 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3133 {
3134 if (debug_threads)
3135 fprintf (stderr,
3136 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3137 lwpid_of (lwp));
3138 return 0;
3139 }
3140
7984d532
PA
3141 gdb_assert (lwp->suspended >= 0);
3142
3143 if (lwp->suspended)
3144 {
3145 if (debug_threads)
3146 fprintf (stderr,
3147 "Need step over [LWP %ld]? Ignoring, suspended\n",
3148 lwpid_of (lwp));
3149 return 0;
3150 }
3151
d50171e4
PA
3152 if (!lwp->need_step_over)
3153 {
3154 if (debug_threads)
3155 fprintf (stderr,
3156 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3157 }
5544ad89 3158
bd99dc85 3159 if (lwp->status_pending_p)
d50171e4
PA
3160 {
3161 if (debug_threads)
3162 fprintf (stderr,
3163 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3164 lwpid_of (lwp));
3165 return 0;
3166 }
3167
3168 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3169 or we have. */
3170 pc = get_pc (lwp);
3171
3172 /* If the PC has changed since we stopped, then don't do anything,
3173 and let the breakpoint/tracepoint be hit. This happens if, for
3174 instance, GDB handled the decr_pc_after_break subtraction itself,
3175 GDB is OOL stepping this thread, or the user has issued a "jump"
3176 command, or poked thread's registers herself. */
3177 if (pc != lwp->stop_pc)
3178 {
3179 if (debug_threads)
3180 fprintf (stderr,
3181 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3182 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3183 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3184
3185 lwp->need_step_over = 0;
3186 return 0;
3187 }
3188
3189 saved_inferior = current_inferior;
8336d594 3190 current_inferior = thread;
d50171e4 3191
8b07ae33 3192 /* We can only step over breakpoints we know about. */
fa593d66 3193 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3194 {
8b07ae33
PA
3195 /* Don't step over a breakpoint that GDB expects to hit
3196 though. */
3197 if (gdb_breakpoint_here (pc))
3198 {
3199 if (debug_threads)
3200 fprintf (stderr,
3201 "Need step over [LWP %ld]? yes, but found"
3202 " GDB breakpoint at 0x%s; skipping step over\n",
3203 lwpid_of (lwp), paddress (pc));
d50171e4 3204
8b07ae33
PA
3205 current_inferior = saved_inferior;
3206 return 0;
3207 }
3208 else
3209 {
3210 if (debug_threads)
3211 fprintf (stderr,
493e2a69
MS
3212 "Need step over [LWP %ld]? yes, "
3213 "found breakpoint at 0x%s\n",
8b07ae33 3214 lwpid_of (lwp), paddress (pc));
d50171e4 3215
8b07ae33
PA
3216 /* We've found an lwp that needs stepping over --- return 1 so
3217 that find_inferior stops looking. */
3218 current_inferior = saved_inferior;
3219
3220 /* If the step over is cancelled, this is set again. */
3221 lwp->need_step_over = 0;
3222 return 1;
3223 }
d50171e4
PA
3224 }
3225
3226 current_inferior = saved_inferior;
3227
3228 if (debug_threads)
3229 fprintf (stderr,
3230 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3231 lwpid_of (lwp), paddress (pc));
c6ecbae5 3232
bd99dc85 3233 return 0;
5544ad89
DJ
3234}
3235
d50171e4
PA
3236/* Start a step-over operation on LWP. When LWP stopped at a
3237 breakpoint, to make progress, we need to remove the breakpoint out
3238 of the way. If we let other threads run while we do that, they may
3239 pass by the breakpoint location and miss hitting it. To avoid
3240 that, a step-over momentarily stops all threads while LWP is
3241 single-stepped while the breakpoint is temporarily uninserted from
3242 the inferior. When the single-step finishes, we reinsert the
3243 breakpoint, and let all threads that are supposed to be running,
3244 run again.
3245
3246 On targets that don't support hardware single-step, we don't
3247 currently support full software single-stepping. Instead, we only
3248 support stepping over the thread event breakpoint, by asking the
3249 low target where to place a reinsert breakpoint. Since this
3250 routine assumes the breakpoint being stepped over is a thread event
3251 breakpoint, it usually assumes the return address of the current
3252 function is a good enough place to set the reinsert breakpoint. */
3253
3254static int
3255start_step_over (struct lwp_info *lwp)
3256{
3257 struct thread_info *saved_inferior;
3258 CORE_ADDR pc;
3259 int step;
3260
3261 if (debug_threads)
3262 fprintf (stderr,
3263 "Starting step-over on LWP %ld. Stopping all threads\n",
3264 lwpid_of (lwp));
3265
7984d532
PA
3266 stop_all_lwps (1, lwp);
3267 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3268
3269 if (debug_threads)
3270 fprintf (stderr, "Done stopping all threads for step-over.\n");
3271
3272 /* Note, we should always reach here with an already adjusted PC,
3273 either by GDB (if we're resuming due to GDB's request), or by our
3274 caller, if we just finished handling an internal breakpoint GDB
3275 shouldn't care about. */
3276 pc = get_pc (lwp);
3277
3278 saved_inferior = current_inferior;
3279 current_inferior = get_lwp_thread (lwp);
3280
3281 lwp->bp_reinsert = pc;
3282 uninsert_breakpoints_at (pc);
fa593d66 3283 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3284
3285 if (can_hardware_single_step ())
3286 {
3287 step = 1;
3288 }
3289 else
3290 {
3291 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3292 set_reinsert_breakpoint (raddr);
3293 step = 0;
3294 }
3295
3296 current_inferior = saved_inferior;
3297
3298 linux_resume_one_lwp (lwp, step, 0, NULL);
3299
3300 /* Require next event from this LWP. */
3301 step_over_bkpt = lwp->head.id;
3302 return 1;
3303}
3304
3305/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3306 start_step_over, if still there, and delete any reinsert
3307 breakpoints we've set, on non hardware single-step targets. */
3308
3309static int
3310finish_step_over (struct lwp_info *lwp)
3311{
3312 if (lwp->bp_reinsert != 0)
3313 {
3314 if (debug_threads)
3315 fprintf (stderr, "Finished step over.\n");
3316
3317 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3318 may be no breakpoint to reinsert there by now. */
3319 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 3320 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
3321
3322 lwp->bp_reinsert = 0;
3323
3324 /* Delete any software-single-step reinsert breakpoints. No
3325 longer needed. We don't have to worry about other threads
3326 hitting this trap, and later not being able to explain it,
3327 because we were stepping over a breakpoint, and we hold all
3328 threads but LWP stopped while doing that. */
3329 if (!can_hardware_single_step ())
3330 delete_reinsert_breakpoints ();
3331
3332 step_over_bkpt = null_ptid;
3333 return 1;
3334 }
3335 else
3336 return 0;
3337}
3338
5544ad89
DJ
3339/* This function is called once per thread. We check the thread's resume
3340 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 3341 stopped; and what signal, if any, it should be sent.
5544ad89 3342
bd99dc85
PA
3343 For threads which we aren't explicitly told otherwise, we preserve
3344 the stepping flag; this is used for stepping over gdbserver-placed
3345 breakpoints.
3346
3347 If pending_flags was set in any thread, we queue any needed
3348 signals, since we won't actually resume. We already have a pending
3349 event to report, so we don't need to preserve any step requests;
3350 they should be re-issued if necessary. */
3351
3352static int
3353linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 3354{
54a0b537 3355 struct lwp_info *lwp;
5544ad89 3356 struct thread_info *thread;
bd99dc85 3357 int step;
d50171e4
PA
3358 int leave_all_stopped = * (int *) arg;
3359 int leave_pending;
5544ad89
DJ
3360
3361 thread = (struct thread_info *) entry;
54a0b537 3362 lwp = get_thread_lwp (thread);
5544ad89 3363
2bd7c093 3364 if (lwp->resume == NULL)
bd99dc85 3365 return 0;
5544ad89 3366
bd99dc85 3367 if (lwp->resume->kind == resume_stop)
5544ad89 3368 {
bd99dc85 3369 if (debug_threads)
d50171e4 3370 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
3371
3372 if (!lwp->stopped)
3373 {
3374 if (debug_threads)
d50171e4 3375 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 3376
d50171e4
PA
3377 /* Stop the thread, and wait for the event asynchronously,
3378 through the event loop. */
02fc4de7 3379 send_sigstop (lwp);
bd99dc85
PA
3380 }
3381 else
3382 {
3383 if (debug_threads)
d50171e4
PA
3384 fprintf (stderr, "already stopped LWP %ld\n",
3385 lwpid_of (lwp));
3386
3387 /* The LWP may have been stopped in an internal event that
3388 was not meant to be notified back to GDB (e.g., gdbserver
3389 breakpoint), so we should be reporting a stop event in
3390 this case too. */
3391
3392 /* If the thread already has a pending SIGSTOP, this is a
3393 no-op. Otherwise, something later will presumably resume
3394 the thread and this will cause it to cancel any pending
3395 operation, due to last_resume_kind == resume_stop. If
3396 the thread already has a pending status to report, we
3397 will still report it the next time we wait - see
3398 status_pending_p_callback. */
1a981360
PA
3399
3400 /* If we already have a pending signal to report, then
3401 there's no need to queue a SIGSTOP, as this means we're
3402 midway through moving the LWP out of the jumppad, and we
3403 will report the pending signal as soon as that is
3404 finished. */
3405 if (lwp->pending_signals_to_report == NULL)
3406 send_sigstop (lwp);
bd99dc85 3407 }
32ca6d61 3408
bd99dc85
PA
3409 /* For stop requests, we're done. */
3410 lwp->resume = NULL;
fc7238bb 3411 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3412 return 0;
5544ad89
DJ
3413 }
3414
bd99dc85
PA
3415 /* If this thread which is about to be resumed has a pending status,
3416 then don't resume any threads - we can just report the pending
3417 status. Make sure to queue any signals that would otherwise be
3418 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
3419 thread has a pending status. If there's a thread that needs the
3420 step-over-breakpoint dance, then don't resume any other thread
3421 but that particular one. */
3422 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 3423
d50171e4 3424 if (!leave_pending)
bd99dc85
PA
3425 {
3426 if (debug_threads)
3427 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 3428
d50171e4 3429 step = (lwp->resume->kind == resume_step);
2acc282a 3430 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
3431 }
3432 else
3433 {
3434 if (debug_threads)
3435 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 3436
bd99dc85
PA
3437 /* If we have a new signal, enqueue the signal. */
3438 if (lwp->resume->sig != 0)
3439 {
3440 struct pending_signals *p_sig;
3441 p_sig = xmalloc (sizeof (*p_sig));
3442 p_sig->prev = lwp->pending_signals;
3443 p_sig->signal = lwp->resume->sig;
3444 memset (&p_sig->info, 0, sizeof (siginfo_t));
3445
3446 /* If this is the same signal we were previously stopped by,
3447 make sure to queue its siginfo. We can ignore the return
3448 value of ptrace; if it fails, we'll skip
3449 PTRACE_SETSIGINFO. */
3450 if (WIFSTOPPED (lwp->last_status)
3451 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3452 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3453
3454 lwp->pending_signals = p_sig;
3455 }
3456 }
5544ad89 3457
fc7238bb 3458 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3459 lwp->resume = NULL;
5544ad89 3460 return 0;
0d62e5e8
DJ
3461}
3462
3463static void
2bd7c093 3464linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 3465{
2bd7c093 3466 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
3467 struct lwp_info *need_step_over = NULL;
3468 int any_pending;
3469 int leave_all_stopped;
c6ecbae5 3470
2bd7c093 3471 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 3472
d50171e4
PA
3473 /* If there is a thread which would otherwise be resumed, which has
3474 a pending status, then don't resume any threads - we can just
3475 report the pending status. Make sure to queue any signals that
3476 would otherwise be sent. In non-stop mode, we'll apply this
3477 logic to each thread individually. We consume all pending events
3478 before considering to start a step-over (in all-stop). */
3479 any_pending = 0;
bd99dc85 3480 if (!non_stop)
d50171e4
PA
3481 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3482
3483 /* If there is a thread which would otherwise be resumed, which is
3484 stopped at a breakpoint that needs stepping over, then don't
3485 resume any threads - have it step over the breakpoint with all
3486 other threads stopped, then resume all threads again. Make sure
3487 to queue any signals that would otherwise be delivered or
3488 queued. */
3489 if (!any_pending && supports_breakpoints ())
3490 need_step_over
3491 = (struct lwp_info *) find_inferior (&all_lwps,
3492 need_step_over_p, NULL);
3493
3494 leave_all_stopped = (need_step_over != NULL || any_pending);
3495
3496 if (debug_threads)
3497 {
3498 if (need_step_over != NULL)
3499 fprintf (stderr, "Not resuming all, need step over\n");
3500 else if (any_pending)
3501 fprintf (stderr,
3502 "Not resuming, all-stop and found "
3503 "an LWP with pending status\n");
3504 else
3505 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3506 }
3507
3508 /* Even if we're leaving threads stopped, queue all signals we'd
3509 otherwise deliver. */
3510 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3511
3512 if (need_step_over)
3513 start_step_over (need_step_over);
3514}
3515
3516/* This function is called once per thread. We check the thread's
3517 last resume request, which will tell us whether to resume, step, or
3518 leave the thread stopped. Any signal the client requested to be
3519 delivered has already been enqueued at this point.
3520
3521 If any thread that GDB wants running is stopped at an internal
3522 breakpoint that needs stepping over, we start a step-over operation
3523 on that particular thread, and leave all others stopped. */
3524
7984d532
PA
3525static int
3526proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 3527{
7984d532 3528 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3529 struct thread_info *thread;
d50171e4
PA
3530 int step;
3531
7984d532
PA
3532 if (lwp == except)
3533 return 0;
d50171e4
PA
3534
3535 if (debug_threads)
3536 fprintf (stderr,
3537 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3538
3539 if (!lwp->stopped)
3540 {
3541 if (debug_threads)
3542 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
7984d532 3543 return 0;
d50171e4
PA
3544 }
3545
8336d594
PA
3546 thread = get_lwp_thread (lwp);
3547
02fc4de7
PA
3548 if (thread->last_resume_kind == resume_stop
3549 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
3550 {
3551 if (debug_threads)
02fc4de7
PA
3552 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3553 lwpid_of (lwp));
7984d532 3554 return 0;
d50171e4
PA
3555 }
3556
3557 if (lwp->status_pending_p)
3558 {
3559 if (debug_threads)
3560 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3561 lwpid_of (lwp));
7984d532 3562 return 0;
d50171e4
PA
3563 }
3564
7984d532
PA
3565 gdb_assert (lwp->suspended >= 0);
3566
d50171e4
PA
3567 if (lwp->suspended)
3568 {
3569 if (debug_threads)
3570 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
7984d532 3571 return 0;
d50171e4
PA
3572 }
3573
1a981360
PA
3574 if (thread->last_resume_kind == resume_stop
3575 && lwp->pending_signals_to_report == NULL
3576 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
3577 {
3578 /* We haven't reported this LWP as stopped yet (otherwise, the
3579 last_status.kind check above would catch it, and we wouldn't
3580 reach here. This LWP may have been momentarily paused by a
3581 stop_all_lwps call while handling for example, another LWP's
3582 step-over. In that case, the pending expected SIGSTOP signal
3583 that was queued at vCont;t handling time will have already
3584 been consumed by wait_for_sigstop, and so we need to requeue
3585 another one here. Note that if the LWP already has a SIGSTOP
3586 pending, this is a no-op. */
3587
3588 if (debug_threads)
3589 fprintf (stderr,
3590 "Client wants LWP %ld to stop. "
3591 "Making sure it has a SIGSTOP pending\n",
3592 lwpid_of (lwp));
3593
3594 send_sigstop (lwp);
3595 }
3596
8336d594 3597 step = thread->last_resume_kind == resume_step;
d50171e4 3598 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
3599 return 0;
3600}
3601
3602static int
3603unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3604{
3605 struct lwp_info *lwp = (struct lwp_info *) entry;
3606
3607 if (lwp == except)
3608 return 0;
3609
3610 lwp->suspended--;
3611 gdb_assert (lwp->suspended >= 0);
3612
3613 return proceed_one_lwp (entry, except);
d50171e4
PA
3614}
3615
3616/* When we finish a step-over, set threads running again. If there's
3617 another thread that may need a step-over, now's the time to start
3618 it. Eventually, we'll move all threads past their breakpoints. */
3619
3620static void
3621proceed_all_lwps (void)
3622{
3623 struct lwp_info *need_step_over;
3624
3625 /* If there is a thread which would otherwise be resumed, which is
3626 stopped at a breakpoint that needs stepping over, then don't
3627 resume any threads - have it step over the breakpoint with all
3628 other threads stopped, then resume all threads again. */
3629
3630 if (supports_breakpoints ())
3631 {
3632 need_step_over
3633 = (struct lwp_info *) find_inferior (&all_lwps,
3634 need_step_over_p, NULL);
3635
3636 if (need_step_over != NULL)
3637 {
3638 if (debug_threads)
3639 fprintf (stderr, "proceed_all_lwps: found "
3640 "thread %ld needing a step-over\n",
3641 lwpid_of (need_step_over));
3642
3643 start_step_over (need_step_over);
3644 return;
3645 }
3646 }
5544ad89 3647
d50171e4
PA
3648 if (debug_threads)
3649 fprintf (stderr, "Proceeding, no step-over needed\n");
3650
7984d532 3651 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3652}
3653
3654/* Stopped LWPs that the client wanted to be running, that don't have
3655 pending statuses, are set to run again, except for EXCEPT, if not
3656 NULL. This undoes a stop_all_lwps call. */
3657
3658static void
7984d532 3659unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3660{
5544ad89
DJ
3661 if (debug_threads)
3662 {
d50171e4
PA
3663 if (except)
3664 fprintf (stderr,
3665 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3666 else
d50171e4
PA
3667 fprintf (stderr,
3668 "unstopping all lwps\n");
5544ad89
DJ
3669 }
3670
7984d532
PA
3671 if (unsuspend)
3672 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3673 else
3674 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3675}
3676
3677#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
3678
3679int
0a30fbc4 3680register_addr (int regnum)
da6d8c04
DJ
3681{
3682 int addr;
3683
2ec06d2e 3684 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
3685 error ("Invalid register number %d.", regnum);
3686
2ec06d2e 3687 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
3688
3689 return addr;
3690}
3691
58caa3dc 3692/* Fetch one register. */
da6d8c04 3693static void
442ea881 3694fetch_register (struct regcache *regcache, int regno)
da6d8c04
DJ
3695{
3696 CORE_ADDR regaddr;
48d93c75 3697 int i, size;
0d62e5e8 3698 char *buf;
95954743 3699 int pid;
da6d8c04 3700
2ec06d2e 3701 if (regno >= the_low_target.num_regs)
0a30fbc4 3702 return;
2ec06d2e 3703 if ((*the_low_target.cannot_fetch_register) (regno))
0a30fbc4 3704 return;
da6d8c04 3705
0a30fbc4
DJ
3706 regaddr = register_addr (regno);
3707 if (regaddr == -1)
3708 return;
95954743
PA
3709
3710 pid = lwpid_of (get_thread_lwp (current_inferior));
1b3f6016
PA
3711 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3712 & - sizeof (PTRACE_XFER_TYPE));
48d93c75
UW
3713 buf = alloca (size);
3714 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04
DJ
3715 {
3716 errno = 0;
0d62e5e8 3717 *(PTRACE_XFER_TYPE *) (buf + i) =
14ce3065
DE
3718 ptrace (PTRACE_PEEKUSER, pid,
3719 /* Coerce to a uintptr_t first to avoid potential gcc warning
3720 of coercing an 8 byte integer to a 4 byte pointer. */
3721 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
da6d8c04
DJ
3722 regaddr += sizeof (PTRACE_XFER_TYPE);
3723 if (errno != 0)
f52cd8cd 3724 error ("reading register %d: %s", regno, strerror (errno));
da6d8c04 3725 }
ee1a7ae4
UW
3726
3727 if (the_low_target.supply_ptrace_register)
442ea881 3728 the_low_target.supply_ptrace_register (regcache, regno, buf);
5a1f5858 3729 else
442ea881 3730 supply_register (regcache, regno, buf);
da6d8c04
DJ
3731}
3732
3733/* Fetch all registers, or just one, from the child process. */
58caa3dc 3734static void
442ea881 3735usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 3736{
4463ce24 3737 if (regno == -1)
2ec06d2e 3738 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3739 fetch_register (regcache, regno);
da6d8c04 3740 else
442ea881 3741 fetch_register (regcache, regno);
da6d8c04
DJ
3742}
3743
3744/* Store our register values back into the inferior.
3745 If REGNO is -1, do this for all registers.
3746 Otherwise, REGNO specifies which register (so we can save time). */
58caa3dc 3747static void
442ea881 3748usr_store_inferior_registers (struct regcache *regcache, int regno)
da6d8c04
DJ
3749{
3750 CORE_ADDR regaddr;
48d93c75 3751 int i, size;
0d62e5e8 3752 char *buf;
55ac2b99 3753 int pid;
da6d8c04
DJ
3754
3755 if (regno >= 0)
3756 {
2ec06d2e 3757 if (regno >= the_low_target.num_regs)
0a30fbc4
DJ
3758 return;
3759
bc1e36ca 3760 if ((*the_low_target.cannot_store_register) (regno) == 1)
0a30fbc4
DJ
3761 return;
3762
3763 regaddr = register_addr (regno);
3764 if (regaddr == -1)
da6d8c04 3765 return;
da6d8c04 3766 errno = 0;
48d93c75
UW
3767 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3768 & - sizeof (PTRACE_XFER_TYPE);
3769 buf = alloca (size);
3770 memset (buf, 0, size);
ee1a7ae4
UW
3771
3772 if (the_low_target.collect_ptrace_register)
442ea881 3773 the_low_target.collect_ptrace_register (regcache, regno, buf);
5a1f5858 3774 else
442ea881 3775 collect_register (regcache, regno, buf);
ee1a7ae4 3776
95954743 3777 pid = lwpid_of (get_thread_lwp (current_inferior));
48d93c75 3778 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04 3779 {
0a30fbc4 3780 errno = 0;
14ce3065
DE
3781 ptrace (PTRACE_POKEUSER, pid,
3782 /* Coerce to a uintptr_t first to avoid potential gcc warning
3783 about coercing an 8 byte integer to a 4 byte pointer. */
3784 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3785 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
da6d8c04
DJ
3786 if (errno != 0)
3787 {
1b3f6016
PA
3788 /* At this point, ESRCH should mean the process is
3789 already gone, in which case we simply ignore attempts
3790 to change its registers. See also the related
3791 comment in linux_resume_one_lwp. */
3221518c
UW
3792 if (errno == ESRCH)
3793 return;
3794
bc1e36ca 3795 if ((*the_low_target.cannot_store_register) (regno) == 0)
f52cd8cd 3796 error ("writing register %d: %s", regno, strerror (errno));
da6d8c04 3797 }
2ff29de4 3798 regaddr += sizeof (PTRACE_XFER_TYPE);
da6d8c04 3799 }
da6d8c04
DJ
3800 }
3801 else
2ec06d2e 3802 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3803 usr_store_inferior_registers (regcache, regno);
da6d8c04 3804}
58caa3dc
DJ
3805#endif /* HAVE_LINUX_USRREGS */
3806
3807
3808
3809#ifdef HAVE_LINUX_REGSETS
3810
3811static int
442ea881 3812regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3813{
3814 struct regset_info *regset;
e9d25b98 3815 int saw_general_regs = 0;
95954743 3816 int pid;
1570b33e 3817 struct iovec iov;
58caa3dc
DJ
3818
3819 regset = target_regsets;
3820
95954743 3821 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3822 while (regset->size >= 0)
3823 {
1570b33e
L
3824 void *buf, *data;
3825 int nt_type, res;
58caa3dc 3826
52fa2412 3827 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3828 {
3829 regset ++;
3830 continue;
3831 }
3832
bca929d3 3833 buf = xmalloc (regset->size);
1570b33e
L
3834
3835 nt_type = regset->nt_type;
3836 if (nt_type)
3837 {
3838 iov.iov_base = buf;
3839 iov.iov_len = regset->size;
3840 data = (void *) &iov;
3841 }
3842 else
3843 data = buf;
3844
dfb64f85 3845#ifndef __sparc__
1570b33e 3846 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3847#else
1570b33e 3848 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3849#endif
58caa3dc
DJ
3850 if (res < 0)
3851 {
3852 if (errno == EIO)
3853 {
52fa2412
UW
3854 /* If we get EIO on a regset, do not try it again for
3855 this process. */
3856 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3857 free (buf);
52fa2412 3858 continue;
58caa3dc
DJ
3859 }
3860 else
3861 {
0d62e5e8 3862 char s[256];
95954743
PA
3863 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3864 pid);
0d62e5e8 3865 perror (s);
58caa3dc
DJ
3866 }
3867 }
e9d25b98
DJ
3868 else if (regset->type == GENERAL_REGS)
3869 saw_general_regs = 1;
442ea881 3870 regset->store_function (regcache, buf);
58caa3dc 3871 regset ++;
fdeb2a12 3872 free (buf);
58caa3dc 3873 }
e9d25b98
DJ
3874 if (saw_general_regs)
3875 return 0;
3876 else
3877 return 1;
58caa3dc
DJ
3878}
3879
3880static int
442ea881 3881regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3882{
3883 struct regset_info *regset;
e9d25b98 3884 int saw_general_regs = 0;
95954743 3885 int pid;
1570b33e 3886 struct iovec iov;
58caa3dc
DJ
3887
3888 regset = target_regsets;
3889
95954743 3890 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3891 while (regset->size >= 0)
3892 {
1570b33e
L
3893 void *buf, *data;
3894 int nt_type, res;
58caa3dc 3895
52fa2412 3896 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3897 {
3898 regset ++;
3899 continue;
3900 }
3901
bca929d3 3902 buf = xmalloc (regset->size);
545587ee
DJ
3903
3904 /* First fill the buffer with the current register set contents,
3905 in case there are any items in the kernel's regset that are
3906 not in gdbserver's regcache. */
1570b33e
L
3907
3908 nt_type = regset->nt_type;
3909 if (nt_type)
3910 {
3911 iov.iov_base = buf;
3912 iov.iov_len = regset->size;
3913 data = (void *) &iov;
3914 }
3915 else
3916 data = buf;
3917
dfb64f85 3918#ifndef __sparc__
1570b33e 3919 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3920#else
1570b33e 3921 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3922#endif
545587ee
DJ
3923
3924 if (res == 0)
3925 {
3926 /* Then overlay our cached registers on that. */
442ea881 3927 regset->fill_function (regcache, buf);
545587ee
DJ
3928
3929 /* Only now do we write the register set. */
dfb64f85 3930#ifndef __sparc__
1570b33e 3931 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3932#else
1570b33e 3933 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3934#endif
545587ee
DJ
3935 }
3936
58caa3dc
DJ
3937 if (res < 0)
3938 {
3939 if (errno == EIO)
3940 {
52fa2412
UW
3941 /* If we get EIO on a regset, do not try it again for
3942 this process. */
3943 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3944 free (buf);
52fa2412 3945 continue;
58caa3dc 3946 }
3221518c
UW
3947 else if (errno == ESRCH)
3948 {
1b3f6016
PA
3949 /* At this point, ESRCH should mean the process is
3950 already gone, in which case we simply ignore attempts
3951 to change its registers. See also the related
3952 comment in linux_resume_one_lwp. */
fdeb2a12 3953 free (buf);
3221518c
UW
3954 return 0;
3955 }
58caa3dc
DJ
3956 else
3957 {
ce3a066d 3958 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3959 }
3960 }
e9d25b98
DJ
3961 else if (regset->type == GENERAL_REGS)
3962 saw_general_regs = 1;
58caa3dc 3963 regset ++;
09ec9b38 3964 free (buf);
58caa3dc 3965 }
e9d25b98
DJ
3966 if (saw_general_regs)
3967 return 0;
3968 else
3969 return 1;
ce3a066d 3970 return 0;
58caa3dc
DJ
3971}
3972
3973#endif /* HAVE_LINUX_REGSETS */
3974
3975
/* Fetch registers from the inferior into REGCACHE.  Try the regset
   interface first; if it reports that the general registers were not
   covered (non-zero return), fall back on the PTRACE_PEEKUSER path
   for register REGNO (REGNO is only consulted by the USRREGS code).  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Zero means a general-register regset was fetched; nothing more
     to do.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3987
/* Write registers from REGCACHE back to the inferior.  Mirror image
   of linux_fetch_registers: prefer the regset interface, fall back on
   PTRACE_POKEUSER for REGNO if the general registers were not
   covered.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* Zero means a general-register regset was stored; done.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3999
da6d8c04 4000
da6d8c04
DJ
4001/* Copy LEN bytes from inferior's memory starting at MEMADDR
4002 to debugger memory starting at MYADDR. */
4003
c3e735a6 4004static int
f450004a 4005linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
4006{
4007 register int i;
4008 /* Round starting address down to longword boundary. */
4009 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4010 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
4011 register int count
4012 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
4013 / sizeof (PTRACE_XFER_TYPE);
4014 /* Allocate buffer of that many longwords. */
aa691b87 4015 register PTRACE_XFER_TYPE *buffer
da6d8c04 4016 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
4017 int fd;
4018 char filename[64];
95954743 4019 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
4020
4021 /* Try using /proc. Don't bother for one word. */
4022 if (len >= 3 * sizeof (long))
4023 {
4024 /* We could keep this file open and cache it - possibly one per
4025 thread. That requires some juggling, but is even faster. */
95954743 4026 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
4027 fd = open (filename, O_RDONLY | O_LARGEFILE);
4028 if (fd == -1)
4029 goto no_proc;
4030
4031 /* If pread64 is available, use it. It's faster if the kernel
4032 supports it (only one syscall), and it's 64-bit safe even on
4033 32-bit platforms (for instance, SPARC debugging a SPARC64
4034 application). */
4035#ifdef HAVE_PREAD64
4036 if (pread64 (fd, myaddr, len, memaddr) != len)
4037#else
1de1badb 4038 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
4039#endif
4040 {
4041 close (fd);
4042 goto no_proc;
4043 }
4044
4045 close (fd);
4046 return 0;
4047 }
da6d8c04 4048
fd462a61 4049 no_proc:
da6d8c04
DJ
4050 /* Read all the longwords */
4051 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4052 {
c3e735a6 4053 errno = 0;
14ce3065
DE
4054 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4055 about coercing an 8 byte integer to a 4 byte pointer. */
4056 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4057 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
4058 if (errno)
4059 return errno;
da6d8c04
DJ
4060 }
4061
4062 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
4063 memcpy (myaddr,
4064 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4065 len);
c3e735a6
DJ
4066
4067 return 0;
da6d8c04
DJ
4068}
4069
93ae6fdc
PA
4070/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4071 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
4072 returns the value of errno. */
4073
ce3a066d 4074static int
f450004a 4075linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4076{
4077 register int i;
4078 /* Round starting address down to longword boundary. */
4079 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4080 /* Round ending address up; get number of longwords that makes. */
4081 register int count
493e2a69
MS
4082 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4083 / sizeof (PTRACE_XFER_TYPE);
4084
da6d8c04 4085 /* Allocate buffer of that many longwords. */
493e2a69
MS
4086 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4087 alloca (count * sizeof (PTRACE_XFER_TYPE));
4088
95954743 4089 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 4090
0d62e5e8
DJ
4091 if (debug_threads)
4092 {
58d6951d
DJ
4093 /* Dump up to four bytes. */
4094 unsigned int val = * (unsigned int *) myaddr;
4095 if (len == 1)
4096 val = val & 0xff;
4097 else if (len == 2)
4098 val = val & 0xffff;
4099 else if (len == 3)
4100 val = val & 0xffffff;
4101 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4102 val, (long)memaddr);
0d62e5e8
DJ
4103 }
4104
da6d8c04
DJ
4105 /* Fill start and end extra bytes of buffer with existing memory data. */
4106
93ae6fdc 4107 errno = 0;
14ce3065
DE
4108 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4109 about coercing an 8 byte integer to a 4 byte pointer. */
4110 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4111 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
4112 if (errno)
4113 return errno;
da6d8c04
DJ
4114
4115 if (count > 1)
4116 {
93ae6fdc 4117 errno = 0;
da6d8c04 4118 buffer[count - 1]
95954743 4119 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4120 /* Coerce to a uintptr_t first to avoid potential gcc warning
4121 about coercing an 8 byte integer to a 4 byte pointer. */
4122 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4123 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 4124 0);
93ae6fdc
PA
4125 if (errno)
4126 return errno;
da6d8c04
DJ
4127 }
4128
93ae6fdc 4129 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 4130
493e2a69
MS
4131 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4132 myaddr, len);
da6d8c04
DJ
4133
4134 /* Write the entire buffer. */
4135
4136 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4137 {
4138 errno = 0;
14ce3065
DE
4139 ptrace (PTRACE_POKETEXT, pid,
4140 /* Coerce to a uintptr_t first to avoid potential gcc warning
4141 about coercing an 8 byte integer to a 4 byte pointer. */
4142 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4143 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
4144 if (errno)
4145 return errno;
4146 }
4147
4148 return 0;
4149}
2f2893d9 4150
6076632b 4151/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
4152static int linux_supports_tracefork_flag;
4153
1e7fc18c
PA
4154static void
4155linux_enable_event_reporting (int pid)
4156{
4157 if (!linux_supports_tracefork_flag)
4158 return;
4159
4160 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4161}
4162
51c2684e 4163/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 4164
51c2684e
DJ
/* Grandchild of the tracefork probe: exit immediately.  Its only job
   is to generate the fork event the probe is looking for.  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
4170
7407e2de
AS
4171#define STACK_SIZE 4096
4172
51c2684e
DJ
/* Child of the tracefork probe: attach to the parent via
   PTRACE_TRACEME, stop so the parent can set options, then fork (or
   clone on no-MMU uClibc) a grandchild and exit.  ARG is only used on
   the no-MMU path, where it points at the stack passed by the
   caller.  */

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  /* Stop ourselves so the parent can apply PTRACE_SETOPTIONS before
     the fork below happens.  */
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
4198
24a09b5f
DJ
4199/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4200 sure that we can enable the option, and that it had the desired
4201 effect. */
4202
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag as a side effect; any
   failure leaves it at 0 and emits a warning rather than aborting.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

  /* Create a child that will PTRACE_TRACEME, SIGSTOP itself, and then
     fork a grandchild (see linux_tracefork_child).  */
#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  /* Wait for the child's self-inflicted SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  /* Try to enable fork tracing; if the kernel rejects it, kill the
     child and report no support.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Resume the child and check whether its fork produces a
     PTRACE_EVENT_FORK stop.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* The event fired and reported the grandchild's PID: the
	     kernel supports tracefork.  Reap the grandchild.  */
	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child, re-waiting as long as it keeps reporting stops.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
4305
4306
2f2893d9
DJ
/* Hook called when symbols become available: initialize libthread_db
   support for the current process, once.  No-op when gdbserver was
   built without USE_THREAD_DB.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
4322
e5379b03 4323static void
ef57601b 4324linux_request_interrupt (void)
e5379b03 4325{
a1928bad 4326 extern unsigned long signal_pid;
e5379b03 4327
95954743
PA
4328 if (!ptid_equal (cont_thread, null_ptid)
4329 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 4330 {
54a0b537 4331 struct lwp_info *lwp;
bd99dc85 4332 int lwpid;
e5379b03 4333
54a0b537 4334 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
4335 lwpid = lwpid_of (lwp);
4336 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
4337 }
4338 else
ef57601b 4339 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
4340}
4341
aa691b87
RM
4342/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4343 to debugger memory starting at MYADDR. */
4344
4345static int
f450004a 4346linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4347{
4348 char filename[PATH_MAX];
4349 int fd, n;
95954743 4350 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 4351
6cebaf6e 4352 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4353
4354 fd = open (filename, O_RDONLY);
4355 if (fd < 0)
4356 return -1;
4357
4358 if (offset != (CORE_ADDR) 0
4359 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4360 n = -1;
4361 else
4362 n = read (fd, myaddr, len);
4363
4364 close (fd);
4365
4366 return n;
4367}
4368
d993e290
PA
4369/* These breakpoint and watchpoint related wrapper functions simply
4370 pass on the function call if the target has registered a
4371 corresponding function. */
e013ee27
OF
4372
4373static int
d993e290 4374linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 4375{
d993e290
PA
4376 if (the_low_target.insert_point != NULL)
4377 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
4378 else
4379 /* Unsupported (see target.h). */
4380 return 1;
4381}
4382
4383static int
d993e290 4384linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 4385{
d993e290
PA
4386 if (the_low_target.remove_point != NULL)
4387 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
4388 else
4389 /* Unsupported (see target.h). */
4390 return 1;
4391}
4392
4393static int
4394linux_stopped_by_watchpoint (void)
4395{
c3adc08c
PA
4396 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4397
4398 return lwp->stopped_by_watchpoint;
e013ee27
OF
4399}
4400
4401static CORE_ADDR
4402linux_stopped_data_address (void)
4403{
c3adc08c
PA
4404 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4405
4406 return lwp->stopped_data_address;
e013ee27
OF
4407}
4408
42c81e2a 4409#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
4410#if defined(__mcoldfire__)
4411/* These should really be defined in the kernel's ptrace.h header. */
4412#define PT_TEXT_ADDR 49*4
4413#define PT_DATA_ADDR 50*4
4414#define PT_TEXT_END_ADDR 51*4
eb826dc6
MF
4415#elif defined(BFIN)
4416#define PT_TEXT_ADDR 220
4417#define PT_TEXT_END_ADDR 224
4418#define PT_DATA_ADDR 228
58dbd541
YQ
4419#elif defined(__TMS320C6X__)
4420#define PT_TEXT_ADDR (0x10000*4)
4421#define PT_DATA_ADDR (0x10004*4)
4422#define PT_TEXT_END_ADDR (0x10008*4)
52fb6437
NS
4423#endif
4424
4425/* Under uClinux, programs are loaded at non-zero offsets, which we need
4426 to tell gdb about. */
4427
/* Returns 1 and fills *TEXT_P/*DATA_P when the load offsets could be
   read out of the inferior with PTRACE_PEEKUSER, 0 otherwise.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  /* Any of the three peeks failing leaves errno set and we fall
     through to the failure return.  */
  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
4460#endif
4461
07e059b5
VP
/* Handle qXfer:osdata reads by delegating to the code shared with
   native GDB (linux-osdata.c).  Writing is not supported: WRITEBUF is
   ignored.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
4469
d0722149
DE
4470/* Convert a native/host siginfo object, into/from the siginfo in the
4471 layout of the inferiors' architecture. */
4472
4473static void
4474siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4475{
4476 int done = 0;
4477
4478 if (the_low_target.siginfo_fixup != NULL)
4479 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4480
4481 /* If there was no callback, or the callback didn't do anything,
4482 then just do a straight memcpy. */
4483 if (!done)
4484 {
4485 if (direction == 1)
4486 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4487 else
4488 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4489 }
4490}
4491
4aa995e1
PA
/* Read or write the pending-signal siginfo of the current LWP for the
   qXfer:siginfo packet.  Exactly one of READBUF/WRITEBUF is non-NULL;
   OFFSET/LEN select a window into the siginfo object.  Returns the
   number of bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* Transfers entirely past the end of the object fail.  */
  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write we fetch first, so that a partial write only
     modifies the bytes covered by OFFSET/LEN.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4540
bd99dc85
PA
4541/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4542 so we notice when children change state; as the handler for the
4543 sigsuspend in my_waitpid. */
4544
4545static void
4546sigchld_handler (int signo)
4547{
4548 int old_errno = errno;
4549
4550 if (debug_threads)
e581f2b4
PA
4551 {
4552 do
4553 {
4554 /* fprintf is not async-signal-safe, so call write
4555 directly. */
4556 if (write (2, "sigchld_handler\n",
4557 sizeof ("sigchld_handler\n") - 1) < 0)
4558 break; /* just ignore */
4559 } while (0);
4560 }
bd99dc85
PA
4561
4562 if (target_is_async_p ())
4563 async_file_mark (); /* trigger a linux_wait */
4564
4565 errno = old_errno;
4566}
4567
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4573
/* Enable (ENABLE != 0) or disable async (non-stop) mode by creating
   or tearing down the event pipe that wakes the event loop.  Returns
   the previous setting.  A live pipe (linux_event_pipe[0] != -1) is
   the marker for "async is on".  */

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while the pipe is in flux, so sigchld_handler
	 never runs against a half-created/half-destroyed pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4621
/* Switch non-stop mode on or off; async mode follows it.  Always
   succeeds (returns 0).  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4629
cf8fd78b
PA
/* The Linux target always supports debugging multiple processes.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4635
efcbbd14
UW
4636
4637/* Enumerate spufs IDs for process PID. */
/* Scan /proc/PID/fd for descriptors that are directories on a spufs
   filesystem and store each such fd number (4 bytes) into BUF,
   honoring the OFFSET/LEN window.  Returns the number of bytes
   written to BUF, or -1 if /proc/PID/fd cannot be opened.  */

static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skips "." and ".." (and, as a side effect, fd 0).  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Only store ids that fall fully inside the requested window;
	 POS tracks the position in the full (unwindowed) stream.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4685
4686/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4687 object type, using the /proc file system. */
4688static int
4689linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4690 unsigned const char *writebuf,
4691 CORE_ADDR offset, int len)
4692{
4693 long pid = lwpid_of (get_thread_lwp (current_inferior));
4694 char buf[128];
4695 int fd = 0;
4696 int ret = 0;
4697
4698 if (!writebuf && !readbuf)
4699 return -1;
4700
4701 if (!*annex)
4702 {
4703 if (!readbuf)
4704 return -1;
4705 else
4706 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4707 }
4708
4709 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4710 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4711 if (fd <= 0)
4712 return -1;
4713
4714 if (offset != 0
4715 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4716 {
4717 close (fd);
4718 return 0;
4719 }
4720
4721 if (writebuf)
4722 ret = write (fd, writebuf, (size_t) len);
4723 else
4724 ret = read (fd, readbuf, (size_t) len);
4725
4726 close (fd);
4727 return ret;
4728}
4729
723b724b 4730#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
4731struct target_loadseg
4732{
4733 /* Core address to which the segment is mapped. */
4734 Elf32_Addr addr;
4735 /* VMA recorded in the program header. */
4736 Elf32_Addr p_vaddr;
4737 /* Size of this segment in memory. */
4738 Elf32_Word p_memsz;
4739};
4740
723b724b 4741# if defined PT_GETDSBT
78d85199
YQ
4742struct target_loadmap
4743{
4744 /* Protocol version number, must be zero. */
4745 Elf32_Word version;
4746 /* Pointer to the DSBT table, its size, and the DSBT index. */
4747 unsigned *dsbt_table;
4748 unsigned dsbt_size, dsbt_index;
4749 /* Number of segments in this map. */
4750 Elf32_Word nsegs;
4751 /* The actual memory map. */
4752 struct target_loadseg segs[/*nsegs*/];
4753};
723b724b
MF
4754# define LINUX_LOADMAP PT_GETDSBT
4755# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4756# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4757# else
4758struct target_loadmap
4759{
4760 /* Protocol version number, must be zero. */
4761 Elf32_Half version;
4762 /* Number of segments in this map. */
4763 Elf32_Half nsegs;
4764 /* The actual memory map. */
4765 struct target_loadseg segs[/*nsegs*/];
4766};
4767# define LINUX_LOADMAP PTRACE_GETFDPIC
4768# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4769# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4770# endif
78d85199 4771
78d85199
YQ
4772static int
4773linux_read_loadmap (const char *annex, CORE_ADDR offset,
4774 unsigned char *myaddr, unsigned int len)
4775{
4776 int pid = lwpid_of (get_thread_lwp (current_inferior));
4777 int addr = -1;
4778 struct target_loadmap *data = NULL;
4779 unsigned int actual_length, copy_length;
4780
4781 if (strcmp (annex, "exec") == 0)
723b724b 4782 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 4783 else if (strcmp (annex, "interp") == 0)
723b724b 4784 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
4785 else
4786 return -1;
4787
723b724b 4788 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
4789 return -1;
4790
4791 if (data == NULL)
4792 return -1;
4793
4794 actual_length = sizeof (struct target_loadmap)
4795 + sizeof (struct target_loadseg) * data->nsegs;
4796
4797 if (offset < 0 || offset > actual_length)
4798 return -1;
4799
4800 copy_length = actual_length - offset < len ? actual_length - offset : len;
4801 memcpy (myaddr, (char *) data + offset, copy_length);
4802 return copy_length;
4803}
723b724b
MF
4804#else
4805# define linux_read_loadmap NULL
4806#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 4807
1570b33e
L
4808static void
4809linux_process_qsupported (const char *query)
4810{
4811 if (the_low_target.process_qsupported != NULL)
4812 the_low_target.process_qsupported (query);
4813}
4814
219f2f23
PA
4815static int
4816linux_supports_tracepoints (void)
4817{
4818 if (*the_low_target.supports_tracepoints == NULL)
4819 return 0;
4820
4821 return (*the_low_target.supports_tracepoints) ();
4822}
4823
4824static CORE_ADDR
4825linux_read_pc (struct regcache *regcache)
4826{
4827 if (the_low_target.get_pc == NULL)
4828 return 0;
4829
4830 return (*the_low_target.get_pc) (regcache);
4831}
4832
4833static void
4834linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4835{
4836 gdb_assert (the_low_target.set_pc != NULL);
4837
4838 (*the_low_target.set_pc) (regcache, pc);
4839}
4840
8336d594
PA
/* Report whether THREAD's LWP is currently stopped (per the "stopped"
   flag on its lwp_info).  */

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
4846
4847/* This exposes stop-all-threads functionality to other modules. */
4848
/* Stop all LWPs; FREEZE is passed through to stop_all_lwps.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
4854
4855/* This exposes unstop-all-threads functionality to other gdbserver
4856 modules. */
4857
/* Resume all LWPs; UNFREEZE is passed through to unstop_all_lwps.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
4863
90d74c30
PA
/* Called before a memory access; in non-stop mode, pause all threads
   first.  Always returns 0 (success).  */

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}
4873
/* Called after a memory access; undo the pause done by
   linux_prepare_to_access_memory in non-stop mode.  */

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
4882
fa593d66
PA
/* Forward a fast-tracepoint jump-pad installation request to the
   architecture backend.  Unlike the other wrappers here there is no
   NULL check: targets advertising fast tracepoints must provide the
   hook.  All arguments and the return value pass through
   unchanged.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end);
}
4899
6a271cae
PA
4900static struct emit_ops *
4901linux_emit_ops (void)
4902{
4903 if (the_low_target.emit_ops != NULL)
4904 return (*the_low_target.emit_ops) ();
4905 else
4906 return NULL;
4907}
4908
ce3a066d
DJ
/* The Linux target vector.  Entries are positional and must match the
   order of the function pointers in struct target_ops (see target.h);
   a NULL entry marks an operation unsupported in this configuration.
   Do not reorder.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops
};
4970
0d62e5e8
DJ
/* Signal setup specific to the Linux target: ignore the LinuxThreads
   cancellation signal so it does not kill gdbserver.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4980
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector and breakpoint data, set up signals, probe for
   PTRACE_O_TRACEFORK support, size the disabled-regsets table, and
   install the SIGCHLD handler.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets (the table is terminated by a negative size) so
     we can allocate one disabled flag per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART so interrupted syscalls elsewhere are retried
     transparently.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}