1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20#include "server.h"
21#include "linux-low.h"
22
23#include <sys/wait.h>
24#include <stdio.h>
25#include <sys/param.h>
26#include <sys/ptrace.h>
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
30#include <string.h>
31#include <stdlib.h>
32#include <unistd.h>
33#include <errno.h>
34#include <sys/syscall.h>
35#include <sched.h>
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
40
41#ifndef PTRACE_GETSIGINFO
42# define PTRACE_GETSIGINFO 0x4202
43# define PTRACE_SETSIGINFO 0x4203
44#endif
45
46#ifndef O_LARGEFILE
47#define O_LARGEFILE 0
48#endif
49
50/* If the system headers did not provide the constants, hard-code the normal
51 values. */
52#ifndef PTRACE_EVENT_FORK
53
54#define PTRACE_SETOPTIONS 0x4200
55#define PTRACE_GETEVENTMSG 0x4201
56
 57/* Options set using PTRACE_SETOPTIONS. */
58#define PTRACE_O_TRACESYSGOOD 0x00000001
59#define PTRACE_O_TRACEFORK 0x00000002
60#define PTRACE_O_TRACEVFORK 0x00000004
61#define PTRACE_O_TRACECLONE 0x00000008
62#define PTRACE_O_TRACEEXEC 0x00000010
63#define PTRACE_O_TRACEVFORKDONE 0x00000020
64#define PTRACE_O_TRACEEXIT 0x00000040
65
66/* Wait extended result codes for the above trace options. */
67#define PTRACE_EVENT_FORK 1
68#define PTRACE_EVENT_VFORK 2
69#define PTRACE_EVENT_CLONE 3
70#define PTRACE_EVENT_EXEC 4
71#define PTRACE_EVENT_VFORK_DONE 5
72#define PTRACE_EVENT_EXIT 6
73
74#endif /* PTRACE_EVENT_FORK */
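
/* A minimal sketch, for illustration only, of how the extended event
   code is recovered from a waitpid status once the PTRACE_O_TRACE*
   options above are in effect: the tracee stops with SIGTRAP and the
   event number is carried in the high bits of the status word.  This
   mirrors the "wstat >> 16" test in handle_extended_wait below.  */
#if 0
static int
example_extended_event (int wstat)
{
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    return wstat >> 16;		/* E.g. PTRACE_EVENT_CLONE.  */
  return 0;
}
#endif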
75
76/* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79#ifndef __WALL
80#define __WALL 0x40000000 /* Wait for any child. */
81#endif
82
83#ifdef __UCLIBC__
84#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85#define HAS_NOMMU
86#endif
87#endif
88
89/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID.
94
95 ``all_processes'' is keyed by the "overall process ID", which
96 GNU/Linux calls tgid, "thread group ID". */
97
98struct inferior_list all_lwps;
99
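/* For illustration (a sketch; ptid_build and friends come from the
   common gdbserver code): the ptids used throughout this file pack the
   "overall" process ID (tgid) into the pid field and the kernel LWP ID
   into the lwp field, which is what find_lwp_pid matches on.  */
#if 0
static ptid_t
example_build_ptid (int tgid, unsigned long lwpid)
{
  /* ptid_get_pid (ptid) == tgid, ptid_get_lwp (ptid) == lwpid.  */
  return ptid_build (tgid, lwpid, 0);
}
#endif
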
100/* A list of all unknown processes which receive stop signals. Some other
101 process will presumably claim each of these as forked children
102 momentarily. */
103
104struct inferior_list stopped_pids;
105
106/* FIXME this is a bit of a hack, and could be removed. */
107int stopping_threads;
108
109/* FIXME make into a target method? */
110int using_threads = 1;
111
112/* This flag is true iff we've just created or attached to our first
113 inferior but it has not stopped yet. As soon as it does, we need
114 to call the low target's arch_setup callback. Doing this only on
 115 the first inferior avoids reinitializing the architecture on every
116 inferior, and avoids messing with the register caches of the
117 already running inferiors. NOTE: this assumes all inferiors under
118 control of gdbserver have the same architecture. */
119static int new_inferior;
120
121static void linux_resume_one_lwp (struct inferior_list_entry *entry,
122 int step, int signal, siginfo_t *info);
123static void linux_resume (struct thread_resume *resume_info, size_t n);
124static void stop_all_lwps (void);
125static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
126static int check_removed_breakpoint (struct lwp_info *event_child);
127static void *add_lwp (ptid_t ptid);
128static int my_waitpid (int pid, int *status, int flags);
129static int linux_stopped_by_watchpoint (void);
130static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
131
132struct pending_signals
133{
134 int signal;
135 siginfo_t info;
136 struct pending_signals *prev;
137};
138
139#define PTRACE_ARG3_TYPE long
140#define PTRACE_XFER_TYPE long
141
142#ifdef HAVE_LINUX_REGSETS
143static char *disabled_regsets;
144static int num_regsets;
145#endif
146
147/* The read/write ends of the pipe registered as waitable file in the
148 event loop. */
149static int linux_event_pipe[2] = { -1, -1 };
150
151/* True if we're currently in async mode. */
152#define target_is_async_p() (linux_event_pipe[0] != -1)
153
154static void send_sigstop (struct inferior_list_entry *entry);
155static void wait_for_sigstop (struct inferior_list_entry *entry);
156
157static void
158delete_lwp (struct lwp_info *lwp)
159{
160 remove_thread (get_lwp_thread (lwp));
161 remove_inferior (&all_lwps, &lwp->head);
162 free (lwp);
163}
164
165/* Add a process to the common process list, and set its private
166 data. */
167
168static struct process_info *
169linux_add_process (int pid, int attached)
170{
171 struct process_info *proc;
172
173 /* Is this the first process? If so, then set the arch. */
174 if (all_processes.head == NULL)
175 new_inferior = 1;
176
177 proc = add_process (pid, attached);
178 proc->private = xcalloc (1, sizeof (*proc->private));
179
180 return proc;
181}
182
183/* Remove a process from the common process list,
184 also freeing all private data. */
185
186static void
187linux_remove_process (struct process_info *process)
188{
189 free (process->private);
190 remove_process (process);
191}
192
193/* Handle a GNU/Linux extended wait response. If we see a clone
194 event, we need to add the new LWP to our list (and not report the
195 trap to higher layers). */
196
197static void
198handle_extended_wait (struct lwp_info *event_child, int wstat)
199{
200 int event = wstat >> 16;
201 struct lwp_info *new_lwp;
202
203 if (event == PTRACE_EVENT_CLONE)
204 {
205 ptid_t ptid;
206 unsigned long new_pid;
207 int ret, status = W_STOPCODE (SIGSTOP);
208
209 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
210
211 /* If we haven't already seen the new PID stop, wait for it now. */
212 if (! pull_pid_from_list (&stopped_pids, new_pid))
213 {
214 /* The new child has a pending SIGSTOP. We can't affect it until it
215 hits the SIGSTOP, but we're already attached. */
216
217 ret = my_waitpid (new_pid, &status, __WALL);
218
219 if (ret == -1)
220 perror_with_name ("waiting for new child");
221 else if (ret != new_pid)
222 warning ("wait returned unexpected PID %d", ret);
223 else if (!WIFSTOPPED (status))
224 warning ("wait returned unexpected status 0x%x", status);
225 }
226
227 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
228
229 ptid = ptid_build (pid_of (event_child), new_pid, 0);
230 new_lwp = (struct lwp_info *) add_lwp (ptid);
231 add_thread (ptid, new_lwp);
232
233 /* Normally we will get the pending SIGSTOP. But in some cases
234 we might get another signal delivered to the group first.
235 If we do get another signal, be sure not to lose it. */
236 if (WSTOPSIG (status) == SIGSTOP)
237 {
238 if (stopping_threads)
239 new_lwp->stopped = 1;
240 else
241 ptrace (PTRACE_CONT, new_pid, 0, 0);
242 }
243 else
244 {
245 new_lwp->stop_expected = 1;
246 if (stopping_threads)
247 {
248 new_lwp->stopped = 1;
249 new_lwp->status_pending_p = 1;
250 new_lwp->status_pending = status;
251 }
252 else
253 /* Pass the signal on. This is what GDB does - except
254 shouldn't we really report it instead? */
255 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
256 }
257
258 /* Always resume the current thread. If we are stopping
259 threads, it will have a pending SIGSTOP; we may as well
260 collect it now. */
261 linux_resume_one_lwp (&event_child->head,
262 event_child->stepping, 0, NULL);
263 }
264}
265
266/* This function should only be called if the process got a SIGTRAP.
267 The SIGTRAP could mean several things.
268
269 On i386, where decr_pc_after_break is non-zero:
270 If we were single-stepping this process using PTRACE_SINGLESTEP,
271 we will get only the one SIGTRAP (even if the instruction we
272 stepped over was a breakpoint). The value of $eip will be the
273 next instruction.
274 If we continue the process using PTRACE_CONT, we will get a
275 SIGTRAP when we hit a breakpoint. The value of $eip will be
276 the instruction after the breakpoint (i.e. needs to be
277 decremented). If we report the SIGTRAP to GDB, we must also
278 report the undecremented PC. If we cancel the SIGTRAP, we
279 must resume at the decremented PC.
280
281 (Presumably, not yet tested) On a non-decr_pc_after_break machine
282 with hardware or kernel single-step:
283 If we single-step over a breakpoint instruction, our PC will
284 point at the following instruction. If we continue and hit a
285 breakpoint instruction, our PC will point at the breakpoint
286 instruction. */
287
288static CORE_ADDR
289get_stop_pc (void)
290{
291 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
292
293 if (get_thread_lwp (current_inferior)->stepping)
294 return stop_pc;
295 else
296 return stop_pc - the_low_target.decr_pc_after_break;
297}
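
/* Worked example (a sketch assuming an x86-style target where
   decr_pc_after_break is 1 and the breakpoint instruction is a one-byte
   trap): if a breakpoint lives at 0x1000 and the inferior was continued
   with PTRACE_CONT, the SIGTRAP is reported with the PC at 0x1001;
   get_stop_pc subtracts 1 so that later comparisons against the
   breakpoint address succeed.  No adjustment is made when stepping.  */
#if 0
static CORE_ADDR
example_adjusted_pc (CORE_ADDR reported_pc)
{
  /* 0x1001 - 1 == 0x1000, the breakpoint address itself.  */
  return reported_pc - the_low_target.decr_pc_after_break;
}
#endif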
298
299static void *
300add_lwp (ptid_t ptid)
301{
302 struct lwp_info *lwp;
303
304 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
305 memset (lwp, 0, sizeof (*lwp));
306
307 lwp->head.id = ptid;
308
309 add_inferior_to_list (&all_lwps, &lwp->head);
310
311 return lwp;
312}
313
 314/* Start an inferior process and return its pid.
315 ALLARGS is a vector of program-name and args. */
316
317static int
318linux_create_inferior (char *program, char **allargs)
319{
320 struct lwp_info *new_lwp;
321 int pid;
322 ptid_t ptid;
323
324#if defined(__UCLIBC__) && defined(HAS_NOMMU)
325 pid = vfork ();
326#else
327 pid = fork ();
328#endif
329 if (pid < 0)
330 perror_with_name ("fork");
331
332 if (pid == 0)
333 {
334 ptrace (PTRACE_TRACEME, 0, 0, 0);
335
336 signal (__SIGRTMIN + 1, SIG_DFL);
337
338 setpgid (0, 0);
339
340 execv (program, allargs);
341 if (errno == ENOENT)
342 execvp (program, allargs);
343
344 fprintf (stderr, "Cannot exec %s: %s.\n", program,
345 strerror (errno));
346 fflush (stderr);
347 _exit (0177);
348 }
349
350 linux_add_process (pid, 0);
351
352 ptid = ptid_build (pid, pid, 0);
353 new_lwp = add_lwp (ptid);
354 add_thread (ptid, new_lwp);
355 new_lwp->must_set_ptrace_flags = 1;
356
357 return pid;
358}
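
/* A self-contained sketch (for illustration; not used by gdbserver) of
   the fork + PTRACE_TRACEME + exec handshake that linux_create_inferior
   relies on: the child asks to be traced and execs, and the parent then
   observes the resulting SIGTRAP stop with waitpid.  */
#if 0
static int
example_spawn_traced (const char *path, char *const argv[])
{
  int status;
  int pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execv (path, argv);
      _exit (0177);
    }

  /* A traced child stops with SIGTRAP once the exec succeeds.  */
  waitpid (pid, &status, 0);
  return pid;
}
#endif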
359
360/* Attach to an inferior process. */
361
362static void
363linux_attach_lwp_1 (unsigned long lwpid, int initial)
364{
365 ptid_t ptid;
366 struct lwp_info *new_lwp;
367
368 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
369 {
370 if (!initial)
371 {
372 /* If we fail to attach to an LWP, just warn. */
373 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
374 strerror (errno), errno);
375 fflush (stderr);
376 return;
377 }
378 else
379 /* If we fail to attach to a process, report an error. */
380 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
381 strerror (errno), errno);
382 }
383
384 if (initial)
 385 /* NOTE/FIXME: This lwp might not have been the tgid. */
386 ptid = ptid_build (lwpid, lwpid, 0);
387 else
388 {
389 /* Note that extracting the pid from the current inferior is
390 safe, since we're always called in the context of the same
391 process as this new thread. */
392 int pid = pid_of (get_thread_lwp (current_inferior));
393 ptid = ptid_build (pid, lwpid, 0);
394 }
395
396 new_lwp = (struct lwp_info *) add_lwp (ptid);
397 add_thread (ptid, new_lwp);
398
399
400 /* We need to wait for SIGSTOP before being able to make the next
401 ptrace call on this LWP. */
402 new_lwp->must_set_ptrace_flags = 1;
403
404 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
405 brings it to a halt.
406
407 There are several cases to consider here:
408
409 1) gdbserver has already attached to the process and is being notified
410 of a new thread that is being created.
411 In this case we should ignore that SIGSTOP and resume the process.
412 This is handled below by setting stop_expected = 1.
413
414 2) This is the first thread (the process thread), and we're attaching
415 to it via attach_inferior.
416 In this case we want the process thread to stop.
417 This is handled by having linux_attach clear stop_expected after
418 we return.
419 ??? If the process already has several threads we leave the other
420 threads running.
421
422 3) GDB is connecting to gdbserver and is requesting an enumeration of all
423 existing threads.
424 In this case we want the thread to stop.
425 FIXME: This case is currently not properly handled.
 426 We should wait for the SIGSTOP but don't. Things apparently work
427 because enough time passes between when we ptrace (ATTACH) and when
428 gdb makes the next ptrace call on the thread.
429
430 On the other hand, if we are currently trying to stop all threads, we
431 should treat the new thread as if we had sent it a SIGSTOP. This works
432 because we are guaranteed that the add_lwp call above added us to the
433 end of the list, and so the new thread has not yet reached
434 wait_for_sigstop (but will). */
435 if (! stopping_threads)
436 new_lwp->stop_expected = 1;
437}
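
/* For illustration only: the bare PTRACE_ATTACH sequence underlying the
   cases discussed above.  Attaching queues a SIGSTOP for the LWP, which
   must be collected via wait before further ptrace requests; this
   sketch collects it immediately, whereas gdbserver defers that to
   linux_wait_for_event as described in the comment above.  */
#if 0
static int
example_attach_and_stop (unsigned long lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    return -1;

  if (my_waitpid (lwpid, &status, __WALL) != (int) lwpid
      || !WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
    return -1;

  return 0;
}
#endif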
438
439void
440linux_attach_lwp (unsigned long lwpid)
441{
442 linux_attach_lwp_1 (lwpid, 0);
443}
444
445int
446linux_attach (unsigned long pid)
447{
448 struct lwp_info *lwp;
449
450 linux_attach_lwp_1 (pid, 1);
451
452 linux_add_process (pid, 1);
453
454 if (!non_stop)
455 {
456 /* Don't ignore the initial SIGSTOP if we just attached to this
457 process. It will be collected by wait shortly. */
458 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
459 ptid_build (pid, pid, 0));
460 lwp->stop_expected = 0;
461 }
462
463 return 0;
464}
465
466struct counter
467{
468 int pid;
469 int count;
470};
471
472static int
473second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
474{
475 struct counter *counter = args;
476
477 if (ptid_get_pid (entry->id) == counter->pid)
478 {
479 if (++counter->count > 1)
480 return 1;
481 }
482
483 return 0;
484}
485
486static int
487last_thread_of_process_p (struct thread_info *thread)
488{
489 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
490 int pid = ptid_get_pid (ptid);
 491 struct counter counter = { pid, 0 };
492
493 return (find_inferior (&all_threads,
494 second_thread_of_pid_p, &counter) == NULL);
495}
496
497/* Kill the inferior lwp. */
498
499static int
500linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
501{
502 struct thread_info *thread = (struct thread_info *) entry;
503 struct lwp_info *lwp = get_thread_lwp (thread);
504 int wstat;
505 int pid = * (int *) args;
506
507 if (ptid_get_pid (entry->id) != pid)
508 return 0;
509
510 /* We avoid killing the first thread here, because of a Linux kernel (at
511 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
512 the children get a chance to be reaped, it will remain a zombie
513 forever. */
514
515 if (last_thread_of_process_p (thread))
516 {
517 if (debug_threads)
518 fprintf (stderr, "lkop: is last of process %s\n",
519 target_pid_to_str (entry->id));
520 return 0;
521 }
522
523 /* If we're killing a running inferior, make sure it is stopped
524 first, as PTRACE_KILL will not work otherwise. */
525 if (!lwp->stopped)
526 send_sigstop (&lwp->head);
527
528 do
529 {
530 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
531
532 /* Make sure it died. The loop is most likely unnecessary. */
533 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
534 } while (pid > 0 && WIFSTOPPED (wstat));
535
536 return 0;
537}
538
539static int
540linux_kill (int pid)
541{
542 struct process_info *process;
543 struct lwp_info *lwp;
544 struct thread_info *thread;
545 int wstat;
546 int lwpid;
547
548 process = find_process_pid (pid);
549 if (process == NULL)
550 return -1;
551
552 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
553
554 /* See the comment in linux_kill_one_lwp. We did not kill the first
555 thread in the list, so do so now. */
556 lwp = find_lwp_pid (pid_to_ptid (pid));
557 thread = get_lwp_thread (lwp);
558
559 if (debug_threads)
560 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
561 lwpid_of (lwp), pid);
562
563 /* If we're killing a running inferior, make sure it is stopped
564 first, as PTRACE_KILL will not work otherwise. */
565 if (!lwp->stopped)
566 send_sigstop (&lwp->head);
567
568 do
569 {
570 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
571
572 /* Make sure it died. The loop is most likely unnecessary. */
573 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
574 } while (lwpid > 0 && WIFSTOPPED (wstat));
575
576 delete_lwp (lwp);
577 linux_remove_process (process);
578 return 0;
579}
580
581static int
582linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
583{
584 struct thread_info *thread = (struct thread_info *) entry;
585 struct lwp_info *lwp = get_thread_lwp (thread);
586 int pid = * (int *) args;
587
588 if (ptid_get_pid (entry->id) != pid)
589 return 0;
590
591 /* If we're detaching from a running inferior, make sure it is
592 stopped first, as PTRACE_DETACH will not work otherwise. */
593 if (!lwp->stopped)
594 {
595 int lwpid = lwpid_of (lwp);
596
597 stopping_threads = 1;
598 send_sigstop (&lwp->head);
599
600 /* If this detects a new thread through a clone event, the new
601 thread is appended to the end of the lwp list, so we'll
602 eventually detach from it. */
603 wait_for_sigstop (&lwp->head);
604 stopping_threads = 0;
605
606 /* If LWP exits while we're trying to stop it, there's nothing
607 left to do. */
608 lwp = find_lwp_pid (pid_to_ptid (lwpid));
609 if (lwp == NULL)
610 return 0;
611 }
612
613 /* Make sure the process isn't stopped at a breakpoint that's
614 no longer there. */
615 check_removed_breakpoint (lwp);
616
617 /* If this process is stopped but is expecting a SIGSTOP, then make
618 sure we take care of that now. This isn't absolutely guaranteed
619 to collect the SIGSTOP, but is fairly likely to. */
620 if (lwp->stop_expected)
621 {
622 int wstat;
623 /* Clear stop_expected, so that the SIGSTOP will be reported. */
624 lwp->stop_expected = 0;
625 if (lwp->stopped)
626 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
627 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
628 }
629
630 /* Flush any pending changes to the process's registers. */
631 regcache_invalidate_one ((struct inferior_list_entry *)
632 get_lwp_thread (lwp));
633
634 /* Finally, let it resume. */
635 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
636
637 delete_lwp (lwp);
638 return 0;
639}
640
641static int
642any_thread_of (struct inferior_list_entry *entry, void *args)
643{
644 int *pid_p = args;
645
646 if (ptid_get_pid (entry->id) == *pid_p)
647 return 1;
648
649 return 0;
650}
651
652static int
653linux_detach (int pid)
654{
655 struct process_info *process;
656
657 process = find_process_pid (pid);
658 if (process == NULL)
659 return -1;
660
661 current_inferior =
662 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
663
664 delete_all_breakpoints ();
665 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
666 linux_remove_process (process);
667 return 0;
668}
669
670static void
671linux_join (int pid)
672{
673 int status, ret;
674 struct process_info *process;
675
676 process = find_process_pid (pid);
677 if (process == NULL)
678 return;
679
680 do {
681 ret = my_waitpid (pid, &status, 0);
682 if (WIFEXITED (status) || WIFSIGNALED (status))
683 break;
684 } while (ret != -1 || errno != ECHILD);
685}
686
687/* Return nonzero if the given thread is still alive. */
688static int
689linux_thread_alive (ptid_t ptid)
690{
691 struct lwp_info *lwp = find_lwp_pid (ptid);
692
693 /* We assume we always know if a thread exits. If a whole process
694 exited but we still haven't been able to report it to GDB, we'll
695 hold on to the last lwp of the dead process. */
696 if (lwp != NULL)
697 return !lwp->dead;
698 else
699 return 0;
700}
701
702/* Return nonzero if this process stopped at a breakpoint which
703 no longer appears to be inserted. Also adjust the PC
704 appropriately to resume where the breakpoint used to be. */
705static int
706check_removed_breakpoint (struct lwp_info *event_child)
707{
708 CORE_ADDR stop_pc;
709 struct thread_info *saved_inferior;
710
711 if (event_child->pending_is_breakpoint == 0)
712 return 0;
713
714 if (debug_threads)
715 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
716 lwpid_of (event_child));
717
718 saved_inferior = current_inferior;
719 current_inferior = get_lwp_thread (event_child);
720
721 stop_pc = get_stop_pc ();
722
723 /* If the PC has changed since we stopped, then we shouldn't do
724 anything. This happens if, for instance, GDB handled the
725 decr_pc_after_break subtraction itself. */
726 if (stop_pc != event_child->pending_stop_pc)
727 {
728 if (debug_threads)
729 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
730 event_child->pending_stop_pc);
731
732 event_child->pending_is_breakpoint = 0;
733 current_inferior = saved_inferior;
734 return 0;
735 }
736
737 /* If the breakpoint is still there, we will report hitting it. */
738 if ((*the_low_target.breakpoint_at) (stop_pc))
739 {
740 if (debug_threads)
741 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
742 current_inferior = saved_inferior;
743 return 0;
744 }
745
746 if (debug_threads)
747 fprintf (stderr, "Removed breakpoint.\n");
748
749 /* For decr_pc_after_break targets, here is where we perform the
750 decrement. We go immediately from this function to resuming,
751 and can not safely call get_stop_pc () again. */
752 if (the_low_target.set_pc != NULL)
753 (*the_low_target.set_pc) (stop_pc);
754
755 /* We consumed the pending SIGTRAP. */
756 event_child->pending_is_breakpoint = 0;
757 event_child->status_pending_p = 0;
758 event_child->status_pending = 0;
759
760 current_inferior = saved_inferior;
761 return 1;
762}
763
764/* Return 1 if this lwp has an interesting status pending. This
765 function may silently resume an inferior lwp. */
766static int
767status_pending_p (struct inferior_list_entry *entry, void *arg)
768{
769 struct lwp_info *lwp = (struct lwp_info *) entry;
770 ptid_t ptid = * (ptid_t *) arg;
771
772 /* Check if we're only interested in events from a specific process
773 or its lwps. */
774 if (!ptid_equal (minus_one_ptid, ptid)
775 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
776 return 0;
777
778 if (lwp->status_pending_p && !lwp->suspended)
779 if (check_removed_breakpoint (lwp))
780 {
781 /* This thread was stopped at a breakpoint, and the breakpoint
782 is now gone. We were told to continue (or step...) all threads,
783 so GDB isn't trying to single-step past this breakpoint.
784 So instead of reporting the old SIGTRAP, pretend we got to
785 the breakpoint just after it was removed instead of just
786 before; resume the process. */
787 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
788 return 0;
789 }
790
791 return (lwp->status_pending_p && !lwp->suspended);
792}
793
794static int
795same_lwp (struct inferior_list_entry *entry, void *data)
796{
797 ptid_t ptid = *(ptid_t *) data;
798 int lwp;
799
800 if (ptid_get_lwp (ptid) != 0)
801 lwp = ptid_get_lwp (ptid);
802 else
803 lwp = ptid_get_pid (ptid);
804
805 if (ptid_get_lwp (entry->id) == lwp)
806 return 1;
807
808 return 0;
809}
810
811struct lwp_info *
812find_lwp_pid (ptid_t ptid)
813{
814 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
815}
816
817static struct lwp_info *
818linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
819{
820 int ret;
821 int to_wait_for = -1;
822 struct lwp_info *child = NULL;
823
824 if (debug_threads)
825 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
826
827 if (ptid_equal (ptid, minus_one_ptid))
828 to_wait_for = -1; /* any child */
829 else
830 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
831
832 options |= __WALL;
833
834retry:
835
836 ret = my_waitpid (to_wait_for, wstatp, options);
837 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
838 return NULL;
839 else if (ret == -1)
840 perror_with_name ("waitpid");
841
842 if (debug_threads
843 && (!WIFSTOPPED (*wstatp)
844 || (WSTOPSIG (*wstatp) != 32
845 && WSTOPSIG (*wstatp) != 33)))
846 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
847
848 child = find_lwp_pid (pid_to_ptid (ret));
849
850 /* If we didn't find a process, one of two things presumably happened:
851 - A process we started and then detached from has exited. Ignore it.
852 - A process we are controlling has forked and the new child's stop
853 was reported to us by the kernel. Save its PID. */
854 if (child == NULL && WIFSTOPPED (*wstatp))
855 {
856 add_pid_to_list (&stopped_pids, ret);
857 goto retry;
858 }
859 else if (child == NULL)
860 goto retry;
861
862 child->stopped = 1;
863 child->pending_is_breakpoint = 0;
864
865 child->last_status = *wstatp;
866
867 /* Architecture-specific setup after inferior is running.
868 This needs to happen after we have attached to the inferior
869 and it is stopped for the first time, but before we access
870 any inferior registers. */
871 if (new_inferior)
872 {
873 the_low_target.arch_setup ();
874#ifdef HAVE_LINUX_REGSETS
875 memset (disabled_regsets, 0, num_regsets);
876#endif
877 new_inferior = 0;
878 }
879
880 if (debug_threads
881 && WIFSTOPPED (*wstatp))
882 {
883 struct thread_info *saved_inferior = current_inferior;
884 current_inferior = (struct thread_info *)
885 find_inferior_id (&all_threads, child->head.id);
886 /* For testing only; i386_stop_pc prints out a diagnostic. */
887 if (the_low_target.get_pc != NULL)
888 get_stop_pc ();
889 current_inferior = saved_inferior;
890 }
891
892 return child;
893}
894
895/* Wait for an event from child PID. If PID is -1, wait for any
896 child. Store the stop status through the status pointer WSTAT.
897 OPTIONS is passed to the waitpid call. Return 0 if no child stop
898 event was found and OPTIONS contains WNOHANG. Return the PID of
899 the stopped child otherwise. */
900
901static int
902linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
903{
904 CORE_ADDR stop_pc;
905 struct lwp_info *event_child = NULL;
906 int bp_status;
907 struct lwp_info *requested_child = NULL;
908
909 /* Check for a lwp with a pending status. */
910 /* It is possible that the user changed the pending task's registers since
911 it stopped. We correctly handle the change of PC if we hit a breakpoint
912 (in check_removed_breakpoint); signals should be reported anyway. */
913
914 if (ptid_equal (ptid, minus_one_ptid)
915 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
916 {
917 event_child = (struct lwp_info *)
918 find_inferior (&all_lwps, status_pending_p, &ptid);
919 if (debug_threads && event_child)
920 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
921 }
922 else
923 {
924 requested_child = find_lwp_pid (ptid);
925 if (requested_child->status_pending_p
926 && !check_removed_breakpoint (requested_child))
927 event_child = requested_child;
928 }
929
930 if (event_child != NULL)
931 {
932 if (debug_threads)
933 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
934 lwpid_of (event_child), event_child->status_pending);
935 *wstat = event_child->status_pending;
936 event_child->status_pending_p = 0;
937 event_child->status_pending = 0;
938 current_inferior = get_lwp_thread (event_child);
939 return lwpid_of (event_child);
940 }
941
942 /* We only enter this loop if no process has a pending wait status. Thus
943 any action taken in response to a wait status inside this loop is
944 responding as soon as we detect the status, not after any pending
945 events. */
946 while (1)
947 {
948 event_child = linux_wait_for_lwp (ptid, wstat, options);
949
950 if ((options & WNOHANG) && event_child == NULL)
951 return 0;
952
953 if (event_child == NULL)
954 error ("event from unknown child");
955
956 current_inferior = get_lwp_thread (event_child);
957
958 /* Check for thread exit. */
959 if (! WIFSTOPPED (*wstat))
960 {
961 if (debug_threads)
962 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
963
964 /* If the last thread is exiting, just return. */
965 if (last_thread_of_process_p (current_inferior))
966 {
967 if (debug_threads)
968 fprintf (stderr, "LWP %ld is last lwp of process\n",
969 lwpid_of (event_child));
970 return lwpid_of (event_child);
971 }
972
973 delete_lwp (event_child);
974
975 if (!non_stop)
976 {
977 current_inferior = (struct thread_info *) all_threads.head;
978 if (debug_threads)
979 fprintf (stderr, "Current inferior is now %ld\n",
980 lwpid_of (get_thread_lwp (current_inferior)));
981 }
982 else
983 {
984 current_inferior = NULL;
985 if (debug_threads)
986 fprintf (stderr, "Current inferior is now <NULL>\n");
987 }
988
989 /* If we were waiting for this particular child to do something...
990 well, it did something. */
991 if (requested_child != NULL)
992 return lwpid_of (event_child);
993
994 /* Wait for a more interesting event. */
995 continue;
996 }
997
998 if (event_child->must_set_ptrace_flags)
999 {
1000 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1001 0, PTRACE_O_TRACECLONE);
1002 event_child->must_set_ptrace_flags = 0;
1003 }
1004
1005 if (WIFSTOPPED (*wstat)
1006 && WSTOPSIG (*wstat) == SIGSTOP
1007 && event_child->stop_expected)
1008 {
1009 if (debug_threads)
1010 fprintf (stderr, "Expected stop.\n");
1011 event_child->stop_expected = 0;
1012 linux_resume_one_lwp (&event_child->head,
1013 event_child->stepping, 0, NULL);
1014 continue;
1015 }
1016
1017 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1018 && *wstat >> 16 != 0)
1019 {
1020 handle_extended_wait (event_child, *wstat);
1021 continue;
1022 }
1023
1024 /* If GDB is not interested in this signal, don't stop other
1025 threads, and don't report it to GDB. Just resume the
1026 inferior right away. We do this for threading-related
1027 signals as well as any that GDB specifically requested we
1028 ignore. But never ignore SIGSTOP if we sent it ourselves,
1029 and do not ignore signals when stepping - they may require
1030 special handling to skip the signal handler. */
1031 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1032 thread library? */
1033 if (WIFSTOPPED (*wstat)
1034 && !event_child->stepping
1035 && (
1036#ifdef USE_THREAD_DB
1037 (current_process ()->private->thread_db_active
1038 && (WSTOPSIG (*wstat) == __SIGRTMIN
1039 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1040 ||
1041#endif
1042 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1043 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1044 {
1045 siginfo_t info, *info_p;
1046
1047 if (debug_threads)
1048 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1049 WSTOPSIG (*wstat), lwpid_of (event_child));
1050
1051 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1052 info_p = &info;
1053 else
1054 info_p = NULL;
1055 linux_resume_one_lwp (&event_child->head,
1056 event_child->stepping,
1057 WSTOPSIG (*wstat), info_p);
1058 continue;
1059 }
1060
1061 /* If this event was not handled above, and is not a SIGTRAP, report
1062 it. */
1063 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1064 return lwpid_of (event_child);
1065
1066 /* If this target does not support breakpoints, we simply report the
1067 SIGTRAP; it's of no concern to us. */
1068 if (the_low_target.get_pc == NULL)
1069 return lwpid_of (event_child);
1070
1071 stop_pc = get_stop_pc ();
1072
1073 /* bp_reinsert will only be set if we were single-stepping.
1074 Notice that we will resume the process after hitting
1075 a gdbserver breakpoint; single-stepping to/over one
1076 is not supported (yet). */
1077 if (event_child->bp_reinsert != 0)
1078 {
1079 if (debug_threads)
1080 fprintf (stderr, "Reinserted breakpoint.\n");
1081 reinsert_breakpoint (event_child->bp_reinsert);
1082 event_child->bp_reinsert = 0;
1083
1084 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1085 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1086 continue;
1087 }
1088
1089 bp_status = check_breakpoints (stop_pc);
1090
1091 if (bp_status != 0)
1092 {
1093 if (debug_threads)
1094 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1095
1096 /* We hit one of our own breakpoints. We mark it as a pending
1097 breakpoint, so that check_removed_breakpoint () will do the PC
1098 adjustment for us at the appropriate time. */
1099 event_child->pending_is_breakpoint = 1;
1100 event_child->pending_stop_pc = stop_pc;
1101
1102 /* We may need to put the breakpoint back. We continue in the event
1103 loop instead of simply replacing the breakpoint right away,
1104 in order to not lose signals sent to the thread that hit the
1105 breakpoint. Unfortunately this increases the window where another
1106 thread could sneak past the removed breakpoint. For the current
1107 use of server-side breakpoints (thread creation) this is
1108 acceptable; but it needs to be considered before this breakpoint
1109 mechanism can be used in more general ways. For some breakpoints
1110 it may be necessary to stop all other threads, but that should
1111 be avoided where possible.
1112
1113 If breakpoint_reinsert_addr is NULL, that means that we can
1114 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1115 mark it for reinsertion, and single-step.
1116
1117 Otherwise, call the target function to figure out where we need
1118 our temporary breakpoint, create it, and continue executing this
1119 process. */
1120
1121 /* NOTE: we're lifting breakpoints in non-stop mode. This
1122 is currently only used for thread event breakpoints, so
1123 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1124 events. */
1125 if (bp_status == 2)
1126 /* No need to reinsert. */
1127 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1128 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1129 {
1130 event_child->bp_reinsert = stop_pc;
1131 uninsert_breakpoint (stop_pc);
1132 linux_resume_one_lwp (&event_child->head, 1, 0, NULL);
1133 }
1134 else
1135 {
1136 reinsert_breakpoint_by_bp
1137 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1138 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1139 }
1140
1141 continue;
1142 }
1143
1144 if (debug_threads)
1145 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1146
1147 /* If we were single-stepping, we definitely want to report the
1148 SIGTRAP. Although the single-step operation has completed,
 1149 do not clear the stepping flag yet; we need to check it
1150 in wait_for_sigstop. */
1151 if (event_child->stepping)
1152 return lwpid_of (event_child);
1153
1154 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1155 Check if it is a breakpoint, and if so mark the process information
1156 accordingly. This will handle both the necessary fiddling with the
1157 PC on decr_pc_after_break targets and suppressing extra threads
1158 hitting a breakpoint if two hit it at once and then GDB removes it
1159 after the first is reported. Arguably it would be better to report
1160 multiple threads hitting breakpoints simultaneously, but the current
1161 remote protocol does not allow this. */
1162 if ((*the_low_target.breakpoint_at) (stop_pc))
1163 {
1164 event_child->pending_is_breakpoint = 1;
1165 event_child->pending_stop_pc = stop_pc;
1166 }
1167
1168 return lwpid_of (event_child);
1169 }
1170
1171 /* NOTREACHED */
1172 return 0;
1173}
1174
1175static int
1176linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1177{
1178 ptid_t wait_ptid;
1179
1180 if (ptid_is_pid (ptid))
1181 {
1182 /* A request to wait for a specific tgid. This is not possible
1183 with waitpid, so instead, we wait for any child, and leave
1184 children we're not interested in right now with a pending
1185 status to report later. */
1186 wait_ptid = minus_one_ptid;
1187 }
1188 else
1189 wait_ptid = ptid;
1190
1191 while (1)
1192 {
1193 int event_pid;
1194
1195 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1196
1197 if (event_pid > 0
1198 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1199 {
1200 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1201
1202 if (! WIFSTOPPED (*wstat))
1203 mark_lwp_dead (event_child, *wstat);
1204 else
1205 {
1206 event_child->status_pending_p = 1;
1207 event_child->status_pending = *wstat;
1208 }
1209 }
1210 else
1211 return event_pid;
1212 }
1213}
1214
1215/* Wait for process, returns status. */
1216
1217static ptid_t
1218linux_wait_1 (ptid_t ptid,
1219 struct target_waitstatus *ourstatus, int target_options)
1220{
1221 int w;
1222 struct thread_info *thread = NULL;
1223 struct lwp_info *lwp = NULL;
1224 int options;
1225 int pid;
1226
1227 /* Translate generic target options into linux options. */
1228 options = __WALL;
1229 if (target_options & TARGET_WNOHANG)
1230 options |= WNOHANG;
1231
1232retry:
1233 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1234
1235 /* If we were only supposed to resume one thread, only wait for
1236 that thread - if it's still alive. If it died, however - which
1237 can happen if we're coming from the thread death case below -
1238 then we need to make sure we restart the other threads. We could
1239 pick a thread at random or restart all; restarting all is less
1240 arbitrary. */
1241 if (!non_stop
1242 && !ptid_equal (cont_thread, null_ptid)
1243 && !ptid_equal (cont_thread, minus_one_ptid))
1244 {
1245 thread = (struct thread_info *) find_inferior_id (&all_threads,
1246 cont_thread);
1247
1248 /* No stepping, no signal - unless one is pending already, of course. */
1249 if (thread == NULL)
1250 {
1251 struct thread_resume resume_info;
1252 resume_info.thread = minus_one_ptid;
1253 resume_info.kind = resume_continue;
1254 resume_info.sig = 0;
1255 linux_resume (&resume_info, 1);
1256 }
1257 else
1258 ptid = cont_thread;
1259 }
1260
1261 pid = linux_wait_for_event (ptid, &w, options);
1262 if (pid == 0) /* only if TARGET_WNOHANG */
1263 return null_ptid;
1264
1265 lwp = get_thread_lwp (current_inferior);
1266
1267 /* If we are waiting for a particular child, and it exited,
1268 linux_wait_for_event will return its exit status. Similarly if
1269 the last child exited. If this is not the last child, however,
1270 do not report it as exited until there is a 'thread exited' response
1271 available in the remote protocol. Instead, just wait for another event.
1272 This should be safe, because if the thread crashed we will already
1273 have reported the termination signal to GDB; that should stop any
1274 in-progress stepping operations, etc.
1275
1276 Report the exit status of the last thread to exit. This matches
1277 LinuxThreads' behavior. */
1278
1279 if (last_thread_of_process_p (current_inferior))
1280 {
1281 if (WIFEXITED (w) || WIFSIGNALED (w))
1282 {
1283 int pid = pid_of (lwp);
1284 struct process_info *process = find_process_pid (pid);
1285
1286 delete_lwp (lwp);
1287 linux_remove_process (process);
1288
1289 current_inferior = NULL;
1290
1291 if (WIFEXITED (w))
1292 {
1293 ourstatus->kind = TARGET_WAITKIND_EXITED;
1294 ourstatus->value.integer = WEXITSTATUS (w);
1295
1296 if (debug_threads)
1297 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1298 }
1299 else
1300 {
1301 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1302 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1303
1304 if (debug_threads)
1305 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1306
1307 }
1308
1309 return pid_to_ptid (pid);
1310 }
1311 }
1312 else
1313 {
1314 if (!WIFSTOPPED (w))
1315 goto retry;
1316 }
1317
1318 /* In all-stop, stop all threads. Be careful to only do this if
1319 we're about to report an event to GDB. */
1320 if (!non_stop)
1321 stop_all_lwps ();
1322
1323 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1324
1325 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1326 {
1327 /* A thread that has been requested to stop by GDB with vCont;t,
1328 and it stopped cleanly, so report as SIG0. The use of
1329 SIGSTOP is an implementation detail. */
1330 ourstatus->value.sig = TARGET_SIGNAL_0;
1331 }
1332 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1333 {
1334 /* A thread that has been requested to stop by GDB with vCont;t,
 1335 but it stopped for other reasons. Set stop_expected so the
1336 pending SIGSTOP is ignored and the LWP is resumed. */
1337 lwp->stop_expected = 1;
1338 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1339 }
1340 else
1341 {
1342 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1343 }
1344
1345 if (debug_threads)
1346 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1347 target_pid_to_str (lwp->head.id),
1348 ourstatus->kind,
1349 ourstatus->value.sig);
1350
1351 return lwp->head.id;
1352}
1353
1354/* Get rid of any pending event in the pipe. */
1355static void
1356async_file_flush (void)
1357{
1358 int ret;
1359 char buf;
1360
1361 do
1362 ret = read (linux_event_pipe[0], &buf, 1);
1363 while (ret >= 0 || (ret == -1 && errno == EINTR));
1364}
1365
1366/* Put something in the pipe, so the event loop wakes up. */
1367static void
1368async_file_mark (void)
1369{
1370 int ret;
1371
1372 async_file_flush ();
1373
1374 do
1375 ret = write (linux_event_pipe[1], "+", 1);
1376 while (ret == 0 || (ret == -1 && errno == EINTR));
1377
1378 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1379 be awakened anyway. */
1380}
1381
1382static ptid_t
1383linux_wait (ptid_t ptid,
1384 struct target_waitstatus *ourstatus, int target_options)
1385{
1386 ptid_t event_ptid;
1387
1388 if (debug_threads)
1389 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1390
1391 /* Flush the async file first. */
1392 if (target_is_async_p ())
1393 async_file_flush ();
1394
1395 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1396
1397 /* If at least one stop was reported, there may be more. A single
1398 SIGCHLD can signal more than one child stop. */
1399 if (target_is_async_p ()
1400 && (target_options & TARGET_WNOHANG) != 0
1401 && !ptid_equal (event_ptid, null_ptid))
1402 async_file_mark ();
1403
1404 return event_ptid;
1405}
1406
1407/* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1408 thread groups are in use, we need to use tkill. */
1409
1410static int
1411kill_lwp (unsigned long lwpid, int signo)
1412{
1413 static int tkill_failed;
1414
1415 errno = 0;
1416
1417#ifdef SYS_tkill
1418 if (!tkill_failed)
1419 {
1420 int ret = syscall (SYS_tkill, lwpid, signo);
1421 if (errno != ENOSYS)
1422 return ret;
1423 errno = 0;
1424 tkill_failed = 1;
1425 }
1426#endif
1427
1428 return kill (lwpid, signo);
1429}
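
/* For illustration: with thread groups (NPTL), kill (pid, sig) is
   delivered to the thread group as a whole and the kernel picks a
   thread to handle it, whereas tkill directs the signal at one specific
   LWP, which is what the SIGSTOP-based stopping below relies on.  A
   typical call is simply kill_lwp (lwpid_of (lwp), SIGSTOP).  */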
1430
1431static void
1432send_sigstop (struct inferior_list_entry *entry)
1433{
1434 struct lwp_info *lwp = (struct lwp_info *) entry;
1435 int pid;
1436
1437 if (lwp->stopped)
1438 return;
1439
1440 pid = lwpid_of (lwp);
1441
1442 /* If we already have a pending stop signal for this process, don't
1443 send another. */
1444 if (lwp->stop_expected)
1445 {
1446 if (debug_threads)
1447 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1448
1449 /* We clear the stop_expected flag so that wait_for_sigstop
1450 will receive the SIGSTOP event (instead of silently resuming and
1451 waiting again). It'll be reset below. */
1452 lwp->stop_expected = 0;
1453 return;
1454 }
1455
1456 if (debug_threads)
1457 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1458
1459 kill_lwp (pid, SIGSTOP);
1460}
1461
1462static void
1463mark_lwp_dead (struct lwp_info *lwp, int wstat)
1464{
1465 /* It's dead, really. */
1466 lwp->dead = 1;
1467
1468 /* Store the exit status for later. */
1469 lwp->status_pending_p = 1;
1470 lwp->status_pending = wstat;
1471
1472 /* So that check_removed_breakpoint doesn't try to figure out if
1473 this is stopped at a breakpoint. */
1474 lwp->pending_is_breakpoint = 0;
1475
1476 /* Prevent trying to stop it. */
1477 lwp->stopped = 1;
1478
1479 /* No further stops are expected from a dead lwp. */
1480 lwp->stop_expected = 0;
1481}
1482
1483static void
1484wait_for_sigstop (struct inferior_list_entry *entry)
1485{
1486 struct lwp_info *lwp = (struct lwp_info *) entry;
1487 struct thread_info *saved_inferior;
1488 int wstat;
1489 ptid_t saved_tid;
1490 ptid_t ptid;
1491
1492 if (lwp->stopped)
1493 return;
1494
1495 saved_inferior = current_inferior;
1496 if (saved_inferior != NULL)
1497 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1498 else
1499 saved_tid = null_ptid; /* avoid bogus unused warning */
1500
1501 ptid = lwp->head.id;
1502
1503 linux_wait_for_event (ptid, &wstat, __WALL);
1504
1505 /* If we stopped with a non-SIGSTOP signal, save it for later
1506 and record the pending SIGSTOP. If the process exited, just
1507 return. */
1508 if (WIFSTOPPED (wstat)
1509 && WSTOPSIG (wstat) != SIGSTOP)
1510 {
1511 if (debug_threads)
1512 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1513 lwpid_of (lwp), wstat);
1514
1515 /* Do not leave a pending single-step finish to be reported to
1516 the client. The client will give us a new action for this
1517 thread, possibly a continue request --- otherwise, the client
1518 would consider this pending SIGTRAP reported later a spurious
1519 signal. */
1520 if (WSTOPSIG (wstat) == SIGTRAP
1521 && lwp->stepping
1522 && !linux_stopped_by_watchpoint ())
1523 {
1524 if (debug_threads)
1525 fprintf (stderr, " single-step SIGTRAP ignored\n");
1526 }
1527 else
1528 {
1529 lwp->status_pending_p = 1;
1530 lwp->status_pending = wstat;
1531 }
1532 lwp->stop_expected = 1;
1533 }
1534 else if (!WIFSTOPPED (wstat))
1535 {
1536 if (debug_threads)
1537 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1538 lwpid_of (lwp));
1539
1540 /* Leave this status pending for the next time we're able to
1541 report it. In the mean time, we'll report this lwp as dead
1542 to GDB, so GDB doesn't try to read registers and memory from
1543 it. */
1544 mark_lwp_dead (lwp, wstat);
1545 }
1546
1547 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1548 current_inferior = saved_inferior;
1549 else
1550 {
1551 if (debug_threads)
1552 fprintf (stderr, "Previously current thread died.\n");
1553
1554 if (non_stop)
1555 {
1556 /* We can't change the current inferior behind GDB's back,
1557 otherwise, a subsequent command may apply to the wrong
1558 process. */
1559 current_inferior = NULL;
1560 }
1561 else
1562 {
1563 /* Set a valid thread as current. */
1564 set_desired_inferior (0);
1565 }
1566 }
1567}
1568
1569static void
1570stop_all_lwps (void)
1571{
1572 stopping_threads = 1;
1573 for_each_inferior (&all_lwps, send_sigstop);
1574 for_each_inferior (&all_lwps, wait_for_sigstop);
1575 stopping_threads = 0;
1576}
1577
1578/* Resume execution of the inferior process.
1579 If STEP is nonzero, single-step it.
1580 If SIGNAL is nonzero, give it that signal. */
1581
1582static void
1583linux_resume_one_lwp (struct inferior_list_entry *entry,
1584 int step, int signal, siginfo_t *info)
1585{
1586 struct lwp_info *lwp = (struct lwp_info *) entry;
1587 struct thread_info *saved_inferior;
1588
1589 if (lwp->stopped == 0)
1590 return;
1591
1592 /* If we have pending signals or status, and a new signal, enqueue the
1593 signal. Also enqueue the signal if we are waiting to reinsert a
1594 breakpoint; it will be picked up again below. */
1595 if (signal != 0
1596 && (lwp->status_pending_p || lwp->pending_signals != NULL
1597 || lwp->bp_reinsert != 0))
1598 {
1599 struct pending_signals *p_sig;
1600 p_sig = xmalloc (sizeof (*p_sig));
1601 p_sig->prev = lwp->pending_signals;
1602 p_sig->signal = signal;
1603 if (info == NULL)
1604 memset (&p_sig->info, 0, sizeof (siginfo_t));
1605 else
1606 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1607 lwp->pending_signals = p_sig;
1608 }
1609
1610 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1611 return;
1612
1613 saved_inferior = current_inferior;
1614 current_inferior = get_lwp_thread (lwp);
1615
1616 if (debug_threads)
1617 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1618 lwpid_of (lwp), step ? "step" : "continue", signal,
1619 lwp->stop_expected ? "expected" : "not expected");
1620
1621 /* This bit needs some thinking about. If we get a signal that
1622 we must report while a single-step reinsert is still pending,
1623 we often end up resuming the thread. It might be better to
1624 (ew) allow a stack of pending events; then we could be sure that
1625 the reinsert happened right away and not lose any signals.
1626
1627 Making this stack would also shrink the window in which breakpoints are
1628 uninserted (see comment in linux_wait_for_lwp) but not enough for
1629 complete correctness, so it won't solve that problem. It may be
1630 worthwhile just to solve this one, however. */
1631 if (lwp->bp_reinsert != 0)
1632 {
1633 if (debug_threads)
1634 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1635 if (step == 0)
1636 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1637 step = 1;
1638
1639 /* Postpone any pending signal. It was enqueued above. */
1640 signal = 0;
1641 }
1642
1643 check_removed_breakpoint (lwp);
1644
1645 if (debug_threads && the_low_target.get_pc != NULL)
1646 {
1647 fprintf (stderr, " ");
1648 (*the_low_target.get_pc) ();
1649 }
1650
1651 /* If we have pending signals, consume one unless we are trying to reinsert
1652 a breakpoint. */
1653 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1654 {
1655 struct pending_signals **p_sig;
1656
1657 p_sig = &lwp->pending_signals;
1658 while ((*p_sig)->prev != NULL)
1659 p_sig = &(*p_sig)->prev;
1660
1661 signal = (*p_sig)->signal;
1662 if ((*p_sig)->info.si_signo != 0)
1663 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1664
1665 free (*p_sig);
1666 *p_sig = NULL;
1667 }
1668
1669 regcache_invalidate_one ((struct inferior_list_entry *)
1670 get_lwp_thread (lwp));
1671 errno = 0;
1672 lwp->stopped = 0;
1673 lwp->stepping = step;
1674 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);
1675
1676 current_inferior = saved_inferior;
1677 if (errno)
1678 {
1679 /* ESRCH from ptrace either means that the thread was already
1680 running (an error) or that it is gone (a race condition). If
1681 it's gone, we will get a notification the next time we wait,
1682 so we can ignore the error. We could differentiate these
1683 two, but it's tricky without waiting; the thread still exists
1684 as a zombie, so sending it signal 0 would succeed. So just
1685 ignore ESRCH. */
1686 if (errno == ESRCH)
1687 return;
1688
1689 perror_with_name ("ptrace");
1690 }
1691}
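
/* A minimal sketch, for illustration only, of the GETSIGINFO/SETSIGINFO
   round trip used above when a queued signal is re-delivered: fetching
   the original siginfo earlier and writing it back before continuing
   with the signal keeps the inferior's view of the signal intact.  */
#if 0
static void
example_redeliver_signal (unsigned long lwpid, int sig, siginfo_t *info)
{
  if (info != NULL)
    ptrace (PTRACE_SETSIGINFO, lwpid, 0, info);
  ptrace (PTRACE_CONT, lwpid, 0, sig);
}
#endif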
1692
1693struct thread_resume_array
1694{
1695 struct thread_resume *resume;
1696 size_t n;
1697};
1698
1699/* This function is called once per thread. We look up the thread
1700 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1701 resume request.
1702
1703 This algorithm is O(threads * resume elements), but resume elements
1704 is small (and will remain small at least until GDB supports thread
1705 suspension). */
1706static int
1707linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1708{
1709 struct lwp_info *lwp;
1710 struct thread_info *thread;
1711 int ndx;
1712 struct thread_resume_array *r;
1713
1714 thread = (struct thread_info *) entry;
1715 lwp = get_thread_lwp (thread);
1716 r = arg;
1717
1718 for (ndx = 0; ndx < r->n; ndx++)
1719 {
1720 ptid_t ptid = r->resume[ndx].thread;
1721 if (ptid_equal (ptid, minus_one_ptid)
1722 || ptid_equal (ptid, entry->id)
1723 || (ptid_is_pid (ptid)
1724 && (ptid_get_pid (ptid) == pid_of (lwp)))
1725 || (ptid_get_lwp (ptid) == -1
1726 && (ptid_get_pid (ptid) == pid_of (lwp))))
1727 {
1728 lwp->resume = &r->resume[ndx];
1729 return 0;
1730 }
1731 }
1732
1733 /* No resume action for this thread. */
1734 lwp->resume = NULL;
1735
1736 return 0;
1737}
1738
1739
1740/* Set *FLAG_P if this lwp has an interesting status pending. */
1741static int
1742resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1743{
1744 struct lwp_info *lwp = (struct lwp_info *) entry;
1745
1746 /* LWPs which will not be resumed are not interesting, because
1747 we might not wait for them next time through linux_wait. */
1748 if (lwp->resume == NULL)
1749 return 0;
1750
1751 /* If this thread has a removed breakpoint, we won't have any
1752 events to report later, so check now. check_removed_breakpoint
1753 may clear status_pending_p. We avoid calling check_removed_breakpoint
1754 for any thread that we are not otherwise going to resume - this
1755 lets us preserve stopped status when two threads hit a breakpoint.
1756 GDB removes the breakpoint to single-step a particular thread
1757 past it, then re-inserts it and resumes all threads. We want
1758 to report the second thread without resuming it in the interim. */
1759 if (lwp->status_pending_p)
1760 check_removed_breakpoint (lwp);
1761
1762 if (lwp->status_pending_p)
1763 * (int *) flag_p = 1;
1764
1765 return 0;
1766}
1767
1768/* This function is called once per thread. We check the thread's resume
1769 request, which will tell us whether to resume, step, or leave the thread
1770 stopped; and what signal, if any, it should be sent.
1771
1772 For threads which we aren't explicitly told otherwise, we preserve
1773 the stepping flag; this is used for stepping over gdbserver-placed
1774 breakpoints.
1775
1776 If pending_flags was set in any thread, we queue any needed
1777 signals, since we won't actually resume. We already have a pending
1778 event to report, so we don't need to preserve any step requests;
1779 they should be re-issued if necessary. */
1780
1781static int
1782linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1783{
1784 struct lwp_info *lwp;
1785 struct thread_info *thread;
1786 int step;
1787 int pending_flag = * (int *) arg;
1788
1789 thread = (struct thread_info *) entry;
1790 lwp = get_thread_lwp (thread);
1791
1792 if (lwp->resume == NULL)
1793 return 0;
1794
1795 if (lwp->resume->kind == resume_stop)
1796 {
1797 if (debug_threads)
1798 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1799
1800 if (!lwp->stopped)
1801 {
1802 if (debug_threads)
1803 fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
1804
1805 lwp->suspended = 1;
1806 send_sigstop (&lwp->head);
1807 }
1808 else
1809 {
1810 if (debug_threads)
1811 {
1812 if (lwp->suspended)
1813 fprintf (stderr, "already stopped/suspended LWP %ld\n",
1814 lwpid_of (lwp));
1815 else
1816 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
1817 lwpid_of (lwp));
1818 }
1819
1820 /* Make sure we leave the LWP suspended, so we don't try to
1821 resume it without GDB telling us to. FIXME: The LWP may
1822 have been stopped in an internal event that was not meant
1823 to be notified back to GDB (e.g., gdbserver breakpoint),
1824 so we should be reporting a stop event in that case
1825 too. */
1826 lwp->suspended = 1;
1827 }
1828
1829 /* For stop requests, we're done. */
1830 lwp->resume = NULL;
1831 return 0;
1832 }
1833 else
1834 lwp->suspended = 0;
1835
1836 /* If this thread which is about to be resumed has a pending status,
1837 then don't resume any threads - we can just report the pending
1838 status. Make sure to queue any signals that would otherwise be
 1839 sent. In all-stop mode, we make this decision based on whether *any*
1840 thread has a pending status. */
1841 if (non_stop)
1842 resume_status_pending_p (&lwp->head, &pending_flag);
1843
1844 if (!pending_flag)
1845 {
1846 if (debug_threads)
1847 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
1848
1849 if (ptid_equal (lwp->resume->thread, minus_one_ptid)
1850 && lwp->stepping
1851 && lwp->pending_is_breakpoint)
1852 step = 1;
1853 else
1854 step = (lwp->resume->kind == resume_step);
1855
1856 linux_resume_one_lwp (&lwp->head, step, lwp->resume->sig, NULL);
1857 }
1858 else
1859 {
1860 if (debug_threads)
1861 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
1862
1863 /* If we have a new signal, enqueue the signal. */
1864 if (lwp->resume->sig != 0)
1865 {
1866 struct pending_signals *p_sig;
1867 p_sig = xmalloc (sizeof (*p_sig));
1868 p_sig->prev = lwp->pending_signals;
1869 p_sig->signal = lwp->resume->sig;
1870 memset (&p_sig->info, 0, sizeof (siginfo_t));
1871
1872 /* If this is the same signal we were previously stopped by,
1873 make sure to queue its siginfo. We can ignore the return
1874 value of ptrace; if it fails, we'll skip
1875 PTRACE_SETSIGINFO. */
1876 if (WIFSTOPPED (lwp->last_status)
1877 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1878 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1879
1880 lwp->pending_signals = p_sig;
1881 }
1882 }
1883
1884 lwp->resume = NULL;
1885 return 0;
1886}
1887
1888static void
1889linux_resume (struct thread_resume *resume_info, size_t n)
1890{
1891 int pending_flag;
1892 struct thread_resume_array array = { resume_info, n };
1893
1894 find_inferior (&all_threads, linux_set_resume_request, &array);
1895
1896 /* If there is a thread which would otherwise be resumed, which
1897 has a pending status, then don't resume any threads - we can just
1898 report the pending status. Make sure to queue any signals
1899 that would otherwise be sent. In non-stop mode, we'll apply this
1900 logic to each thread individually. */
1901 pending_flag = 0;
1902 if (!non_stop)
1903 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1904
1905 if (debug_threads)
1906 {
1907 if (pending_flag)
1908 fprintf (stderr, "Not resuming, pending status\n");
1909 else
1910 fprintf (stderr, "Resuming, no pending status\n");
1911 }
1912
1913 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1914}
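/* Illustrative, compiled-out sketch (not part of gdbserver): how a caller
   might hand linux_resume a single "continue everything" request.  It
   assumes the thread_resume fields used above (`thread', `kind', `sig')
   and a resume_continue enum value alongside the resume_step/resume_stop
   kinds tested in linux_resume_one_thread; check target.h before relying
   on either.  */
#if 0
static void
example_resume_all_threads (void)
{
  struct thread_resume r;

  r.thread = minus_one_ptid;	/* Apply the request to every thread.  */
  r.kind = resume_continue;	/* Assumed kind meaning "just continue".  */
  r.sig = 0;			/* No signal to deliver on resume.  */

  linux_resume (&r, 1);
}
#endif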
1915
1916#ifdef HAVE_LINUX_USRREGS
1917
1918int
1919register_addr (int regnum)
1920{
1921 int addr;
1922
1923 if (regnum < 0 || regnum >= the_low_target.num_regs)
1924 error ("Invalid register number %d.", regnum);
1925
1926 addr = the_low_target.regmap[regnum];
1927
1928 return addr;
1929}
1930
1931/* Fetch one register. */
1932static void
1933fetch_register (int regno)
1934{
1935 CORE_ADDR regaddr;
1936 int i, size;
1937 char *buf;
1938 int pid;
1939
1940 if (regno >= the_low_target.num_regs)
1941 return;
1942 if ((*the_low_target.cannot_fetch_register) (regno))
1943 return;
1944
1945 regaddr = register_addr (regno);
1946 if (regaddr == -1)
1947 return;
1948
1949 pid = lwpid_of (get_thread_lwp (current_inferior));
1950 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1951 & - sizeof (PTRACE_XFER_TYPE));
1952 buf = alloca (size);
1953 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1954 {
1955 errno = 0;
1956 *(PTRACE_XFER_TYPE *) (buf + i) =
1957 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1958 regaddr += sizeof (PTRACE_XFER_TYPE);
1959 if (errno != 0)
1960 {
1961 /* Warning, not error, in case we are attached; sometimes the
1962 kernel doesn't let us at the registers. */
1963 char *err = strerror (errno);
1964 char *msg = alloca (strlen (err) + 128);
1965 sprintf (msg, "reading register %d: %s", regno, err);
1966 error (msg);
1967 goto error_exit;
1968 }
1969 }
1970
1971 if (the_low_target.supply_ptrace_register)
1972 the_low_target.supply_ptrace_register (regno, buf);
1973 else
1974 supply_register (regno, buf);
1975
1976error_exit:;
1977}
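/* Worked example of the size rounding above, with made-up numbers: on a
   32-bit target where PTRACE_XFER_TYPE is 4 bytes, a 10-byte register
   rounds up to (10 + 4 - 1) & -4 == 12 bytes, so the loop issues three
   PTRACE_PEEKUSER transfers of 4 bytes each.  */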
1978
1979/* Fetch all registers, or just one, from the child process. */
1980static void
1981usr_fetch_inferior_registers (int regno)
1982{
1983 if (regno == -1 || regno == 0)
1984 for (regno = 0; regno < the_low_target.num_regs; regno++)
1985 fetch_register (regno);
1986 else
1987 fetch_register (regno);
1988}
1989
1990/* Store our register values back into the inferior.
1991 If REGNO is -1, do this for all registers.
1992 Otherwise, REGNO specifies which register (so we can save time). */
1993static void
1994usr_store_inferior_registers (int regno)
1995{
1996 CORE_ADDR regaddr;
1997 int i, size;
1998 char *buf;
1999 int pid;
2000
2001 if (regno >= 0)
2002 {
2003 if (regno >= the_low_target.num_regs)
2004 return;
2005
2006 if ((*the_low_target.cannot_store_register) (regno) == 1)
2007 return;
2008
2009 regaddr = register_addr (regno);
2010 if (regaddr == -1)
2011 return;
2012 errno = 0;
2013 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2014 & - sizeof (PTRACE_XFER_TYPE);
2015 buf = alloca (size);
2016 memset (buf, 0, size);
2017
2018 if (the_low_target.collect_ptrace_register)
2019 the_low_target.collect_ptrace_register (regno, buf);
2020 else
2021 collect_register (regno, buf);
2022
2023 pid = lwpid_of (get_thread_lwp (current_inferior));
2024 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2025 {
2026 errno = 0;
2027 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2028 *(PTRACE_XFER_TYPE *) (buf + i));
2029 if (errno != 0)
2030 {
2031 /* At this point, ESRCH should mean the process is
2032 already gone, in which case we simply ignore attempts
2033 to change its registers. See also the related
2034 comment in linux_resume_one_lwp. */
2035 if (errno == ESRCH)
2036 return;
2037
2038 if ((*the_low_target.cannot_store_register) (regno) == 0)
2039 {
2040 char *err = strerror (errno);
2041 char *msg = alloca (strlen (err) + 128);
2042 sprintf (msg, "writing register %d: %s",
2043 regno, err);
2044 error (msg);
2045 return;
2046 }
2047 }
2048 regaddr += sizeof (PTRACE_XFER_TYPE);
2049 }
2050 }
2051 else
2052 for (regno = 0; regno < the_low_target.num_regs; regno++)
2053 usr_store_inferior_registers (regno);
2054}
2055#endif /* HAVE_LINUX_USRREGS */
2056
2057
2058
2059#ifdef HAVE_LINUX_REGSETS
2060
2061static int
2062regsets_fetch_inferior_registers (void)
2063{
2064 struct regset_info *regset;
2065 int saw_general_regs = 0;
2066 int pid;
2067
2068 regset = target_regsets;
2069
2070 pid = lwpid_of (get_thread_lwp (current_inferior));
2071 while (regset->size >= 0)
2072 {
2073 void *buf;
2074 int res;
2075
2076 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2077 {
2078 regset ++;
2079 continue;
2080 }
2081
2082 buf = xmalloc (regset->size);
2083#ifndef __sparc__
2084 res = ptrace (regset->get_request, pid, 0, buf);
2085#else
2086 res = ptrace (regset->get_request, pid, buf, 0);
2087#endif
2088 if (res < 0)
2089 {
2090 if (errno == EIO)
2091 {
2092 /* If we get EIO on a regset, do not try it again for
2093 this process. */
2094 disabled_regsets[regset - target_regsets] = 1;
free (buf);
2095 continue;
2096 }
2097 else
2098 {
2099 char s[256];
2100 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2101 pid);
2102 perror (s);
2103 }
2104 }
2105 else if (regset->type == GENERAL_REGS)
2106 saw_general_regs = 1;
2107 regset->store_function (buf);
free (buf);
2108 regset ++;
2109 }
2110 if (saw_general_regs)
2111 return 0;
2112 else
2113 return 1;
2114}
2115
2116static int
2117regsets_store_inferior_registers (void)
2118{
2119 struct regset_info *regset;
2120 int saw_general_regs = 0;
2121 int pid;
2122
2123 regset = target_regsets;
2124
2125 pid = lwpid_of (get_thread_lwp (current_inferior));
2126 while (regset->size >= 0)
2127 {
2128 void *buf;
2129 int res;
2130
2131 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2132 {
2133 regset ++;
2134 continue;
2135 }
2136
2137 buf = xmalloc (regset->size);
2138
2139 /* First fill the buffer with the current register set contents,
2140 in case there are any items in the kernel's regset that are
2141 not in gdbserver's regcache. */
2142#ifndef __sparc__
2143 res = ptrace (regset->get_request, pid, 0, buf);
2144#else
2145 res = ptrace (regset->get_request, pid, buf, 0);
2146#endif
2147
2148 if (res == 0)
2149 {
2150 /* Then overlay our cached registers on that. */
2151 regset->fill_function (buf);
2152
2153 /* Only now do we write the register set. */
2154#ifndef __sparc__
2155 res = ptrace (regset->set_request, pid, 0, buf);
2156#else
2157 res = ptrace (regset->set_request, pid, buf, 0);
2158#endif
2159 }
2160
2161 if (res < 0)
2162 {
2163 if (errno == EIO)
2164 {
2165 /* If we get EIO on a regset, do not try it again for
2166 this process. */
2167 disabled_regsets[regset - target_regsets] = 1;
free (buf);
2168 continue;
2169 }
2170 else if (errno == ESRCH)
2171 {
2172 /* At this point, ESRCH should mean the process is
2173 already gone, in which case we simply ignore attempts
2174 to change its registers. See also the related
2175 comment in linux_resume_one_lwp. */
free (buf);
2176 return 0;
2177 }
2178 else
2179 {
2180 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2181 }
2182 }
2183 else if (regset->type == GENERAL_REGS)
2184 saw_general_regs = 1;
2185 regset ++;
2186 free (buf);
2187 }
2188 if (saw_general_regs)
2189 return 0;
2190 else
2191 return 1;
2193}
2194
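/* Compiled-out sketch of what one target's regset table might look like,
   for readers unfamiliar with the loops above.  The field order assumed
   here (get_request, set_request, size, type, fill_function,
   store_function) and the size < 0 terminator should be checked against
   the regset_info declaration in linux-low.h; the register-block size,
   the fill/store callbacks and their signatures are placeholders, not
   real gdbserver symbols, and PTRACE_GETREGS/PTRACE_SETREGS exist only
   on some targets.  */
#if 0
static void example_fill_gregset (void *buf);
static void example_store_gregset (void *buf);

struct regset_info example_target_regsets[] = {
  { PTRACE_GETREGS, PTRACE_SETREGS,
    17 * sizeof (long),		/* Hypothetical GP register block size.  */
    GENERAL_REGS, example_fill_gregset, example_store_gregset },
  { 0, 0, -1, -1, NULL, NULL }	/* Terminator: size < 0 stops the loop.  */
};
#endif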
2195#endif /* HAVE_LINUX_REGSETS */
2196
2197
2198void
2199linux_fetch_registers (int regno)
2200{
2201#ifdef HAVE_LINUX_REGSETS
2202 if (regsets_fetch_inferior_registers () == 0)
2203 return;
2204#endif
2205#ifdef HAVE_LINUX_USRREGS
2206 usr_fetch_inferior_registers (regno);
2207#endif
2208}
2209
2210void
2211linux_store_registers (int regno)
2212{
2213#ifdef HAVE_LINUX_REGSETS
2214 if (regsets_store_inferior_registers () == 0)
2215 return;
2216#endif
2217#ifdef HAVE_LINUX_USRREGS
2218 usr_store_inferior_registers (regno);
2219#endif
2220}
2221
2222
2223/* Copy LEN bytes from inferior's memory starting at MEMADDR
2224 to debugger memory starting at MYADDR. */
2225
2226static int
2227linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2228{
2229 register int i;
2230 /* Round starting address down to longword boundary. */
2231 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2232 /* Round ending address up; get number of longwords that makes. */
2233 register int count
2234 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2235 / sizeof (PTRACE_XFER_TYPE);
2236 /* Allocate buffer of that many longwords. */
2237 register PTRACE_XFER_TYPE *buffer
2238 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2239 int fd;
2240 char filename[64];
2241 int pid = lwpid_of (get_thread_lwp (current_inferior));
2242
2243 /* Try using /proc. Don't bother for just a word or two. */
2244 if (len >= 3 * sizeof (long))
2245 {
2246 /* We could keep this file open and cache it - possibly one per
2247 thread. That requires some juggling, but is even faster. */
2248 sprintf (filename, "/proc/%d/mem", pid);
2249 fd = open (filename, O_RDONLY | O_LARGEFILE);
2250 if (fd == -1)
2251 goto no_proc;
2252
2253 /* If pread64 is available, use it. It's faster if the kernel
2254 supports it (only one syscall), and it's 64-bit safe even on
2255 32-bit platforms (for instance, SPARC debugging a SPARC64
2256 application). */
2257#ifdef HAVE_PREAD64
2258 if (pread64 (fd, myaddr, len, memaddr) != len)
2259#else
2260 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2261#endif
2262 {
2263 close (fd);
2264 goto no_proc;
2265 }
2266
2267 close (fd);
2268 return 0;
2269 }
2270
2271 no_proc:
2272 /* Read all the longwords */
2273 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2274 {
2275 errno = 0;
2276 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2277 if (errno)
2278 return errno;
2279 }
2280
2281 /* Copy appropriate bytes out of the buffer. */
2282 memcpy (myaddr,
2283 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2284 len);
2285
2286 return 0;
2287}
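/* Compiled-out sketch of the /proc/<pid>/mem fast path used above, with
   the word-by-word PTRACE_PEEKTEXT fallback and most error handling
   stripped out.  `pid', `addr', `buf' and `len' are hypothetical inputs;
   plain pread is used instead of pread64 purely to keep the sketch
   self-contained.  */
#if 0
static int
example_read_proc_mem (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  /* A single positioned read; the file offset is the inferior address.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);

  return n == (ssize_t) len ? 0 : -1;
}
#endif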
2288
2289/* Copy LEN bytes of data from debugger memory at MYADDR
2290 to inferior's memory at MEMADDR.
2291 On failure (cannot write the inferior)
2292 returns the value of errno. */
2293
2294static int
2295linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2296{
2297 register int i;
2298 /* Round starting address down to longword boundary. */
2299 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2300 /* Round ending address up; get number of longwords that makes. */
2301 register int count
2302 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2303 /* Allocate buffer of that many longwords. */
2304 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2305 int pid = lwpid_of (get_thread_lwp (current_inferior));
2306
2307 if (debug_threads)
2308 {
2309 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2310 }
2311
2312 /* Fill start and end extra bytes of buffer with existing memory data. */
2313
2314 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2315
2316 if (count > 1)
2317 {
2318 buffer[count - 1]
2319 = ptrace (PTRACE_PEEKTEXT, pid,
2320 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2321 * sizeof (PTRACE_XFER_TYPE)),
2322 0);
2323 }
2324
2325 /* Copy data to be written over corresponding part of buffer */
2326
2327 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2328
2329 /* Write the entire buffer. */
2330
2331 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2332 {
2333 errno = 0;
2334 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2335 if (errno)
2336 return errno;
2337 }
2338
2339 return 0;
2340}
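/* Worked example of the alignment above, with made-up numbers: writing
   len == 6 bytes at memaddr == 0x1003 with an 8-byte PTRACE_XFER_TYPE
   gives addr == 0x1000, count == ((0x1009 - 0x1000) + 7) / 8 == 2 words,
   and the new bytes land at offset 0x1003 & 7 == 3 inside the buffer, so
   both words are peeked first and then poked back in full.  */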
2341
2342static int linux_supports_tracefork_flag;
2343
2344/* Helper functions for linux_test_for_tracefork, called via clone (). */
2345
2346static int
2347linux_tracefork_grandchild (void *arg)
2348{
2349 _exit (0);
2350}
2351
2352#define STACK_SIZE 4096
2353
2354static int
2355linux_tracefork_child (void *arg)
2356{
2357 ptrace (PTRACE_TRACEME, 0, 0, 0);
2358 kill (getpid (), SIGSTOP);
2359#ifdef __ia64__
2360 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2361 CLONE_VM | SIGCHLD, NULL);
2362#else
2363 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2364 CLONE_VM | SIGCHLD, NULL);
2365#endif
2366 _exit (0);
2367}
2368
2369/* Wrapper function for waitpid which handles EINTR, and emulates
2370 __WALL for systems where that is not available. */
2371
2372static int
2373my_waitpid (int pid, int *status, int flags)
2374{
2375 int ret, out_errno;
2376
2377 if (debug_threads)
2378 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
2379
2380 if (flags & __WALL)
2381 {
2382 sigset_t block_mask, org_mask, wake_mask;
2383 int wnohang;
2384
2385 wnohang = (flags & WNOHANG) != 0;
2386 flags &= ~(__WALL | __WCLONE);
2387 flags |= WNOHANG;
2388
2389 /* Block all signals while here. This avoids knowing about
2390 LinuxThread's signals. */
2391 sigfillset (&block_mask);
2392 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
2393
2394 /* ... except during the sigsuspend below. */
2395 sigemptyset (&wake_mask);
2396
2397 while (1)
2398 {
2399 /* Since all signals are blocked, there's no need to check
2400 for EINTR here. */
2401 ret = waitpid (pid, status, flags);
2402 out_errno = errno;
2403
2404 if (ret == -1 && out_errno != ECHILD)
2405 break;
2406 else if (ret > 0)
2407 break;
2408
2409 if (flags & __WCLONE)
2410 {
2411 /* We've tried both flavors now. If WNOHANG is set,
2412 there's nothing else to do, just bail out. */
2413 if (wnohang)
2414 break;
2415
2416 if (debug_threads)
2417 fprintf (stderr, "blocking\n");
2418
2419 /* Block waiting for signals. */
2420 sigsuspend (&wake_mask);
2421 }
2422
2423 flags ^= __WCLONE;
2424 }
2425
2426 sigprocmask (SIG_SETMASK, &org_mask, NULL);
2427 }
2428 else
2429 {
2430 do
2431 ret = waitpid (pid, status, flags);
2432 while (ret == -1 && errno == EINTR);
2433 out_errno = errno;
2434 }
2435
2436 if (debug_threads)
2437 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
2438 pid, flags, status ? *status : -1, ret);
2439
2440 errno = out_errno;
2441 return ret;
2442}
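/* Compiled-out usage sketch: this is how callers further down use the
   wrapper to wait for any child or clone LWP and then decode the result
   with the standard <sys/wait.h> macros.  */
#if 0
static void
example_wait_for_any_lwp (void)
{
  int status;
  int pid = my_waitpid (-1, &status, __WALL);

  if (pid > 0 && WIFSTOPPED (status))
    fprintf (stderr, "LWP %d stopped by signal %d\n",
	     pid, WSTOPSIG (status));
}
#endif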
2443
2444/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2445 sure that we can enable the option, and that it had the desired
2446 effect. */
2447
2448static void
2449linux_test_for_tracefork (void)
2450{
2451 int child_pid, ret, status;
2452 long second_pid;
2453 char *stack = xmalloc (STACK_SIZE * 4);
2454
2455 linux_supports_tracefork_flag = 0;
2456
2457 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2458#ifdef __ia64__
2459 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2460 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2461#else
2462 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2463 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2464#endif
2465 if (child_pid == -1)
2466 perror_with_name ("clone");
2467
2468 ret = my_waitpid (child_pid, &status, 0);
2469 if (ret == -1)
2470 perror_with_name ("waitpid");
2471 else if (ret != child_pid)
2472 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2473 if (! WIFSTOPPED (status))
2474 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2475
2476 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
2477 if (ret != 0)
2478 {
2479 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2480 if (ret != 0)
2481 {
2482 warning ("linux_test_for_tracefork: failed to kill child");
2483 return;
2484 }
2485
2486 ret = my_waitpid (child_pid, &status, 0);
2487 if (ret != child_pid)
2488 warning ("linux_test_for_tracefork: failed to wait for killed child");
2489 else if (!WIFSIGNALED (status))
2490 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2491 "killed child", status);
2492
2493 return;
2494 }
2495
2496 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2497 if (ret != 0)
2498 warning ("linux_test_for_tracefork: failed to resume child");
2499
2500 ret = my_waitpid (child_pid, &status, 0);
2501
2502 if (ret == child_pid && WIFSTOPPED (status)
2503 && status >> 16 == PTRACE_EVENT_FORK)
2504 {
2505 second_pid = 0;
2506 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2507 if (ret == 0 && second_pid != 0)
2508 {
2509 int second_status;
2510
2511 linux_supports_tracefork_flag = 1;
2512 my_waitpid (second_pid, &second_status, 0);
2513 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2514 if (ret != 0)
2515 warning ("linux_test_for_tracefork: failed to kill second child");
2516 my_waitpid (second_pid, &status, 0);
2517 }
2518 }
2519 else
2520 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2521 "(%d, status 0x%x)", ret, status);
2522
2523 do
2524 {
2525 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2526 if (ret != 0)
2527 warning ("linux_test_for_tracefork: failed to kill child");
2528 my_waitpid (child_pid, &status, 0);
2529 }
2530 while (WIFSTOPPED (status));
2531
2532 free (stack);
2533}
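/* Compiled-out sketch: once the probe above has set
   linux_supports_tracefork_flag, following a fork in an attached,
   stopped LWP takes this shape: enable the option, resume, and decode
   the extended event from the high bits of the wait status.  A real
   caller also has to keep the new child attached and stopped.  */
#if 0
static void
example_follow_fork (int pid)
{
  int status;
  long new_pid = 0;

  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEFORK);
  ptrace (PTRACE_CONT, pid, 0, 0);

  if (my_waitpid (pid, &status, 0) == pid
      && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      /* The new child's PID is retrieved with PTRACE_GETEVENTMSG.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
      fprintf (stderr, "fork child is LWP %ld\n", new_pid);
    }
}
#endif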
2534
2535
2536static void
2537linux_look_up_symbols (void)
2538{
2539#ifdef USE_THREAD_DB
2540 struct process_info *proc = current_process ();
2541
2542 if (proc->private->thread_db_active)
2543 return;
2544
2545 proc->private->thread_db_active
2546 = thread_db_init (!linux_supports_tracefork_flag);
2547#endif
2548}
2549
2550static void
2551linux_request_interrupt (void)
2552{
2553 extern unsigned long signal_pid;
2554
2555 if (!ptid_equal (cont_thread, null_ptid)
2556 && !ptid_equal (cont_thread, minus_one_ptid))
2557 {
2558 struct lwp_info *lwp;
2559 int lwpid;
2560
2561 lwp = get_thread_lwp (current_inferior);
2562 lwpid = lwpid_of (lwp);
2563 kill_lwp (lwpid, SIGINT);
2564 }
2565 else
2566 kill_lwp (signal_pid, SIGINT);
2567}
2568
2569/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2570 to debugger memory starting at MYADDR. */
2571
2572static int
2573linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2574{
2575 char filename[PATH_MAX];
2576 int fd, n;
2577 int pid = lwpid_of (get_thread_lwp (current_inferior));
2578
2579 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2580
2581 fd = open (filename, O_RDONLY);
2582 if (fd < 0)
2583 return -1;
2584
2585 if (offset != (CORE_ADDR) 0
2586 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2587 n = -1;
2588 else
2589 n = read (fd, myaddr, len);
2590
2591 close (fd);
2592
2593 return n;
2594}
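/* Compiled-out sketch: the bytes returned above are the inferior's raw
   auxv, an array of (type, value) pairs terminated by a zero type
   (AT_NULL).  This assumes the inferior's word size matches gdbserver's,
   which does not hold for, say, a 32-bit inferior under a 64-bit
   gdbserver.  */
#if 0
static void
example_dump_auxv (void)
{
  unsigned long pair[2];
  CORE_ADDR offset = 0;

  while (linux_read_auxv (offset, (unsigned char *) pair, sizeof pair)
	 == (int) sizeof pair
	 && pair[0] != 0 /* AT_NULL */)
    {
      fprintf (stderr, "auxv entry: type %lu, value 0x%lx\n",
	       pair[0], pair[1]);
      offset += sizeof pair;
    }
}
#endif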
2595
2596/* These watchpoint related wrapper functions simply pass on the function call
2597 if the target has registered a corresponding function. */
2598
2599static int
2600linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2601{
2602 if (the_low_target.insert_watchpoint != NULL)
2603 return the_low_target.insert_watchpoint (type, addr, len);
2604 else
2605 /* Unsupported (see target.h). */
2606 return 1;
2607}
2608
2609static int
2610linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2611{
2612 if (the_low_target.remove_watchpoint != NULL)
2613 return the_low_target.remove_watchpoint (type, addr, len);
2614 else
2615 /* Unsupported (see target.h). */
2616 return 1;
2617}
2618
2619static int
2620linux_stopped_by_watchpoint (void)
2621{
2622 if (the_low_target.stopped_by_watchpoint != NULL)
2623 return the_low_target.stopped_by_watchpoint ();
2624 else
2625 return 0;
2626}
2627
2628static CORE_ADDR
2629linux_stopped_data_address (void)
2630{
2631 if (the_low_target.stopped_data_address != NULL)
2632 return the_low_target.stopped_data_address ();
2633 else
2634 return 0;
2635}
2636
2637#if defined(__UCLIBC__) && defined(HAS_NOMMU)
2638#if defined(__mcoldfire__)
2639/* These should really be defined in the kernel's ptrace.h header. */
2640#define PT_TEXT_ADDR 49*4
2641#define PT_DATA_ADDR 50*4
2642#define PT_TEXT_END_ADDR 51*4
2643#endif
2644
2645/* Under uClinux, programs are loaded at non-zero offsets, which we need
2646 to tell gdb about. */
2647
2648static int
2649linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2650{
2651#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2652 unsigned long text, text_end, data;
2653 int pid = lwpid_of (get_thread_lwp (current_inferior));
2654
2655 errno = 0;
2656
2657 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2658 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2659 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2660
2661 if (errno == 0)
2662 {
2663 /* Both text and data offsets produced at compile-time (and so
2664 used by gdb) are relative to the beginning of the program,
2665 with the data segment immediately following the text segment.
2666 However, the actual runtime layout in memory may put the data
2667 somewhere else, so when we send gdb a data base-address, we
2668 use the real data base address and subtract the compile-time
2669 data base-address from it (which is just the length of the
2670 text segment). BSS immediately follows data in both
2671 cases. */
2672 *text_p = text;
2673 *data_p = data - (text_end - text);
2674
2675 return 1;
2676 }
2677#endif
2678 return 0;
2679}
2680#endif
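/* Worked example for the computation above, with made-up addresses: if
   the text segment runs from 0x10000000 to text_end == 0x10004000 and
   the data segment was actually loaded at 0x20000000, we report
   *data_p == 0x20000000 - 0x4000 == 0x1fffc000.  gdb then adds its
   compile-time data offset (0x4000, right after text) to that base and
   arrives back at the real load address, 0x20000000.  */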
2681
2682static int
2683linux_qxfer_osdata (const char *annex,
2684 unsigned char *readbuf, unsigned const char *writebuf,
2685 CORE_ADDR offset, int len)
2686{
2687 /* We make the process list snapshot when the object starts to be
2688 read. */
2689 static const char *buf;
2690 static long len_avail = -1;
2691 static struct buffer buffer;
2692
2693 DIR *dirp;
2694
2695 if (strcmp (annex, "processes") != 0)
2696 return 0;
2697
2698 if (!readbuf || writebuf)
2699 return 0;
2700
2701 if (offset == 0)
2702 {
2703 if (len_avail != -1 && len_avail != 0)
2704 buffer_free (&buffer);
2705 len_avail = 0;
2706 buf = NULL;
2707 buffer_init (&buffer);
2708 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2709
2710 dirp = opendir ("/proc");
2711 if (dirp)
2712 {
2713 struct dirent *dp;
2714 while ((dp = readdir (dirp)) != NULL)
2715 {
2716 struct stat statbuf;
2717 char procentry[sizeof ("/proc/4294967295")];
2718
2719 if (!isdigit (dp->d_name[0])
2720 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2721 continue;
2722
2723 sprintf (procentry, "/proc/%s", dp->d_name);
2724 if (stat (procentry, &statbuf) == 0
2725 && S_ISDIR (statbuf.st_mode))
2726 {
2727 char pathname[128];
2728 FILE *f;
2729 char cmd[MAXPATHLEN + 1];
2730 struct passwd *entry;
2731
2732 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2733 entry = getpwuid (statbuf.st_uid);
2734
2735 if ((f = fopen (pathname, "r")) != NULL)
2736 {
2737 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2738 if (len > 0)
2739 {
2740 int i;
2741 for (i = 0; i < len; i++)
2742 if (cmd[i] == '\0')
2743 cmd[i] = ' ';
2744 cmd[len] = '\0';
2745
2746 buffer_xml_printf (
2747 &buffer,
2748 "<item>"
2749 "<column name=\"pid\">%s</column>"
2750 "<column name=\"user\">%s</column>"
2751 "<column name=\"command\">%s</column>"
2752 "</item>",
2753 dp->d_name,
2754 entry ? entry->pw_name : "?",
2755 cmd);
2756 }
2757 fclose (f);
2758 }
2759 }
2760 }
2761
2762 closedir (dirp);
2763 }
2764 buffer_grow_str0 (&buffer, "</osdata>\n");
2765 buf = buffer_finish (&buffer);
2766 len_avail = strlen (buf);
2767 }
2768
2769 if (offset >= len_avail)
2770 {
2771 /* Done. Get rid of the data. */
2772 buffer_free (&buffer);
2773 buf = NULL;
2774 len_avail = 0;
2775 return 0;
2776 }
2777
2778 if (len > len_avail - offset)
2779 len = len_avail - offset;
2780 memcpy (readbuf, buf + offset, len);
2781
2782 return len;
2783}
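/* For reference, the osdata document built above has this shape (values
   are made up, and whitespace is added here for readability; the real
   buffer is emitted without indentation):

     <osdata type="processes">
       <item>
         <column name="pid">1234</column>
         <column name="user">nobody</column>
         <column name="command">/sbin/getty 38400 tty1</column>
       </item>
     </osdata>
*/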
2784
2785static int
2786linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2787 unsigned const char *writebuf, CORE_ADDR offset, int len)
2788{
2789 struct siginfo siginfo;
2790 long pid = -1;
2791
2792 if (current_inferior == NULL)
2793 return -1;
2794
2795 pid = lwpid_of (get_thread_lwp (current_inferior));
2796
2797 if (debug_threads)
2798 fprintf (stderr, "%s siginfo for lwp %ld.\n",
2799 readbuf != NULL ? "Reading" : "Writing",
2800 pid);
2801
2802 if (offset > sizeof (siginfo))
2803 return -1;
2804
2805 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2806 return -1;
2807
2808 if (offset + len > sizeof (siginfo))
2809 len = sizeof (siginfo) - offset;
2810
2811 if (readbuf != NULL)
2812 memcpy (readbuf, (char *) &siginfo + offset, len);
2813 else
2814 {
2815 memcpy ((char *) &siginfo + offset, writebuf, len);
2816 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2817 return -1;
2818 }
2819
2820 return len;
2821}
2822
2823/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
2824 it lets us notice when children change state; and it acts as the
2825 handler for the sigsuspend in my_waitpid. */
2826
2827static void
2828sigchld_handler (int signo)
2829{
2830 int old_errno = errno;
2831
2832 if (debug_threads)
2833 /* fprintf is not async-signal-safe, so call write directly. */
2834 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
2835
2836 if (target_is_async_p ())
2837 async_file_mark (); /* trigger a linux_wait */
2838
2839 errno = old_errno;
2840}
2841
2842static int
2843linux_supports_non_stop (void)
2844{
2845 return 1;
2846}
2847
2848static int
2849linux_async (int enable)
2850{
2851 int previous = (linux_event_pipe[0] != -1);
2852
2853 if (previous != enable)
2854 {
2855 sigset_t mask;
2856 sigemptyset (&mask);
2857 sigaddset (&mask, SIGCHLD);
2858
2859 sigprocmask (SIG_BLOCK, &mask, NULL);
2860
2861 if (enable)
2862 {
2863 if (pipe (linux_event_pipe) == -1)
2864 fatal ("creating event pipe failed.");
2865
2866 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
2867 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
2868
2869 /* Register the event loop handler. */
2870 add_file_handler (linux_event_pipe[0],
2871 handle_target_event, NULL);
2872
2873 /* Always trigger a linux_wait. */
2874 async_file_mark ();
2875 }
2876 else
2877 {
2878 delete_file_handler (linux_event_pipe[0]);
2879
2880 close (linux_event_pipe[0]);
2881 close (linux_event_pipe[1]);
2882 linux_event_pipe[0] = -1;
2883 linux_event_pipe[1] = -1;
2884 }
2885
2886 sigprocmask (SIG_UNBLOCK, &mask, NULL);
2887 }
2888
2889 return previous;
2890}
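/* Compiled-out sketch of the self-pipe pattern wired up above: a writer
   (for instance the SIGCHLD handler, via async_file_mark) puts a byte in
   the pipe, the event loop watches the read end registered with
   add_file_handler, and the callback eventually triggers a linux_wait.
   The helpers the real code uses are defined earlier in this file; this
   only shows the shape of the idea.  */
#if 0
static void
example_mark_event (int write_fd)
{
  /* write is async-signal-safe, so this is legal in a signal handler.  */
  (void) write (write_fd, "+", 1);
}

static void
example_drain_events (int read_fd)
{
  char buf[32];

  /* The read end is O_NONBLOCK, so this returns once the pipe is empty.  */
  while (read (read_fd, buf, sizeof buf) > 0)
    ;
}
#endif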
2891
2892static int
2893linux_start_non_stop (int nonstop)
2894{
2895 /* Register or unregister from event-loop accordingly. */
2896 linux_async (nonstop);
2897 return 0;
2898}
2899
2900static struct target_ops linux_target_ops = {
2901 linux_create_inferior,
2902 linux_attach,
2903 linux_kill,
2904 linux_detach,
2905 linux_join,
2906 linux_thread_alive,
2907 linux_resume,
2908 linux_wait,
2909 linux_fetch_registers,
2910 linux_store_registers,
2911 linux_read_memory,
2912 linux_write_memory,
2913 linux_look_up_symbols,
2914 linux_request_interrupt,
2915 linux_read_auxv,
2916 linux_insert_watchpoint,
2917 linux_remove_watchpoint,
2918 linux_stopped_by_watchpoint,
2919 linux_stopped_data_address,
2920#if defined(__UCLIBC__) && defined(HAS_NOMMU)
2921 linux_read_offsets,
2922#else
2923 NULL,
2924#endif
2925#ifdef USE_THREAD_DB
2926 thread_db_get_tls_address,
2927#else
2928 NULL,
2929#endif
2930 NULL,
2931 hostio_last_error_from_errno,
2932 linux_qxfer_osdata,
2933 linux_xfer_siginfo,
2934 linux_supports_non_stop,
2935 linux_async,
2936 linux_start_non_stop,
2937};
2938
2939static void
2940linux_init_signals (void)
2941{
2942 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2943 to find what the cancel signal actually is. */
2944 signal (__SIGRTMIN+1, SIG_IGN);
2945}
2946
2947void
2948initialize_low (void)
2949{
2950 struct sigaction sigchld_action;
2951 memset (&sigchld_action, 0, sizeof (sigchld_action));
2952 set_target_ops (&linux_target_ops);
2953 set_breakpoint_data (the_low_target.breakpoint,
2954 the_low_target.breakpoint_len);
2955 linux_init_signals ();
2956 linux_test_for_tracefork ();
2957#ifdef HAVE_LINUX_REGSETS
2958 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2959 ;
2960 disabled_regsets = xmalloc (num_regsets);
2961#endif
2962
2963 sigchld_action.sa_handler = sigchld_handler;
2964 sigemptyset (&sigchld_action.sa_mask);
2965 sigchld_action.sa_flags = SA_RESTART;
2966 sigaction (SIGCHLD, &sigchld_action, NULL);
2967}