1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 #ifndef O_LARGEFILE
47 #define O_LARGEFILE 0
48 #endif
49
50 /* If the system headers did not provide the constants, hard-code the normal
51 values. */
52 #ifndef PTRACE_EVENT_FORK
53
54 #define PTRACE_SETOPTIONS 0x4200
55 #define PTRACE_GETEVENTMSG 0x4201
56
57 /* options set using PTRACE_SETOPTIONS */
58 #define PTRACE_O_TRACESYSGOOD 0x00000001
59 #define PTRACE_O_TRACEFORK 0x00000002
60 #define PTRACE_O_TRACEVFORK 0x00000004
61 #define PTRACE_O_TRACECLONE 0x00000008
62 #define PTRACE_O_TRACEEXEC 0x00000010
63 #define PTRACE_O_TRACEVFORKDONE 0x00000020
64 #define PTRACE_O_TRACEEXIT 0x00000040
65
66 /* Wait extended result codes for the above trace options. */
67 #define PTRACE_EVENT_FORK 1
68 #define PTRACE_EVENT_VFORK 2
69 #define PTRACE_EVENT_CLONE 3
70 #define PTRACE_EVENT_EXEC 4
71 #define PTRACE_EVENT_VFORK_DONE 5
72 #define PTRACE_EVENT_EXIT 6
73
74 #endif /* PTRACE_EVENT_FORK */
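/* Illustrative note: when one of the PTRACE_O_TRACE* options above is
   enabled, the kernel reports the event as a SIGTRAP stop with the
   event code in the high bits of the wait status, e.g. for a clone:

       WIFSTOPPED (status)
         && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) == PTRACE_EVENT_CLONE

   The new thread's LWP id is then retrieved with PTRACE_GETEVENTMSG,
   which is how handle_extended_wait below recognizes new threads.  */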
75
76 /* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79 #ifndef __WALL
80 #define __WALL 0x40000000 /* Wait for any child. */
81 #endif
82
83 #ifdef __UCLIBC__
84 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85 #define HAS_NOMMU
86 #endif
87 #endif
88
89 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID.
94
95 ``all_processes'' is keyed by the "overall process ID", which
96 GNU/Linux calls tgid, "thread group ID". */
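/* (For illustration: throughout this file a ptid is built as
   ptid_build (tgid, lwpid, 0), i.e. the pid field carries the overall
   process id and the lwp field carries the kernel thread id; for a
   single-threaded process the two are the same value.)  */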
97
98 struct inferior_list all_lwps;
99
100 /* A list of all unknown processes which receive stop signals. Some other
101 process will presumably claim each of these as forked children
102 momentarily. */
103
104 struct inferior_list stopped_pids;
105
106 /* FIXME this is a bit of a hack, and could be removed. */
107 int stopping_threads;
108
109 /* FIXME make into a target method? */
110 int using_threads = 1;
111
112 static int must_set_ptrace_flags;
113
114 /* This flag is true iff we've just created or attached to our first
115 inferior but it has not stopped yet. As soon as it does, we need
116 to call the low target's arch_setup callback. Doing this only on
117      the first inferior avoids reinitializing the architecture on every
118 inferior, and avoids messing with the register caches of the
119 already running inferiors. NOTE: this assumes all inferiors under
120 control of gdbserver have the same architecture. */
121 static int new_inferior;
122
123 static void linux_resume_one_lwp (struct inferior_list_entry *entry,
124 int step, int signal, siginfo_t *info);
125 static void linux_resume (struct thread_resume *resume_info, size_t n);
126 static void stop_all_lwps (void);
127 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
128 static int check_removed_breakpoint (struct lwp_info *event_child);
129 static void *add_lwp (ptid_t ptid);
130 static int my_waitpid (int pid, int *status, int flags);
131 static int linux_stopped_by_watchpoint (void);
132 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
133
134 struct pending_signals
135 {
136 int signal;
137 siginfo_t info;
138 struct pending_signals *prev;
139 };
140
141 #define PTRACE_ARG3_TYPE long
142 #define PTRACE_XFER_TYPE long
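/* Note: PTRACE_ARG3_TYPE is the type of ptrace's address argument and
   PTRACE_XFER_TYPE is the unit moved by PTRACE_PEEKUSER, PTRACE_POKEUSER
   and related requests; on Linux both are a native long, so the transfer
   loops below (e.g. fetch_register) move sizeof (PTRACE_XFER_TYPE) bytes
   per ptrace call.  */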
143
144 #ifdef HAVE_LINUX_REGSETS
145 static char *disabled_regsets;
146 static int num_regsets;
147 #endif
148
149 /* The read/write ends of the pipe registered as waitable file in the
150 event loop. */
151 static int linux_event_pipe[2] = { -1, -1 };
152
153 /* True if we're currently in async mode. */
154 #define target_is_async_p() (linux_event_pipe[0] != -1)
155
156 static void send_sigstop (struct inferior_list_entry *entry);
157 static void wait_for_sigstop (struct inferior_list_entry *entry);
158
159 static void
160 delete_lwp (struct lwp_info *lwp)
161 {
162 remove_thread (get_lwp_thread (lwp));
163 remove_inferior (&all_lwps, &lwp->head);
164 free (lwp);
165 }
166
167 /* Add a process to the common process list, and set its private
168 data. */
169
170 static struct process_info *
171 linux_add_process (int pid, int attached)
172 {
173 struct process_info *proc;
174
175 /* Is this the first process? If so, then set the arch. */
176 if (all_processes.head == NULL)
177 new_inferior = 1;
178
179 proc = add_process (pid, attached);
180 proc->private = xcalloc (1, sizeof (*proc->private));
181
182 return proc;
183 }
184
185 /* Handle a GNU/Linux extended wait response. If we see a clone
186 event, we need to add the new LWP to our list (and not report the
187 trap to higher layers). */
188
189 static void
190 handle_extended_wait (struct lwp_info *event_child, int wstat)
191 {
192 int event = wstat >> 16;
193 struct lwp_info *new_lwp;
194
195 if (event == PTRACE_EVENT_CLONE)
196 {
197 ptid_t ptid;
198 unsigned long new_pid;
199 int ret, status = W_STOPCODE (SIGSTOP);
200
201 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
202
203 /* If we haven't already seen the new PID stop, wait for it now. */
204 if (! pull_pid_from_list (&stopped_pids, new_pid))
205 {
206 /* The new child has a pending SIGSTOP. We can't affect it until it
207 hits the SIGSTOP, but we're already attached. */
208
209 ret = my_waitpid (new_pid, &status, __WALL);
210
211 if (ret == -1)
212 perror_with_name ("waiting for new child");
213 else if (ret != new_pid)
214 warning ("wait returned unexpected PID %d", ret);
215 else if (!WIFSTOPPED (status))
216 warning ("wait returned unexpected status 0x%x", status);
217 }
218
219 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
220
221 ptid = ptid_build (pid_of (event_child), new_pid, 0);
222 new_lwp = (struct lwp_info *) add_lwp (ptid);
223 add_thread (ptid, new_lwp);
224
225 /* Normally we will get the pending SIGSTOP. But in some cases
226 we might get another signal delivered to the group first.
227 If we do get another signal, be sure not to lose it. */
228 if (WSTOPSIG (status) == SIGSTOP)
229 {
230 if (stopping_threads)
231 new_lwp->stopped = 1;
232 else
233 ptrace (PTRACE_CONT, new_pid, 0, 0);
234 }
235 else
236 {
237 new_lwp->stop_expected = 1;
238 if (stopping_threads)
239 {
240 new_lwp->stopped = 1;
241 new_lwp->status_pending_p = 1;
242 new_lwp->status_pending = status;
243 }
244 else
245 /* Pass the signal on. This is what GDB does - except
246 shouldn't we really report it instead? */
247 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
248 }
249
250 /* Always resume the current thread. If we are stopping
251 threads, it will have a pending SIGSTOP; we may as well
252 collect it now. */
253 linux_resume_one_lwp (&event_child->head,
254 event_child->stepping, 0, NULL);
255 }
256 }
257
258 /* This function should only be called if the process got a SIGTRAP.
259 The SIGTRAP could mean several things.
260
261 On i386, where decr_pc_after_break is non-zero:
262 If we were single-stepping this process using PTRACE_SINGLESTEP,
263 we will get only the one SIGTRAP (even if the instruction we
264 stepped over was a breakpoint). The value of $eip will be the
265 next instruction.
266 If we continue the process using PTRACE_CONT, we will get a
267 SIGTRAP when we hit a breakpoint. The value of $eip will be
268 the instruction after the breakpoint (i.e. needs to be
269 decremented). If we report the SIGTRAP to GDB, we must also
270 report the undecremented PC. If we cancel the SIGTRAP, we
271 must resume at the decremented PC.
272
273 (Presumably, not yet tested) On a non-decr_pc_after_break machine
274 with hardware or kernel single-step:
275 If we single-step over a breakpoint instruction, our PC will
276 point at the following instruction. If we continue and hit a
277 breakpoint instruction, our PC will point at the breakpoint
278 instruction. */
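/* Worked example (illustrative, i386): the breakpoint instruction int3
   is one byte and decr_pc_after_break is 1.  If a breakpoint planted at
   0x08048400 is hit after PTRACE_CONT, the SIGTRAP leaves $eip at
   0x08048401; get_stop_pc subtracts decr_pc_after_break and returns
   0x08048400, the breakpoint address.  After PTRACE_SINGLESTEP no
   adjustment is made and the PC is returned as-is.  */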
279
280 static CORE_ADDR
281 get_stop_pc (void)
282 {
283 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
284
285 if (get_thread_lwp (current_inferior)->stepping)
286 return stop_pc;
287 else
288 return stop_pc - the_low_target.decr_pc_after_break;
289 }
290
291 static void *
292 add_lwp (ptid_t ptid)
293 {
294 struct lwp_info *lwp;
295
296 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
297 memset (lwp, 0, sizeof (*lwp));
298
299 lwp->head.id = ptid;
300
301 add_inferior_to_list (&all_lwps, &lwp->head);
302
303 return lwp;
304 }
305
306 /* Start an inferior process and return its pid.
307 ALLARGS is a vector of program-name and args. */
308
309 static int
310 linux_create_inferior (char *program, char **allargs)
311 {
312 void *new_lwp;
313 int pid;
314 ptid_t ptid;
315
316 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
317 pid = vfork ();
318 #else
319 pid = fork ();
320 #endif
321 if (pid < 0)
322 perror_with_name ("fork");
323
324 if (pid == 0)
325 {
326 ptrace (PTRACE_TRACEME, 0, 0, 0);
327
328 signal (__SIGRTMIN + 1, SIG_DFL);
329
330 setpgid (0, 0);
331
332 execv (program, allargs);
333 if (errno == ENOENT)
334 execvp (program, allargs);
335
336 fprintf (stderr, "Cannot exec %s: %s.\n", program,
337 strerror (errno));
338 fflush (stderr);
339 _exit (0177);
340 }
341
342 linux_add_process (pid, 0);
343
344 ptid = ptid_build (pid, pid, 0);
345 new_lwp = add_lwp (ptid);
346 add_thread (ptid, new_lwp);
347 must_set_ptrace_flags = 1;
348
349 return pid;
350 }
351
352 /* Attach to an inferior process. */
353
354 static void
355 linux_attach_lwp_1 (unsigned long lwpid, int initial)
356 {
357 ptid_t ptid;
358 struct lwp_info *new_lwp;
359
360 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
361 {
362 if (!initial)
363 {
364 /* If we fail to attach to an LWP, just warn. */
365 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
366 strerror (errno), errno);
367 fflush (stderr);
368 return;
369 }
370 else
371 /* If we fail to attach to a process, report an error. */
372 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
373 strerror (errno), errno);
374 }
375
376 /* FIXME: This intermittently fails.
377 We need to wait for SIGSTOP first. */
378 ptrace (PTRACE_SETOPTIONS, lwpid, 0, PTRACE_O_TRACECLONE);
379
380 if (initial)
381     /* NOTE/FIXME: This lwp might not be the tgid.  */
382 ptid = ptid_build (lwpid, lwpid, 0);
383 else
384 {
385 /* Note that extracting the pid from the current inferior is
386 safe, since we're always called in the context of the same
387 process as this new thread. */
388 int pid = pid_of (get_thread_lwp (current_inferior));
389 ptid = ptid_build (pid, lwpid, 0);
390 }
391
392 new_lwp = (struct lwp_info *) add_lwp (ptid);
393 add_thread (ptid, new_lwp);
394
395 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
396 brings it to a halt.
397
398 There are several cases to consider here:
399
400 1) gdbserver has already attached to the process and is being notified
401 of a new thread that is being created.
402 In this case we should ignore that SIGSTOP and resume the process.
403 This is handled below by setting stop_expected = 1.
404
405 2) This is the first thread (the process thread), and we're attaching
406 to it via attach_inferior.
407 In this case we want the process thread to stop.
408 This is handled by having linux_attach clear stop_expected after
409 we return.
410 ??? If the process already has several threads we leave the other
411 threads running.
412
413 3) GDB is connecting to gdbserver and is requesting an enumeration of all
414 existing threads.
415 In this case we want the thread to stop.
416 FIXME: This case is currently not properly handled.
417      We should wait for the SIGSTOP but don't.  Things apparently work
418 because enough time passes between when we ptrace (ATTACH) and when
419 gdb makes the next ptrace call on the thread.
420
421 On the other hand, if we are currently trying to stop all threads, we
422 should treat the new thread as if we had sent it a SIGSTOP. This works
423 because we are guaranteed that the add_lwp call above added us to the
424 end of the list, and so the new thread has not yet reached
425 wait_for_sigstop (but will). */
426 if (! stopping_threads)
427 new_lwp->stop_expected = 1;
428 }
429
430 void
431 linux_attach_lwp (unsigned long lwpid)
432 {
433 linux_attach_lwp_1 (lwpid, 0);
434 }
435
436 int
437 linux_attach (unsigned long pid)
438 {
439 struct lwp_info *lwp;
440
441 linux_attach_lwp_1 (pid, 1);
442
443 linux_add_process (pid, 1);
444
445 if (!non_stop)
446 {
447 /* Don't ignore the initial SIGSTOP if we just attached to this
448 process. It will be collected by wait shortly. */
449 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
450 ptid_build (pid, pid, 0));
451 lwp->stop_expected = 0;
452 }
453
454 return 0;
455 }
456
457 struct counter
458 {
459 int pid;
460 int count;
461 };
462
463 static int
464 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
465 {
466 struct counter *counter = args;
467
468 if (ptid_get_pid (entry->id) == counter->pid)
469 {
470 if (++counter->count > 1)
471 return 1;
472 }
473
474 return 0;
475 }
476
477 static int
478 last_thread_of_process_p (struct thread_info *thread)
479 {
480 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
481 int pid = ptid_get_pid (ptid);
482 struct counter counter = { pid , 0 };
483
484 return (find_inferior (&all_threads,
485 second_thread_of_pid_p, &counter) == NULL);
486 }
487
488 /* Kill the inferior lwp. */
489
490 static int
491 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
492 {
493 struct thread_info *thread = (struct thread_info *) entry;
494 struct lwp_info *lwp = get_thread_lwp (thread);
495 int wstat;
496 int pid = * (int *) args;
497
498 if (ptid_get_pid (entry->id) != pid)
499 return 0;
500
501 /* We avoid killing the first thread here, because of a Linux kernel (at
502 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
503 the children get a chance to be reaped, it will remain a zombie
504 forever. */
505
506 if (last_thread_of_process_p (thread))
507 {
508 if (debug_threads)
509 fprintf (stderr, "lkop: is last of process %s\n",
510 target_pid_to_str (entry->id));
511 return 0;
512 }
513
514 /* If we're killing a running inferior, make sure it is stopped
515 first, as PTRACE_KILL will not work otherwise. */
516 if (!lwp->stopped)
517 send_sigstop (&lwp->head);
518
519 do
520 {
521 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
522
523 /* Make sure it died. The loop is most likely unnecessary. */
524 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
525 } while (pid > 0 && WIFSTOPPED (wstat));
526
527 return 0;
528 }
529
530 static int
531 linux_kill (int pid)
532 {
533 struct process_info *process;
534 struct lwp_info *lwp;
535 struct thread_info *thread;
536 int wstat;
537 int lwpid;
538
539 process = find_process_pid (pid);
540 if (process == NULL)
541 return -1;
542
543 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
544
545 /* See the comment in linux_kill_one_lwp. We did not kill the first
546 thread in the list, so do so now. */
547 lwp = find_lwp_pid (pid_to_ptid (pid));
548 thread = get_lwp_thread (lwp);
549
550 if (debug_threads)
551 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
552 lwpid_of (lwp), pid);
553
554 /* If we're killing a running inferior, make sure it is stopped
555 first, as PTRACE_KILL will not work otherwise. */
556 if (!lwp->stopped)
557 send_sigstop (&lwp->head);
558
559 do
560 {
561 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
562
563 /* Make sure it died. The loop is most likely unnecessary. */
564 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
565 } while (lwpid > 0 && WIFSTOPPED (wstat));
566
567 delete_lwp (lwp);
568 remove_process (process);
569 return 0;
570 }
571
572 static int
573 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
574 {
575 struct thread_info *thread = (struct thread_info *) entry;
576 struct lwp_info *lwp = get_thread_lwp (thread);
577 int pid = * (int *) args;
578
579 if (ptid_get_pid (entry->id) != pid)
580 return 0;
581
582 /* If we're detaching from a running inferior, make sure it is
583 stopped first, as PTRACE_DETACH will not work otherwise. */
584 if (!lwp->stopped)
585 {
586 int lwpid = lwpid_of (lwp);
587
588 stopping_threads = 1;
589 send_sigstop (&lwp->head);
590
591 /* If this detects a new thread through a clone event, the new
592 thread is appended to the end of the lwp list, so we'll
593 eventually detach from it. */
594 wait_for_sigstop (&lwp->head);
595 stopping_threads = 0;
596
597 /* If LWP exits while we're trying to stop it, there's nothing
598 left to do. */
599 lwp = find_lwp_pid (pid_to_ptid (lwpid));
600 if (lwp == NULL)
601 return 0;
602 }
603
604 /* Make sure the process isn't stopped at a breakpoint that's
605 no longer there. */
606 check_removed_breakpoint (lwp);
607
608 /* If this process is stopped but is expecting a SIGSTOP, then make
609 sure we take care of that now. This isn't absolutely guaranteed
610 to collect the SIGSTOP, but is fairly likely to. */
611 if (lwp->stop_expected)
612 {
613 int wstat;
614 /* Clear stop_expected, so that the SIGSTOP will be reported. */
615 lwp->stop_expected = 0;
616 if (lwp->stopped)
617 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
618 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
619 }
620
621 /* Flush any pending changes to the process's registers. */
622 regcache_invalidate_one ((struct inferior_list_entry *)
623 get_lwp_thread (lwp));
624
625 /* Finally, let it resume. */
626 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
627
628 delete_lwp (lwp);
629 return 0;
630 }
631
632 static int
633 any_thread_of (struct inferior_list_entry *entry, void *args)
634 {
635 int *pid_p = args;
636
637 if (ptid_get_pid (entry->id) == *pid_p)
638 return 1;
639
640 return 0;
641 }
642
643 static int
644 linux_detach (int pid)
645 {
646 struct process_info *process;
647
648 process = find_process_pid (pid);
649 if (process == NULL)
650 return -1;
651
652 current_inferior =
653 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
654
655 delete_all_breakpoints ();
656 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
657 remove_process (process);
658 return 0;
659 }
660
661 static void
662 linux_join (int pid)
663 {
664 int status, ret;
665 struct process_info *process;
666
667 process = find_process_pid (pid);
668 if (process == NULL)
669 return;
670
671 do {
672 ret = my_waitpid (pid, &status, 0);
673 if (WIFEXITED (status) || WIFSIGNALED (status))
674 break;
675 } while (ret != -1 || errno != ECHILD);
676 }
677
678 /* Return nonzero if the given thread is still alive. */
679 static int
680 linux_thread_alive (ptid_t ptid)
681 {
682 struct lwp_info *lwp = find_lwp_pid (ptid);
683
684 /* We assume we always know if a thread exits. If a whole process
685 exited but we still haven't been able to report it to GDB, we'll
686 hold on to the last lwp of the dead process. */
687 if (lwp != NULL)
688 return !lwp->dead;
689 else
690 return 0;
691 }
692
693 /* Return nonzero if this process stopped at a breakpoint which
694 no longer appears to be inserted. Also adjust the PC
695 appropriately to resume where the breakpoint used to be. */
696 static int
697 check_removed_breakpoint (struct lwp_info *event_child)
698 {
699 CORE_ADDR stop_pc;
700 struct thread_info *saved_inferior;
701
702 if (event_child->pending_is_breakpoint == 0)
703 return 0;
704
705 if (debug_threads)
706 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
707 lwpid_of (event_child));
708
709 saved_inferior = current_inferior;
710 current_inferior = get_lwp_thread (event_child);
711
712 stop_pc = get_stop_pc ();
713
714 /* If the PC has changed since we stopped, then we shouldn't do
715 anything. This happens if, for instance, GDB handled the
716 decr_pc_after_break subtraction itself. */
717 if (stop_pc != event_child->pending_stop_pc)
718 {
719 if (debug_threads)
720 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
721 event_child->pending_stop_pc);
722
723 event_child->pending_is_breakpoint = 0;
724 current_inferior = saved_inferior;
725 return 0;
726 }
727
728 /* If the breakpoint is still there, we will report hitting it. */
729 if ((*the_low_target.breakpoint_at) (stop_pc))
730 {
731 if (debug_threads)
732 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
733 current_inferior = saved_inferior;
734 return 0;
735 }
736
737 if (debug_threads)
738 fprintf (stderr, "Removed breakpoint.\n");
739
740 /* For decr_pc_after_break targets, here is where we perform the
741 decrement. We go immediately from this function to resuming,
742 and can not safely call get_stop_pc () again. */
743 if (the_low_target.set_pc != NULL)
744 (*the_low_target.set_pc) (stop_pc);
745
746 /* We consumed the pending SIGTRAP. */
747 event_child->pending_is_breakpoint = 0;
748 event_child->status_pending_p = 0;
749 event_child->status_pending = 0;
750
751 current_inferior = saved_inferior;
752 return 1;
753 }
754
755 /* Return 1 if this lwp has an interesting status pending. This
756 function may silently resume an inferior lwp. */
757 static int
758 status_pending_p (struct inferior_list_entry *entry, void *arg)
759 {
760 struct lwp_info *lwp = (struct lwp_info *) entry;
761 ptid_t ptid = * (ptid_t *) arg;
762
763 /* Check if we're only interested in events from a specific process
764 or its lwps. */
765 if (!ptid_equal (minus_one_ptid, ptid)
766 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
767 return 0;
768
769 if (lwp->status_pending_p && !lwp->suspended)
770 if (check_removed_breakpoint (lwp))
771 {
772 /* This thread was stopped at a breakpoint, and the breakpoint
773 is now gone. We were told to continue (or step...) all threads,
774 so GDB isn't trying to single-step past this breakpoint.
775 So instead of reporting the old SIGTRAP, pretend we got to
776 the breakpoint just after it was removed instead of just
777 before; resume the process. */
778 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
779 return 0;
780 }
781
782 return (lwp->status_pending_p && !lwp->suspended);
783 }
784
785 static int
786 same_lwp (struct inferior_list_entry *entry, void *data)
787 {
788 ptid_t ptid = *(ptid_t *) data;
789 int lwp;
790
791 if (ptid_get_lwp (ptid) != 0)
792 lwp = ptid_get_lwp (ptid);
793 else
794 lwp = ptid_get_pid (ptid);
795
796 if (ptid_get_lwp (entry->id) == lwp)
797 return 1;
798
799 return 0;
800 }
801
802 struct lwp_info *
803 find_lwp_pid (ptid_t ptid)
804 {
805 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
806 }
807
808 static struct lwp_info *
809 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
810 {
811 int ret;
812 int to_wait_for = -1;
813 struct lwp_info *child = NULL;
814
815 if (debug_threads)
816 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
817
818 if (ptid_equal (ptid, minus_one_ptid))
819 to_wait_for = -1; /* any child */
820 else
821 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
822
823 options |= __WALL;
824
825 retry:
826
827 ret = my_waitpid (to_wait_for, wstatp, options);
828 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
829 return NULL;
830 else if (ret == -1)
831 perror_with_name ("waitpid");
832
833 if (debug_threads
834 && (!WIFSTOPPED (*wstatp)
835 || (WSTOPSIG (*wstatp) != 32
836 && WSTOPSIG (*wstatp) != 33)))
837 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
838
839 child = find_lwp_pid (pid_to_ptid (ret));
840
841 /* If we didn't find a process, one of two things presumably happened:
842 - A process we started and then detached from has exited. Ignore it.
843 - A process we are controlling has forked and the new child's stop
844 was reported to us by the kernel. Save its PID. */
845 if (child == NULL && WIFSTOPPED (*wstatp))
846 {
847 add_pid_to_list (&stopped_pids, ret);
848 goto retry;
849 }
850 else if (child == NULL)
851 goto retry;
852
853 child->stopped = 1;
854 child->pending_is_breakpoint = 0;
855
856 child->last_status = *wstatp;
857
858 /* Architecture-specific setup after inferior is running.
859 This needs to happen after we have attached to the inferior
860 and it is stopped for the first time, but before we access
861 any inferior registers. */
862 if (new_inferior)
863 {
864 the_low_target.arch_setup ();
865 #ifdef HAVE_LINUX_REGSETS
866 memset (disabled_regsets, 0, num_regsets);
867 #endif
868 new_inferior = 0;
869 }
870
871 if (debug_threads
872 && WIFSTOPPED (*wstatp))
873 {
874 struct thread_info *saved_inferior = current_inferior;
875 current_inferior = (struct thread_info *)
876 find_inferior_id (&all_threads, child->head.id);
877 /* For testing only; i386_stop_pc prints out a diagnostic. */
878 if (the_low_target.get_pc != NULL)
879 get_stop_pc ();
880 current_inferior = saved_inferior;
881 }
882
883 return child;
884 }
885
886 /* Wait for an event from child PID. If PID is -1, wait for any
887 child. Store the stop status through the status pointer WSTAT.
888 OPTIONS is passed to the waitpid call. Return 0 if no child stop
889 event was found and OPTIONS contains WNOHANG. Return the PID of
890 the stopped child otherwise. */
891
892 static int
893 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
894 {
895 CORE_ADDR stop_pc;
896 struct lwp_info *event_child = NULL;
897 int bp_status;
898 struct lwp_info *requested_child = NULL;
899
900 /* Check for a lwp with a pending status. */
901 /* It is possible that the user changed the pending task's registers since
902 it stopped. We correctly handle the change of PC if we hit a breakpoint
903 (in check_removed_breakpoint); signals should be reported anyway. */
904
905 if (ptid_equal (ptid, minus_one_ptid)
906 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
907 {
908 event_child = (struct lwp_info *)
909 find_inferior (&all_lwps, status_pending_p, &ptid);
910 if (debug_threads && event_child)
911 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
912 }
913 else
914 {
915 requested_child = find_lwp_pid (ptid);
916 if (requested_child->status_pending_p
917 && !check_removed_breakpoint (requested_child))
918 event_child = requested_child;
919 }
920
921 if (event_child != NULL)
922 {
923 if (debug_threads)
924 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
925 lwpid_of (event_child), event_child->status_pending);
926 *wstat = event_child->status_pending;
927 event_child->status_pending_p = 0;
928 event_child->status_pending = 0;
929 current_inferior = get_lwp_thread (event_child);
930 return lwpid_of (event_child);
931 }
932
933 /* We only enter this loop if no process has a pending wait status. Thus
934 any action taken in response to a wait status inside this loop is
935 responding as soon as we detect the status, not after any pending
936 events. */
937 while (1)
938 {
939 event_child = linux_wait_for_lwp (ptid, wstat, options);
940
941 if ((options & WNOHANG) && event_child == NULL)
942 return 0;
943
944 if (event_child == NULL)
945 error ("event from unknown child");
946
947 current_inferior = get_lwp_thread (event_child);
948
949 /* Check for thread exit. */
950 if (! WIFSTOPPED (*wstat))
951 {
952 if (debug_threads)
953 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
954
955 /* If the last thread is exiting, just return. */
956 if (last_thread_of_process_p (current_inferior))
957 {
958 if (debug_threads)
959 fprintf (stderr, "LWP %ld is last lwp of process\n",
960 lwpid_of (event_child));
961 return lwpid_of (event_child);
962 }
963
964 delete_lwp (event_child);
965
966 if (!non_stop)
967 {
968 current_inferior = (struct thread_info *) all_threads.head;
969 if (debug_threads)
970 fprintf (stderr, "Current inferior is now %ld\n",
971 lwpid_of (get_thread_lwp (current_inferior)));
972 }
973 else
974 {
975 current_inferior = NULL;
976 if (debug_threads)
977 fprintf (stderr, "Current inferior is now <NULL>\n");
978 }
979
980 /* If we were waiting for this particular child to do something...
981 well, it did something. */
982 if (requested_child != NULL)
983 return lwpid_of (event_child);
984
985 /* Wait for a more interesting event. */
986 continue;
987 }
988
989 if (WIFSTOPPED (*wstat)
990 && WSTOPSIG (*wstat) == SIGSTOP
991 && event_child->stop_expected)
992 {
993 if (debug_threads)
994 fprintf (stderr, "Expected stop.\n");
995 event_child->stop_expected = 0;
996 linux_resume_one_lwp (&event_child->head,
997 event_child->stepping, 0, NULL);
998 continue;
999 }
1000
1001 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1002 && *wstat >> 16 != 0)
1003 {
1004 handle_extended_wait (event_child, *wstat);
1005 continue;
1006 }
1007
1008 /* If GDB is not interested in this signal, don't stop other
1009 threads, and don't report it to GDB. Just resume the
1010 inferior right away. We do this for threading-related
1011 signals as well as any that GDB specifically requested we
1012 ignore. But never ignore SIGSTOP if we sent it ourselves,
1013 and do not ignore signals when stepping - they may require
1014 special handling to skip the signal handler. */
1015 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1016 thread library? */
1017 if (WIFSTOPPED (*wstat)
1018 && !event_child->stepping
1019 && (
1020 #ifdef USE_THREAD_DB
1021 (current_process ()->private->thread_db_active
1022 && (WSTOPSIG (*wstat) == __SIGRTMIN
1023 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1024 ||
1025 #endif
1026 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1027 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1028 {
1029 siginfo_t info, *info_p;
1030
1031 if (debug_threads)
1032 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1033 WSTOPSIG (*wstat), lwpid_of (event_child));
1034
1035 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1036 info_p = &info;
1037 else
1038 info_p = NULL;
1039 linux_resume_one_lwp (&event_child->head,
1040 event_child->stepping,
1041 WSTOPSIG (*wstat), info_p);
1042 continue;
1043 }
1044
1045 /* If this event was not handled above, and is not a SIGTRAP, report
1046 it. */
1047 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1048 return lwpid_of (event_child);
1049
1050 /* If this target does not support breakpoints, we simply report the
1051 SIGTRAP; it's of no concern to us. */
1052 if (the_low_target.get_pc == NULL)
1053 return lwpid_of (event_child);
1054
1055 stop_pc = get_stop_pc ();
1056
1057 /* bp_reinsert will only be set if we were single-stepping.
1058 Notice that we will resume the process after hitting
1059 a gdbserver breakpoint; single-stepping to/over one
1060 is not supported (yet). */
1061 if (event_child->bp_reinsert != 0)
1062 {
1063 if (debug_threads)
1064 fprintf (stderr, "Reinserted breakpoint.\n");
1065 reinsert_breakpoint (event_child->bp_reinsert);
1066 event_child->bp_reinsert = 0;
1067
1068 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1069 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1070 continue;
1071 }
1072
1073 bp_status = check_breakpoints (stop_pc);
1074
1075 if (bp_status != 0)
1076 {
1077 if (debug_threads)
1078 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1079
1080 /* We hit one of our own breakpoints. We mark it as a pending
1081 breakpoint, so that check_removed_breakpoint () will do the PC
1082 adjustment for us at the appropriate time. */
1083 event_child->pending_is_breakpoint = 1;
1084 event_child->pending_stop_pc = stop_pc;
1085
1086 /* We may need to put the breakpoint back. We continue in the event
1087 loop instead of simply replacing the breakpoint right away,
1088 in order to not lose signals sent to the thread that hit the
1089 breakpoint. Unfortunately this increases the window where another
1090 thread could sneak past the removed breakpoint. For the current
1091 use of server-side breakpoints (thread creation) this is
1092 acceptable; but it needs to be considered before this breakpoint
1093 mechanism can be used in more general ways. For some breakpoints
1094 it may be necessary to stop all other threads, but that should
1095 be avoided where possible.
1096
1097 If breakpoint_reinsert_addr is NULL, that means that we can
1098 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1099 mark it for reinsertion, and single-step.
1100
1101 Otherwise, call the target function to figure out where we need
1102 our temporary breakpoint, create it, and continue executing this
1103 process. */
1104
1105 /* NOTE: we're lifting breakpoints in non-stop mode. This
1106 is currently only used for thread event breakpoints, so
1107 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1108 events. */
1109 if (bp_status == 2)
1110 /* No need to reinsert. */
1111 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1112 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1113 {
1114 event_child->bp_reinsert = stop_pc;
1115 uninsert_breakpoint (stop_pc);
1116 linux_resume_one_lwp (&event_child->head, 1, 0, NULL);
1117 }
1118 else
1119 {
1120 reinsert_breakpoint_by_bp
1121 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1122 linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
1123 }
1124
1125 continue;
1126 }
1127
1128 if (debug_threads)
1129 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1130
1131 /* If we were single-stepping, we definitely want to report the
1132 SIGTRAP. Although the single-step operation has completed,
1133         do not clear the stepping flag yet; we need to check it
1134 in wait_for_sigstop. */
1135 if (event_child->stepping)
1136 return lwpid_of (event_child);
1137
1138 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1139 Check if it is a breakpoint, and if so mark the process information
1140 accordingly. This will handle both the necessary fiddling with the
1141 PC on decr_pc_after_break targets and suppressing extra threads
1142 hitting a breakpoint if two hit it at once and then GDB removes it
1143 after the first is reported. Arguably it would be better to report
1144 multiple threads hitting breakpoints simultaneously, but the current
1145 remote protocol does not allow this. */
1146 if ((*the_low_target.breakpoint_at) (stop_pc))
1147 {
1148 event_child->pending_is_breakpoint = 1;
1149 event_child->pending_stop_pc = stop_pc;
1150 }
1151
1152 return lwpid_of (event_child);
1153 }
1154
1155 /* NOTREACHED */
1156 return 0;
1157 }
1158
1159 static int
1160 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1161 {
1162 ptid_t wait_ptid;
1163
1164 if (ptid_is_pid (ptid))
1165 {
1166 /* A request to wait for a specific tgid. This is not possible
1167 with waitpid, so instead, we wait for any child, and leave
1168 children we're not interested in right now with a pending
1169 status to report later. */
1170 wait_ptid = minus_one_ptid;
1171 }
1172 else
1173 wait_ptid = ptid;
1174
1175 while (1)
1176 {
1177 int event_pid;
1178
1179 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1180
1181 if (event_pid > 0
1182 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1183 {
1184 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1185
1186 if (! WIFSTOPPED (*wstat))
1187 mark_lwp_dead (event_child, *wstat);
1188 else
1189 {
1190 event_child->status_pending_p = 1;
1191 event_child->status_pending = *wstat;
1192 }
1193 }
1194 else
1195 return event_pid;
1196 }
1197 }
1198
1199 /* Wait for process, returns status. */
1200
1201 static ptid_t
1202 linux_wait_1 (ptid_t ptid,
1203 struct target_waitstatus *ourstatus, int target_options)
1204 {
1205 int w;
1206 struct thread_info *thread = NULL;
1207 struct lwp_info *lwp = NULL;
1208 int options;
1209 int pid;
1210
1211 /* Translate generic target options into linux options. */
1212 options = __WALL;
1213 if (target_options & TARGET_WNOHANG)
1214 options |= WNOHANG;
1215
1216 retry:
1217 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1218
1219 /* If we were only supposed to resume one thread, only wait for
1220 that thread - if it's still alive. If it died, however - which
1221 can happen if we're coming from the thread death case below -
1222 then we need to make sure we restart the other threads. We could
1223 pick a thread at random or restart all; restarting all is less
1224 arbitrary. */
1225 if (!non_stop
1226 && !ptid_equal (cont_thread, null_ptid)
1227 && !ptid_equal (cont_thread, minus_one_ptid))
1228 {
1229 thread = (struct thread_info *) find_inferior_id (&all_threads,
1230 cont_thread);
1231
1232 /* No stepping, no signal - unless one is pending already, of course. */
1233 if (thread == NULL)
1234 {
1235 struct thread_resume resume_info;
1236 resume_info.thread = minus_one_ptid;
1237 resume_info.kind = resume_continue;
1238 resume_info.sig = 0;
1239 linux_resume (&resume_info, 1);
1240 }
1241 else
1242 ptid = cont_thread;
1243 }
1244
1245 pid = linux_wait_for_event (ptid, &w, options);
1246 if (pid == 0) /* only if TARGET_WNOHANG */
1247 return null_ptid;
1248
1249 lwp = get_thread_lwp (current_inferior);
1250
1251 if (must_set_ptrace_flags)
1252 {
1253 ptrace (PTRACE_SETOPTIONS, lwpid_of (lwp), 0, PTRACE_O_TRACECLONE);
1254 must_set_ptrace_flags = 0;
1255 }
1256 /* If we are waiting for a particular child, and it exited,
1257 linux_wait_for_event will return its exit status. Similarly if
1258 the last child exited. If this is not the last child, however,
1259 do not report it as exited until there is a 'thread exited' response
1260 available in the remote protocol. Instead, just wait for another event.
1261 This should be safe, because if the thread crashed we will already
1262 have reported the termination signal to GDB; that should stop any
1263 in-progress stepping operations, etc.
1264
1265 Report the exit status of the last thread to exit. This matches
1266 LinuxThreads' behavior. */
1267
1268 if (last_thread_of_process_p (current_inferior))
1269 {
1270 if (WIFEXITED (w) || WIFSIGNALED (w))
1271 {
1272 int pid = pid_of (lwp);
1273 struct process_info *process = find_process_pid (pid);
1274
1275 delete_lwp (lwp);
1276 remove_process (process);
1277
1278 current_inferior = NULL;
1279
1280 if (WIFEXITED (w))
1281 {
1282 ourstatus->kind = TARGET_WAITKIND_EXITED;
1283 ourstatus->value.integer = WEXITSTATUS (w);
1284
1285 if (debug_threads)
1286 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1287 }
1288 else
1289 {
1290 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1291 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1292
1293 if (debug_threads)
1294 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1295
1296 }
1297
1298 return pid_to_ptid (pid);
1299 }
1300 }
1301 else
1302 {
1303 if (!WIFSTOPPED (w))
1304 goto retry;
1305 }
1306
1307 /* In all-stop, stop all threads. Be careful to only do this if
1308 we're about to report an event to GDB. */
1309 if (!non_stop)
1310 stop_all_lwps ();
1311
1312 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1313
1314 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1315 {
1316       /* This thread was requested to stop by GDB with vCont;t, and it
1317          stopped cleanly, so report it as stopped with SIG0.  The use of
1318          SIGSTOP is an implementation detail.  */
1319 ourstatus->value.sig = TARGET_SIGNAL_0;
1320 }
1321 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1322 {
1323       /* This thread was requested to stop by GDB with vCont;t, but it
1324          stopped for another reason.  Set stop_expected so the
1325          pending SIGSTOP is ignored and the LWP is resumed.  */
1326 lwp->stop_expected = 1;
1327 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1328 }
1329 else
1330 {
1331 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1332 }
1333
1334 if (debug_threads)
1335 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1336 target_pid_to_str (lwp->head.id),
1337 ourstatus->kind,
1338 ourstatus->value.sig);
1339
1340 return lwp->head.id;
1341 }
1342
1343 /* Get rid of any pending event in the pipe. */
1344 static void
1345 async_file_flush (void)
1346 {
1347 int ret;
1348 char buf;
1349
1350 do
1351 ret = read (linux_event_pipe[0], &buf, 1);
1352 while (ret >= 0 || (ret == -1 && errno == EINTR));
1353 }
1354
1355 /* Put something in the pipe, so the event loop wakes up. */
1356 static void
1357 async_file_mark (void)
1358 {
1359 int ret;
1360
1361 async_file_flush ();
1362
1363 do
1364 ret = write (linux_event_pipe[1], "+", 1);
1365 while (ret == 0 || (ret == -1 && errno == EINTR));
1366
1367 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1368 be awakened anyway. */
1369 }
1370
1371 static ptid_t
1372 linux_wait (ptid_t ptid,
1373 struct target_waitstatus *ourstatus, int target_options)
1374 {
1375 ptid_t event_ptid;
1376
1377 if (debug_threads)
1378 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1379
1380 /* Flush the async file first. */
1381 if (target_is_async_p ())
1382 async_file_flush ();
1383
1384 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1385
1386 /* If at least one stop was reported, there may be more. A single
1387 SIGCHLD can signal more than one child stop. */
1388 if (target_is_async_p ()
1389 && (target_options & TARGET_WNOHANG) != 0
1390 && !ptid_equal (event_ptid, null_ptid))
1391 async_file_mark ();
1392
1393 return event_ptid;
1394 }
1395
1396 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1397 thread groups are in use, we need to use tkill. */
1398
1399 static int
1400 kill_lwp (unsigned long lwpid, int signo)
1401 {
1402 static int tkill_failed;
1403
1404 errno = 0;
1405
1406 #ifdef SYS_tkill
1407 if (!tkill_failed)
1408 {
1409 int ret = syscall (SYS_tkill, lwpid, signo);
1410 if (errno != ENOSYS)
1411 return ret;
1412 errno = 0;
1413 tkill_failed = 1;
1414 }
1415 #endif
1416
1417 return kill (lwpid, signo);
1418 }
1419
1420 static void
1421 send_sigstop (struct inferior_list_entry *entry)
1422 {
1423 struct lwp_info *lwp = (struct lwp_info *) entry;
1424 int pid;
1425
1426 if (lwp->stopped)
1427 return;
1428
1429 pid = lwpid_of (lwp);
1430
1431 /* If we already have a pending stop signal for this process, don't
1432 send another. */
1433 if (lwp->stop_expected)
1434 {
1435 if (debug_threads)
1436 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1437
1438 /* We clear the stop_expected flag so that wait_for_sigstop
1439 will receive the SIGSTOP event (instead of silently resuming and
1440 waiting again). It'll be reset below. */
1441 lwp->stop_expected = 0;
1442 return;
1443 }
1444
1445 if (debug_threads)
1446 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1447
1448 kill_lwp (pid, SIGSTOP);
1449 }
1450
1451 static void
1452 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1453 {
1454 /* It's dead, really. */
1455 lwp->dead = 1;
1456
1457 /* Store the exit status for later. */
1458 lwp->status_pending_p = 1;
1459 lwp->status_pending = wstat;
1460
1461 /* So that check_removed_breakpoint doesn't try to figure out if
1462 this is stopped at a breakpoint. */
1463 lwp->pending_is_breakpoint = 0;
1464
1465 /* Prevent trying to stop it. */
1466 lwp->stopped = 1;
1467
1468 /* No further stops are expected from a dead lwp. */
1469 lwp->stop_expected = 0;
1470 }
1471
1472 static void
1473 wait_for_sigstop (struct inferior_list_entry *entry)
1474 {
1475 struct lwp_info *lwp = (struct lwp_info *) entry;
1476 struct thread_info *saved_inferior;
1477 int wstat;
1478 ptid_t saved_tid;
1479 ptid_t ptid;
1480
1481 if (lwp->stopped)
1482 return;
1483
1484 saved_inferior = current_inferior;
1485 if (saved_inferior != NULL)
1486 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1487 else
1488 saved_tid = null_ptid; /* avoid bogus unused warning */
1489
1490 ptid = lwp->head.id;
1491
1492 linux_wait_for_event (ptid, &wstat, __WALL);
1493
1494 /* If we stopped with a non-SIGSTOP signal, save it for later
1495 and record the pending SIGSTOP. If the process exited, just
1496 return. */
1497 if (WIFSTOPPED (wstat)
1498 && WSTOPSIG (wstat) != SIGSTOP)
1499 {
1500 if (debug_threads)
1501 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1502 lwpid_of (lwp), wstat);
1503
1504 /* Do not leave a pending single-step finish to be reported to
1505 the client. The client will give us a new action for this
1506 thread, possibly a continue request --- otherwise, the client
1507 would consider this pending SIGTRAP reported later a spurious
1508 signal. */
1509 if (WSTOPSIG (wstat) == SIGTRAP
1510 && lwp->stepping
1511 && !linux_stopped_by_watchpoint ())
1512 {
1513 if (debug_threads)
1514 fprintf (stderr, " single-step SIGTRAP ignored\n");
1515 }
1516 else
1517 {
1518 lwp->status_pending_p = 1;
1519 lwp->status_pending = wstat;
1520 }
1521 lwp->stop_expected = 1;
1522 }
1523 else if (!WIFSTOPPED (wstat))
1524 {
1525 if (debug_threads)
1526 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1527 lwpid_of (lwp));
1528
1529 /* Leave this status pending for the next time we're able to
1530      report it.  In the meantime, we'll report this lwp as dead
1531 to GDB, so GDB doesn't try to read registers and memory from
1532 it. */
1533 mark_lwp_dead (lwp, wstat);
1534 }
1535
1536 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1537 current_inferior = saved_inferior;
1538 else
1539 {
1540 if (debug_threads)
1541 fprintf (stderr, "Previously current thread died.\n");
1542
1543 if (non_stop)
1544 {
1545 /* We can't change the current inferior behind GDB's back,
1546 otherwise, a subsequent command may apply to the wrong
1547 process. */
1548 current_inferior = NULL;
1549 }
1550 else
1551 {
1552 /* Set a valid thread as current. */
1553 set_desired_inferior (0);
1554 }
1555 }
1556 }
1557
1558 static void
1559 stop_all_lwps (void)
1560 {
1561 stopping_threads = 1;
1562 for_each_inferior (&all_lwps, send_sigstop);
1563 for_each_inferior (&all_lwps, wait_for_sigstop);
1564 stopping_threads = 0;
1565 }
1566
1567 /* Resume execution of the inferior process.
1568 If STEP is nonzero, single-step it.
1569 If SIGNAL is nonzero, give it that signal. */
1570
1571 static void
1572 linux_resume_one_lwp (struct inferior_list_entry *entry,
1573 int step, int signal, siginfo_t *info)
1574 {
1575 struct lwp_info *lwp = (struct lwp_info *) entry;
1576 struct thread_info *saved_inferior;
1577
1578 if (lwp->stopped == 0)
1579 return;
1580
1581 /* If we have pending signals or status, and a new signal, enqueue the
1582 signal. Also enqueue the signal if we are waiting to reinsert a
1583 breakpoint; it will be picked up again below. */
1584 if (signal != 0
1585 && (lwp->status_pending_p || lwp->pending_signals != NULL
1586 || lwp->bp_reinsert != 0))
1587 {
1588 struct pending_signals *p_sig;
1589 p_sig = xmalloc (sizeof (*p_sig));
1590 p_sig->prev = lwp->pending_signals;
1591 p_sig->signal = signal;
1592 if (info == NULL)
1593 memset (&p_sig->info, 0, sizeof (siginfo_t));
1594 else
1595 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1596 lwp->pending_signals = p_sig;
1597 }
1598
1599 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1600 return;
1601
1602 saved_inferior = current_inferior;
1603 current_inferior = get_lwp_thread (lwp);
1604
1605 if (debug_threads)
1606 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1607 lwpid_of (lwp), step ? "step" : "continue", signal,
1608 lwp->stop_expected ? "expected" : "not expected");
1609
1610 /* This bit needs some thinking about. If we get a signal that
1611 we must report while a single-step reinsert is still pending,
1612 we often end up resuming the thread. It might be better to
1613 (ew) allow a stack of pending events; then we could be sure that
1614 the reinsert happened right away and not lose any signals.
1615
1616 Making this stack would also shrink the window in which breakpoints are
1617 uninserted (see comment in linux_wait_for_lwp) but not enough for
1618 complete correctness, so it won't solve that problem. It may be
1619 worthwhile just to solve this one, however. */
1620 if (lwp->bp_reinsert != 0)
1621 {
1622 if (debug_threads)
1623 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1624 if (step == 0)
1625 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1626 step = 1;
1627
1628 /* Postpone any pending signal. It was enqueued above. */
1629 signal = 0;
1630 }
1631
1632 check_removed_breakpoint (lwp);
1633
1634 if (debug_threads && the_low_target.get_pc != NULL)
1635 {
1636 fprintf (stderr, " ");
1637 (*the_low_target.get_pc) ();
1638 }
1639
1640 /* If we have pending signals, consume one unless we are trying to reinsert
1641 a breakpoint. */
1642 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1643 {
1644 struct pending_signals **p_sig;
1645
1646 p_sig = &lwp->pending_signals;
1647 while ((*p_sig)->prev != NULL)
1648 p_sig = &(*p_sig)->prev;
1649
1650 signal = (*p_sig)->signal;
1651 if ((*p_sig)->info.si_signo != 0)
1652 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1653
1654 free (*p_sig);
1655 *p_sig = NULL;
1656 }
1657
1658 regcache_invalidate_one ((struct inferior_list_entry *)
1659 get_lwp_thread (lwp));
1660 errno = 0;
1661 lwp->stopped = 0;
1662 lwp->stepping = step;
1663 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);
1664
1665 current_inferior = saved_inferior;
1666 if (errno)
1667 {
1668 /* ESRCH from ptrace either means that the thread was already
1669 running (an error) or that it is gone (a race condition). If
1670 it's gone, we will get a notification the next time we wait,
1671 so we can ignore the error. We could differentiate these
1672 two, but it's tricky without waiting; the thread still exists
1673 as a zombie, so sending it signal 0 would succeed. So just
1674 ignore ESRCH. */
1675 if (errno == ESRCH)
1676 return;
1677
1678 perror_with_name ("ptrace");
1679 }
1680 }
1681
1682 struct thread_resume_array
1683 {
1684 struct thread_resume *resume;
1685 size_t n;
1686 };
1687
1688 /* This function is called once per thread. We look up the thread
1689 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1690 resume request.
1691
1692 This algorithm is O(threads * resume elements), but resume elements
1693 is small (and will remain small at least until GDB supports thread
1694 suspension). */
1695 static int
1696 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1697 {
1698 struct lwp_info *lwp;
1699 struct thread_info *thread;
1700 int ndx;
1701 struct thread_resume_array *r;
1702
1703 thread = (struct thread_info *) entry;
1704 lwp = get_thread_lwp (thread);
1705 r = arg;
1706
1707 for (ndx = 0; ndx < r->n; ndx++)
1708 {
1709 ptid_t ptid = r->resume[ndx].thread;
1710 if (ptid_equal (ptid, minus_one_ptid)
1711 || ptid_equal (ptid, entry->id)
1712 || (ptid_is_pid (ptid)
1713 && (ptid_get_pid (ptid) == pid_of (lwp)))
1714 || (ptid_get_lwp (ptid) == -1
1715 && (ptid_get_pid (ptid) == pid_of (lwp))))
1716 {
1717 lwp->resume = &r->resume[ndx];
1718 return 0;
1719 }
1720 }
1721
1722 /* No resume action for this thread. */
1723 lwp->resume = NULL;
1724
1725 return 0;
1726 }
1727
1728
1729 /* Set *FLAG_P if this lwp has an interesting status pending. */
1730 static int
1731 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1732 {
1733 struct lwp_info *lwp = (struct lwp_info *) entry;
1734
1735 /* LWPs which will not be resumed are not interesting, because
1736 we might not wait for them next time through linux_wait. */
1737 if (lwp->resume == NULL)
1738 return 0;
1739
1740 /* If this thread has a removed breakpoint, we won't have any
1741 events to report later, so check now. check_removed_breakpoint
1742 may clear status_pending_p. We avoid calling check_removed_breakpoint
1743 for any thread that we are not otherwise going to resume - this
1744 lets us preserve stopped status when two threads hit a breakpoint.
1745 GDB removes the breakpoint to single-step a particular thread
1746 past it, then re-inserts it and resumes all threads. We want
1747 to report the second thread without resuming it in the interim. */
1748 if (lwp->status_pending_p)
1749 check_removed_breakpoint (lwp);
1750
1751 if (lwp->status_pending_p)
1752 * (int *) flag_p = 1;
1753
1754 return 0;
1755 }
1756
1757 /* This function is called once per thread. We check the thread's resume
1758 request, which will tell us whether to resume, step, or leave the thread
1759 stopped; and what signal, if any, it should be sent.
1760
1761 For threads which we aren't explicitly told otherwise, we preserve
1762 the stepping flag; this is used for stepping over gdbserver-placed
1763 breakpoints.
1764
1765 If pending_flags was set in any thread, we queue any needed
1766 signals, since we won't actually resume. We already have a pending
1767 event to report, so we don't need to preserve any step requests;
1768 they should be re-issued if necessary. */
1769
1770 static int
1771 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1772 {
1773 struct lwp_info *lwp;
1774 struct thread_info *thread;
1775 int step;
1776 int pending_flag = * (int *) arg;
1777
1778 thread = (struct thread_info *) entry;
1779 lwp = get_thread_lwp (thread);
1780
1781 if (lwp->resume == NULL)
1782 return 0;
1783
1784 if (lwp->resume->kind == resume_stop)
1785 {
1786 if (debug_threads)
1787 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1788
1789 if (!lwp->stopped)
1790 {
1791 if (debug_threads)
1792 fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
1793
1794 lwp->suspended = 1;
1795 send_sigstop (&lwp->head);
1796 }
1797 else
1798 {
1799 if (debug_threads)
1800 {
1801 if (lwp->suspended)
1802 fprintf (stderr, "already stopped/suspended LWP %ld\n",
1803 lwpid_of (lwp));
1804 else
1805 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
1806 lwpid_of (lwp));
1807 }
1808
1809 /* Make sure we leave the LWP suspended, so we don't try to
1810 resume it without GDB telling us to. FIXME: The LWP may
1811 have been stopped in an internal event that was not meant
1812 to be notified back to GDB (e.g., gdbserver breakpoint),
1813 so we should be reporting a stop event in that case
1814 too. */
1815 lwp->suspended = 1;
1816 }
1817
1818 /* For stop requests, we're done. */
1819 lwp->resume = NULL;
1820 return 0;
1821 }
1822 else
1823 lwp->suspended = 0;
1824
1825 /* If this thread which is about to be resumed has a pending status,
1826 then don't resume any threads - we can just report the pending
1827 status. Make sure to queue any signals that would otherwise be
1828      sent.  In all-stop mode, we base this decision on whether *any*
1829 thread has a pending status. */
1830 if (non_stop)
1831 resume_status_pending_p (&lwp->head, &pending_flag);
1832
1833 if (!pending_flag)
1834 {
1835 if (debug_threads)
1836 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
1837
1838 if (ptid_equal (lwp->resume->thread, minus_one_ptid)
1839 && lwp->stepping
1840 && lwp->pending_is_breakpoint)
1841 step = 1;
1842 else
1843 step = (lwp->resume->kind == resume_step);
1844
1845 linux_resume_one_lwp (&lwp->head, step, lwp->resume->sig, NULL);
1846 }
1847 else
1848 {
1849 if (debug_threads)
1850 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
1851
1852 /* If we have a new signal, enqueue the signal. */
1853 if (lwp->resume->sig != 0)
1854 {
1855 struct pending_signals *p_sig;
1856 p_sig = xmalloc (sizeof (*p_sig));
1857 p_sig->prev = lwp->pending_signals;
1858 p_sig->signal = lwp->resume->sig;
1859 memset (&p_sig->info, 0, sizeof (siginfo_t));
1860
1861 /* If this is the same signal we were previously stopped by,
1862 make sure to queue its siginfo. We can ignore the return
1863 value of ptrace; if it fails, we'll skip
1864 PTRACE_SETSIGINFO. */
1865 if (WIFSTOPPED (lwp->last_status)
1866 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1867 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1868
1869 lwp->pending_signals = p_sig;
1870 }
1871 }
1872
1873 lwp->resume = NULL;
1874 return 0;
1875 }
1876
1877 static void
1878 linux_resume (struct thread_resume *resume_info, size_t n)
1879 {
1880 int pending_flag;
1881 struct thread_resume_array array = { resume_info, n };
1882
1883 find_inferior (&all_threads, linux_set_resume_request, &array);
1884
1885 /* If there is a thread which would otherwise be resumed, which
1886 has a pending status, then don't resume any threads - we can just
1887 report the pending status. Make sure to queue any signals
1888 that would otherwise be sent. In non-stop mode, we'll apply this
1889 logic to each thread individually. */
1890 pending_flag = 0;
1891 if (!non_stop)
1892 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1893
1894 if (debug_threads)
1895 {
1896 if (pending_flag)
1897 fprintf (stderr, "Not resuming, pending status\n");
1898 else
1899 fprintf (stderr, "Resuming, no pending status\n");
1900 }
1901
1902 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1903 }
1904
1905 #ifdef HAVE_LINUX_USRREGS
1906
1907 int
1908 register_addr (int regnum)
1909 {
1910 int addr;
1911
1912 if (regnum < 0 || regnum >= the_low_target.num_regs)
1913 error ("Invalid register number %d.", regnum);
1914
1915 addr = the_low_target.regmap[regnum];
1916
1917 return addr;
1918 }
1919
1920 /* Fetch one register. */
1921 static void
1922 fetch_register (int regno)
1923 {
1924 CORE_ADDR regaddr;
1925 int i, size;
1926 char *buf;
1927 int pid;
1928
1929 if (regno >= the_low_target.num_regs)
1930 return;
1931 if ((*the_low_target.cannot_fetch_register) (regno))
1932 return;
1933
1934 regaddr = register_addr (regno);
1935 if (regaddr == -1)
1936 return;
1937
1938 pid = lwpid_of (get_thread_lwp (current_inferior));
1939 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1940 & - sizeof (PTRACE_XFER_TYPE));
1941 buf = alloca (size);
1942 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1943 {
1944 errno = 0;
1945 *(PTRACE_XFER_TYPE *) (buf + i) =
1946 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1947 regaddr += sizeof (PTRACE_XFER_TYPE);
1948 if (errno != 0)
1949 {
1950 /* Warning, not error, in case we are attached; sometimes the
1951 kernel doesn't let us at the registers. */
1952 char *err = strerror (errno);
1953 char *msg = alloca (strlen (err) + 128);
1954 sprintf (msg, "reading register %d: %s", regno, err);
1955 error (msg);
1956 goto error_exit;
1957 }
1958 }
1959
1960 if (the_low_target.supply_ptrace_register)
1961 the_low_target.supply_ptrace_register (regno, buf);
1962 else
1963 supply_register (regno, buf);
1964
1965 error_exit:;
1966 }
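/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It restates the rounding
   rule used by fetch_register above: PTRACE_PEEKUSER transfers whole
   PTRACE_XFER_TYPE words, so a register's size is rounded up to the
   next word multiple before peeking.]  */
#if 0
static int
example_round_up_to_xfer_words (int register_size_in_bytes)
{
  /* With 8-byte words, a 10-byte register occupies two words of
     ptrace traffic: (10 + 8 - 1) & -8 == 16.  */
  return ((register_size_in_bytes + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
}
#endif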
1967
1968 /* Fetch all registers, or just one, from the child process. */
1969 static void
1970 usr_fetch_inferior_registers (int regno)
1971 {
1972 if (regno == -1 || regno == 0)
1973 for (regno = 0; regno < the_low_target.num_regs; regno++)
1974 fetch_register (regno);
1975 else
1976 fetch_register (regno);
1977 }
1978
1979 /* Store our register values back into the inferior.
1980 If REGNO is -1, do this for all registers.
1981 Otherwise, REGNO specifies which register (so we can save time). */
1982 static void
1983 usr_store_inferior_registers (int regno)
1984 {
1985 CORE_ADDR regaddr;
1986 int i, size, pid;
1987 char *buf;
1988
1989 if (regno >= 0)
1990 {
1991 if (regno >= the_low_target.num_regs)
1992 return;
1993
1994 if ((*the_low_target.cannot_store_register) (regno) == 1)
1995 return;
1996
1997 regaddr = register_addr (regno);
1998 if (regaddr == -1)
1999 return;
2000 errno = 0;
2001 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2002 & - sizeof (PTRACE_XFER_TYPE);
2003 buf = alloca (size);
2004 memset (buf, 0, size);
2005
2006 if (the_low_target.collect_ptrace_register)
2007 the_low_target.collect_ptrace_register (regno, buf);
2008 else
2009 collect_register (regno, buf);
2010
2011 pid = lwpid_of (get_thread_lwp (current_inferior));
2012 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2013 {
2014 errno = 0;
2015 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2016 *(PTRACE_XFER_TYPE *) (buf + i));
2017 if (errno != 0)
2018 {
2019 /* At this point, ESRCH should mean the process is
2020 already gone, in which case we simply ignore attempts
2021 to change its registers. See also the related
2022 comment in linux_resume_one_lwp. */
2023 if (errno == ESRCH)
2024 return;
2025
2026 if ((*the_low_target.cannot_store_register) (regno) == 0)
2027 {
2028 char *err = strerror (errno);
2029 char *msg = alloca (strlen (err) + 128);
2030 sprintf (msg, "writing register %d: %s",
2031 regno, err);
2032 error (msg);
2033 return;
2034 }
2035 }
2036 regaddr += sizeof (PTRACE_XFER_TYPE);
2037 }
2038 }
2039 else
2040 for (regno = 0; regno < the_low_target.num_regs; regno++)
2041 usr_store_inferior_registers (regno);
2042 }
2043 #endif /* HAVE_LINUX_USRREGS */
2044
2045
2046
2047 #ifdef HAVE_LINUX_REGSETS
2048
2049 static int
2050 regsets_fetch_inferior_registers ()
2051 {
2052 struct regset_info *regset;
2053 int saw_general_regs = 0;
2054 int pid;
2055
2056 regset = target_regsets;
2057
2058 pid = lwpid_of (get_thread_lwp (current_inferior));
2059 while (regset->size >= 0)
2060 {
2061 void *buf;
2062 int res;
2063
2064 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2065 {
2066 regset ++;
2067 continue;
2068 }
2069
2070 buf = xmalloc (regset->size);
2071 #ifndef __sparc__
2072 res = ptrace (regset->get_request, pid, 0, buf);
2073 #else
2074 res = ptrace (regset->get_request, pid, buf, 0);
2075 #endif
2076 if (res < 0)
2077 {
2078 if (errno == EIO)
2079 {
2080 /* If we get EIO on a regset, do not try it again for
2081 this process. */
2082 disabled_regsets[regset - target_regsets] = 1;
free (buf);
2083 continue;
2084 }
2085 else
2086 {
2087 char s[256];
2088 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2089 pid);
2090 perror (s);
2091 }
2092 }
2093 else if (regset->type == GENERAL_REGS)
2094 saw_general_regs = 1;
2095 regset->store_function (buf);
2096 regset ++;
free (buf);
2097 }
2098 if (saw_general_regs)
2099 return 0;
2100 else
2101 return 1;
2102 }
2103
2104 static int
2105 regsets_store_inferior_registers ()
2106 {
2107 struct regset_info *regset;
2108 int saw_general_regs = 0;
2109 int pid;
2110
2111 regset = target_regsets;
2112
2113 pid = lwpid_of (get_thread_lwp (current_inferior));
2114 while (regset->size >= 0)
2115 {
2116 void *buf;
2117 int res;
2118
2119 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2120 {
2121 regset ++;
2122 continue;
2123 }
2124
2125 buf = xmalloc (regset->size);
2126
2127 /* First fill the buffer with the current register set contents,
2128 in case there are any items in the kernel's regset that are
2129 not in gdbserver's regcache. */
2130 #ifndef __sparc__
2131 res = ptrace (regset->get_request, pid, 0, buf);
2132 #else
2133 res = ptrace (regset->get_request, pid, buf, 0);
2134 #endif
2135
2136 if (res == 0)
2137 {
2138 /* Then overlay our cached registers on that. */
2139 regset->fill_function (buf);
2140
2141 /* Only now do we write the register set. */
2142 #ifndef __sparc__
2143 res = ptrace (regset->set_request, pid, 0, buf);
2144 #else
2145 res = ptrace (regset->set_request, pid, buf, 0);
2146 #endif
2147 }
2148
2149 if (res < 0)
2150 {
2151 if (errno == EIO)
2152 {
2153 /* If we get EIO on a regset, do not try it again for
2154 this process. */
2155 disabled_regsets[regset - target_regsets] = 1;
free (buf);
2156 continue;
2157 }
2158 else if (errno == ESRCH)
2159 {
2160 /* At this point, ESRCH should mean the process is
2161 already gone, in which case we simply ignore attempts
2162 to change its registers. See also the related
2163 comment in linux_resume_one_lwp. */
2164 return 0;
2165 }
2166 else
2167 {
2168 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2169 }
2170 }
2171 else if (regset->type == GENERAL_REGS)
2172 saw_general_regs = 1;
2173 regset ++;
2174 free (buf);
2175 }
2176 if (saw_general_regs)
2177 return 0;
2178 else
2179 return 1;
2181 }
2182
2183 #endif /* HAVE_LINUX_REGSETS */
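/* [Editor's note: illustrative sketch, not part of the original
   sources.  It shows the same read-modify-write idea that
   regsets_store_inferior_registers uses, specialized to the
   general-purpose regset on targets that provide PTRACE_GETREGS /
   PTRACE_SETREGS and struct user_regs_struct (e.g. x86); the real
   code drives this generically through the target_regsets table.
   The helper names are hypothetical.]  */
#if 0
#include <sys/user.h>

static int
example_store_gregs (int pid,
		     void (*overlay_cached_regs) (struct user_regs_struct *))
{
  struct user_regs_struct regs;

  /* Read what the kernel currently has, so any fields gdbserver does
     not cache keep their values.  */
  if (ptrace (PTRACE_GETREGS, pid, 0, &regs) != 0)
    return -1;

  /* Overlay the cached register values on top of the live copy.  */
  overlay_cached_regs (&regs);

  /* Only now write the whole set back in one call.  */
  return ptrace (PTRACE_SETREGS, pid, 0, &regs) != 0 ? -1 : 0;
}
#endif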
2184
2185
2186 void
2187 linux_fetch_registers (int regno)
2188 {
2189 #ifdef HAVE_LINUX_REGSETS
2190 if (regsets_fetch_inferior_registers () == 0)
2191 return;
2192 #endif
2193 #ifdef HAVE_LINUX_USRREGS
2194 usr_fetch_inferior_registers (regno);
2195 #endif
2196 }
2197
2198 void
2199 linux_store_registers (int regno)
2200 {
2201 #ifdef HAVE_LINUX_REGSETS
2202 if (regsets_store_inferior_registers () == 0)
2203 return;
2204 #endif
2205 #ifdef HAVE_LINUX_USRREGS
2206 usr_store_inferior_registers (regno);
2207 #endif
2208 }
2209
2210
2211 /* Copy LEN bytes from inferior's memory starting at MEMADDR
2212 to debugger memory starting at MYADDR. */
2213
2214 static int
2215 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2216 {
2217 register int i;
2218 /* Round starting address down to longword boundary. */
2219 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2220 /* Round ending address up; get number of longwords that makes. */
2221 register int count
2222 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2223 / sizeof (PTRACE_XFER_TYPE);
2224 /* Allocate buffer of that many longwords. */
2225 register PTRACE_XFER_TYPE *buffer
2226 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2227 int fd;
2228 char filename[64];
2229 int pid = lwpid_of (get_thread_lwp (current_inferior));
2230
2231 /* Try using /proc. Don't bother for one word. */
2232 if (len >= 3 * sizeof (long))
2233 {
2234 /* We could keep this file open and cache it - possibly one per
2235 thread. That requires some juggling, but is even faster. */
2236 sprintf (filename, "/proc/%d/mem", pid);
2237 fd = open (filename, O_RDONLY | O_LARGEFILE);
2238 if (fd == -1)
2239 goto no_proc;
2240
2241 /* If pread64 is available, use it. It's faster if the kernel
2242 supports it (only one syscall), and it's 64-bit safe even on
2243 32-bit platforms (for instance, SPARC debugging a SPARC64
2244 application). */
2245 #ifdef HAVE_PREAD64
2246 if (pread64 (fd, myaddr, len, memaddr) != len)
2247 #else
2248 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2249 #endif
2250 {
2251 close (fd);
2252 goto no_proc;
2253 }
2254
2255 close (fd);
2256 return 0;
2257 }
2258
2259 no_proc:
2260 /* Read all the longwords */
2261 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2262 {
2263 errno = 0;
2264 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2265 if (errno)
2266 return errno;
2267 }
2268
2269 /* Copy appropriate bytes out of the buffer. */
2270 memcpy (myaddr,
2271 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2272 len);
2273
2274 return 0;
2275 }
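/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It spells out the
   alignment bookkeeping shared by linux_read_memory above and
   linux_write_memory below: the transfer is widened to whole
   PTRACE_XFER_TYPE words, and the requested bytes live at a byte
   offset inside that widened buffer.]  */
#if 0
static void
example_word_span (CORE_ADDR memaddr, int len,
		   CORE_ADDR *first_word, int *word_count, int *byte_offset)
{
  const CORE_ADDR wordsize = sizeof (PTRACE_XFER_TYPE);

  /* First word touched: MEMADDR rounded down to a word boundary.  */
  *first_word = memaddr & -wordsize;
  /* Words needed to cover [MEMADDR, MEMADDR + LEN).  With 8-byte
     words, a 5-byte access at 0x1006 touches 0x1000 and 0x1008, so
     the count is 2.  */
  *word_count = ((memaddr + len) - *first_word + wordsize - 1) / wordsize;
  /* Offset of the requested bytes inside the word buffer.  */
  *byte_offset = memaddr & (wordsize - 1);
}
#endif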
2276
2277 /* Copy LEN bytes of data from debugger memory at MYADDR
2278 to inferior's memory at MEMADDR.
2279 On failure (cannot write the inferior)
2280 returns the value of errno. */
2281
2282 static int
2283 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2284 {
2285 register int i;
2286 /* Round starting address down to longword boundary. */
2287 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2288 /* Round ending address up; get number of longwords that makes. */
2289 register int count
2290 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2291 /* Allocate buffer of that many longwords. */
2292 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2293 int pid = lwpid_of (get_thread_lwp (current_inferior));
2294
2295 if (debug_threads)
2296 {
2297 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2298 }
2299
2300 /* Fill start and end extra bytes of buffer with existing memory data. */
2301
2302 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2303
2304 if (count > 1)
2305 {
2306 buffer[count - 1]
2307 = ptrace (PTRACE_PEEKTEXT, pid,
2308 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2309 * sizeof (PTRACE_XFER_TYPE)),
2310 0);
2311 }
2312
2313 /* Copy data to be written over corresponding part of buffer */
2314
2315 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2316
2317 /* Write the entire buffer. */
2318
2319 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2320 {
2321 errno = 0;
2322 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2323 if (errno)
2324 return errno;
2325 }
2326
2327 return 0;
2328 }
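/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  PTRACE_POKETEXT always
   writes a full word, which is why linux_write_memory pre-fills the
   first and last words of its buffer with PEEKTEXT before overlaying
   the caller's bytes; otherwise the neighbouring bytes of those words
   would be clobbered.  The sketch writes a single byte that way.]  */
#if 0
static int
example_poke_one_byte (int pid, CORE_ADDR memaddr, unsigned char value)
{
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  PTRACE_XFER_TYPE word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
  if (errno)
    return errno;

  /* Replace just the target byte inside the word.  */
  ((unsigned char *) &word)[memaddr - addr] = value;

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, word);
  return errno;
}
#endif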
2329
2330 static int linux_supports_tracefork_flag;
2331
2332 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2333
2334 static int
2335 linux_tracefork_grandchild (void *arg)
2336 {
2337 _exit (0);
2338 }
2339
2340 #define STACK_SIZE 4096
2341
2342 static int
2343 linux_tracefork_child (void *arg)
2344 {
2345 ptrace (PTRACE_TRACEME, 0, 0, 0);
2346 kill (getpid (), SIGSTOP);
2347 #ifdef __ia64__
2348 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2349 CLONE_VM | SIGCHLD, NULL);
2350 #else
2351 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2352 CLONE_VM | SIGCHLD, NULL);
2353 #endif
2354 _exit (0);
2355 }
2356
2357 /* Wrapper function for waitpid which handles EINTR, and emulates
2358 __WALL for systems where that is not available. */
2359
2360 static int
2361 my_waitpid (int pid, int *status, int flags)
2362 {
2363 int ret, out_errno;
2364
2365 if (debug_threads)
2366 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
2367
2368 if (flags & __WALL)
2369 {
2370 sigset_t block_mask, org_mask, wake_mask;
2371 int wnohang;
2372
2373 wnohang = (flags & WNOHANG) != 0;
2374 flags &= ~(__WALL | __WCLONE);
2375 flags |= WNOHANG;
2376
2377 /* Block all signals while here. This avoids knowing about
2378 LinuxThread's signals. */
2379 sigfillset (&block_mask);
2380 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
2381
2382 /* ... except during the sigsuspend below. */
2383 sigemptyset (&wake_mask);
2384
2385 while (1)
2386 {
2387 /* Since all signals are blocked, there's no need to check
2388 for EINTR here. */
2389 ret = waitpid (pid, status, flags);
2390 out_errno = errno;
2391
2392 if (ret == -1 && out_errno != ECHILD)
2393 break;
2394 else if (ret > 0)
2395 break;
2396
2397 if (flags & __WCLONE)
2398 {
2399 /* We've tried both flavors now. If WNOHANG is set,
2400 there's nothing else to do, just bail out. */
2401 if (wnohang)
2402 break;
2403
2404 if (debug_threads)
2405 fprintf (stderr, "blocking\n");
2406
2407 /* Block waiting for signals. */
2408 sigsuspend (&wake_mask);
2409 }
2410
2411 flags ^= __WCLONE;
2412 }
2413
2414 sigprocmask (SIG_SETMASK, &org_mask, NULL);
2415 }
2416 else
2417 {
2418 do
2419 ret = waitpid (pid, status, flags);
2420 while (ret == -1 && errno == EINTR);
2421 out_errno = errno;
2422 }
2423
2424 if (debug_threads)
2425 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
2426 pid, flags, status ? *status : -1, ret);
2427
2428 errno = out_errno;
2429 return ret;
2430 }
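/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It shows the intended
   call pattern for my_waitpid: ask for any LWP with __WALL and let
   the wrapper emulate the flag, by alternating plain and __WCLONE
   polls, on kernels that do not honour it.]  */
#if 0
static int
example_wait_any_lwp (int *status)
{
  /* PID -1 waits for any child; __WALL matches both ordinary children
     and clone threads.  */
  return my_waitpid (-1, status, __WALL);
}
#endif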
2431
2432 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2433 sure that we can enable the option, and that it had the desired
2434 effect. */
2435
2436 static void
2437 linux_test_for_tracefork (void)
2438 {
2439 int child_pid, ret, status;
2440 long second_pid;
2441 char *stack = xmalloc (STACK_SIZE * 4);
2442
2443 linux_supports_tracefork_flag = 0;
2444
2445 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2446 #ifdef __ia64__
2447 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2448 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2449 #else
2450 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2451 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2452 #endif
2453 if (child_pid == -1)
2454 perror_with_name ("clone");
2455
2456 ret = my_waitpid (child_pid, &status, 0);
2457 if (ret == -1)
2458 perror_with_name ("waitpid");
2459 else if (ret != child_pid)
2460 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2461 if (! WIFSTOPPED (status))
2462 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2463
2464 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
2465 if (ret != 0)
2466 {
2467 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2468 if (ret != 0)
2469 {
2470 warning ("linux_test_for_tracefork: failed to kill child");
2471 return;
2472 }
2473
2474 ret = my_waitpid (child_pid, &status, 0);
2475 if (ret != child_pid)
2476 warning ("linux_test_for_tracefork: failed to wait for killed child");
2477 else if (!WIFSIGNALED (status))
2478 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2479 "killed child", status);
2480
2481 return;
2482 }
2483
2484 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2485 if (ret != 0)
2486 warning ("linux_test_for_tracefork: failed to resume child");
2487
2488 ret = my_waitpid (child_pid, &status, 0);
2489
2490 if (ret == child_pid && WIFSTOPPED (status)
2491 && status >> 16 == PTRACE_EVENT_FORK)
2492 {
2493 second_pid = 0;
2494 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2495 if (ret == 0 && second_pid != 0)
2496 {
2497 int second_status;
2498
2499 linux_supports_tracefork_flag = 1;
2500 my_waitpid (second_pid, &second_status, 0);
2501 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2502 if (ret != 0)
2503 warning ("linux_test_for_tracefork: failed to kill second child");
2504 my_waitpid (second_pid, &status, 0);
2505 }
2506 }
2507 else
2508 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2509 "(%d, status 0x%x)", ret, status);
2510
2511 do
2512 {
2513 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2514 if (ret != 0)
2515 warning ("linux_test_for_tracefork: failed to kill child");
2516 my_waitpid (child_pid, &status, 0);
2517 }
2518 while (WIFSTOPPED (status));
2519
2520 free (stack);
2521 }
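/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It shows how a ptrace
   extended fork event is recognized in a waitpid status -- the same
   test linux_test_for_tracefork performs: the tracee stops with
   SIGTRAP, the event code sits in bits 16 and up, and the new
   child's PID is retrieved with PTRACE_GETEVENTMSG.]  */
#if 0
static int
example_decode_fork_event (int pid, int status, long *new_child)
{
  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    {
      /* The new child's PID travels in the event message.  */
      if (ptrace (PTRACE_GETEVENTMSG, pid, 0, new_child) == 0)
	return 1;
    }
  return 0;
}
#endif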
2522
2523
2524 static void
2525 linux_look_up_symbols (void)
2526 {
2527 #ifdef USE_THREAD_DB
2528 struct process_info *proc = current_process ();
2529
2530 if (proc->private->thread_db_active)
2531 return;
2532
2533 proc->private->thread_db_active
2534 = thread_db_init (!linux_supports_tracefork_flag);
2535 #endif
2536 }
2537
2538 static void
2539 linux_request_interrupt (void)
2540 {
2541 extern unsigned long signal_pid;
2542
2543 if (!ptid_equal (cont_thread, null_ptid)
2544 && !ptid_equal (cont_thread, minus_one_ptid))
2545 {
2546 struct lwp_info *lwp;
2547 int lwpid;
2548
2549 lwp = get_thread_lwp (current_inferior);
2550 lwpid = lwpid_of (lwp);
2551 kill_lwp (lwpid, SIGINT);
2552 }
2553 else
2554 kill_lwp (signal_pid, SIGINT);
2555 }
2556
2557 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2558 to debugger memory starting at MYADDR. */
2559
2560 static int
2561 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2562 {
2563 char filename[PATH_MAX];
2564 int fd, n;
2565 int pid = lwpid_of (get_thread_lwp (current_inferior));
2566
2567 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2568
2569 fd = open (filename, O_RDONLY);
2570 if (fd < 0)
2571 return -1;
2572
2573 if (offset != (CORE_ADDR) 0
2574 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2575 n = -1;
2576 else
2577 n = read (fd, myaddr, len);
2578
2579 close (fd);
2580
2581 return n;
2582 }
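/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It shows what the raw
   bytes returned by linux_read_auxv contain: (type, value) pairs the
   size of the inferior's long, terminated by AT_NULL.  The sketch
   assumes the inferior and gdbserver share a word size; the real
   consumers on the GDB side cope with mixed word sizes.]  */
#if 0
#include <elf.h>

static unsigned long
example_auxv_lookup (const unsigned char *auxv, int len, unsigned long type)
{
  const unsigned long *p = (const unsigned long *) auxv;
  const unsigned long *end = (const unsigned long *) (auxv + len);

  /* Entries are pairs: a_type followed by a_val, ending at AT_NULL.  */
  for (; p + 1 < end && p[0] != AT_NULL; p += 2)
    if (p[0] == type)
      return p[1];
  return 0;
}
#endif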
2583
2584 /* These watchpoint related wrapper functions simply pass on the function call
2585 if the target has registered a corresponding function. */
2586
2587 static int
2588 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2589 {
2590 if (the_low_target.insert_watchpoint != NULL)
2591 return the_low_target.insert_watchpoint (type, addr, len);
2592 else
2593 /* Unsupported (see target.h). */
2594 return 1;
2595 }
2596
2597 static int
2598 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2599 {
2600 if (the_low_target.remove_watchpoint != NULL)
2601 return the_low_target.remove_watchpoint (type, addr, len);
2602 else
2603 /* Unsupported (see target.h). */
2604 return 1;
2605 }
2606
2607 static int
2608 linux_stopped_by_watchpoint (void)
2609 {
2610 if (the_low_target.stopped_by_watchpoint != NULL)
2611 return the_low_target.stopped_by_watchpoint ();
2612 else
2613 return 0;
2614 }
2615
2616 static CORE_ADDR
2617 linux_stopped_data_address (void)
2618 {
2619 if (the_low_target.stopped_data_address != NULL)
2620 return the_low_target.stopped_data_address ();
2621 else
2622 return 0;
2623 }
2624
2625 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2626 #if defined(__mcoldfire__)
2627 /* These should really be defined in the kernel's ptrace.h header. */
2628 #define PT_TEXT_ADDR 49*4
2629 #define PT_DATA_ADDR 50*4
2630 #define PT_TEXT_END_ADDR 51*4
2631 #endif
2632
2633 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2634 to tell gdb about. */
2635
2636 static int
2637 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2638 {
2639 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2640 unsigned long text, text_end, data;
2641 int pid = lwpid_of (get_thread_lwp (current_inferior));
2642
2643 errno = 0;
2644
2645 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2646 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2647 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2648
2649 if (errno == 0)
2650 {
2651 /* Both text and data offsets produced at compile-time (and so
2652 used by gdb) are relative to the beginning of the program,
2653 with the data segment immediately following the text segment.
2654 However, the actual runtime layout in memory may put the data
2655 somewhere else, so when we send gdb a data base-address, we
2656 use the real data base address and subtract the compile-time
2657 data base-address from it (which is just the length of the
2658 text segment). BSS immediately follows data in both
2659 cases. */
2660 *text_p = text;
2661 *data_p = data - (text_end - text);
2662
2663 return 1;
2664 }
2665 #endif
2666 return 0;
2667 }
2668 #endif
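/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name and the addresses in the comment are made
   up.  It restates the arithmetic linux_read_offsets performs before
   handing the data base to GDB.]  */
#if 0
static CORE_ADDR
example_data_base_for_gdb (CORE_ADDR text, CORE_ADDR text_end, CORE_ADDR data)
{
  /* E.g. text = 0x20000000, text_end = 0x20008000, data = 0x30000000:
     the reported base is 0x30000000 - 0x8000 = 0x2fff8000, so that
     adding the link-time data offset (which equals the text length)
     lands back on the real runtime data address.  */
  return data - (text_end - text);
}
#endif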
2669
2670 static int
2671 linux_qxfer_osdata (const char *annex,
2672 unsigned char *readbuf, unsigned const char *writebuf,
2673 CORE_ADDR offset, int len)
2674 {
2675 /* We take a snapshot of the process list when the object starts
2676 to be read. */
2677 static const char *buf;
2678 static long len_avail = -1;
2679 static struct buffer buffer;
2680
2681 DIR *dirp;
2682
2683 if (strcmp (annex, "processes") != 0)
2684 return 0;
2685
2686 if (!readbuf || writebuf)
2687 return 0;
2688
2689 if (offset == 0)
2690 {
2691 if (len_avail != -1 && len_avail != 0)
2692 buffer_free (&buffer);
2693 len_avail = 0;
2694 buf = NULL;
2695 buffer_init (&buffer);
2696 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2697
2698 dirp = opendir ("/proc");
2699 if (dirp)
2700 {
2701 struct dirent *dp;
2702 while ((dp = readdir (dirp)) != NULL)
2703 {
2704 struct stat statbuf;
2705 char procentry[sizeof ("/proc/4294967295")];
2706
2707 if (!isdigit (dp->d_name[0])
2708 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2709 continue;
2710
2711 sprintf (procentry, "/proc/%s", dp->d_name);
2712 if (stat (procentry, &statbuf) == 0
2713 && S_ISDIR (statbuf.st_mode))
2714 {
2715 char pathname[128];
2716 FILE *f;
2717 char cmd[MAXPATHLEN + 1];
2718 struct passwd *entry;
2719
2720 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2721 entry = getpwuid (statbuf.st_uid);
2722
2723 if ((f = fopen (pathname, "r")) != NULL)
2724 {
2725 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2726 if (len > 0)
2727 {
2728 int i;
2729 for (i = 0; i < len; i++)
2730 if (cmd[i] == '\0')
2731 cmd[i] = ' ';
2732 cmd[len] = '\0';
2733
2734 buffer_xml_printf (
2735 &buffer,
2736 "<item>"
2737 "<column name=\"pid\">%s</column>"
2738 "<column name=\"user\">%s</column>"
2739 "<column name=\"command\">%s</column>"
2740 "</item>",
2741 dp->d_name,
2742 entry ? entry->pw_name : "?",
2743 cmd);
2744 }
2745 fclose (f);
2746 }
2747 }
2748 }
2749
2750 closedir (dirp);
2751 }
2752 buffer_grow_str0 (&buffer, "</osdata>\n");
2753 buf = buffer_finish (&buffer);
2754 len_avail = strlen (buf);
2755 }
2756
2757 if (offset >= len_avail)
2758 {
2759 /* Done. Get rid of the data. */
2760 buffer_free (&buffer);
2761 buf = NULL;
2762 len_avail = 0;
2763 return 0;
2764 }
2765
2766 if (len > len_avail - offset)
2767 len = len_avail - offset;
2768 memcpy (readbuf, buf + offset, len);
2769
2770 return len;
2771 }
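/* [Editor's note: illustrative sketch, not part of the original
   sources; the helper name is hypothetical.  It isolates the
   offset/length windowing that linux_qxfer_osdata -- like qXfer
   handlers in general -- applies to its snapshot buffer: an offset at
   or past the end means "done", otherwise at most the remaining bytes
   are returned.]  */
#if 0
static int
example_qxfer_window (const char *snapshot, long snapshot_len,
		      unsigned char *readbuf, CORE_ADDR offset, int len)
{
  if (offset >= snapshot_len)
    return 0;			/* EOF: nothing left to transfer.  */
  if (len > snapshot_len - offset)
    len = snapshot_len - offset;
  memcpy (readbuf, snapshot + offset, len);
  return len;
}
#endif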
2772
2773 static int
2774 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2775 unsigned const char *writebuf, CORE_ADDR offset, int len)
2776 {
2777 struct siginfo siginfo;
2778 long pid = -1;
2779
2780 if (current_inferior == NULL)
2781 return -1;
2782
2783 pid = lwpid_of (get_thread_lwp (current_inferior));
2784
2785 if (debug_threads)
2786 fprintf (stderr, "%s siginfo for lwp %ld.\n",
2787 readbuf != NULL ? "Reading" : "Writing",
2788 pid);
2789
2790 if (offset > sizeof (siginfo))
2791 return -1;
2792
2793 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2794 return -1;
2795
2796 if (offset + len > sizeof (siginfo))
2797 len = sizeof (siginfo) - offset;
2798
2799 if (readbuf != NULL)
2800 memcpy (readbuf, (char *) &siginfo + offset, len);
2801 else
2802 {
2803 memcpy ((char *) &siginfo + offset, writebuf, len);
2804 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2805 return -1;
2806 }
2807
2808 return len;
2809 }
2810
2811 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
2812 it lets us notice when children change state; and it acts as the
2813 handler for the sigsuspend in my_waitpid. */
2814
2815 static void
2816 sigchld_handler (int signo)
2817 {
2818 int old_errno = errno;
2819
2820 if (debug_threads)
2821 /* fprintf is not async-signal-safe, so call write directly. */
2822 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
2823
2824 if (target_is_async_p ())
2825 async_file_mark (); /* trigger a linux_wait */
2826
2827 errno = old_errno;
2828 }
2829
2830 static int
2831 linux_supports_non_stop (void)
2832 {
2833 return 1;
2834 }
2835
2836 static int
2837 linux_async (int enable)
2838 {
2839 int previous = (linux_event_pipe[0] != -1);
2840
2841 if (previous != enable)
2842 {
2843 sigset_t mask;
2844 sigemptyset (&mask);
2845 sigaddset (&mask, SIGCHLD);
2846
2847 sigprocmask (SIG_BLOCK, &mask, NULL);
2848
2849 if (enable)
2850 {
2851 if (pipe (linux_event_pipe) == -1)
2852 fatal ("creating event pipe failed.");
2853
2854 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
2855 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
2856
2857 /* Register the event loop handler. */
2858 add_file_handler (linux_event_pipe[0],
2859 handle_target_event, NULL);
2860
2861 /* Always trigger a linux_wait. */
2862 async_file_mark ();
2863 }
2864 else
2865 {
2866 delete_file_handler (linux_event_pipe[0]);
2867
2868 close (linux_event_pipe[0]);
2869 close (linux_event_pipe[1]);
2870 linux_event_pipe[0] = -1;
2871 linux_event_pipe[1] = -1;
2872 }
2873
2874 sigprocmask (SIG_UNBLOCK, &mask, NULL);
2875 }
2876
2877 return previous;
2878 }
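/* [Editor's note: illustrative sketch, not part of the original
   sources; the names are hypothetical.  linux_async and
   sigchld_handler together follow the classic self-pipe pattern: the
   only async-signal-safe thing the signal handler does is mark a
   non-blocking pipe, and the event loop, watching the pipe's read
   end, later calls back into linux_wait from normal context.  The
   sketch shows the marking side in isolation.]  */
#if 0
static int example_event_pipe[2] = { -1, -1 };

static void
example_mark_event (void)
{
  /* write () is async-signal-safe; one byte is enough, since the
     event loop only cares whether the pipe is readable.  */
  if (example_event_pipe[1] != -1)
    write (example_event_pipe[1], "+", 1);
}
#endif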
2879
2880 static int
2881 linux_start_non_stop (int nonstop)
2882 {
2883 /* Register or unregister from event-loop accordingly. */
2884 linux_async (nonstop);
2885 return 0;
2886 }
2887
2888 static struct target_ops linux_target_ops = {
2889 linux_create_inferior,
2890 linux_attach,
2891 linux_kill,
2892 linux_detach,
2893 linux_join,
2894 linux_thread_alive,
2895 linux_resume,
2896 linux_wait,
2897 linux_fetch_registers,
2898 linux_store_registers,
2899 linux_read_memory,
2900 linux_write_memory,
2901 linux_look_up_symbols,
2902 linux_request_interrupt,
2903 linux_read_auxv,
2904 linux_insert_watchpoint,
2905 linux_remove_watchpoint,
2906 linux_stopped_by_watchpoint,
2907 linux_stopped_data_address,
2908 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2909 linux_read_offsets,
2910 #else
2911 NULL,
2912 #endif
2913 #ifdef USE_THREAD_DB
2914 thread_db_get_tls_address,
2915 #else
2916 NULL,
2917 #endif
2918 NULL,
2919 hostio_last_error_from_errno,
2920 linux_qxfer_osdata,
2921 linux_xfer_siginfo,
2922 linux_supports_non_stop,
2923 linux_async,
2924 linux_start_non_stop,
2925 };
2926
2927 static void
2928 linux_init_signals ()
2929 {
2930 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2931 to find what the cancel signal actually is. */
2932 signal (__SIGRTMIN+1, SIG_IGN);
2933 }
2934
2935 void
2936 initialize_low (void)
2937 {
2938 struct sigaction sigchld_action;
2939 memset (&sigchld_action, 0, sizeof (sigchld_action));
2940 set_target_ops (&linux_target_ops);
2941 set_breakpoint_data (the_low_target.breakpoint,
2942 the_low_target.breakpoint_len);
2943 linux_init_signals ();
2944 linux_test_for_tracefork ();
2945 #ifdef HAVE_LINUX_REGSETS
2946 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
2947 ;
2948 disabled_regsets = xmalloc (num_regsets);
2949 #endif
2950
2951 sigchld_action.sa_handler = sigchld_handler;
2952 sigemptyset (&sigchld_action.sa_mask);
2953 sigchld_action.sa_flags = SA_RESTART;
2954 sigaction (SIGCHLD, &sigchld_action, NULL);
2955 }