1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22 #include "ansidecl.h" /* For ATTRIBUTE_PACKED, must be a bug in external.h. */
23 #include "elf/common.h"
24 #include "elf/external.h"
25
26 #include <sys/wait.h>
27 #include <stdio.h>
28 #include <sys/param.h>
29 #include <sys/ptrace.h>
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43
44 #ifndef PTRACE_GETSIGINFO
45 # define PTRACE_GETSIGINFO 0x4202
46 # define PTRACE_SETSIGINFO 0x4203
47 #endif
48
49 #ifndef O_LARGEFILE
50 #define O_LARGEFILE 0
51 #endif
52
53 /* If the system headers did not provide the constants, hard-code the normal
54 values. */
55 #ifndef PTRACE_EVENT_FORK
56
57 #define PTRACE_SETOPTIONS 0x4200
58 #define PTRACE_GETEVENTMSG 0x4201
59
60 /* options set using PTRACE_SETOPTIONS */
61 #define PTRACE_O_TRACESYSGOOD 0x00000001
62 #define PTRACE_O_TRACEFORK 0x00000002
63 #define PTRACE_O_TRACEVFORK 0x00000004
64 #define PTRACE_O_TRACECLONE 0x00000008
65 #define PTRACE_O_TRACEEXEC 0x00000010
66 #define PTRACE_O_TRACEVFORKDONE 0x00000020
67 #define PTRACE_O_TRACEEXIT 0x00000040
68
69 /* Wait extended result codes for the above trace options. */
70 #define PTRACE_EVENT_FORK 1
71 #define PTRACE_EVENT_VFORK 2
72 #define PTRACE_EVENT_CLONE 3
73 #define PTRACE_EVENT_EXEC 4
74 #define PTRACE_EVENT_VFORK_DONE 5
75 #define PTRACE_EVENT_EXIT 6
76
77 #endif /* PTRACE_EVENT_FORK */
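
/* A minimal, self-contained sketch (not part of gdbserver, and nothing
   in this file calls it) of how the options and event codes above fit
   together: enable clone tracing on an already ptrace-stopped tracee,
   then recognize a clone event by the extended code stored in the
   upper half of the waitpid status, and fetch the new LWP id with
   PTRACE_GETEVENTMSG.  The function name and the bare waitpid call are
   illustrative assumptions only.  */

static void
trace_clone_events_sketch (pid_t tracee)
{
  int status;
  unsigned long new_lwp;

  /* TRACEE must already be stopped for PTRACE_SETOPTIONS to succeed.  */
  ptrace (PTRACE_SETOPTIONS, tracee, 0, PTRACE_O_TRACECLONE);
  ptrace (PTRACE_CONT, tracee, 0, 0);

  if (waitpid (tracee, &status, 0) == tracee
      && WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    {
      /* For a clone event, the event message is the new thread's LWP id.  */
      ptrace (PTRACE_GETEVENTMSG, tracee, 0, &new_lwp);
    }
}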
78
79 /* We can't always assume that this flag is available, but all systems
80 with the ptrace event handlers also have __WALL, so it's safe to use
81 in some contexts. */
82 #ifndef __WALL
83 #define __WALL 0x40000000 /* Wait for any child. */
84 #endif
85
86 #ifdef __UCLIBC__
87 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
88 #define HAS_NOMMU
89 #endif
90 #endif
91
92 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
93 representation of the thread ID.
94
95 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
96 the same as the LWP ID.
97
98 ``all_processes'' is keyed by the "overall process ID", which
99 GNU/Linux calls tgid, "thread group ID". */
100
101 struct inferior_list all_lwps;
102
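/* An illustrative sketch of the keying described above (not part of
   the original file; nothing calls it).  Assume a process whose tgid
   is 1234 that contains a thread whose LWP id is 1237 -- the numbers
   are made up, the ptid accessors are gdbserver's own.  */

static void
ptid_keying_sketch (void)
{
  ptid_t ptid = ptid_build (1234, 1237, 0);

  /* The "overall process ID" (tgid) lives in the pid field; it is the
     key of all_processes.  */
  int tgid = ptid_get_pid (ptid);

  /* The LWP id, which is what the kernel's waitpid reports and what
     all_lwps and all_threads are keyed by, lives in the lwp field.  */
  long lwpid = ptid_get_lwp (ptid);

  /* This is only a sketch; avoid unused-variable warnings.  */
  (void) tgid;
  (void) lwpid;
}
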
103 /* A list of all unknown processes which receive stop signals. Some other
104 process will presumably claim each of these as forked children
105 momentarily. */
106
107 struct inferior_list stopped_pids;
108
109 /* FIXME this is a bit of a hack, and could be removed. */
110 int stopping_threads;
111
112 /* FIXME make into a target method? */
113 int using_threads = 1;
114
115 /* This flag is true iff we've just created or attached to our first
116 inferior but it has not stopped yet. As soon as it does, we need
117 to call the low target's arch_setup callback. Doing this only on
118    the first inferior avoids reinitializing the architecture on every
119 inferior, and avoids messing with the register caches of the
120 already running inferiors. NOTE: this assumes all inferiors under
121 control of gdbserver have the same architecture. */
122 static int new_inferior;
123
124 static void linux_resume_one_lwp (struct lwp_info *lwp,
125 int step, int signal, siginfo_t *info);
126 static void linux_resume (struct thread_resume *resume_info, size_t n);
127 static void stop_all_lwps (void);
128 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
129 static int check_removed_breakpoint (struct lwp_info *event_child);
130 static void *add_lwp (ptid_t ptid);
131 static int my_waitpid (int pid, int *status, int flags);
132 static int linux_stopped_by_watchpoint (void);
133 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
134
135 struct pending_signals
136 {
137 int signal;
138 siginfo_t info;
139 struct pending_signals *prev;
140 };
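
/* New entries are pushed onto the head of an LWP's pending_signals
   list, and linux_resume_one_lwp (below) dequeues from the tail by
   walking PREV, so queued signals are re-delivered to the inferior in
   the order in which they were originally intercepted.  */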
141
142 #define PTRACE_ARG3_TYPE long
143 #define PTRACE_XFER_TYPE long
144
145 #ifdef HAVE_LINUX_REGSETS
146 static char *disabled_regsets;
147 static int num_regsets;
148 #endif
149
150 /* The read/write ends of the pipe registered as waitable file in the
151 event loop. */
152 static int linux_event_pipe[2] = { -1, -1 };
153
154 /* True if we're currently in async mode. */
155 #define target_is_async_p() (linux_event_pipe[0] != -1)
156
157 static void send_sigstop (struct inferior_list_entry *entry);
158 static void wait_for_sigstop (struct inferior_list_entry *entry);
159
160 /* Accepts an integer PID; Returns a string representing a file that
161 can be opened to get info for the child process.
162 Space for the result is malloc'd, caller must free. */
163
164 char *
165 linux_child_pid_to_exec_file (int pid)
166 {
167 char *name1, *name2;
168
169 name1 = xmalloc (MAXPATHLEN);
170 name2 = xmalloc (MAXPATHLEN);
171 memset (name2, 0, MAXPATHLEN);
172
173 sprintf (name1, "/proc/%d/exe", pid);
174 if (readlink (name1, name2, MAXPATHLEN) > 0)
175 {
176 free (name1);
177 return name2;
178 }
179 else
180 {
181 free (name2);
182 return name1;
183 }
184 }
185
186 /* Return non-zero if HEADER is a 64-bit ELF file. */
187
188 static int
189 elf_64_header_p (const Elf64_External_Ehdr *header)
190 {
191 return (header->e_ident[EI_MAG0] == ELFMAG0
192 && header->e_ident[EI_MAG1] == ELFMAG1
193 && header->e_ident[EI_MAG2] == ELFMAG2
194 && header->e_ident[EI_MAG3] == ELFMAG3
195 && header->e_ident[EI_CLASS] == ELFCLASS64);
196 }
197
198 /* Return non-zero if FILE is a 64-bit ELF file,
199 zero if the file is not a 64-bit ELF file,
200 and -1 if the file is not accessible or doesn't exist. */
201
202 int
203 elf_64_file_p (const char *file)
204 {
205 Elf64_External_Ehdr header;
206 int fd;
207
208 fd = open (file, O_RDONLY);
209 if (fd < 0)
210 return -1;
211
212 if (read (fd, &header, sizeof (header)) != sizeof (header))
213 {
214 close (fd);
215 return 0;
216 }
217 close (fd);
218
219 return elf_64_header_p (&header);
220 }
221
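/* A hedged usage sketch combining the two helpers above (not part of
   the original file; nothing calls it, and the function name is made
   up): ask whether the main executable of a traced process is 64-bit.  */

static int
inferior_is_elf64_sketch (int pid)
{
  char *file = linux_child_pid_to_exec_file (pid);

  /* 1 for a 64-bit ELF, 0 for anything else we could read,
     -1 if /proc/PID/exe could not be opened at all.  */
  int result = elf_64_file_p (file);

  free (file);
  return result;
}
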
222 static void
223 delete_lwp (struct lwp_info *lwp)
224 {
225 remove_thread (get_lwp_thread (lwp));
226 remove_inferior (&all_lwps, &lwp->head);
227 free (lwp->arch_private);
228 free (lwp);
229 }
230
231 /* Add a process to the common process list, and set its private
232 data. */
233
234 static struct process_info *
235 linux_add_process (int pid, int attached)
236 {
237 struct process_info *proc;
238
239 /* Is this the first process? If so, then set the arch. */
240 if (all_processes.head == NULL)
241 new_inferior = 1;
242
243 proc = add_process (pid, attached);
244 proc->private = xcalloc (1, sizeof (*proc->private));
245
246 if (the_low_target.new_process != NULL)
247 proc->private->arch_private = the_low_target.new_process ();
248
249 return proc;
250 }
251
252 /* Remove a process from the common process list,
253 also freeing all private data. */
254
255 static void
256 linux_remove_process (struct process_info *process)
257 {
258 free (process->private->arch_private);
259 free (process->private);
260 remove_process (process);
261 }
262
263 /* Handle a GNU/Linux extended wait response. If we see a clone
264 event, we need to add the new LWP to our list (and not report the
265 trap to higher layers). */
266
267 static void
268 handle_extended_wait (struct lwp_info *event_child, int wstat)
269 {
270 int event = wstat >> 16;
271 struct lwp_info *new_lwp;
272
273 if (event == PTRACE_EVENT_CLONE)
274 {
275 ptid_t ptid;
276 unsigned long new_pid;
277 int ret, status = W_STOPCODE (SIGSTOP);
278
279 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
280
281 /* If we haven't already seen the new PID stop, wait for it now. */
282 if (! pull_pid_from_list (&stopped_pids, new_pid))
283 {
284 /* The new child has a pending SIGSTOP. We can't affect it until it
285 hits the SIGSTOP, but we're already attached. */
286
287 ret = my_waitpid (new_pid, &status, __WALL);
288
289 if (ret == -1)
290 perror_with_name ("waiting for new child");
291 else if (ret != new_pid)
292 warning ("wait returned unexpected PID %d", ret);
293 else if (!WIFSTOPPED (status))
294 warning ("wait returned unexpected status 0x%x", status);
295 }
296
297 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
298
299 ptid = ptid_build (pid_of (event_child), new_pid, 0);
300 new_lwp = (struct lwp_info *) add_lwp (ptid);
301 add_thread (ptid, new_lwp);
302
303 /* Either we're going to immediately resume the new thread
304 or leave it stopped. linux_resume_one_lwp is a nop if it
305 thinks the thread is currently running, so set this first
306 before calling linux_resume_one_lwp. */
307 new_lwp->stopped = 1;
308
309 /* Normally we will get the pending SIGSTOP. But in some cases
310 we might get another signal delivered to the group first.
311 If we do get another signal, be sure not to lose it. */
312 if (WSTOPSIG (status) == SIGSTOP)
313 {
314 if (! stopping_threads)
315 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
316 }
317 else
318 {
319 new_lwp->stop_expected = 1;
320 if (stopping_threads)
321 {
322 new_lwp->status_pending_p = 1;
323 new_lwp->status_pending = status;
324 }
325 else
326 /* Pass the signal on. This is what GDB does - except
327 shouldn't we really report it instead? */
328 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
329 }
330
331 /* Always resume the current thread. If we are stopping
332 threads, it will have a pending SIGSTOP; we may as well
333 collect it now. */
334 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
335 }
336 }
337
338 /* This function should only be called if the process got a SIGTRAP.
339 The SIGTRAP could mean several things.
340
341 On i386, where decr_pc_after_break is non-zero:
342 If we were single-stepping this process using PTRACE_SINGLESTEP,
343 we will get only the one SIGTRAP (even if the instruction we
344 stepped over was a breakpoint). The value of $eip will be the
345 next instruction.
346 If we continue the process using PTRACE_CONT, we will get a
347 SIGTRAP when we hit a breakpoint. The value of $eip will be
348 the instruction after the breakpoint (i.e. needs to be
349 decremented). If we report the SIGTRAP to GDB, we must also
350 report the undecremented PC. If we cancel the SIGTRAP, we
351 must resume at the decremented PC.
352
353 (Presumably, not yet tested) On a non-decr_pc_after_break machine
354 with hardware or kernel single-step:
355 If we single-step over a breakpoint instruction, our PC will
356 point at the following instruction. If we continue and hit a
357 breakpoint instruction, our PC will point at the breakpoint
358 instruction. */
359
360 static CORE_ADDR
361 get_stop_pc (void)
362 {
363 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
364
365 if (! get_thread_lwp (current_inferior)->stepping)
366 stop_pc -= the_low_target.decr_pc_after_break;
367
368 if (debug_threads)
369 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
370
371 return stop_pc;
372 }
373
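/* A concrete worked example of the comment above (the addresses are
   made up).  On i386, decr_pc_after_break is 1 and the breakpoint
   instruction is the one-byte int3.  If a breakpoint is planted at
   0x08048500 and the inferior runs into it under PTRACE_CONT, the
   kernel reports $eip == 0x08048501; get_stop_pc then returns
   0x08048501 - 1 == 0x08048500, the address of the breakpoint itself,
   which is what the breakpoint bookkeeping below compares against.
   Under PTRACE_SINGLESTEP the subtraction is skipped, because $eip
   already names the next instruction to execute.  */
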
374 static void *
375 add_lwp (ptid_t ptid)
376 {
377 struct lwp_info *lwp;
378
379 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
380 memset (lwp, 0, sizeof (*lwp));
381
382 lwp->head.id = ptid;
383
384 if (the_low_target.new_thread != NULL)
385 lwp->arch_private = the_low_target.new_thread ();
386
387 add_inferior_to_list (&all_lwps, &lwp->head);
388
389 return lwp;
390 }
391
392 /* Start an inferior process and return its pid.
393 ALLARGS is a vector of program-name and args. */
394
395 static int
396 linux_create_inferior (char *program, char **allargs)
397 {
398 struct lwp_info *new_lwp;
399 int pid;
400 ptid_t ptid;
401
402 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
403 pid = vfork ();
404 #else
405 pid = fork ();
406 #endif
407 if (pid < 0)
408 perror_with_name ("fork");
409
410 if (pid == 0)
411 {
412 ptrace (PTRACE_TRACEME, 0, 0, 0);
413
414 signal (__SIGRTMIN + 1, SIG_DFL);
415
416 setpgid (0, 0);
417
418 execv (program, allargs);
419 if (errno == ENOENT)
420 execvp (program, allargs);
421
422 fprintf (stderr, "Cannot exec %s: %s.\n", program,
423 strerror (errno));
424 fflush (stderr);
425 _exit (0177);
426 }
427
428 linux_add_process (pid, 0);
429
430 ptid = ptid_build (pid, pid, 0);
431 new_lwp = add_lwp (ptid);
432 add_thread (ptid, new_lwp);
433 new_lwp->must_set_ptrace_flags = 1;
434
435 return pid;
436 }
437
438 /* Attach to an inferior process. */
439
440 static void
441 linux_attach_lwp_1 (unsigned long lwpid, int initial)
442 {
443 ptid_t ptid;
444 struct lwp_info *new_lwp;
445
446 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
447 {
448 if (!initial)
449 {
450 /* If we fail to attach to an LWP, just warn. */
451 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
452 strerror (errno), errno);
453 fflush (stderr);
454 return;
455 }
456 else
457 /* If we fail to attach to a process, report an error. */
458 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
459 strerror (errno), errno);
460 }
461
462 if (initial)
463     /* NOTE/FIXME: This lwp might not have been the tgid. */
464 ptid = ptid_build (lwpid, lwpid, 0);
465 else
466 {
467 /* Note that extracting the pid from the current inferior is
468 safe, since we're always called in the context of the same
469 process as this new thread. */
470 int pid = pid_of (get_thread_lwp (current_inferior));
471 ptid = ptid_build (pid, lwpid, 0);
472 }
473
474 new_lwp = (struct lwp_info *) add_lwp (ptid);
475 add_thread (ptid, new_lwp);
476
477 /* We need to wait for SIGSTOP before being able to make the next
478 ptrace call on this LWP. */
479 new_lwp->must_set_ptrace_flags = 1;
480
481 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
482 brings it to a halt.
483
484 There are several cases to consider here:
485
486 1) gdbserver has already attached to the process and is being notified
487 of a new thread that is being created.
488 In this case we should ignore that SIGSTOP and resume the process.
489 This is handled below by setting stop_expected = 1.
490
491 2) This is the first thread (the process thread), and we're attaching
492 to it via attach_inferior.
493 In this case we want the process thread to stop.
494 This is handled by having linux_attach clear stop_expected after
495 we return.
496 ??? If the process already has several threads we leave the other
497 threads running.
498
499 3) GDB is connecting to gdbserver and is requesting an enumeration of all
500 existing threads.
501 In this case we want the thread to stop.
502 FIXME: This case is currently not properly handled.
503 We should wait for the SIGSTOP but don't. Things work apparently
504 because enough time passes between when we ptrace (ATTACH) and when
505 gdb makes the next ptrace call on the thread.
506
507 On the other hand, if we are currently trying to stop all threads, we
508 should treat the new thread as if we had sent it a SIGSTOP. This works
509 because we are guaranteed that the add_lwp call above added us to the
510 end of the list, and so the new thread has not yet reached
511 wait_for_sigstop (but will). */
512 if (! stopping_threads)
513 new_lwp->stop_expected = 1;
514 }
515
516 void
517 linux_attach_lwp (unsigned long lwpid)
518 {
519 linux_attach_lwp_1 (lwpid, 0);
520 }
521
522 int
523 linux_attach (unsigned long pid)
524 {
525 struct lwp_info *lwp;
526
527 linux_attach_lwp_1 (pid, 1);
528
529 linux_add_process (pid, 1);
530
531 if (!non_stop)
532 {
533 /* Don't ignore the initial SIGSTOP if we just attached to this
534 process. It will be collected by wait shortly. */
535 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
536 ptid_build (pid, pid, 0));
537 lwp->stop_expected = 0;
538 }
539
540 return 0;
541 }
542
543 struct counter
544 {
545 int pid;
546 int count;
547 };
548
549 static int
550 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
551 {
552 struct counter *counter = args;
553
554 if (ptid_get_pid (entry->id) == counter->pid)
555 {
556 if (++counter->count > 1)
557 return 1;
558 }
559
560 return 0;
561 }
562
563 static int
564 last_thread_of_process_p (struct thread_info *thread)
565 {
566 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
567 int pid = ptid_get_pid (ptid);
568 struct counter counter = { pid , 0 };
569
570 return (find_inferior (&all_threads,
571 second_thread_of_pid_p, &counter) == NULL);
572 }
573
574 /* Kill the inferior lwp. */
575
576 static int
577 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
578 {
579 struct thread_info *thread = (struct thread_info *) entry;
580 struct lwp_info *lwp = get_thread_lwp (thread);
581 int wstat;
582 int pid = * (int *) args;
583
584 if (ptid_get_pid (entry->id) != pid)
585 return 0;
586
587 /* We avoid killing the first thread here, because of a Linux kernel (at
588 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
589 the children get a chance to be reaped, it will remain a zombie
590 forever. */
591
592 if (lwpid_of (lwp) == pid)
593 {
594 if (debug_threads)
595 fprintf (stderr, "lkop: is last of process %s\n",
596 target_pid_to_str (entry->id));
597 return 0;
598 }
599
600 /* If we're killing a running inferior, make sure it is stopped
601 first, as PTRACE_KILL will not work otherwise. */
602 if (!lwp->stopped)
603 send_sigstop (&lwp->head);
604
605 do
606 {
607 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
608
609 /* Make sure it died. The loop is most likely unnecessary. */
610 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
611 } while (pid > 0 && WIFSTOPPED (wstat));
612
613 return 0;
614 }
615
616 static int
617 linux_kill (int pid)
618 {
619 struct process_info *process;
620 struct lwp_info *lwp;
621 struct thread_info *thread;
622 int wstat;
623 int lwpid;
624
625 process = find_process_pid (pid);
626 if (process == NULL)
627 return -1;
628
629 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
630
631 /* See the comment in linux_kill_one_lwp. We did not kill the first
632 thread in the list, so do so now. */
633 lwp = find_lwp_pid (pid_to_ptid (pid));
634 thread = get_lwp_thread (lwp);
635
636 if (debug_threads)
637 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
638 lwpid_of (lwp), pid);
639
640 /* If we're killing a running inferior, make sure it is stopped
641 first, as PTRACE_KILL will not work otherwise. */
642 if (!lwp->stopped)
643 send_sigstop (&lwp->head);
644
645 do
646 {
647 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
648
649 /* Make sure it died. The loop is most likely unnecessary. */
650 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
651 } while (lwpid > 0 && WIFSTOPPED (wstat));
652
653 delete_lwp (lwp);
654 linux_remove_process (process);
655 return 0;
656 }
657
658 static int
659 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
660 {
661 struct thread_info *thread = (struct thread_info *) entry;
662 struct lwp_info *lwp = get_thread_lwp (thread);
663 int pid = * (int *) args;
664
665 if (ptid_get_pid (entry->id) != pid)
666 return 0;
667
668 /* If we're detaching from a running inferior, make sure it is
669 stopped first, as PTRACE_DETACH will not work otherwise. */
670 if (!lwp->stopped)
671 {
672 int lwpid = lwpid_of (lwp);
673
674 stopping_threads = 1;
675 send_sigstop (&lwp->head);
676
677 /* If this detects a new thread through a clone event, the new
678 thread is appended to the end of the lwp list, so we'll
679 eventually detach from it. */
680 wait_for_sigstop (&lwp->head);
681 stopping_threads = 0;
682
683 /* If LWP exits while we're trying to stop it, there's nothing
684 left to do. */
685 lwp = find_lwp_pid (pid_to_ptid (lwpid));
686 if (lwp == NULL)
687 return 0;
688 }
689
690 /* Make sure the process isn't stopped at a breakpoint that's
691 no longer there. */
692 check_removed_breakpoint (lwp);
693
694 /* If this process is stopped but is expecting a SIGSTOP, then make
695 sure we take care of that now. This isn't absolutely guaranteed
696 to collect the SIGSTOP, but is fairly likely to. */
697 if (lwp->stop_expected)
698 {
699 int wstat;
700 /* Clear stop_expected, so that the SIGSTOP will be reported. */
701 lwp->stop_expected = 0;
702 if (lwp->stopped)
703 linux_resume_one_lwp (lwp, 0, 0, NULL);
704 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
705 }
706
707 /* Flush any pending changes to the process's registers. */
708 regcache_invalidate_one ((struct inferior_list_entry *)
709 get_lwp_thread (lwp));
710
711 /* Finally, let it resume. */
712 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
713
714 delete_lwp (lwp);
715 return 0;
716 }
717
718 static int
719 any_thread_of (struct inferior_list_entry *entry, void *args)
720 {
721 int *pid_p = args;
722
723 if (ptid_get_pid (entry->id) == *pid_p)
724 return 1;
725
726 return 0;
727 }
728
729 static int
730 linux_detach (int pid)
731 {
732 struct process_info *process;
733
734 process = find_process_pid (pid);
735 if (process == NULL)
736 return -1;
737
738 current_inferior =
739 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
740
741 delete_all_breakpoints ();
742 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
743 linux_remove_process (process);
744 return 0;
745 }
746
747 static void
748 linux_join (int pid)
749 {
750 int status, ret;
751 struct process_info *process;
752
753 process = find_process_pid (pid);
754 if (process == NULL)
755 return;
756
757 do {
758 ret = my_waitpid (pid, &status, 0);
759 if (WIFEXITED (status) || WIFSIGNALED (status))
760 break;
761 } while (ret != -1 || errno != ECHILD);
762 }
763
764 /* Return nonzero if the given thread is still alive. */
765 static int
766 linux_thread_alive (ptid_t ptid)
767 {
768 struct lwp_info *lwp = find_lwp_pid (ptid);
769
770 /* We assume we always know if a thread exits. If a whole process
771 exited but we still haven't been able to report it to GDB, we'll
772 hold on to the last lwp of the dead process. */
773 if (lwp != NULL)
774 return !lwp->dead;
775 else
776 return 0;
777 }
778
779 /* Return nonzero if this process stopped at a breakpoint which
780 no longer appears to be inserted. Also adjust the PC
781 appropriately to resume where the breakpoint used to be. */
782 static int
783 check_removed_breakpoint (struct lwp_info *event_child)
784 {
785 CORE_ADDR stop_pc;
786 struct thread_info *saved_inferior;
787
788 if (event_child->pending_is_breakpoint == 0)
789 return 0;
790
791 if (debug_threads)
792 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
793 lwpid_of (event_child));
794
795 saved_inferior = current_inferior;
796 current_inferior = get_lwp_thread (event_child);
797
798 stop_pc = get_stop_pc ();
799
800 /* If the PC has changed since we stopped, then we shouldn't do
801 anything. This happens if, for instance, GDB handled the
802 decr_pc_after_break subtraction itself. */
803 if (stop_pc != event_child->pending_stop_pc)
804 {
805 if (debug_threads)
806 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
807 event_child->pending_stop_pc);
808
809 event_child->pending_is_breakpoint = 0;
810 current_inferior = saved_inferior;
811 return 0;
812 }
813
814 /* If the breakpoint is still there, we will report hitting it. */
815 if ((*the_low_target.breakpoint_at) (stop_pc))
816 {
817 if (debug_threads)
818 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
819 current_inferior = saved_inferior;
820 return 0;
821 }
822
823 if (debug_threads)
824 fprintf (stderr, "Removed breakpoint.\n");
825
826 /* For decr_pc_after_break targets, here is where we perform the
827 decrement. We go immediately from this function to resuming,
828 and can not safely call get_stop_pc () again. */
829 if (the_low_target.set_pc != NULL)
830 {
831 if (debug_threads)
832 fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
833 (*the_low_target.set_pc) (stop_pc);
834 }
835
836 /* We consumed the pending SIGTRAP. */
837 event_child->pending_is_breakpoint = 0;
838 event_child->status_pending_p = 0;
839 event_child->status_pending = 0;
840
841 current_inferior = saved_inferior;
842 return 1;
843 }
844
845 /* Return 1 if this lwp has an interesting status pending. This
846 function may silently resume an inferior lwp. */
847 static int
848 status_pending_p (struct inferior_list_entry *entry, void *arg)
849 {
850 struct lwp_info *lwp = (struct lwp_info *) entry;
851 ptid_t ptid = * (ptid_t *) arg;
852
853 /* Check if we're only interested in events from a specific process
854 or its lwps. */
855 if (!ptid_equal (minus_one_ptid, ptid)
856 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
857 return 0;
858
859 if (lwp->status_pending_p && !lwp->suspended)
860 if (check_removed_breakpoint (lwp))
861 {
862 /* This thread was stopped at a breakpoint, and the breakpoint
863 is now gone. We were told to continue (or step...) all threads,
864 so GDB isn't trying to single-step past this breakpoint.
865 So instead of reporting the old SIGTRAP, pretend we got to
866 the breakpoint just after it was removed instead of just
867 before; resume the process. */
868 linux_resume_one_lwp (lwp, 0, 0, NULL);
869 return 0;
870 }
871
872 return (lwp->status_pending_p && !lwp->suspended);
873 }
874
875 static int
876 same_lwp (struct inferior_list_entry *entry, void *data)
877 {
878 ptid_t ptid = *(ptid_t *) data;
879 int lwp;
880
881 if (ptid_get_lwp (ptid) != 0)
882 lwp = ptid_get_lwp (ptid);
883 else
884 lwp = ptid_get_pid (ptid);
885
886 if (ptid_get_lwp (entry->id) == lwp)
887 return 1;
888
889 return 0;
890 }
891
892 struct lwp_info *
893 find_lwp_pid (ptid_t ptid)
894 {
895 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
896 }
897
898 static struct lwp_info *
899 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
900 {
901 int ret;
902 int to_wait_for = -1;
903 struct lwp_info *child = NULL;
904
905 if (debug_threads)
906 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
907
908 if (ptid_equal (ptid, minus_one_ptid))
909 to_wait_for = -1; /* any child */
910 else
911 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
912
913 options |= __WALL;
914
915 retry:
916
917 ret = my_waitpid (to_wait_for, wstatp, options);
918 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
919 return NULL;
920 else if (ret == -1)
921 perror_with_name ("waitpid");
922
923 if (debug_threads
924 && (!WIFSTOPPED (*wstatp)
925 || (WSTOPSIG (*wstatp) != 32
926 && WSTOPSIG (*wstatp) != 33)))
927 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
928
929 child = find_lwp_pid (pid_to_ptid (ret));
930
931 /* If we didn't find a process, one of two things presumably happened:
932 - A process we started and then detached from has exited. Ignore it.
933 - A process we are controlling has forked and the new child's stop
934 was reported to us by the kernel. Save its PID. */
935 if (child == NULL && WIFSTOPPED (*wstatp))
936 {
937 add_pid_to_list (&stopped_pids, ret);
938 goto retry;
939 }
940 else if (child == NULL)
941 goto retry;
942
943 child->stopped = 1;
944 child->pending_is_breakpoint = 0;
945
946 child->last_status = *wstatp;
947
948 /* Architecture-specific setup after inferior is running.
949 This needs to happen after we have attached to the inferior
950 and it is stopped for the first time, but before we access
951 any inferior registers. */
952 if (new_inferior)
953 {
954 the_low_target.arch_setup ();
955 #ifdef HAVE_LINUX_REGSETS
956 memset (disabled_regsets, 0, num_regsets);
957 #endif
958 new_inferior = 0;
959 }
960
961 if (debug_threads
962 && WIFSTOPPED (*wstatp)
963 && the_low_target.get_pc != NULL)
964 {
965 struct thread_info *saved_inferior = current_inferior;
966 CORE_ADDR pc;
967
968 current_inferior = (struct thread_info *)
969 find_inferior_id (&all_threads, child->head.id);
970 pc = (*the_low_target.get_pc) ();
971 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
972 current_inferior = saved_inferior;
973 }
974
975 return child;
976 }
977
978 /* Wait for an event from child PID. If PID is -1, wait for any
979 child. Store the stop status through the status pointer WSTAT.
980 OPTIONS is passed to the waitpid call. Return 0 if no child stop
981 event was found and OPTIONS contains WNOHANG. Return the PID of
982 the stopped child otherwise. */
983
984 static int
985 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
986 {
987 CORE_ADDR stop_pc;
988 struct lwp_info *event_child = NULL;
989 int bp_status;
990 struct lwp_info *requested_child = NULL;
991
992 /* Check for a lwp with a pending status. */
993 /* It is possible that the user changed the pending task's registers since
994 it stopped. We correctly handle the change of PC if we hit a breakpoint
995 (in check_removed_breakpoint); signals should be reported anyway. */
996
997 if (ptid_equal (ptid, minus_one_ptid)
998 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
999 {
1000 event_child = (struct lwp_info *)
1001 find_inferior (&all_lwps, status_pending_p, &ptid);
1002 if (debug_threads && event_child)
1003 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1004 }
1005 else
1006 {
1007 requested_child = find_lwp_pid (ptid);
1008 if (requested_child->status_pending_p
1009 && !check_removed_breakpoint (requested_child))
1010 event_child = requested_child;
1011 }
1012
1013 if (event_child != NULL)
1014 {
1015 if (debug_threads)
1016 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1017 lwpid_of (event_child), event_child->status_pending);
1018 *wstat = event_child->status_pending;
1019 event_child->status_pending_p = 0;
1020 event_child->status_pending = 0;
1021 current_inferior = get_lwp_thread (event_child);
1022 return lwpid_of (event_child);
1023 }
1024
1025 /* We only enter this loop if no process has a pending wait status. Thus
1026 any action taken in response to a wait status inside this loop is
1027 responding as soon as we detect the status, not after any pending
1028 events. */
1029 while (1)
1030 {
1031 event_child = linux_wait_for_lwp (ptid, wstat, options);
1032
1033 if ((options & WNOHANG) && event_child == NULL)
1034 return 0;
1035
1036 if (event_child == NULL)
1037 error ("event from unknown child");
1038
1039 current_inferior = get_lwp_thread (event_child);
1040
1041 /* Check for thread exit. */
1042 if (! WIFSTOPPED (*wstat))
1043 {
1044 if (debug_threads)
1045 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1046
1047 /* If the last thread is exiting, just return. */
1048 if (last_thread_of_process_p (current_inferior))
1049 {
1050 if (debug_threads)
1051 fprintf (stderr, "LWP %ld is last lwp of process\n",
1052 lwpid_of (event_child));
1053 return lwpid_of (event_child);
1054 }
1055
1056 delete_lwp (event_child);
1057
1058 if (!non_stop)
1059 {
1060 current_inferior = (struct thread_info *) all_threads.head;
1061 if (debug_threads)
1062 fprintf (stderr, "Current inferior is now %ld\n",
1063 lwpid_of (get_thread_lwp (current_inferior)));
1064 }
1065 else
1066 {
1067 current_inferior = NULL;
1068 if (debug_threads)
1069 fprintf (stderr, "Current inferior is now <NULL>\n");
1070 }
1071
1072 /* If we were waiting for this particular child to do something...
1073 well, it did something. */
1074 if (requested_child != NULL)
1075 return lwpid_of (event_child);
1076
1077 /* Wait for a more interesting event. */
1078 continue;
1079 }
1080
1081 if (event_child->must_set_ptrace_flags)
1082 {
1083 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1084 0, PTRACE_O_TRACECLONE);
1085 event_child->must_set_ptrace_flags = 0;
1086 }
1087
1088 if (WIFSTOPPED (*wstat)
1089 && WSTOPSIG (*wstat) == SIGSTOP
1090 && event_child->stop_expected)
1091 {
1092 if (debug_threads)
1093 fprintf (stderr, "Expected stop.\n");
1094 event_child->stop_expected = 0;
1095 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
1096 continue;
1097 }
1098
1099 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1100 && *wstat >> 16 != 0)
1101 {
1102 handle_extended_wait (event_child, *wstat);
1103 continue;
1104 }
1105
1106 /* If GDB is not interested in this signal, don't stop other
1107 threads, and don't report it to GDB. Just resume the
1108 inferior right away. We do this for threading-related
1109 signals as well as any that GDB specifically requested we
1110 ignore. But never ignore SIGSTOP if we sent it ourselves,
1111 and do not ignore signals when stepping - they may require
1112 special handling to skip the signal handler. */
1113 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1114 thread library? */
1115 if (WIFSTOPPED (*wstat)
1116 && !event_child->stepping
1117 && (
1118 #ifdef USE_THREAD_DB
1119 (current_process ()->private->thread_db_active
1120 && (WSTOPSIG (*wstat) == __SIGRTMIN
1121 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1122 ||
1123 #endif
1124 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1125 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1126 {
1127 siginfo_t info, *info_p;
1128
1129 if (debug_threads)
1130 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1131 WSTOPSIG (*wstat), lwpid_of (event_child));
1132
1133 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1134 info_p = &info;
1135 else
1136 info_p = NULL;
1137 linux_resume_one_lwp (event_child,
1138 event_child->stepping,
1139 WSTOPSIG (*wstat), info_p);
1140 continue;
1141 }
1142
1143 /* If this event was not handled above, and is not a SIGTRAP, report
1144 it. */
1145 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1146 return lwpid_of (event_child);
1147
1148 /* If this target does not support breakpoints, we simply report the
1149 SIGTRAP; it's of no concern to us. */
1150 if (the_low_target.get_pc == NULL)
1151 return lwpid_of (event_child);
1152
1153 stop_pc = get_stop_pc ();
1154
1155 /* bp_reinsert will only be set if we were single-stepping.
1156 Notice that we will resume the process after hitting
1157 a gdbserver breakpoint; single-stepping to/over one
1158 is not supported (yet). */
1159 if (event_child->bp_reinsert != 0)
1160 {
1161 if (debug_threads)
1162 fprintf (stderr, "Reinserted breakpoint.\n");
1163 reinsert_breakpoint (event_child->bp_reinsert);
1164 event_child->bp_reinsert = 0;
1165
1166 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1167 linux_resume_one_lwp (event_child, 0, 0, NULL);
1168 continue;
1169 }
1170
1171 bp_status = check_breakpoints (stop_pc);
1172
1173 if (bp_status != 0)
1174 {
1175 if (debug_threads)
1176 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1177
1178 /* We hit one of our own breakpoints. We mark it as a pending
1179 breakpoint, so that check_removed_breakpoint () will do the PC
1180 adjustment for us at the appropriate time. */
1181 event_child->pending_is_breakpoint = 1;
1182 event_child->pending_stop_pc = stop_pc;
1183
1184 /* We may need to put the breakpoint back. We continue in the event
1185 loop instead of simply replacing the breakpoint right away,
1186 in order to not lose signals sent to the thread that hit the
1187 breakpoint. Unfortunately this increases the window where another
1188 thread could sneak past the removed breakpoint. For the current
1189 use of server-side breakpoints (thread creation) this is
1190 acceptable; but it needs to be considered before this breakpoint
1191 mechanism can be used in more general ways. For some breakpoints
1192 it may be necessary to stop all other threads, but that should
1193 be avoided where possible.
1194
1195 If breakpoint_reinsert_addr is NULL, that means that we can
1196 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1197 mark it for reinsertion, and single-step.
1198
1199 Otherwise, call the target function to figure out where we need
1200 our temporary breakpoint, create it, and continue executing this
1201 process. */
1202
1203 /* NOTE: we're lifting breakpoints in non-stop mode. This
1204 is currently only used for thread event breakpoints, so
1205 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1206 events. */
1207 if (bp_status == 2)
1208 /* No need to reinsert. */
1209 linux_resume_one_lwp (event_child, 0, 0, NULL);
1210 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1211 {
1212 event_child->bp_reinsert = stop_pc;
1213 uninsert_breakpoint (stop_pc);
1214 linux_resume_one_lwp (event_child, 1, 0, NULL);
1215 }
1216 else
1217 {
1218 reinsert_breakpoint_by_bp
1219 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1220 linux_resume_one_lwp (event_child, 0, 0, NULL);
1221 }
1222
1223 continue;
1224 }
1225
1226 if (debug_threads)
1227 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1228
1229 /* If we were single-stepping, we definitely want to report the
1230 SIGTRAP. Although the single-step operation has completed,
1231      do not clear the stepping flag yet; we need to check it
1232 in wait_for_sigstop. */
1233 if (event_child->stepping)
1234 return lwpid_of (event_child);
1235
1236 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1237 Check if it is a breakpoint, and if so mark the process information
1238 accordingly. This will handle both the necessary fiddling with the
1239 PC on decr_pc_after_break targets and suppressing extra threads
1240 hitting a breakpoint if two hit it at once and then GDB removes it
1241 after the first is reported. Arguably it would be better to report
1242 multiple threads hitting breakpoints simultaneously, but the current
1243 remote protocol does not allow this. */
1244 if ((*the_low_target.breakpoint_at) (stop_pc))
1245 {
1246 event_child->pending_is_breakpoint = 1;
1247 event_child->pending_stop_pc = stop_pc;
1248 }
1249
1250 return lwpid_of (event_child);
1251 }
1252
1253 /* NOTREACHED */
1254 return 0;
1255 }
1256
1257 static int
1258 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1259 {
1260 ptid_t wait_ptid;
1261
1262 if (ptid_is_pid (ptid))
1263 {
1264 /* A request to wait for a specific tgid. This is not possible
1265 with waitpid, so instead, we wait for any child, and leave
1266 children we're not interested in right now with a pending
1267 status to report later. */
1268 wait_ptid = minus_one_ptid;
1269 }
1270 else
1271 wait_ptid = ptid;
1272
1273 while (1)
1274 {
1275 int event_pid;
1276
1277 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1278
1279 if (event_pid > 0
1280 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1281 {
1282 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1283
1284 if (! WIFSTOPPED (*wstat))
1285 mark_lwp_dead (event_child, *wstat);
1286 else
1287 {
1288 event_child->status_pending_p = 1;
1289 event_child->status_pending = *wstat;
1290 }
1291 }
1292 else
1293 return event_pid;
1294 }
1295 }
1296
1297 /* Wait for process, returns status. */
1298
1299 static ptid_t
1300 linux_wait_1 (ptid_t ptid,
1301 struct target_waitstatus *ourstatus, int target_options)
1302 {
1303 int w;
1304 struct thread_info *thread = NULL;
1305 struct lwp_info *lwp = NULL;
1306 int options;
1307 int pid;
1308
1309 /* Translate generic target options into linux options. */
1310 options = __WALL;
1311 if (target_options & TARGET_WNOHANG)
1312 options |= WNOHANG;
1313
1314 retry:
1315 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1316
1317 /* If we were only supposed to resume one thread, only wait for
1318 that thread - if it's still alive. If it died, however - which
1319 can happen if we're coming from the thread death case below -
1320 then we need to make sure we restart the other threads. We could
1321 pick a thread at random or restart all; restarting all is less
1322 arbitrary. */
1323 if (!non_stop
1324 && !ptid_equal (cont_thread, null_ptid)
1325 && !ptid_equal (cont_thread, minus_one_ptid))
1326 {
1327 thread = (struct thread_info *) find_inferior_id (&all_threads,
1328 cont_thread);
1329
1330 /* No stepping, no signal - unless one is pending already, of course. */
1331 if (thread == NULL)
1332 {
1333 struct thread_resume resume_info;
1334 resume_info.thread = minus_one_ptid;
1335 resume_info.kind = resume_continue;
1336 resume_info.sig = 0;
1337 linux_resume (&resume_info, 1);
1338 }
1339 else
1340 ptid = cont_thread;
1341 }
1342
1343 pid = linux_wait_for_event (ptid, &w, options);
1344 if (pid == 0) /* only if TARGET_WNOHANG */
1345 return null_ptid;
1346
1347 lwp = get_thread_lwp (current_inferior);
1348
1349 /* If we are waiting for a particular child, and it exited,
1350 linux_wait_for_event will return its exit status. Similarly if
1351 the last child exited. If this is not the last child, however,
1352 do not report it as exited until there is a 'thread exited' response
1353 available in the remote protocol. Instead, just wait for another event.
1354 This should be safe, because if the thread crashed we will already
1355 have reported the termination signal to GDB; that should stop any
1356 in-progress stepping operations, etc.
1357
1358 Report the exit status of the last thread to exit. This matches
1359 LinuxThreads' behavior. */
1360
1361 if (last_thread_of_process_p (current_inferior))
1362 {
1363 if (WIFEXITED (w) || WIFSIGNALED (w))
1364 {
1365 int pid = pid_of (lwp);
1366 struct process_info *process = find_process_pid (pid);
1367
1368 delete_lwp (lwp);
1369 linux_remove_process (process);
1370
1371 current_inferior = NULL;
1372
1373 if (WIFEXITED (w))
1374 {
1375 ourstatus->kind = TARGET_WAITKIND_EXITED;
1376 ourstatus->value.integer = WEXITSTATUS (w);
1377
1378 if (debug_threads)
1379 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1380 }
1381 else
1382 {
1383 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1384 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1385
1386 if (debug_threads)
1387 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1388
1389 }
1390
1391 return pid_to_ptid (pid);
1392 }
1393 }
1394 else
1395 {
1396 if (!WIFSTOPPED (w))
1397 goto retry;
1398 }
1399
1400 /* In all-stop, stop all threads. Be careful to only do this if
1401 we're about to report an event to GDB. */
1402 if (!non_stop)
1403 stop_all_lwps ();
1404
1405 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1406
1407 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1408 {
1409       /* A thread that GDB requested to stop with vCont;t has
1410          stopped cleanly, so report it as SIG0.  The use of
1411          SIGSTOP is an implementation detail.  */
1412 ourstatus->value.sig = TARGET_SIGNAL_0;
1413 }
1414 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1415 {
1416       /* A thread that GDB requested to stop with vCont;t stopped,
1417          but for some other reason.  Set stop_expected so the
1418          pending SIGSTOP is ignored and the LWP is resumed.  */
1419 lwp->stop_expected = 1;
1420 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1421 }
1422 else
1423 {
1424 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1425 }
1426
1427 if (debug_threads)
1428 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1429 target_pid_to_str (lwp->head.id),
1430 ourstatus->kind,
1431 ourstatus->value.sig);
1432
1433 return lwp->head.id;
1434 }
1435
1436 /* Get rid of any pending event in the pipe. */
1437 static void
1438 async_file_flush (void)
1439 {
1440 int ret;
1441 char buf;
1442
1443 do
1444 ret = read (linux_event_pipe[0], &buf, 1);
1445 while (ret >= 0 || (ret == -1 && errno == EINTR));
1446 }
1447
1448 /* Put something in the pipe, so the event loop wakes up. */
1449 static void
1450 async_file_mark (void)
1451 {
1452 int ret;
1453
1454 async_file_flush ();
1455
1456 do
1457 ret = write (linux_event_pipe[1], "+", 1);
1458 while (ret == 0 || (ret == -1 && errno == EINTR));
1459
1460 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1461 be awakened anyway. */
1462 }
1463
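/* A generic sketch of how an event loop can use the pipe above as a
   wakeup source (this is not gdbserver's actual event-loop code, and
   nothing calls it; <sys/select.h> and the function name are
   assumptions of the sketch only): block in select until
   async_file_mark has written its byte, then run whatever handler
   ultimately calls linux_wait, which drains the pipe again via
   async_file_flush.  */

#include <sys/select.h>

static void
wait_for_linux_event_sketch (void)
{
  fd_set readable;

  FD_ZERO (&readable);
  FD_SET (linux_event_pipe[0], &readable);

  if (select (linux_event_pipe[0] + 1, &readable, NULL, NULL, NULL) > 0
      && FD_ISSET (linux_event_pipe[0], &readable))
    {
      /* An event is pending; the real loop would dispatch to the
	 handler registered for this file descriptor.  */
    }
}
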
1464 static ptid_t
1465 linux_wait (ptid_t ptid,
1466 struct target_waitstatus *ourstatus, int target_options)
1467 {
1468 ptid_t event_ptid;
1469
1470 if (debug_threads)
1471 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1472
1473 /* Flush the async file first. */
1474 if (target_is_async_p ())
1475 async_file_flush ();
1476
1477 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1478
1479 /* If at least one stop was reported, there may be more. A single
1480 SIGCHLD can signal more than one child stop. */
1481 if (target_is_async_p ()
1482 && (target_options & TARGET_WNOHANG) != 0
1483 && !ptid_equal (event_ptid, null_ptid))
1484 async_file_mark ();
1485
1486 return event_ptid;
1487 }
1488
1489 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1490 thread groups are in use, we need to use tkill. */
1491
1492 static int
1493 kill_lwp (unsigned long lwpid, int signo)
1494 {
1495 static int tkill_failed;
1496
1497 errno = 0;
1498
1499 #ifdef SYS_tkill
1500 if (!tkill_failed)
1501 {
1502 int ret = syscall (SYS_tkill, lwpid, signo);
1503 if (errno != ENOSYS)
1504 return ret;
1505 errno = 0;
1506 tkill_failed = 1;
1507 }
1508 #endif
1509
1510 return kill (lwpid, signo);
1511 }
1512
1513 static void
1514 send_sigstop (struct inferior_list_entry *entry)
1515 {
1516 struct lwp_info *lwp = (struct lwp_info *) entry;
1517 int pid;
1518
1519 if (lwp->stopped)
1520 return;
1521
1522 pid = lwpid_of (lwp);
1523
1524 /* If we already have a pending stop signal for this process, don't
1525 send another. */
1526 if (lwp->stop_expected)
1527 {
1528 if (debug_threads)
1529 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1530
1531 /* We clear the stop_expected flag so that wait_for_sigstop
1532 will receive the SIGSTOP event (instead of silently resuming and
1533 waiting again). It'll be reset below. */
1534 lwp->stop_expected = 0;
1535 return;
1536 }
1537
1538 if (debug_threads)
1539 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1540
1541 kill_lwp (pid, SIGSTOP);
1542 }
1543
1544 static void
1545 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1546 {
1547 /* It's dead, really. */
1548 lwp->dead = 1;
1549
1550 /* Store the exit status for later. */
1551 lwp->status_pending_p = 1;
1552 lwp->status_pending = wstat;
1553
1554 /* So that check_removed_breakpoint doesn't try to figure out if
1555 this is stopped at a breakpoint. */
1556 lwp->pending_is_breakpoint = 0;
1557
1558 /* Prevent trying to stop it. */
1559 lwp->stopped = 1;
1560
1561 /* No further stops are expected from a dead lwp. */
1562 lwp->stop_expected = 0;
1563 }
1564
1565 static void
1566 wait_for_sigstop (struct inferior_list_entry *entry)
1567 {
1568 struct lwp_info *lwp = (struct lwp_info *) entry;
1569 struct thread_info *saved_inferior;
1570 int wstat;
1571 ptid_t saved_tid;
1572 ptid_t ptid;
1573
1574 if (lwp->stopped)
1575 return;
1576
1577 saved_inferior = current_inferior;
1578 if (saved_inferior != NULL)
1579 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1580 else
1581 saved_tid = null_ptid; /* avoid bogus unused warning */
1582
1583 ptid = lwp->head.id;
1584
1585 linux_wait_for_event (ptid, &wstat, __WALL);
1586
1587 /* If we stopped with a non-SIGSTOP signal, save it for later
1588 and record the pending SIGSTOP. If the process exited, just
1589 return. */
1590 if (WIFSTOPPED (wstat)
1591 && WSTOPSIG (wstat) != SIGSTOP)
1592 {
1593 if (debug_threads)
1594 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1595 lwpid_of (lwp), wstat);
1596
1597 /* Do not leave a pending single-step finish to be reported to
1598 the client. The client will give us a new action for this
1599 thread, possibly a continue request --- otherwise, the client
1600 would consider this pending SIGTRAP reported later a spurious
1601 signal. */
1602 if (WSTOPSIG (wstat) == SIGTRAP
1603 && lwp->stepping
1604 && !linux_stopped_by_watchpoint ())
1605 {
1606 if (debug_threads)
1607 fprintf (stderr, " single-step SIGTRAP ignored\n");
1608 }
1609 else
1610 {
1611 lwp->status_pending_p = 1;
1612 lwp->status_pending = wstat;
1613 }
1614 lwp->stop_expected = 1;
1615 }
1616 else if (!WIFSTOPPED (wstat))
1617 {
1618 if (debug_threads)
1619 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1620 lwpid_of (lwp));
1621
1622 /* Leave this status pending for the next time we're able to
1623 report it. In the mean time, we'll report this lwp as dead
1624 to GDB, so GDB doesn't try to read registers and memory from
1625 it. */
1626 mark_lwp_dead (lwp, wstat);
1627 }
1628
1629 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1630 current_inferior = saved_inferior;
1631 else
1632 {
1633 if (debug_threads)
1634 fprintf (stderr, "Previously current thread died.\n");
1635
1636 if (non_stop)
1637 {
1638 /* We can't change the current inferior behind GDB's back,
1639 otherwise, a subsequent command may apply to the wrong
1640 process. */
1641 current_inferior = NULL;
1642 }
1643 else
1644 {
1645 /* Set a valid thread as current. */
1646 set_desired_inferior (0);
1647 }
1648 }
1649 }
1650
1651 static void
1652 stop_all_lwps (void)
1653 {
1654 stopping_threads = 1;
1655 for_each_inferior (&all_lwps, send_sigstop);
1656 for_each_inferior (&all_lwps, wait_for_sigstop);
1657 stopping_threads = 0;
1658 }
1659
1660 /* Resume execution of the inferior process.
1661 If STEP is nonzero, single-step it.
1662 If SIGNAL is nonzero, give it that signal. */
1663
1664 static void
1665 linux_resume_one_lwp (struct lwp_info *lwp,
1666 int step, int signal, siginfo_t *info)
1667 {
1668 struct thread_info *saved_inferior;
1669
1670 if (lwp->stopped == 0)
1671 return;
1672
1673 /* If we have pending signals or status, and a new signal, enqueue the
1674 signal. Also enqueue the signal if we are waiting to reinsert a
1675 breakpoint; it will be picked up again below. */
1676 if (signal != 0
1677 && (lwp->status_pending_p || lwp->pending_signals != NULL
1678 || lwp->bp_reinsert != 0))
1679 {
1680 struct pending_signals *p_sig;
1681 p_sig = xmalloc (sizeof (*p_sig));
1682 p_sig->prev = lwp->pending_signals;
1683 p_sig->signal = signal;
1684 if (info == NULL)
1685 memset (&p_sig->info, 0, sizeof (siginfo_t));
1686 else
1687 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1688 lwp->pending_signals = p_sig;
1689 }
1690
1691 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1692 return;
1693
1694 saved_inferior = current_inferior;
1695 current_inferior = get_lwp_thread (lwp);
1696
1697 if (debug_threads)
1698 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1699 lwpid_of (lwp), step ? "step" : "continue", signal,
1700 lwp->stop_expected ? "expected" : "not expected");
1701
1702 /* This bit needs some thinking about. If we get a signal that
1703 we must report while a single-step reinsert is still pending,
1704 we often end up resuming the thread. It might be better to
1705 (ew) allow a stack of pending events; then we could be sure that
1706 the reinsert happened right away and not lose any signals.
1707
1708 Making this stack would also shrink the window in which breakpoints are
1709 uninserted (see comment in linux_wait_for_lwp) but not enough for
1710 complete correctness, so it won't solve that problem. It may be
1711 worthwhile just to solve this one, however. */
1712 if (lwp->bp_reinsert != 0)
1713 {
1714 if (debug_threads)
1715 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1716 if (step == 0)
1717 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1718 step = 1;
1719
1720 /* Postpone any pending signal. It was enqueued above. */
1721 signal = 0;
1722 }
1723
1724 check_removed_breakpoint (lwp);
1725
1726 if (debug_threads && the_low_target.get_pc != NULL)
1727 {
1728 CORE_ADDR pc = (*the_low_target.get_pc) ();
1729 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
1730 }
1731
1732 /* If we have pending signals, consume one unless we are trying to reinsert
1733 a breakpoint. */
1734 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1735 {
1736 struct pending_signals **p_sig;
1737
1738 p_sig = &lwp->pending_signals;
1739 while ((*p_sig)->prev != NULL)
1740 p_sig = &(*p_sig)->prev;
1741
1742 signal = (*p_sig)->signal;
1743 if ((*p_sig)->info.si_signo != 0)
1744 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1745
1746 free (*p_sig);
1747 *p_sig = NULL;
1748 }
1749
1750 if (the_low_target.prepare_to_resume != NULL)
1751 the_low_target.prepare_to_resume (lwp);
1752
1753 regcache_invalidate_one ((struct inferior_list_entry *)
1754 get_lwp_thread (lwp));
1755 errno = 0;
1756 lwp->stopped = 0;
1757 lwp->stepping = step;
1758 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);
1759
1760 current_inferior = saved_inferior;
1761 if (errno)
1762 {
1763 /* ESRCH from ptrace either means that the thread was already
1764 running (an error) or that it is gone (a race condition). If
1765 it's gone, we will get a notification the next time we wait,
1766 so we can ignore the error. We could differentiate these
1767 two, but it's tricky without waiting; the thread still exists
1768 as a zombie, so sending it signal 0 would succeed. So just
1769 ignore ESRCH. */
1770 if (errno == ESRCH)
1771 return;
1772
1773 perror_with_name ("ptrace");
1774 }
1775 }
1776
1777 struct thread_resume_array
1778 {
1779 struct thread_resume *resume;
1780 size_t n;
1781 };
1782
1783 /* This function is called once per thread. We look up the thread
1784 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1785 resume request.
1786
1787 This algorithm is O(threads * resume elements), but resume elements
1788 is small (and will remain small at least until GDB supports thread
1789 suspension). */
1790 static int
1791 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1792 {
1793 struct lwp_info *lwp;
1794 struct thread_info *thread;
1795 int ndx;
1796 struct thread_resume_array *r;
1797
1798 thread = (struct thread_info *) entry;
1799 lwp = get_thread_lwp (thread);
1800 r = arg;
1801
1802 for (ndx = 0; ndx < r->n; ndx++)
1803 {
1804 ptid_t ptid = r->resume[ndx].thread;
1805 if (ptid_equal (ptid, minus_one_ptid)
1806 || ptid_equal (ptid, entry->id)
1807 || (ptid_is_pid (ptid)
1808 && (ptid_get_pid (ptid) == pid_of (lwp)))
1809 || (ptid_get_lwp (ptid) == -1
1810 && (ptid_get_pid (ptid) == pid_of (lwp))))
1811 {
1812 lwp->resume = &r->resume[ndx];
1813 return 0;
1814 }
1815 }
1816
1817 /* No resume action for this thread. */
1818 lwp->resume = NULL;
1819
1820 return 0;
1821 }
1822
1823
1824 /* Set *FLAG_P if this lwp has an interesting status pending. */
1825 static int
1826 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1827 {
1828 struct lwp_info *lwp = (struct lwp_info *) entry;
1829
1830 /* LWPs which will not be resumed are not interesting, because
1831 we might not wait for them next time through linux_wait. */
1832 if (lwp->resume == NULL)
1833 return 0;
1834
1835 /* If this thread has a removed breakpoint, we won't have any
1836 events to report later, so check now. check_removed_breakpoint
1837 may clear status_pending_p. We avoid calling check_removed_breakpoint
1838 for any thread that we are not otherwise going to resume - this
1839 lets us preserve stopped status when two threads hit a breakpoint.
1840 GDB removes the breakpoint to single-step a particular thread
1841 past it, then re-inserts it and resumes all threads. We want
1842 to report the second thread without resuming it in the interim. */
1843 if (lwp->status_pending_p)
1844 check_removed_breakpoint (lwp);
1845
1846 if (lwp->status_pending_p)
1847 * (int *) flag_p = 1;
1848
1849 return 0;
1850 }
1851
1852 /* This function is called once per thread. We check the thread's resume
1853 request, which will tell us whether to resume, step, or leave the thread
1854 stopped; and what signal, if any, it should be sent.
1855
1856 For threads which we aren't explicitly told otherwise, we preserve
1857 the stepping flag; this is used for stepping over gdbserver-placed
1858 breakpoints.
1859
1860 If pending_flag was set in any thread, we queue any needed
1861 signals, since we won't actually resume. We already have a pending
1862 event to report, so we don't need to preserve any step requests;
1863 they should be re-issued if necessary. */
1864
1865 static int
1866 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1867 {
1868 struct lwp_info *lwp;
1869 struct thread_info *thread;
1870 int step;
1871 int pending_flag = * (int *) arg;
1872
1873 thread = (struct thread_info *) entry;
1874 lwp = get_thread_lwp (thread);
1875
1876 if (lwp->resume == NULL)
1877 return 0;
1878
1879 if (lwp->resume->kind == resume_stop)
1880 {
1881 if (debug_threads)
1882 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1883
1884 if (!lwp->stopped)
1885 {
1886 if (debug_threads)
1887 fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
1888
1889 lwp->suspended = 1;
1890 send_sigstop (&lwp->head);
1891 }
1892 else
1893 {
1894 if (debug_threads)
1895 {
1896 if (lwp->suspended)
1897 fprintf (stderr, "already stopped/suspended LWP %ld\n",
1898 lwpid_of (lwp));
1899 else
1900 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
1901 lwpid_of (lwp));
1902 }
1903
1904 /* Make sure we leave the LWP suspended, so we don't try to
1905 resume it without GDB telling us to. FIXME: The LWP may
1906 have been stopped in an internal event that was not meant
1907 to be notified back to GDB (e.g., gdbserver breakpoint),
1908 so we should be reporting a stop event in that case
1909 too. */
1910 lwp->suspended = 1;
1911 }
1912
1913 /* For stop requests, we're done. */
1914 lwp->resume = NULL;
1915 return 0;
1916 }
1917 else
1918 lwp->suspended = 0;
1919
1920 /* If this thread, which is about to be resumed, has a pending status,
1921 then don't resume it - we can just report the pending status.
1922 Make sure to queue any signals that would otherwise be sent. In
1923 all-stop mode, we make this decision based on whether *any* thread
1924 has a pending status, and then no thread is resumed. */
1925 if (non_stop)
1926 resume_status_pending_p (&lwp->head, &pending_flag);
1927
1928 if (!pending_flag)
1929 {
1930 if (debug_threads)
1931 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
1932
1933 if (ptid_equal (lwp->resume->thread, minus_one_ptid)
1934 && lwp->stepping
1935 && lwp->pending_is_breakpoint)
1936 step = 1;
1937 else
1938 step = (lwp->resume->kind == resume_step);
1939
1940 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
1941 }
1942 else
1943 {
1944 if (debug_threads)
1945 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
1946
1947 /* If we have a new signal, enqueue the signal. */
1948 if (lwp->resume->sig != 0)
1949 {
1950 struct pending_signals *p_sig;
1951 p_sig = xmalloc (sizeof (*p_sig));
1952 p_sig->prev = lwp->pending_signals;
1953 p_sig->signal = lwp->resume->sig;
1954 memset (&p_sig->info, 0, sizeof (siginfo_t));
1955
1956 /* If this is the same signal we were previously stopped by,
1957 make sure to queue its siginfo. We can ignore the return
1958 value of ptrace; if it fails, we'll skip
1959 PTRACE_SETSIGINFO. */
1960 if (WIFSTOPPED (lwp->last_status)
1961 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1962 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1963
1964 lwp->pending_signals = p_sig;
1965 }
1966 }
1967
1968 lwp->resume = NULL;
1969 return 0;
1970 }
1971
1972 static void
1973 linux_resume (struct thread_resume *resume_info, size_t n)
1974 {
1975 int pending_flag;
1976 struct thread_resume_array array = { resume_info, n };
1977
1978 find_inferior (&all_threads, linux_set_resume_request, &array);
1979
1980 /* If there is a thread which would otherwise be resumed, which
1981 has a pending status, then don't resume any threads - we can just
1982 report the pending status. Make sure to queue any signals
1983 that would otherwise be sent. In non-stop mode, we'll apply this
1984 logic to each thread individually. */
1985 pending_flag = 0;
1986 if (!non_stop)
1987 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1988
1989 if (debug_threads)
1990 {
1991 if (pending_flag)
1992 fprintf (stderr, "Not resuming, pending status\n");
1993 else
1994 fprintf (stderr, "Resuming, no pending status\n");
1995 }
1996
1997 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1998 }
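
/* A usage sketch of how a resume request might be built for linux_resume,
   with invented pid/lwp numbers; ptid_build and the resume_continue kind
   are assumed to be the helpers declared elsewhere in gdbserver.  The
   point visible in linux_set_resume_request above is that the first
   matching element wins, so specific entries must precede the wildcard.  */
#if 0
static void
resume_request_sketch (void)
{
  /* Step LWP 1235 of process 1234 and continue every other thread,
     roughly what a "vCont;s:p4d2.4d3;c" packet would ask for.  */
  struct thread_resume r[2];

  r[0].thread = ptid_build (1234, 1235, 0);
  r[0].kind = resume_step;
  r[0].sig = 0;

  r[1].thread = minus_one_ptid;	/* Wildcard: everything else.  */
  r[1].kind = resume_continue;
  r[1].sig = 0;

  linux_resume (r, 2);
}
#endif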
1999
2000 #ifdef HAVE_LINUX_USRREGS
2001
2002 int
2003 register_addr (int regnum)
2004 {
2005 int addr;
2006
2007 if (regnum < 0 || regnum >= the_low_target.num_regs)
2008 error ("Invalid register number %d.", regnum);
2009
2010 addr = the_low_target.regmap[regnum];
2011
2012 return addr;
2013 }
2014
2015 /* Fetch one register. */
2016 static void
2017 fetch_register (int regno)
2018 {
2019 CORE_ADDR regaddr;
2020 int i, size;
2021 char *buf;
2022 int pid;
2023
2024 if (regno >= the_low_target.num_regs)
2025 return;
2026 if ((*the_low_target.cannot_fetch_register) (regno))
2027 return;
2028
2029 regaddr = register_addr (regno);
2030 if (regaddr == -1)
2031 return;
2032
2033 pid = lwpid_of (get_thread_lwp (current_inferior));
2034 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2035 & - sizeof (PTRACE_XFER_TYPE));
2036 buf = alloca (size);
2037 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2038 {
2039 errno = 0;
2040 *(PTRACE_XFER_TYPE *) (buf + i) =
2041 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
2042 regaddr += sizeof (PTRACE_XFER_TYPE);
2043 if (errno != 0)
2044 {
2045 /* Warning, not error, in case we are attached; sometimes the
2046 kernel doesn't let us at the registers. */
2047 char *err = strerror (errno);
2048 char *msg = alloca (strlen (err) + 128);
2049 sprintf (msg, "reading register %d: %s", regno, err);
2050 error (msg);
2051 goto error_exit;
2052 }
2053 }
2054
2055 if (the_low_target.supply_ptrace_register)
2056 the_low_target.supply_ptrace_register (regno, buf);
2057 else
2058 supply_register (regno, buf);
2059
2060 error_exit:;
2061 }
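
/* The size computation in fetch_register rounds the register size up to
   a whole number of ptrace transfer words.  A self-contained check of
   that identity, assuming an 8-byte PTRACE_XFER_TYPE; the 10-byte size
   is just an example value.  */
#if 0
#include <assert.h>

static void
round_up_check_sketch (void)
{
  const unsigned long word = 8;

  /* (10 + 8 - 1) & -8  ==  17 & ~7  ==  16: two transfer words.  */
  assert (((10 + word - 1) & -word) == 16);
  /* Sizes that are already a multiple of the word are unchanged.  */
  assert (((16 + word - 1) & -word) == 16);
}
#endif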
2062
2063 /* Fetch all registers, or just one, from the child process. */
2064 static void
2065 usr_fetch_inferior_registers (int regno)
2066 {
2067 if (regno == -1)
2068 for (regno = 0; regno < the_low_target.num_regs; regno++)
2069 fetch_register (regno);
2070 else
2071 fetch_register (regno);
2072 }
2073
2074 /* Store our register values back into the inferior.
2075 If REGNO is -1, do this for all registers.
2076 Otherwise, REGNO specifies which register (so we can save time). */
2077 static void
2078 usr_store_inferior_registers (int regno)
2079 {
2080 CORE_ADDR regaddr;
2081 int i, size;
2082 char *buf;
2083 int pid;
2084
2085 if (regno >= 0)
2086 {
2087 if (regno >= the_low_target.num_regs)
2088 return;
2089
2090 if ((*the_low_target.cannot_store_register) (regno) == 1)
2091 return;
2092
2093 regaddr = register_addr (regno);
2094 if (regaddr == -1)
2095 return;
2096 errno = 0;
2097 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2098 & - sizeof (PTRACE_XFER_TYPE);
2099 buf = alloca (size);
2100 memset (buf, 0, size);
2101
2102 if (the_low_target.collect_ptrace_register)
2103 the_low_target.collect_ptrace_register (regno, buf);
2104 else
2105 collect_register (regno, buf);
2106
2107 pid = lwpid_of (get_thread_lwp (current_inferior));
2108 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2109 {
2110 errno = 0;
2111 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2112 *(PTRACE_XFER_TYPE *) (buf + i));
2113 if (errno != 0)
2114 {
2115 /* At this point, ESRCH should mean the process is
2116 already gone, in which case we simply ignore attempts
2117 to change its registers. See also the related
2118 comment in linux_resume_one_lwp. */
2119 if (errno == ESRCH)
2120 return;
2121
2122 if ((*the_low_target.cannot_store_register) (regno) == 0)
2123 {
2124 char *err = strerror (errno);
2125 char *msg = alloca (strlen (err) + 128);
2126 sprintf (msg, "writing register %d: %s",
2127 regno, err);
2128 error (msg);
2129 return;
2130 }
2131 }
2132 regaddr += sizeof (PTRACE_XFER_TYPE);
2133 }
2134 }
2135 else
2136 for (regno = 0; regno < the_low_target.num_regs; regno++)
2137 usr_store_inferior_registers (regno);
2138 }
2139 #endif /* HAVE_LINUX_USRREGS */
2140
2141
2142
2143 #ifdef HAVE_LINUX_REGSETS
2144
2145 static int
2146 regsets_fetch_inferior_registers ()
2147 {
2148 struct regset_info *regset;
2149 int saw_general_regs = 0;
2150 int pid;
2151
2152 regset = target_regsets;
2153
2154 pid = lwpid_of (get_thread_lwp (current_inferior));
2155 while (regset->size >= 0)
2156 {
2157 void *buf;
2158 int res;
2159
2160 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2161 {
2162 regset ++;
2163 continue;
2164 }
2165
2166 buf = xmalloc (regset->size);
2167 #ifndef __sparc__
2168 res = ptrace (regset->get_request, pid, 0, buf);
2169 #else
2170 res = ptrace (regset->get_request, pid, buf, 0);
2171 #endif
2172 if (res < 0)
2173 {
2174 if (errno == EIO)
2175 {
2176 /* If we get EIO on a regset, do not try it again for
2177 this process. */
2178 disabled_regsets[regset - target_regsets] = 1;
2179 free (buf);
2180 continue;
2181 }
2182 else
2183 {
2184 char s[256];
2185 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2186 pid);
2187 perror (s);
2188 }
2189 }
2190 else if (regset->type == GENERAL_REGS)
2191 saw_general_regs = 1;
2192 regset->store_function (buf);
2193 regset ++;
2194 free (buf);
2195 }
2196 if (saw_general_regs)
2197 return 0;
2198 else
2199 return 1;
2200 }
2201
2202 static int
2203 regsets_store_inferior_registers ()
2204 {
2205 struct regset_info *regset;
2206 int saw_general_regs = 0;
2207 int pid;
2208
2209 regset = target_regsets;
2210
2211 pid = lwpid_of (get_thread_lwp (current_inferior));
2212 while (regset->size >= 0)
2213 {
2214 void *buf;
2215 int res;
2216
2217 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2218 {
2219 regset ++;
2220 continue;
2221 }
2222
2223 buf = xmalloc (regset->size);
2224
2225 /* First fill the buffer with the current register set contents,
2226 in case there are any items in the kernel's regset that are
2227 not in gdbserver's regcache. */
2228 #ifndef __sparc__
2229 res = ptrace (regset->get_request, pid, 0, buf);
2230 #else
2231 res = ptrace (regset->get_request, pid, buf, 0);
2232 #endif
2233
2234 if (res == 0)
2235 {
2236 /* Then overlay our cached registers on that. */
2237 regset->fill_function (buf);
2238
2239 /* Only now do we write the register set. */
2240 #ifndef __sparc__
2241 res = ptrace (regset->set_request, pid, 0, buf);
2242 #else
2243 res = ptrace (regset->set_request, pid, buf, 0);
2244 #endif
2245 }
2246
2247 if (res < 0)
2248 {
2249 if (errno == EIO)
2250 {
2251 /* If we get EIO on a regset, do not try it again for
2252 this process. */
2253 disabled_regsets[regset - target_regsets] = 1;
2254 free (buf);
2255 continue;
2256 }
2257 else if (errno == ESRCH)
2258 {
2259 /* At this point, ESRCH should mean the process is
2260 already gone, in which case we simply ignore attempts
2261 to change its registers. See also the related
2262 comment in linux_resume_one_lwp. */
2263 free (buf);
2264 return 0;
2265 }
2266 else
2267 {
2268 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2269 }
2270 }
2271 else if (regset->type == GENERAL_REGS)
2272 saw_general_regs = 1;
2273 regset ++;
2274 free (buf);
2275 }
2276 if (saw_general_regs)
2277 return 0;
2278 else
2279 return 1;
2280 return 0;
2281 }
2282
2283 #endif /* HAVE_LINUX_REGSETS */
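
/* A minimal sketch of the read-modify-write pattern used by
   regsets_store_inferior_registers, written against the x86
   PTRACE_GETREGS/PTRACE_SETREGS requests and struct user_regs_struct.
   Those names are an assumption for illustration only; the real code
   gets its requests and buffer layout from target_regsets.  */
#if 0
#include <sys/ptrace.h>
#include <sys/user.h>

static int
poke_pc_sketch (int pid, unsigned long new_pc)
{
  struct user_regs_struct regs;

  /* Read the whole general-purpose regset first, so that fields we do
     not touch keep the values the kernel has...  */
  if (ptrace (PTRACE_GETREGS, pid, 0, &regs) != 0)
    return -1;

  /* ...overlay the one register we care about (x86-64 field name)...  */
  regs.rip = new_pc;

  /* ...and only then write the whole set back.  */
  return ptrace (PTRACE_SETREGS, pid, 0, &regs);
}
#endif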
2284
2285
2286 void
2287 linux_fetch_registers (int regno)
2288 {
2289 #ifdef HAVE_LINUX_REGSETS
2290 if (regsets_fetch_inferior_registers () == 0)
2291 return;
2292 #endif
2293 #ifdef HAVE_LINUX_USRREGS
2294 usr_fetch_inferior_registers (regno);
2295 #endif
2296 }
2297
2298 void
2299 linux_store_registers (int regno)
2300 {
2301 #ifdef HAVE_LINUX_REGSETS
2302 if (regsets_store_inferior_registers () == 0)
2303 return;
2304 #endif
2305 #ifdef HAVE_LINUX_USRREGS
2306 usr_store_inferior_registers (regno);
2307 #endif
2308 }
2309
2310
2311 /* Copy LEN bytes from inferior's memory starting at MEMADDR to debugger
2312 memory starting at MYADDR. Returns 0 on success, or errno on failure. */
2313
2314 static int
2315 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2316 {
2317 register int i;
2318 /* Round starting address down to longword boundary. */
2319 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2320 /* Round ending address up; get number of longwords that makes. */
2321 register int count
2322 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2323 / sizeof (PTRACE_XFER_TYPE);
2324 /* Allocate buffer of that many longwords. */
2325 register PTRACE_XFER_TYPE *buffer
2326 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2327 int fd;
2328 char filename[64];
2329 int pid = lwpid_of (get_thread_lwp (current_inferior));
2330
2331 /* Try using /proc. Don't bother for small transfers. */
2332 if (len >= 3 * sizeof (long))
2333 {
2334 /* We could keep this file open and cache it - possibly one per
2335 thread. That requires some juggling, but is even faster. */
2336 sprintf (filename, "/proc/%d/mem", pid);
2337 fd = open (filename, O_RDONLY | O_LARGEFILE);
2338 if (fd == -1)
2339 goto no_proc;
2340
2341 /* If pread64 is available, use it. It's faster if the kernel
2342 supports it (only one syscall), and it's 64-bit safe even on
2343 32-bit platforms (for instance, SPARC debugging a SPARC64
2344 application). */
2345 #ifdef HAVE_PREAD64
2346 if (pread64 (fd, myaddr, len, memaddr) != len)
2347 #else
2348 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2349 #endif
2350 {
2351 close (fd);
2352 goto no_proc;
2353 }
2354
2355 close (fd);
2356 return 0;
2357 }
2358
2359 no_proc:
2360 /* Read all the longwords */
2361 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2362 {
2363 errno = 0;
2364 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2365 if (errno)
2366 return errno;
2367 }
2368
2369 /* Copy appropriate bytes out of the buffer. */
2370 memcpy (myaddr,
2371 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2372 len);
2373
2374 return 0;
2375 }
2376
2377 /* Copy LEN bytes of data from debugger memory at MYADDR
2378 to inferior's memory at MEMADDR.
2379 On failure (cannot write the inferior)
2380 returns the value of errno. */
2381
2382 static int
2383 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2384 {
2385 register int i;
2386 /* Round starting address down to longword boundary. */
2387 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2388 /* Round ending address up; get number of longwords that makes. */
2389 register int count
2390 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2391 /* Allocate buffer of that many longwords. */
2392 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2393 int pid = lwpid_of (get_thread_lwp (current_inferior));
2394
2395 if (debug_threads)
2396 {
2397 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2398 }
2399
2400 /* Fill start and end extra bytes of buffer with existing memory data. */
2401
2402 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2403
2404 if (count > 1)
2405 {
2406 buffer[count - 1]
2407 = ptrace (PTRACE_PEEKTEXT, pid,
2408 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2409 * sizeof (PTRACE_XFER_TYPE)),
2410 0);
2411 }
2412
2413 /* Copy data to be written over corresponding part of buffer */
2414
2415 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2416
2417 /* Write the entire buffer. */
2418
2419 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2420 {
2421 errno = 0;
2422 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2423 if (errno)
2424 return errno;
2425 }
2426
2427 return 0;
2428 }
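
/* Both memory transfer routines above round the start address down to a
   PTRACE_XFER_TYPE boundary and the end address up.  A self-contained
   check with made-up numbers, assuming an 8-byte transfer word.  */
#if 0
#include <assert.h>

static void
peektext_window_sketch (void)
{
  const unsigned long word = 8;
  unsigned long memaddr = 0x1003;
  unsigned long len = 6;

  /* Start rounded down: 0x1003 -> 0x1000.  */
  unsigned long addr = memaddr & -word;
  /* End rounded up: bytes 0x1003..0x1008 span two 8-byte words.  */
  unsigned long count = ((memaddr + len) - addr + word - 1) / word;

  assert (addr == 0x1000);
  assert (count == 2);
  /* The requested bytes start at offset memaddr % word in the buffer,
     which is where the memcpy calls above point.  */
  assert ((memaddr & (word - 1)) == 3);
}
#endif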
2429
2430 static int linux_supports_tracefork_flag;
2431
2432 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2433
2434 static int
2435 linux_tracefork_grandchild (void *arg)
2436 {
2437 _exit (0);
2438 }
2439
2440 #define STACK_SIZE 4096
2441
2442 static int
2443 linux_tracefork_child (void *arg)
2444 {
2445 ptrace (PTRACE_TRACEME, 0, 0, 0);
2446 kill (getpid (), SIGSTOP);
2447 #ifdef __ia64__
2448 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2449 CLONE_VM | SIGCHLD, NULL);
2450 #else
2451 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2452 CLONE_VM | SIGCHLD, NULL);
2453 #endif
2454 _exit (0);
2455 }
2456
2457 /* Wrapper function for waitpid which handles EINTR, and emulates
2458 __WALL for systems where that is not available. */
2459
2460 static int
2461 my_waitpid (int pid, int *status, int flags)
2462 {
2463 int ret, out_errno;
2464
2465 if (debug_threads)
2466 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
2467
2468 if (flags & __WALL)
2469 {
2470 sigset_t block_mask, org_mask, wake_mask;
2471 int wnohang;
2472
2473 wnohang = (flags & WNOHANG) != 0;
2474 flags &= ~(__WALL | __WCLONE);
2475 flags |= WNOHANG;
2476
2477 /* Block all signals while here. This avoids knowing about
2478 LinuxThreads' signals. */
2479 sigfillset (&block_mask);
2480 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
2481
2482 /* ... except during the sigsuspend below. */
2483 sigemptyset (&wake_mask);
2484
2485 while (1)
2486 {
2487 /* Since all signals are blocked, there's no need to check
2488 for EINTR here. */
2489 ret = waitpid (pid, status, flags);
2490 out_errno = errno;
2491
2492 if (ret == -1 && out_errno != ECHILD)
2493 break;
2494 else if (ret > 0)
2495 break;
2496
2497 if (flags & __WCLONE)
2498 {
2499 /* We've tried both flavors now. If WNOHANG is set,
2500 there's nothing else to do, just bail out. */
2501 if (wnohang)
2502 break;
2503
2504 if (debug_threads)
2505 fprintf (stderr, "blocking\n");
2506
2507 /* Block waiting for signals. */
2508 sigsuspend (&wake_mask);
2509 }
2510
2511 flags ^= __WCLONE;
2512 }
2513
2514 sigprocmask (SIG_SETMASK, &org_mask, NULL);
2515 }
2516 else
2517 {
2518 do
2519 ret = waitpid (pid, status, flags);
2520 while (ret == -1 && errno == EINTR);
2521 out_errno = errno;
2522 }
2523
2524 if (debug_threads)
2525 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
2526 pid, flags, status ? *status : -1, ret);
2527
2528 errno = out_errno;
2529 return ret;
2530 }
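
/* A brief usage sketch of my_waitpid: the __WALL emulation lets one call
   collect both plain and clone children, and the status is decoded with
   the usual wait macros.  */
#if 0
static void
collect_any_lwp_sketch (void)
{
  int status;
  /* -1 means any child; __WALL covers clone LWPs as well.  */
  int lwpid = my_waitpid (-1, &status, __WALL);

  if (lwpid > 0 && WIFSTOPPED (status))
    fprintf (stderr, "LWP %d stopped by signal %d\n",
	     lwpid, WSTOPSIG (status));
  else if (lwpid > 0 && WIFEXITED (status))
    fprintf (stderr, "LWP %d exited with code %d\n",
	     lwpid, WEXITSTATUS (status));
}
#endif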
2531
2532 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2533 sure that we can enable the option, and that it had the desired
2534 effect. */
2535
2536 static void
2537 linux_test_for_tracefork (void)
2538 {
2539 int child_pid, ret, status;
2540 long second_pid;
2541 char *stack = xmalloc (STACK_SIZE * 4);
2542
2543 linux_supports_tracefork_flag = 0;
2544
2545 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2546 #ifdef __ia64__
2547 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2548 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2549 #else
2550 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2551 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2552 #endif
2553 if (child_pid == -1)
2554 perror_with_name ("clone");
2555
2556 ret = my_waitpid (child_pid, &status, 0);
2557 if (ret == -1)
2558 perror_with_name ("waitpid");
2559 else if (ret != child_pid)
2560 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2561 if (! WIFSTOPPED (status))
2562 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2563
2564 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
2565 if (ret != 0)
2566 {
2567 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2568 if (ret != 0)
2569 {
2570 warning ("linux_test_for_tracefork: failed to kill child");
2571 return;
2572 }
2573
2574 ret = my_waitpid (child_pid, &status, 0);
2575 if (ret != child_pid)
2576 warning ("linux_test_for_tracefork: failed to wait for killed child");
2577 else if (!WIFSIGNALED (status))
2578 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2579 "killed child", status);
2580
2581 return;
2582 }
2583
2584 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2585 if (ret != 0)
2586 warning ("linux_test_for_tracefork: failed to resume child");
2587
2588 ret = my_waitpid (child_pid, &status, 0);
2589
2590 if (ret == child_pid && WIFSTOPPED (status)
2591 && status >> 16 == PTRACE_EVENT_FORK)
2592 {
2593 second_pid = 0;
2594 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2595 if (ret == 0 && second_pid != 0)
2596 {
2597 int second_status;
2598
2599 linux_supports_tracefork_flag = 1;
2600 my_waitpid (second_pid, &second_status, 0);
2601 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2602 if (ret != 0)
2603 warning ("linux_test_for_tracefork: failed to kill second child");
2604 my_waitpid (second_pid, &status, 0);
2605 }
2606 }
2607 else
2608 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2609 "(%d, status 0x%x)", ret, status);
2610
2611 do
2612 {
2613 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2614 if (ret != 0)
2615 warning ("linux_test_for_tracefork: failed to kill child");
2616 my_waitpid (child_pid, &status, 0);
2617 }
2618 while (WIFSTOPPED (status));
2619
2620 free (stack);
2621 }
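
/* The probe above reduces to the following ptrace sequence, shown here
   in isolation; PID is assumed to be a tracee that is already stopped.
   With PTRACE_O_TRACEFORK set, a fork makes the tracee stop with SIGTRAP
   and PTRACE_EVENT_FORK in the high bits of the wait status, and
   PTRACE_GETEVENTMSG then yields the new child's pid.  */
#if 0
static long
wait_for_fork_event_sketch (int pid)
{
  int status;
  long new_pid = 0;

  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEFORK);
  ptrace (PTRACE_CONT, pid, 0, 0);

  if (my_waitpid (pid, &status, 0) == pid
      && WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

  return new_pid;
}
#endif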
2622
2623
2624 static void
2625 linux_look_up_symbols (void)
2626 {
2627 #ifdef USE_THREAD_DB
2628 struct process_info *proc = current_process ();
2629
2630 if (proc->private->thread_db_active)
2631 return;
2632
2633 proc->private->thread_db_active
2634 = thread_db_init (!linux_supports_tracefork_flag);
2635 #endif
2636 }
2637
2638 static void
2639 linux_request_interrupt (void)
2640 {
2641 extern unsigned long signal_pid;
2642
2643 if (!ptid_equal (cont_thread, null_ptid)
2644 && !ptid_equal (cont_thread, minus_one_ptid))
2645 {
2646 struct lwp_info *lwp;
2647 int lwpid;
2648
2649 lwp = get_thread_lwp (current_inferior);
2650 lwpid = lwpid_of (lwp);
2651 kill_lwp (lwpid, SIGINT);
2652 }
2653 else
2654 kill_lwp (signal_pid, SIGINT);
2655 }
2656
2657 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2658 to debugger memory starting at MYADDR. */
2659
2660 static int
2661 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2662 {
2663 char filename[PATH_MAX];
2664 int fd, n;
2665 int pid = lwpid_of (get_thread_lwp (current_inferior));
2666
2667 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2668
2669 fd = open (filename, O_RDONLY);
2670 if (fd < 0)
2671 return -1;
2672
2673 if (offset != (CORE_ADDR) 0
2674 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2675 n = -1;
2676 else
2677 n = read (fd, myaddr, len);
2678
2679 close (fd);
2680
2681 return n;
2682 }
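
/* The bytes returned by linux_read_auxv are raw auxv records.  A minimal
   consumer sketch, assuming a 64-bit inferior (Elf64_auxv_t from
   <elf.h>); a 32-bit inferior would use Elf32_auxv_t instead.  */
#if 0
#include <elf.h>

static unsigned long
auxv_entry_point_sketch (const unsigned char *auxv, size_t nbytes)
{
  const Elf64_auxv_t *av = (const Elf64_auxv_t *) auxv;
  size_t i;

  /* The vector is a sequence of (a_type, a_un.a_val) pairs terminated by
     an AT_NULL entry.  */
  for (i = 0; (i + 1) * sizeof (Elf64_auxv_t) <= nbytes; i++)
    {
      if (av[i].a_type == AT_NULL)
	break;
      if (av[i].a_type == AT_ENTRY)
	return av[i].a_un.a_val;	/* Program entry point.  */
    }

  return 0;
}
#endif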
2683
2684 /* These breakpoint and watchpoint related wrapper functions simply
2685 pass on the function call if the target has registered a
2686 corresponding function. */
2687
2688 static int
2689 linux_insert_point (char type, CORE_ADDR addr, int len)
2690 {
2691 if (the_low_target.insert_point != NULL)
2692 return the_low_target.insert_point (type, addr, len);
2693 else
2694 /* Unsupported (see target.h). */
2695 return 1;
2696 }
2697
2698 static int
2699 linux_remove_point (char type, CORE_ADDR addr, int len)
2700 {
2701 if (the_low_target.remove_point != NULL)
2702 return the_low_target.remove_point (type, addr, len);
2703 else
2704 /* Unsupported (see target.h). */
2705 return 1;
2706 }
2707
2708 static int
2709 linux_stopped_by_watchpoint (void)
2710 {
2711 if (the_low_target.stopped_by_watchpoint != NULL)
2712 return the_low_target.stopped_by_watchpoint ();
2713 else
2714 return 0;
2715 }
2716
2717 static CORE_ADDR
2718 linux_stopped_data_address (void)
2719 {
2720 if (the_low_target.stopped_data_address != NULL)
2721 return the_low_target.stopped_data_address ();
2722 else
2723 return 0;
2724 }
2725
2726 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2727 #if defined(__mcoldfire__)
2728 /* These should really be defined in the kernel's ptrace.h header. */
2729 #define PT_TEXT_ADDR 49*4
2730 #define PT_DATA_ADDR 50*4
2731 #define PT_TEXT_END_ADDR 51*4
2732 #endif
2733
2734 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2735 to tell gdb about. */
2736
2737 static int
2738 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2739 {
2740 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2741 unsigned long text, text_end, data;
2742 int pid = lwpid_of (get_thread_lwp (current_inferior));
2743
2744 errno = 0;
2745
2746 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2747 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2748 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2749
2750 if (errno == 0)
2751 {
2752 /* Both text and data offsets produced at compile-time (and so
2753 used by gdb) are relative to the beginning of the program,
2754 with the data segment immediately following the text segment.
2755 However, the actual runtime layout in memory may put the data
2756 somewhere else, so when we send gdb a data base-address, we
2757 use the real data base address and subtract the compile-time
2758 data base-address from it (which is just the length of the
2759 text segment). BSS immediately follows data in both
2760 cases. */
2761 *text_p = text;
2762 *data_p = data - (text_end - text);
2763
2764 return 1;
2765 }
2766 #endif
2767 return 0;
2768 }
2769 #endif
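
/* A worked example of the offset arithmetic above, with invented
   numbers: if the compile-time text segment occupies [0x0, 0x4000) with
   data immediately following at 0x4000, and at run time text sits at
   0x8000000 and data at 0x10000000, then the reported data base must be
   0x10000000 - 0x4000 so that relocating the compile-time data addresses
   lands on the real segment.  */
#if 0
#include <assert.h>

static void
uclinux_offsets_sketch (void)
{
  unsigned long text = 0x8000000;
  unsigned long text_end = 0x8004000;
  unsigned long data = 0x10000000;

  unsigned long data_offset = data - (text_end - text);

  assert (data_offset == 0xfffc000);
  /* Compile-time data starts at the text length (0x4000); adding the
     reported base gives the run-time address.  */
  assert (data_offset + 0x4000 == data);
}
#endif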
2770
2771 static int
2772 linux_qxfer_osdata (const char *annex,
2773 unsigned char *readbuf, unsigned const char *writebuf,
2774 CORE_ADDR offset, int len)
2775 {
2776 /* We take a snapshot of the process list when the object starts to
2777 be read. */
2778 static const char *buf;
2779 static long len_avail = -1;
2780 static struct buffer buffer;
2781
2782 DIR *dirp;
2783
2784 if (strcmp (annex, "processes") != 0)
2785 return 0;
2786
2787 if (!readbuf || writebuf)
2788 return 0;
2789
2790 if (offset == 0)
2791 {
2792 if (len_avail != -1 && len_avail != 0)
2793 buffer_free (&buffer);
2794 len_avail = 0;
2795 buf = NULL;
2796 buffer_init (&buffer);
2797 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2798
2799 dirp = opendir ("/proc");
2800 if (dirp)
2801 {
2802 struct dirent *dp;
2803 while ((dp = readdir (dirp)) != NULL)
2804 {
2805 struct stat statbuf;
2806 char procentry[sizeof ("/proc/4294967295")];
2807
2808 if (!isdigit (dp->d_name[0])
2809 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2810 continue;
2811
2812 sprintf (procentry, "/proc/%s", dp->d_name);
2813 if (stat (procentry, &statbuf) == 0
2814 && S_ISDIR (statbuf.st_mode))
2815 {
2816 char pathname[128];
2817 FILE *f;
2818 char cmd[MAXPATHLEN + 1];
2819 struct passwd *entry;
2820
2821 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2822 entry = getpwuid (statbuf.st_uid);
2823
2824 if ((f = fopen (pathname, "r")) != NULL)
2825 {
2826 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2827 if (len > 0)
2828 {
2829 int i;
2830 for (i = 0; i < len; i++)
2831 if (cmd[i] == '\0')
2832 cmd[i] = ' ';
2833 cmd[len] = '\0';
2834
2835 buffer_xml_printf (
2836 &buffer,
2837 "<item>"
2838 "<column name=\"pid\">%s</column>"
2839 "<column name=\"user\">%s</column>"
2840 "<column name=\"command\">%s</column>"
2841 "</item>",
2842 dp->d_name,
2843 entry ? entry->pw_name : "?",
2844 cmd);
2845 }
2846 fclose (f);
2847 }
2848 }
2849 }
2850
2851 closedir (dirp);
2852 }
2853 buffer_grow_str0 (&buffer, "</osdata>\n");
2854 buf = buffer_finish (&buffer);
2855 len_avail = strlen (buf);
2856 }
2857
2858 if (offset >= len_avail)
2859 {
2860 /* Done. Get rid of the data. */
2861 buffer_free (&buffer);
2862 buf = NULL;
2863 len_avail = 0;
2864 return 0;
2865 }
2866
2867 if (len > len_avail - offset)
2868 len = len_avail - offset;
2869 memcpy (readbuf, buf + offset, len);
2870
2871 return len;
2872 }
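
/* For reference, the osdata document assembled above has this shape (the
   pid, user and command values here are of course invented):

     <osdata type="processes">
       <item>
         <column name="pid">1234</column>
         <column name="user">nobody</column>
         <column name="command">/usr/sbin/foo --daemon</column>
       </item>
     </osdata>

   GDB fetches it in OFFSET/LEN slices (the qXfer:osdata:read packet),
   which is why the snapshot is kept across calls until the final short
   read frees it.  */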
2873
2874 /* Convert a native/host siginfo object into/from the siginfo layout
2875 of the inferior's architecture. */
2876
2877 static void
2878 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
2879 {
2880 int done = 0;
2881
2882 if (the_low_target.siginfo_fixup != NULL)
2883 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
2884
2885 /* If there was no callback, or the callback didn't do anything,
2886 then just do a straight memcpy. */
2887 if (!done)
2888 {
2889 if (direction == 1)
2890 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
2891 else
2892 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
2893 }
2894 }
2895
2896 static int
2897 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2898 unsigned const char *writebuf, CORE_ADDR offset, int len)
2899 {
2900 int pid;
2901 struct siginfo siginfo;
2902 char inf_siginfo[sizeof (struct siginfo)];
2903
2904 if (current_inferior == NULL)
2905 return -1;
2906
2907 pid = lwpid_of (get_thread_lwp (current_inferior));
2908
2909 if (debug_threads)
2910 fprintf (stderr, "%s siginfo for lwp %d.\n",
2911 readbuf != NULL ? "Reading" : "Writing",
2912 pid);
2913
2914 if (offset > sizeof (siginfo))
2915 return -1;
2916
2917 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2918 return -1;
2919
2920 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
2921 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
2922 inferior with a 64-bit GDBSERVER should look the same as debugging it
2923 with a 32-bit GDBSERVER, we need to convert it. */
2924 siginfo_fixup (&siginfo, inf_siginfo, 0);
2925
2926 if (offset + len > sizeof (siginfo))
2927 len = sizeof (siginfo) - offset;
2928
2929 if (readbuf != NULL)
2930 memcpy (readbuf, inf_siginfo + offset, len);
2931 else
2932 {
2933 memcpy (inf_siginfo + offset, writebuf, len);
2934
2935 /* Convert back to ptrace layout before flushing it out. */
2936 siginfo_fixup (&siginfo, inf_siginfo, 1);
2937
2938 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2939 return -1;
2940 }
2941
2942 return len;
2943 }
2944
2945 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
2946 it lets us notice when children change state; and it acts as the
2947 handler for the sigsuspend in my_waitpid. */
2948
2949 static void
2950 sigchld_handler (int signo)
2951 {
2952 int old_errno = errno;
2953
2954 if (debug_threads)
2955 /* fprintf is not async-signal-safe, so call write directly. */
2956 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
2957
2958 if (target_is_async_p ())
2959 async_file_mark (); /* trigger a linux_wait */
2960
2961 errno = old_errno;
2962 }
2963
2964 static int
2965 linux_supports_non_stop (void)
2966 {
2967 return 1;
2968 }
2969
2970 static int
2971 linux_async (int enable)
2972 {
2973 int previous = (linux_event_pipe[0] != -1);
2974
2975 if (previous != enable)
2976 {
2977 sigset_t mask;
2978 sigemptyset (&mask);
2979 sigaddset (&mask, SIGCHLD);
2980
2981 sigprocmask (SIG_BLOCK, &mask, NULL);
2982
2983 if (enable)
2984 {
2985 if (pipe (linux_event_pipe) == -1)
2986 fatal ("creating event pipe failed.");
2987
2988 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
2989 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
2990
2991 /* Register the event loop handler. */
2992 add_file_handler (linux_event_pipe[0],
2993 handle_target_event, NULL);
2994
2995 /* Always trigger a linux_wait. */
2996 async_file_mark ();
2997 }
2998 else
2999 {
3000 delete_file_handler (linux_event_pipe[0]);
3001
3002 close (linux_event_pipe[0]);
3003 close (linux_event_pipe[1]);
3004 linux_event_pipe[0] = -1;
3005 linux_event_pipe[1] = -1;
3006 }
3007
3008 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3009 }
3010
3011 return previous;
3012 }
3013
3014 static int
3015 linux_start_non_stop (int nonstop)
3016 {
3017 /* Register or unregister from event-loop accordingly. */
3018 linux_async (nonstop);
3019 return 0;
3020 }
3021
3022 static int
3023 linux_supports_multi_process (void)
3024 {
3025 return 1;
3026 }
3027
3028 static struct target_ops linux_target_ops = {
3029 linux_create_inferior,
3030 linux_attach,
3031 linux_kill,
3032 linux_detach,
3033 linux_join,
3034 linux_thread_alive,
3035 linux_resume,
3036 linux_wait,
3037 linux_fetch_registers,
3038 linux_store_registers,
3039 linux_read_memory,
3040 linux_write_memory,
3041 linux_look_up_symbols,
3042 linux_request_interrupt,
3043 linux_read_auxv,
3044 linux_insert_point,
3045 linux_remove_point,
3046 linux_stopped_by_watchpoint,
3047 linux_stopped_data_address,
3048 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3049 linux_read_offsets,
3050 #else
3051 NULL,
3052 #endif
3053 #ifdef USE_THREAD_DB
3054 thread_db_get_tls_address,
3055 #else
3056 NULL,
3057 #endif
3058 NULL,
3059 hostio_last_error_from_errno,
3060 linux_qxfer_osdata,
3061 linux_xfer_siginfo,
3062 linux_supports_non_stop,
3063 linux_async,
3064 linux_start_non_stop,
3065 linux_supports_multi_process
3066 };
3067
3068 static void
3069 linux_init_signals ()
3070 {
3071 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
3072 to find what the cancel signal actually is. */
3073 signal (__SIGRTMIN+1, SIG_IGN);
3074 }
3075
3076 void
3077 initialize_low (void)
3078 {
3079 struct sigaction sigchld_action;
3080 memset (&sigchld_action, 0, sizeof (sigchld_action));
3081 set_target_ops (&linux_target_ops);
3082 set_breakpoint_data (the_low_target.breakpoint,
3083 the_low_target.breakpoint_len);
3084 linux_init_signals ();
3085 linux_test_for_tracefork ();
3086 #ifdef HAVE_LINUX_REGSETS
3087 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
3088 ;
3089 disabled_regsets = xmalloc (num_regsets);
3090 #endif
3091
3092 sigchld_action.sa_handler = sigchld_handler;
3093 sigemptyset (&sigchld_action.sa_mask);
3094 sigchld_action.sa_flags = SA_RESTART;
3095 sigaction (SIGCHLD, &sigchld_action, NULL);
3096 }