/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
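
/* Illustrative sketch (not part of gdbserver): a status built with
   W_STOPCODE round-trips through the standard <sys/wait.h> macros,
   which is what dequeue_one_deferred_signal below relies on when it
   re-materializes a deferred signal as a wait status:

     int wstat = W_STOPCODE (SIGUSR1);

     assert (WIFSTOPPED (wstat));
     assert (WSTOPSIG (wstat) == SIGUSR1);
*/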

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
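
/* Illustrative sketch (not gdbserver's flow verbatim): the intended
   round-trip through the list above.  A fork child's initial stop can
   be reported before the parent's fork event; it is stashed first and
   claimed later, when handle_extended_wait processes the event (1234
   is a hypothetical pid):

     int status;

     add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
     ...
     if (pull_pid_from_list (&stopped_pids, 1234, &status))
       ... use the stashed wait STATUS instead of calling waitpid ...
*/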

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
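
/* Illustrative sketch (not part of gdbserver): typical use of the
   helper above, e.g. to pick between 32-bit and 64-bit register
   layouts for a process:

     unsigned int machine;
     int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

     if (is_elf64 > 0)
       ... 64-bit layout; MACHINE holds the ELF header's e_machine ...
     else if (is_elf64 == 0)
       ... not a 64-bit ELF ...
     else
       ... /proc/PID/exe was unreadable, or not an ELF at all ...
*/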

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
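
/* Illustrative sketch (not part of this file; gdbserver enables these
   flags via linux_enable_event_reporting in nat/linux-ptrace.c): the
   extended events decoded above are only delivered once the matching
   ptrace options have been set on the tracee, roughly:

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (PTRACE_O_TRACEFORK
				 | PTRACE_O_TRACEVFORK
				 | PTRACE_O_TRACECLONE
				 | PTRACE_O_TRACEVFORKDONE));

   After that, the new-child pid fetched with PTRACE_GETEVENTMSG above
   is the value the kernel stored when it reported the event.  */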

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the arch_setup target_ops method.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
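
/* Illustrative sketch (not part of gdbserver): the minimal
   attach-and-wait handshake that linux_attach_lwp builds on, with
   LWPID standing in for a real thread id:

     int status;

     if (ptrace (PTRACE_ATTACH, lwpid,
		 (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0) != 0)
       ... attach failed; errno says why ...

     if (my_waitpid (lwpid, &status, __WALL) == lwpid
	 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
       ... the LWP is now in a ptrace stop ...

   linux_attach_lwp deliberately does not do the wait itself (note
   stop_expected above); the initial SIGSTOPs are collected later, so
   that many LWPs can be attached in one pass.  */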

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */

static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */

static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
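
/* Illustrative sketch (not part of gdbserver): using the iterator
   above with a hypothetical callback.  Iteration stops early if the
   callback returns nonzero, and the matching LWP is returned:

     static int
     count_lwp_cb (struct lwp_info *lwp, void *data)
     {
       (*(int *) data)++;
       return 0;
     }

     int count = 0;
     iterate_over_lwps (pid_to_ptid (pid), count_lwp_cb, &count);
*/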

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
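
/* Illustrative sketch (not part of this file; the real check is
   linux_proc_pid_is_zombie from nat/linux-procfs.c): detecting the
   zombie leader above boils down to reading the "State:" line of
   /proc/PID/status and testing for 'Z', roughly:

     char path[64], line[256];
     FILE *f;
     int zombie = 0;

     snprintf (path, sizeof (path), "/proc/%d/status", (int) leader_pid);
     f = fopen (path, "r");
     while (f != NULL && fgets (line, sizeof (line), f) != NULL)
       if (strncmp (line, "State:", 6) == 0)
	 zombie = (strchr (line + 6, 'Z') != NULL);
     if (f != NULL)
       fclose (f);
*/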
1663
1664 /* Callback for `find_inferior'. Returns the first LWP that is not
1665 stopped. ARG is a PTID filter. */
1666
1667 static int
1668 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1669 {
1670 struct thread_info *thr = (struct thread_info *) entry;
1671 struct lwp_info *lwp;
1672 ptid_t filter = *(ptid_t *) arg;
1673
1674 if (!ptid_match (ptid_of (thr), filter))
1675 return 0;
1676
1677 lwp = get_thread_lwp (thr);
1678 if (!lwp->stopped)
1679 return 1;
1680
1681 return 0;
1682 }
1683
1684 /* This function should only be called if the LWP got a SIGTRAP.
1685
1686 Handle any tracepoint steps or hits. Return true if a tracepoint
1687 event was handled, 0 otherwise. */
1688
1689 static int
1690 handle_tracepoints (struct lwp_info *lwp)
1691 {
1692 struct thread_info *tinfo = get_lwp_thread (lwp);
1693 int tpoint_related_event = 0;
1694
1695 gdb_assert (lwp->suspended == 0);
1696
1697 /* If this tracepoint hit causes a tracing stop, we'll immediately
1698 uninsert tracepoints. To do this, we temporarily pause all
1699 threads, unpatch away, and then unpause threads. We need to make
1700 sure the unpausing doesn't resume LWP too. */
1701 lwp->suspended++;
1702
1703 /* And we need to be sure that any all-threads-stopping doesn't try
1704 to move threads out of the jump pads, as it could deadlock the
1705 inferior (LWP could be in the jump pad, maybe even holding the
1706 lock.) */
1707
1708 /* Do any necessary step collect actions. */
1709 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1710
1711 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1712
1713 /* See if we just hit a tracepoint and do its main collect
1714 actions. */
1715 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1716
1717 lwp->suspended--;
1718
1719 gdb_assert (lwp->suspended == 0);
1720 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1721
1722 if (tpoint_related_event)
1723 {
1724 if (debug_threads)
1725 debug_printf ("got a tracepoint event\n");
1726 return 1;
1727 }
1728
1729 return 0;
1730 }
1731
1732 /* Convenience wrapper. Returns true if LWP is presently collecting a
1733 fast tracepoint. */
1734
1735 static int
1736 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1737 struct fast_tpoint_collect_status *status)
1738 {
1739 CORE_ADDR thread_area;
1740 struct thread_info *thread = get_lwp_thread (lwp);
1741
1742 if (the_low_target.get_thread_area == NULL)
1743 return 0;
1744
1745 /* Get the thread area address. This is used to recognize which
1746 thread is which when tracing with the in-process agent library.
1747 We don't read anything from the address, and treat it as opaque;
1748 it's the address itself that we assume is unique per-thread. */
1749 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1750 return 0;
1751
1752 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1753 }
1754
1755 /* The reason we resume in the caller, is because we want to be able
1756 to pass lwp->status_pending as WSTAT, and we need to clear
1757 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1758 refuses to resume. */
1759
1760 static int
1761 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1762 {
1763 struct thread_info *saved_thread;
1764
1765 saved_thread = current_thread;
1766 current_thread = get_lwp_thread (lwp);
1767
1768 if ((wstat == NULL
1769 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1770 && supports_fast_tracepoints ()
1771 && agent_loaded_p ())
1772 {
1773 struct fast_tpoint_collect_status status;
1774 int r;
1775
1776 if (debug_threads)
1777 debug_printf ("Checking whether LWP %ld needs to move out of the "
1778 "jump pad.\n",
1779 lwpid_of (current_thread));
1780
1781 r = linux_fast_tracepoint_collecting (lwp, &status);
1782
1783 if (wstat == NULL
1784 || (WSTOPSIG (*wstat) != SIGILL
1785 && WSTOPSIG (*wstat) != SIGFPE
1786 && WSTOPSIG (*wstat) != SIGSEGV
1787 && WSTOPSIG (*wstat) != SIGBUS))
1788 {
1789 lwp->collecting_fast_tracepoint = r;
1790
1791 if (r != 0)
1792 {
1793 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1794 {
1795 /* Haven't executed the original instruction yet.
1796 Set breakpoint there, and wait till it's hit,
1797 then single-step until exiting the jump pad. */
1798 lwp->exit_jump_pad_bkpt
1799 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1800 }
1801
1802 if (debug_threads)
1803 debug_printf ("Checking whether LWP %ld needs to move out of "
1804 "the jump pad...it does\n",
1805 lwpid_of (current_thread));
1806 current_thread = saved_thread;
1807
1808 return 1;
1809 }
1810 }
1811 else
1812 {
1813 /* If we get a synchronous signal while collecting, *and*
1814 while executing the (relocated) original instruction,
1815 reset the PC to point at the tpoint address, before
1816 reporting to GDB. Otherwise, it's an IPA lib bug: just
1817 report the signal to GDB, and pray for the best. */
1818
1819 lwp->collecting_fast_tracepoint = 0;
1820
1821 if (r != 0
1822 && (status.adjusted_insn_addr <= lwp->stop_pc
1823 && lwp->stop_pc < status.adjusted_insn_addr_end))
1824 {
1825 siginfo_t info;
1826 struct regcache *regcache;
1827
1828 /* The si_addr on a few signals references the address
1829 of the faulting instruction. Adjust that as
1830 well. */
1831 if ((WSTOPSIG (*wstat) == SIGILL
1832 || WSTOPSIG (*wstat) == SIGFPE
1833 || WSTOPSIG (*wstat) == SIGBUS
1834 || WSTOPSIG (*wstat) == SIGSEGV)
1835 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1836 (PTRACE_TYPE_ARG3) 0, &info) == 0
1837 /* Final check just to make sure we don't clobber
1838 the siginfo of non-kernel-sent signals. */
1839 && (uintptr_t) info.si_addr == lwp->stop_pc)
1840 {
1841 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1842 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1843 (PTRACE_TYPE_ARG3) 0, &info);
1844 }
1845
1846 regcache = get_thread_regcache (current_thread, 1);
1847 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1848 lwp->stop_pc = status.tpoint_addr;
1849
1850 /* Cancel any fast tracepoint lock this thread was
1851 holding. */
1852 force_unlock_trace_buffer ();
1853 }
1854
1855 if (lwp->exit_jump_pad_bkpt != NULL)
1856 {
1857 if (debug_threads)
1858 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1859 "stopping all threads momentarily.\n");
1860
1861 stop_all_lwps (1, lwp);
1862
1863 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1864 lwp->exit_jump_pad_bkpt = NULL;
1865
1866 unstop_all_lwps (1, lwp);
1867
1868 gdb_assert (lwp->suspended >= 0);
1869 }
1870 }
1871 }
1872
1873 if (debug_threads)
1874 debug_printf ("Checking whether LWP %ld needs to move out of the "
1875 "jump pad...no\n",
1876 lwpid_of (current_thread));
1877
1878 current_thread = saved_thread;
1879 return 0;
1880 }
1881
1882 /* Enqueue one signal in the "signals to report later when out of the
1883 jump pad" list. */
1884
1885 static void
1886 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1887 {
1888 struct pending_signals *p_sig;
1889 struct thread_info *thread = get_lwp_thread (lwp);
1890
1891 if (debug_threads)
1892 debug_printf ("Deferring signal %d for LWP %ld.\n",
1893 WSTOPSIG (*wstat), lwpid_of (thread));
1894
1895 if (debug_threads)
1896 {
1897 struct pending_signals *sig;
1898
1899 for (sig = lwp->pending_signals_to_report;
1900 sig != NULL;
1901 sig = sig->prev)
1902 debug_printf (" Already queued %d\n",
1903 sig->signal);
1904
1905 debug_printf (" (no more currently queued signals)\n");
1906 }
1907
1908 /* Don't enqueue non-RT signals if they are already in the deferred
1909 queue; the kernel does not queue multiple instances of a standard
1910 signal. (SIGSTOP is the easiest signal to see ending up here twice.) */
1911 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1912 {
1913 struct pending_signals *sig;
1914
1915 for (sig = lwp->pending_signals_to_report;
1916 sig != NULL;
1917 sig = sig->prev)
1918 {
1919 if (sig->signal == WSTOPSIG (*wstat))
1920 {
1921 if (debug_threads)
1922 debug_printf ("Not requeuing already queued non-RT signal %d"
1923 " for LWP %ld\n",
1924 sig->signal,
1925 lwpid_of (thread));
1926 return;
1927 }
1928 }
1929 }
1930
1931 p_sig = xmalloc (sizeof (*p_sig));
1932 p_sig->prev = lwp->pending_signals_to_report;
1933 p_sig->signal = WSTOPSIG (*wstat);
1934 memset (&p_sig->info, 0, sizeof (siginfo_t));
1935 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1936 &p_sig->info);
1937
1938 lwp->pending_signals_to_report = p_sig;
1939 }
1940
1941 /* Dequeue one signal from the "signals to report later when out of
1942 the jump pad" list. */
1943
1944 static int
1945 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1946 {
1947 struct thread_info *thread = get_lwp_thread (lwp);
1948
1949 if (lwp->pending_signals_to_report != NULL)
1950 {
1951 struct pending_signals **p_sig;
1952
1953 p_sig = &lwp->pending_signals_to_report;
1954 while ((*p_sig)->prev != NULL)
1955 p_sig = &(*p_sig)->prev;
1956
1957 *wstat = W_STOPCODE ((*p_sig)->signal);
1958 if ((*p_sig)->info.si_signo != 0)
1959 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1960 &(*p_sig)->info);
1961 free (*p_sig);
1962 *p_sig = NULL;
1963
1964 if (debug_threads)
1965 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1966 WSTOPSIG (*wstat), lwpid_of (thread));
1967
1968 if (debug_threads)
1969 {
1970 struct pending_signals *sig;
1971
1972 for (sig = lwp->pending_signals_to_report;
1973 sig != NULL;
1974 sig = sig->prev)
1975 debug_printf (" Still queued %d\n",
1976 sig->signal);
1977
1978 debug_printf (" (no more queued signals)\n");
1979 }
1980
1981 return 1;
1982 }
1983
1984 return 0;
1985 }
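
/* For illustration, the dequeue above re-materializes a wait status
   with W_STOPCODE, the inverse of WSTOPSIG for stopped statuses:
   W_STOPCODE (sig) builds the value waitpid would store for a child
   stopped by SIG, i.e. (sig << 8) | 0x7f.  A minimal sketch of the
   round trip; example_wstopcode_round_trip is a hypothetical name,
   not gdbserver code.  */
#if 0
static void
example_wstopcode_round_trip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* The low byte 0x7f marks a stopped status; the next byte holds
     the stopping signal.  */
  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif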
1986
1987 /* Fetch the possibly triggered data watchpoint info and store it in
1988 CHILD.
1989
1990 On some archs, like x86, that use debug registers to set
1991 watchpoints, the way to know which watched address trapped is to
1992 check the registers used to select the addresses to watch. The
1993 problem is that between setting the watchpoint and reading back
1994 which data address trapped, the user may change the set of
1995 watchpoints, and, as a consequence, GDB changes the debug
1996 registers in the inferior. To avoid reading back a stale
1997 stopped-data-address when that happens, we cache in CHILD the fact
1998 that a watchpoint trapped, and the corresponding data address, as
1999 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2000 registers meanwhile, we have the cached data we can rely on. */
2001
2002 static int
2003 check_stopped_by_watchpoint (struct lwp_info *child)
2004 {
2005 if (the_low_target.stopped_by_watchpoint != NULL)
2006 {
2007 struct thread_info *saved_thread;
2008
2009 saved_thread = current_thread;
2010 current_thread = get_lwp_thread (child);
2011
2012 if (the_low_target.stopped_by_watchpoint ())
2013 {
2014 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2015
2016 if (the_low_target.stopped_data_address != NULL)
2017 child->stopped_data_address
2018 = the_low_target.stopped_data_address ();
2019 else
2020 child->stopped_data_address = 0;
2021 }
2022
2023 current_thread = saved_thread;
2024 }
2025
2026 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2027 }
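
/* For illustration, an x86-flavored sketch of the kind of low-target
   hook whose result gets cached above: read debug-status register
   DR6 with PTRACE_PEEKUSER to learn which of DR0..DR3 fired, then
   fetch the watched address from that register.  Assumes
   <sys/user.h> and <stddef.h>; example_x86_stopped_data_address is
   a hypothetical name, not the real hook.  */
#if 0
static CORE_ADDR
example_x86_stopped_data_address (int tid)
{
  long dr6 = ptrace (PTRACE_PEEKUSER, tid,
		     offsetof (struct user, u_debugreg[6]), 0);
  int i;

  /* DR6 bits 0..3 record which debug register's condition hit.  */
  for (i = 0; i < 4; i++)
    if (dr6 & (1 << i))
      return (CORE_ADDR) ptrace (PTRACE_PEEKUSER, tid,
				 offsetof (struct user, u_debugreg[i]),
				 0);
  return 0;
}
#endif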
2028
2029 /* Return the ptrace options that we want to try to enable. */
2030
2031 static int
2032 linux_low_ptrace_options (int attached)
2033 {
2034 int options = 0;
2035
2036 if (!attached)
2037 options |= PTRACE_O_EXITKILL;
2038
2039 if (report_fork_events)
2040 options |= PTRACE_O_TRACEFORK;
2041
2042 if (report_vfork_events)
2043 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2044
2045 return options;
2046 }
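
/* For illustration: the options computed above reach the kernel via
   a PTRACE_SETOPTIONS call on a stopped tracee, which is what
   linux_enable_event_reporting wraps.  A minimal sketch of that
   call; example_apply_ptrace_options is a hypothetical name.  */
#if 0
static int
example_apply_ptrace_options (int tid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  return ptrace (PTRACE_SETOPTIONS, tid, (PTRACE_TYPE_ARG3) 0,
		 (PTRACE_TYPE_ARG4) (uintptr_t) options);
}
#endif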
2047
2048 /* Do low-level handling of the event, and check if we should go on
2049 and pass it to caller code. Return the affected LWP if we should
2050 pass the event on to the caller, or NULL otherwise. */
2051
2052 static struct lwp_info *
2053 linux_low_filter_event (int lwpid, int wstat)
2054 {
2055 struct lwp_info *child;
2056 struct thread_info *thread;
2057 int have_stop_pc = 0;
2058
2059 child = find_lwp_pid (pid_to_ptid (lwpid));
2060
2061 /* If we didn't find a process, one of two things presumably happened:
2062 - A process we started and then detached from has exited. Ignore it.
2063 - A process we are controlling has forked and the new child's stop
2064 was reported to us by the kernel. Save its PID. */
2065 if (child == NULL && WIFSTOPPED (wstat))
2066 {
2067 add_to_pid_list (&stopped_pids, lwpid, wstat);
2068 return NULL;
2069 }
2070 else if (child == NULL)
2071 return NULL;
2072
2073 thread = get_lwp_thread (child);
2074
2075 child->stopped = 1;
2076
2077 child->last_status = wstat;
2078
2079 /* Check if the thread has exited. */
2080 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2081 {
2082 if (debug_threads)
2083 debug_printf ("LLFE: %d exited.\n", lwpid);
2084 if (num_lwps (pid_of (thread)) > 1)
2085 {
2086
2087 /* If there is at least one more LWP, then the exit signal was
2088 not the end of the debugged application and should be
2089 ignored. */
2090 delete_lwp (child);
2091 return NULL;
2092 }
2093 else
2094 {
2095 /* This was the last lwp in the process. Events are
2096 serialized to the GDB core, so we can't report this one
2097 right now; but the GDB core and the other target layers
2098 will want to be notified about the exit code/signal, so
2099 leave the status pending for the next time we're able to
2100 report it. */
2101 mark_lwp_dead (child, wstat);
2102 return child;
2103 }
2104 }
2105
2106 gdb_assert (WIFSTOPPED (wstat));
2107
2108 if (WIFSTOPPED (wstat))
2109 {
2110 struct process_info *proc;
2111
2112 /* Architecture-specific setup after inferior is running. */
2113 proc = find_process_pid (pid_of (thread));
2114 if (proc->tdesc == NULL)
2115 {
2116 if (proc->attached)
2117 {
2118 struct thread_info *saved_thread;
2119
2120 /* This needs to happen after we have attached to the
2121 inferior and it is stopped for the first time, but
2122 before we access any inferior registers. */
2123 saved_thread = current_thread;
2124 current_thread = thread;
2125
2126 the_low_target.arch_setup ();
2127
2128 current_thread = saved_thread;
2129 }
2130 else
2131 {
2132 /* The process is started, but GDBserver will do
2133 architecture-specific setup after the program stops at
2134 the first instruction. */
2135 child->status_pending_p = 1;
2136 child->status_pending = wstat;
2137 return child;
2138 }
2139 }
2140 }
2141
2142 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2143 {
2144 struct process_info *proc = find_process_pid (pid_of (thread));
2145 int options = linux_low_ptrace_options (proc->attached);
2146
2147 linux_enable_event_reporting (lwpid, options);
2148 child->must_set_ptrace_flags = 0;
2149 }
2150
2151 /* Be careful to not overwrite stop_pc until
2152 check_stopped_by_breakpoint is called. */
2153 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2154 && linux_is_extended_waitstatus (wstat))
2155 {
2156 child->stop_pc = get_pc (child);
2157 if (handle_extended_wait (child, wstat))
2158 {
2159 /* The event has been handled, so just return without
2160 reporting it. */
2161 return NULL;
2162 }
2163 }
2164
2165 /* Check first whether this was a SW/HW breakpoint before checking
2166 watchpoints, because at least s390 can't tell the data address of
2167 hardware watchpoint hits, and returns stopped-by-watchpoint as
2168 long as there's a watchpoint set. */
2169 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2170 {
2171 if (check_stopped_by_breakpoint (child))
2172 have_stop_pc = 1;
2173 }
2174
2175 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2176 or hardware watchpoint. Check which is which if we got
2177 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2178 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2179 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2180 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2181 check_stopped_by_watchpoint (child);
2182
2183 if (!have_stop_pc)
2184 child->stop_pc = get_pc (child);
2185
2186 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2187 && child->stop_expected)
2188 {
2189 if (debug_threads)
2190 debug_printf ("Expected stop.\n");
2191 child->stop_expected = 0;
2192
2193 if (thread->last_resume_kind == resume_stop)
2194 {
2195 /* We want to report the stop to the core. Treat the
2196 SIGSTOP as a normal event. */
2197 if (debug_threads)
2198 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2199 target_pid_to_str (ptid_of (thread)));
2200 }
2201 else if (stopping_threads != NOT_STOPPING_THREADS)
2202 {
2203 /* Stopping threads. We don't want this SIGSTOP to end up
2204 pending. */
2205 if (debug_threads)
2206 debug_printf ("LLW: SIGSTOP caught for %s "
2207 "while stopping threads.\n",
2208 target_pid_to_str (ptid_of (thread)));
2209 return NULL;
2210 }
2211 else
2212 {
2213 /* This is a delayed SIGSTOP. Filter out the event. */
2214 if (debug_threads)
2215 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2216 child->stepping ? "step" : "continue",
2217 target_pid_to_str (ptid_of (thread)));
2218
2219 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2220 return NULL;
2221 }
2222 }
2223
2224 child->status_pending_p = 1;
2225 child->status_pending = wstat;
2226 return child;
2227 }
2228
2229 /* Resume LWPs that are currently stopped without any pending status
2230 to report, but are resumed from the core's perspective. */
2231
2232 static void
2233 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2234 {
2235 struct thread_info *thread = (struct thread_info *) entry;
2236 struct lwp_info *lp = get_thread_lwp (thread);
2237
2238 if (lp->stopped
2239 && !lp->status_pending_p
2240 && thread->last_resume_kind != resume_stop
2241 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2242 {
2243 int step = thread->last_resume_kind == resume_step;
2244
2245 if (debug_threads)
2246 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2247 target_pid_to_str (ptid_of (thread)),
2248 paddress (lp->stop_pc),
2249 step);
2250
2251 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2252 }
2253 }
2254
2255 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2256 match FILTER_PTID (leaving others pending). The PTIDs can be:
2257 minus_one_ptid, to specify any child; a pid PTID, specifying all
2258 lwps of a thread group; or a PTID representing a single lwp. Store
2259 the stop status through the status pointer WSTATP. OPTIONS is
2260 passed to the waitpid call. Return 0 if no event was found and
2261 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2262 were found. Return the PID of the stopped child otherwise. */
2263
2264 static int
2265 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2266 int *wstatp, int options)
2267 {
2268 struct thread_info *event_thread;
2269 struct lwp_info *event_child, *requested_child;
2270 sigset_t block_mask, prev_mask;
2271
2272 retry:
2273 /* N.B. event_thread points to the thread_info struct that contains
2274 event_child. Keep them in sync. */
2275 event_thread = NULL;
2276 event_child = NULL;
2277 requested_child = NULL;
2278
2279 /* Check for a lwp with a pending status. */
2280
2281 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2282 {
2283 event_thread = (struct thread_info *)
2284 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2285 if (event_thread != NULL)
2286 event_child = get_thread_lwp (event_thread);
2287 if (debug_threads && event_thread)
2288 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2289 }
2290 else if (!ptid_equal (filter_ptid, null_ptid))
2291 {
2292 requested_child = find_lwp_pid (filter_ptid);
2293
2294 if (stopping_threads == NOT_STOPPING_THREADS
2295 && requested_child->status_pending_p
2296 && requested_child->collecting_fast_tracepoint)
2297 {
2298 enqueue_one_deferred_signal (requested_child,
2299 &requested_child->status_pending);
2300 requested_child->status_pending_p = 0;
2301 requested_child->status_pending = 0;
2302 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2303 }
2304
2305 if (requested_child->suspended
2306 && requested_child->status_pending_p)
2307 {
2308 internal_error (__FILE__, __LINE__,
2309 "requesting an event out of a"
2310 " suspended child?");
2311 }
2312
2313 if (requested_child->status_pending_p)
2314 {
2315 event_child = requested_child;
2316 event_thread = get_lwp_thread (event_child);
2317 }
2318 }
2319
2320 if (event_child != NULL)
2321 {
2322 if (debug_threads)
2323 debug_printf ("Got an event from pending child %ld (%04x)\n",
2324 lwpid_of (event_thread), event_child->status_pending);
2325 *wstatp = event_child->status_pending;
2326 event_child->status_pending_p = 0;
2327 event_child->status_pending = 0;
2328 current_thread = event_thread;
2329 return lwpid_of (event_thread);
2330 }
2331
2332 /* But if we don't find a pending event, we'll have to wait.
2333
2334 We only enter this loop if no process has a pending wait status.
2335 Thus any action taken in response to a wait status inside this
2336 loop is responding as soon as we detect the status, not after any
2337 pending events. */
2338
2339 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2340 all signals while here. */
2341 sigfillset (&block_mask);
2342 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2343
2344 /* Always pull all events out of the kernel. We'll randomly select
2345 an event LWP out of all that have events, to prevent
2346 starvation. */
2347 while (event_child == NULL)
2348 {
2349 pid_t ret = 0;
2350
2351 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2352 quirks:
2353
2354 - If the thread group leader exits while other threads in the
2355 thread group still exist, waitpid(TGID, ...) hangs. That
2356 waitpid won't return an exit status until the other threads
2357 in the group are reaped.
2358
2359 - When a non-leader thread execs, that thread just vanishes
2360 without reporting an exit (so we'd hang if we waited for it
2361 explicitly in that case). The exec event is reported to
2362 the TGID pid (although we don't currently enable exec
2363 events). */
2364 errno = 0;
2365 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2366
2367 if (debug_threads)
2368 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2369 ret, errno ? strerror (errno) : "ERRNO-OK");
2370
2371 if (ret > 0)
2372 {
2373 if (debug_threads)
2374 {
2375 debug_printf ("LLW: waitpid %ld received %s\n",
2376 (long) ret, status_to_str (*wstatp));
2377 }
2378
2379 /* Filter all events. IOW, leave all events pending. We'll
2380 randomly select an event LWP out of all that have events
2381 below. */
2382 linux_low_filter_event (ret, *wstatp);
2383 /* Retry until nothing comes out of waitpid. A single
2384 SIGCHLD can indicate more than one child stopped. */
2385 continue;
2386 }
2387
2388 /* Now that we've pulled all events out of the kernel, resume
2389 LWPs that don't have an interesting event to report. */
2390 if (stopping_threads == NOT_STOPPING_THREADS)
2391 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2392
2393 /* ... and find an LWP with a status to report to the core, if
2394 any. */
2395 event_thread = (struct thread_info *)
2396 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2397 if (event_thread != NULL)
2398 {
2399 event_child = get_thread_lwp (event_thread);
2400 *wstatp = event_child->status_pending;
2401 event_child->status_pending_p = 0;
2402 event_child->status_pending = 0;
2403 break;
2404 }
2405
2406 /* Check for zombie thread group leaders. Those can't be reaped
2407 until all other threads in the thread group are. */
2408 check_zombie_leaders ();
2409
2410 /* If there are no resumed children left in the set of LWPs we
2411 want to wait for, bail. We can't just block in
2412 waitpid/sigsuspend, because lwps might have been left stopped
2413 in trace-stop state, and we'd be stuck forever waiting for
2414 their status to change (which would only happen if we resumed
2415 them). Even if WNOHANG is set, this return code is preferred
2416 over 0 (below), as it is more detailed. */
2417 if ((find_inferior (&all_threads,
2418 not_stopped_callback,
2419 &wait_ptid) == NULL))
2420 {
2421 if (debug_threads)
2422 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2423 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2424 return -1;
2425 }
2426
2427 /* No interesting event to report to the caller. */
2428 if ((options & WNOHANG))
2429 {
2430 if (debug_threads)
2431 debug_printf ("WNOHANG set, no event found\n");
2432
2433 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2434 return 0;
2435 }
2436
2437 /* Block until we get an event reported with SIGCHLD. */
2438 if (debug_threads)
2439 debug_printf ("sigsuspend'ing\n");
2440
2441 sigsuspend (&prev_mask);
2442 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2443 goto retry;
2444 }
2445
2446 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2447
2448 current_thread = event_thread;
2449
2450 /* Check for thread exit. */
2451 if (! WIFSTOPPED (*wstatp))
2452 {
2453 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2454
2455 if (debug_threads)
2456 debug_printf ("LWP %d is the last lwp of process. "
2457 "Process %ld exiting.\n",
2458 pid_of (event_thread), lwpid_of (event_thread));
2459 return lwpid_of (event_thread);
2460 }
2461
2462 return lwpid_of (event_thread);
2463 }
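
/* For illustration: the heart of the loop above is the classic
   race-free SIGCHLD wait -- block the signal, drain every queued
   event with waitpid (-1, ..., WNOHANG), and only then atomically
   unblock-and-sleep with sigsuspend, so a SIGCHLD arriving between
   the last waitpid and the sleep cannot be lost.  A stripped-down
   sketch; example_drain_and_wait is a hypothetical name and
   handle_one_event stands in for linux_low_filter_event.  */
#if 0
static void
example_drain_and_wait (void (*handle_one_event) (int, int))
{
  sigset_t block_mask, prev_mask;

  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      int wstat;
      int pid;

      /* Pull every event the kernel has queued; one SIGCHLD can
	 stand for many stopped children.  */
      while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
	handle_one_event (pid, wstat);

      /* Nothing pending: sleep with the original mask (SIGCHLD
	 unblocked) until a signal arrives, then drain again.  */
      sigsuspend (&prev_mask);
    }
}
#endif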
2464
2465 /* Wait for an event from child(ren) PTID. PTIDs can be:
2466 minus_one_ptid, to specify any child; a pid PTID, specifying all
2467 lwps of a thread group; or a PTID representing a single lwp. Store
2468 the stop status through the status pointer WSTATP. OPTIONS is
2469 passed to the waitpid call. Return 0 if no event was found and
2470 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2471 were found. Return the PID of the stopped child otherwise. */
2472
2473 static int
2474 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2475 {
2476 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2477 }
2478
2479 /* Count the LWPs that have had events. */
2480
2481 static int
2482 count_events_callback (struct inferior_list_entry *entry, void *data)
2483 {
2484 struct thread_info *thread = (struct thread_info *) entry;
2485 struct lwp_info *lp = get_thread_lwp (thread);
2486 int *count = data;
2487
2488 gdb_assert (count != NULL);
2489
2490 /* Count only resumed LWPs that have an event pending. */
2491 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2492 && lp->status_pending_p)
2493 (*count)++;
2494
2495 return 0;
2496 }
2497
2498 /* Select the LWP (if any) that is currently being single-stepped. */
2499
2500 static int
2501 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2502 {
2503 struct thread_info *thread = (struct thread_info *) entry;
2504 struct lwp_info *lp = get_thread_lwp (thread);
2505
2506 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2507 && thread->last_resume_kind == resume_step
2508 && lp->status_pending_p)
2509 return 1;
2510 else
2511 return 0;
2512 }
2513
2514 /* Select the Nth LWP that has had an event. */
2515
2516 static int
2517 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2518 {
2519 struct thread_info *thread = (struct thread_info *) entry;
2520 struct lwp_info *lp = get_thread_lwp (thread);
2521 int *selector = data;
2522
2523 gdb_assert (selector != NULL);
2524
2525 /* Select only resumed LWPs that have an event pending. */
2526 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2527 && lp->status_pending_p)
2528 if ((*selector)-- == 0)
2529 return 1;
2530
2531 return 0;
2532 }
2533
2534 /* Select one LWP out of those that have events pending. */
2535
2536 static void
2537 select_event_lwp (struct lwp_info **orig_lp)
2538 {
2539 int num_events = 0;
2540 int random_selector;
2541 struct thread_info *event_thread = NULL;
2542
2543 /* In all-stop, give preference to the LWP that is being
2544 single-stepped. There will be at most one, and it's the LWP that
2545 the core is most interested in. If we didn't do this, then we'd
2546 have to handle pending step SIGTRAPs somehow in case the core
2547 later continues the previously-stepped thread, otherwise we'd
2548 report the pending SIGTRAP, and the core, not having stepped the
2549 thread, wouldn't understand what the trap was for, and therefore
2550 would report it to the user as a random signal. */
2551 if (!non_stop)
2552 {
2553 event_thread
2554 = (struct thread_info *) find_inferior (&all_threads,
2555 select_singlestep_lwp_callback,
2556 NULL);
2557 if (event_thread != NULL)
2558 {
2559 if (debug_threads)
2560 debug_printf ("SEL: Select single-step %s\n",
2561 target_pid_to_str (ptid_of (event_thread)));
2562 }
2563 }
2564 if (event_thread == NULL)
2565 {
2566 /* No single-stepping LWP. Select one at random, out of those
2567 which have had events. */
2568
2569 /* First see how many events we have. */
2570 find_inferior (&all_threads, count_events_callback, &num_events);
2571 gdb_assert (num_events > 0);
2572
2573 /* Now randomly pick a LWP out of those that have had
2574 events. */
2575 random_selector = (int)
2576 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2577
2578 if (debug_threads && num_events > 1)
2579 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2580 num_events, random_selector);
2581
2582 event_thread
2583 = (struct thread_info *) find_inferior (&all_threads,
2584 select_event_lwp_callback,
2585 &random_selector);
2586 }
2587
2588 if (event_thread != NULL)
2589 {
2590 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2591
2592 /* Switch the event LWP. */
2593 *orig_lp = event_lp;
2594 }
2595 }
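
/* For illustration: the selector above maps rand () into the range
   [0, num_events).  Dividing by RAND_MAX + 1.0 rather than RAND_MAX
   keeps the result strictly below num_events and gives each index a
   (nearly) equal chance.  A minimal sketch; example_pick_uniform is
   a hypothetical name.  */
#if 0
static int
example_pick_uniform (int num_events)
{
  /* Result is in [0, num_events).  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif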
2596
2597 /* Decrement the suspend count of an LWP. */
2598
2599 static int
2600 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2601 {
2602 struct thread_info *thread = (struct thread_info *) entry;
2603 struct lwp_info *lwp = get_thread_lwp (thread);
2604
2605 /* Ignore EXCEPT. */
2606 if (lwp == except)
2607 return 0;
2608
2609 lwp->suspended--;
2610
2611 gdb_assert (lwp->suspended >= 0);
2612 return 0;
2613 }
2614
2615 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2616 non-NULL. */
2617
2618 static void
2619 unsuspend_all_lwps (struct lwp_info *except)
2620 {
2621 find_inferior (&all_threads, unsuspend_one_lwp, except);
2622 }
2623
2624 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2625 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2626 void *data);
2627 static int lwp_running (struct inferior_list_entry *entry, void *data);
2628 static ptid_t linux_wait_1 (ptid_t ptid,
2629 struct target_waitstatus *ourstatus,
2630 int target_options);
2631
2632 /* Stabilize threads (move out of jump pads).
2633
2634 If a thread is midway collecting a fast tracepoint, we need to
2635 finish the collection and move it out of the jump pad before
2636 reporting the signal.
2637
2638 This avoids recursion while collecting (when a signal arrives
2639 midway, and the signal handler itself collects), which would trash
2640 the trace buffer. In case the user set a breakpoint in a signal
2641 handler, this avoids the backtrace showing the jump pad, etc..
2642 Most importantly, there are certain things we can't do safely if
2643 threads are stopped in a jump pad (or in its callees). For
2644 example:
2645
2646 - starting a new trace run. A thread still collecting the
2647 previous run could trash the trace buffer when resumed. The trace
2648 buffer control structures would have been reset but the thread had
2649 no way to tell. The thread could even be midway memcpy'ing to the
2650 buffer, which would mean that when resumed, it would clobber the
2651 trace buffer that had been set for a new run.
2652
2653 - we can't rewrite/reuse the jump pads for new tracepoints
2654 safely. Say you do tstart while a thread is stopped midway through
2655 a collection. When the thread is later resumed, it finishes the
2656 collection, and returns to the jump pad, to execute the original
2657 instruction that was under the tracepoint jump at the time the
2658 older run had been started. If the jump pad had been rewritten
2659 since for something else in the new run, the thread would now
2660 execute the wrong / random instructions. */
2661
2662 static void
2663 linux_stabilize_threads (void)
2664 {
2665 struct thread_info *saved_thread;
2666 struct thread_info *thread_stuck;
2667
2668 thread_stuck
2669 = (struct thread_info *) find_inferior (&all_threads,
2670 stuck_in_jump_pad_callback,
2671 NULL);
2672 if (thread_stuck != NULL)
2673 {
2674 if (debug_threads)
2675 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2676 lwpid_of (thread_stuck));
2677 return;
2678 }
2679
2680 saved_thread = current_thread;
2681
2682 stabilizing_threads = 1;
2683
2684 /* Kick 'em all. */
2685 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2686
2687 /* Loop until all are stopped out of the jump pads. */
2688 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2689 {
2690 struct target_waitstatus ourstatus;
2691 struct lwp_info *lwp;
2692 int wstat;
2693
2694 /* Note that we go through the full wait event loop. While
2695 moving threads out of jump pad, we need to be able to step
2696 over internal breakpoints and such. */
2697 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2698
2699 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2700 {
2701 lwp = get_thread_lwp (current_thread);
2702
2703 /* Lock it. */
2704 lwp->suspended++;
2705
2706 if (ourstatus.value.sig != GDB_SIGNAL_0
2707 || current_thread->last_resume_kind == resume_stop)
2708 {
2709 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2710 enqueue_one_deferred_signal (lwp, &wstat);
2711 }
2712 }
2713 }
2714
2715 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2716
2717 stabilizing_threads = 0;
2718
2719 current_thread = saved_thread;
2720
2721 if (debug_threads)
2722 {
2723 thread_stuck
2724 = (struct thread_info *) find_inferior (&all_threads,
2725 stuck_in_jump_pad_callback,
2726 NULL);
2727 if (thread_stuck != NULL)
2728 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2729 lwpid_of (thread_stuck));
2730 }
2731 }
2732
2733 static void async_file_mark (void);
2734
2735 /* Convenience function that is called when the kernel reports an
2736 event that is not passed out to GDB. */
2737
2738 static ptid_t
2739 ignore_event (struct target_waitstatus *ourstatus)
2740 {
2741 /* If we got an event, there may still be others, as a single
2742 SIGCHLD can indicate more than one child stopped. This forces
2743 another target_wait call. */
2744 async_file_mark ();
2745
2746 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2747 return null_ptid;
2748 }
2749
2750 /* Return non-zero if WAITSTATUS reflects an extended Linux
2751 event. Otherwise, return zero. */
2752
2753 static int
2754 extended_event_reported (const struct target_waitstatus *waitstatus)
2755 {
2756 if (waitstatus == NULL)
2757 return 0;
2758
2759 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2760 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2761 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2762 }
2763
2764 /* Wait for process, returns status. */
2765
2766 static ptid_t
2767 linux_wait_1 (ptid_t ptid,
2768 struct target_waitstatus *ourstatus, int target_options)
2769 {
2770 int w;
2771 struct lwp_info *event_child;
2772 int options;
2773 int pid;
2774 int step_over_finished;
2775 int bp_explains_trap;
2776 int maybe_internal_trap;
2777 int report_to_gdb;
2778 int trace_event;
2779 int in_step_range;
2780
2781 if (debug_threads)
2782 {
2783 debug_enter ();
2784 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2785 }
2786
2787 /* Translate generic target options into linux options. */
2788 options = __WALL;
2789 if (target_options & TARGET_WNOHANG)
2790 options |= WNOHANG;
2791
2792 bp_explains_trap = 0;
2793 trace_event = 0;
2794 in_step_range = 0;
2795 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2796
2797 if (ptid_equal (step_over_bkpt, null_ptid))
2798 pid = linux_wait_for_event (ptid, &w, options);
2799 else
2800 {
2801 if (debug_threads)
2802 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2803 target_pid_to_str (step_over_bkpt));
2804 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2805 }
2806
2807 if (pid == 0)
2808 {
2809 gdb_assert (target_options & TARGET_WNOHANG);
2810
2811 if (debug_threads)
2812 {
2813 debug_printf ("linux_wait_1 ret = null_ptid, "
2814 "TARGET_WAITKIND_IGNORE\n");
2815 debug_exit ();
2816 }
2817
2818 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2819 return null_ptid;
2820 }
2821 else if (pid == -1)
2822 {
2823 if (debug_threads)
2824 {
2825 debug_printf ("linux_wait_1 ret = null_ptid, "
2826 "TARGET_WAITKIND_NO_RESUMED\n");
2827 debug_exit ();
2828 }
2829
2830 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2831 return null_ptid;
2832 }
2833
2834 event_child = get_thread_lwp (current_thread);
2835
2836 /* linux_wait_for_event only returns an exit status for the last
2837 child of a process. Report it. */
2838 if (WIFEXITED (w) || WIFSIGNALED (w))
2839 {
2840 if (WIFEXITED (w))
2841 {
2842 ourstatus->kind = TARGET_WAITKIND_EXITED;
2843 ourstatus->value.integer = WEXITSTATUS (w);
2844
2845 if (debug_threads)
2846 {
2847 debug_printf ("linux_wait_1 ret = %s, exited with "
2848 "retcode %d\n",
2849 target_pid_to_str (ptid_of (current_thread)),
2850 WEXITSTATUS (w));
2851 debug_exit ();
2852 }
2853 }
2854 else
2855 {
2856 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2857 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2858
2859 if (debug_threads)
2860 {
2861 debug_printf ("linux_wait_1 ret = %s, terminated with "
2862 "signal %d\n",
2863 target_pid_to_str (ptid_of (current_thread)),
2864 WTERMSIG (w));
2865 debug_exit ();
2866 }
2867 }
2868
2869 return ptid_of (current_thread);
2870 }
2871
2872 /* If step-over executes a breakpoint instruction, it means a
2873 gdb/gdbserver breakpoint had been planted on top of a permanent
2874 breakpoint. The PC has been adjusted by
2875 check_stopped_by_breakpoint to point at the breakpoint address.
2876 Advance the PC manually past the breakpoint, otherwise the
2877 program would keep trapping the permanent breakpoint forever. */
2878 if (!ptid_equal (step_over_bkpt, null_ptid)
2879 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2880 {
2881 unsigned int increment_pc = the_low_target.breakpoint_len;
2882
2883 if (debug_threads)
2884 {
2885 debug_printf ("step-over for %s executed software breakpoint\n",
2886 target_pid_to_str (ptid_of (current_thread)));
2887 }
2888
2889 if (increment_pc != 0)
2890 {
2891 struct regcache *regcache
2892 = get_thread_regcache (current_thread, 1);
2893
2894 event_child->stop_pc += increment_pc;
2895 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2896
2897 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2898 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2899 }
2900 }
2901
2902 /* If this event was not handled before, and is not a SIGTRAP, we
2903 report it. SIGILL and SIGSEGV are also treated as traps in case
2904 a breakpoint is inserted at the current PC. If this target does
2905 not support internal breakpoints at all, we also report the
2906 SIGTRAP without further processing; it's of no concern to us. */
2907 maybe_internal_trap
2908 = (supports_breakpoints ()
2909 && (WSTOPSIG (w) == SIGTRAP
2910 || ((WSTOPSIG (w) == SIGILL
2911 || WSTOPSIG (w) == SIGSEGV)
2912 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2913
2914 if (maybe_internal_trap)
2915 {
2916 /* Handle anything that requires bookkeeping before deciding to
2917 report the event or continue waiting. */
2918
2919 /* First check if we can explain the SIGTRAP with an internal
2920 breakpoint, or if we should possibly report the event to GDB.
2921 Do this before anything that may remove or insert a
2922 breakpoint. */
2923 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2924
2925 /* We have a SIGTRAP, possibly a step-over dance has just
2926 finished. If so, tweak the state machine accordingly,
2927 reinsert breakpoints and delete any reinsert (software
2928 single-step) breakpoints. */
2929 step_over_finished = finish_step_over (event_child);
2930
2931 /* Now invoke the callbacks of any internal breakpoints there. */
2932 check_breakpoints (event_child->stop_pc);
2933
2934 /* Handle tracepoint data collecting. This may overflow the
2935 trace buffer, and cause a tracing stop, removing
2936 breakpoints. */
2937 trace_event = handle_tracepoints (event_child);
2938
2939 if (bp_explains_trap)
2940 {
2941 /* If we stepped or ran into an internal breakpoint, we've
2942 already handled it. So next time we resume (from this
2943 PC), we should step over it. */
2944 if (debug_threads)
2945 debug_printf ("Hit a gdbserver breakpoint.\n");
2946
2947 if (breakpoint_here (event_child->stop_pc))
2948 event_child->need_step_over = 1;
2949 }
2950 }
2951 else
2952 {
2953 /* We have some other signal, possibly a step-over dance was in
2954 progress, and it should be cancelled too. */
2955 step_over_finished = finish_step_over (event_child);
2956 }
2957
2958 /* We have all the data we need. Either report the event to GDB, or
2959 resume threads and keep waiting for more. */
2960
2961 /* If we're collecting a fast tracepoint, finish the collection and
2962 move out of the jump pad before delivering a signal. See
2963 linux_stabilize_threads. */
2964
2965 if (WIFSTOPPED (w)
2966 && WSTOPSIG (w) != SIGTRAP
2967 && supports_fast_tracepoints ()
2968 && agent_loaded_p ())
2969 {
2970 if (debug_threads)
2971 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2972 "to defer or adjust it.\n",
2973 WSTOPSIG (w), lwpid_of (current_thread));
2974
2975 /* Allow debugging the jump pad itself. */
2976 if (current_thread->last_resume_kind != resume_step
2977 && maybe_move_out_of_jump_pad (event_child, &w))
2978 {
2979 enqueue_one_deferred_signal (event_child, &w);
2980
2981 if (debug_threads)
2982 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2983 WSTOPSIG (w), lwpid_of (current_thread));
2984
2985 linux_resume_one_lwp (event_child, 0, 0, NULL);
2986
2987 return ignore_event (ourstatus);
2988 }
2989 }
2990
2991 if (event_child->collecting_fast_tracepoint)
2992 {
2993 if (debug_threads)
2994 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2995 "Check if we're already there.\n",
2996 lwpid_of (current_thread),
2997 event_child->collecting_fast_tracepoint);
2998
2999 trace_event = 1;
3000
3001 event_child->collecting_fast_tracepoint
3002 = linux_fast_tracepoint_collecting (event_child, NULL);
3003
3004 if (event_child->collecting_fast_tracepoint != 1)
3005 {
3006 /* No longer need this breakpoint. */
3007 if (event_child->exit_jump_pad_bkpt != NULL)
3008 {
3009 if (debug_threads)
3010 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3011 "stopping all threads momentarily.\n");
3012
3013 /* Other running threads could hit this breakpoint.
3014 We don't handle moribund locations like GDB does;
3015 instead we always pause all threads when removing
3016 breakpoints, so that any step-over or
3017 decr_pc_after_break adjustment is always taken
3018 care of while the breakpoint is still
3019 inserted. */
3020 stop_all_lwps (1, event_child);
3021
3022 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3023 event_child->exit_jump_pad_bkpt = NULL;
3024
3025 unstop_all_lwps (1, event_child);
3026
3027 gdb_assert (event_child->suspended >= 0);
3028 }
3029 }
3030
3031 if (event_child->collecting_fast_tracepoint == 0)
3032 {
3033 if (debug_threads)
3034 debug_printf ("fast tracepoint finished "
3035 "collecting successfully.\n");
3036
3037 /* We may have a deferred signal to report. */
3038 if (dequeue_one_deferred_signal (event_child, &w))
3039 {
3040 if (debug_threads)
3041 debug_printf ("dequeued one signal.\n");
3042 }
3043 else
3044 {
3045 if (debug_threads)
3046 debug_printf ("no deferred signals.\n");
3047
3048 if (stabilizing_threads)
3049 {
3050 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3051 ourstatus->value.sig = GDB_SIGNAL_0;
3052
3053 if (debug_threads)
3054 {
3055 debug_printf ("linux_wait_1 ret = %s, stopped "
3056 "while stabilizing threads\n",
3057 target_pid_to_str (ptid_of (current_thread)));
3058 debug_exit ();
3059 }
3060
3061 return ptid_of (current_thread);
3062 }
3063 }
3064 }
3065 }
3066
3067 /* Check whether GDB would be interested in this event. */
3068
3069 /* If GDB is not interested in this signal, don't stop other
3070 threads, and don't report it to GDB. Just resume the inferior
3071 right away. We do this for threading-related signals as well as
3072 any that GDB specifically requested we ignore. But never ignore
3073 SIGSTOP if we sent it ourselves, and do not ignore signals when
3074 stepping - they may require special handling to skip the signal
3075 handler. Also never ignore signals that could be caused by a
3076 breakpoint. */
3077 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3078 thread library? */
3079 if (WIFSTOPPED (w)
3080 && current_thread->last_resume_kind != resume_step
3081 && (
3082 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3083 (current_process ()->priv->thread_db != NULL
3084 && (WSTOPSIG (w) == __SIGRTMIN
3085 || WSTOPSIG (w) == __SIGRTMIN + 1))
3086 ||
3087 #endif
3088 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3089 && !(WSTOPSIG (w) == SIGSTOP
3090 && current_thread->last_resume_kind == resume_stop)
3091 && !linux_wstatus_maybe_breakpoint (w))))
3092 {
3093 siginfo_t info, *info_p;
3094
3095 if (debug_threads)
3096 debug_printf ("Ignored signal %d for LWP %ld.\n",
3097 WSTOPSIG (w), lwpid_of (current_thread));
3098
3099 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3100 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3101 info_p = &info;
3102 else
3103 info_p = NULL;
3104 linux_resume_one_lwp (event_child, event_child->stepping,
3105 WSTOPSIG (w), info_p);
3106 return ignore_event (ourstatus);
3107 }
3108
3109 /* Note that all addresses are always "out of the step range" when
3110 there's no range to begin with. */
3111 in_step_range = lwp_in_step_range (event_child);
3112
3113 /* If GDB wanted this thread to single step, and the thread is out
3114 of the step range, we always want to report the SIGTRAP, and let
3115 GDB handle it. Watchpoints should always be reported. So should
3116 signals we can't explain. A SIGTRAP we can't explain could be a
3117 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3118 do, we'll be able to handle GDB breakpoints on top of internal
3119 breakpoints, by handling the internal breakpoint and still
3120 reporting the event to GDB. If we don't, we're out of luck, GDB
3121 won't see the breakpoint hit. */
3122 report_to_gdb = (!maybe_internal_trap
3123 || (current_thread->last_resume_kind == resume_step
3124 && !in_step_range)
3125 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3126 || (!step_over_finished && !in_step_range
3127 && !bp_explains_trap && !trace_event)
3128 || (gdb_breakpoint_here (event_child->stop_pc)
3129 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3130 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3131 || extended_event_reported (&event_child->waitstatus));
3132
3133 run_breakpoint_commands (event_child->stop_pc);
3134
3135 /* We found no reason GDB would want us to stop. We either hit one
3136 of our own breakpoints, or finished an internal step GDB
3137 shouldn't know about. */
3138 if (!report_to_gdb)
3139 {
3140 if (debug_threads)
3141 {
3142 if (bp_explains_trap)
3143 debug_printf ("Hit a gdbserver breakpoint.\n");
3144 if (step_over_finished)
3145 debug_printf ("Step-over finished.\n");
3146 if (trace_event)
3147 debug_printf ("Tracepoint event.\n");
3148 if (lwp_in_step_range (event_child))
3149 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3150 paddress (event_child->stop_pc),
3151 paddress (event_child->step_range_start),
3152 paddress (event_child->step_range_end));
3153 if (extended_event_reported (&event_child->waitstatus))
3154 {
3155 char *str = target_waitstatus_to_string (&event_child->waitstatus);
3156 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3157 lwpid_of (get_lwp_thread (event_child)), str);
3158 xfree (str);
3159 }
3160 }
3161
3162 /* We're not reporting this breakpoint to GDB, so apply the
3163 decr_pc_after_break adjustment to the inferior's regcache
3164 ourselves. */
3165
3166 if (the_low_target.set_pc != NULL)
3167 {
3168 struct regcache *regcache
3169 = get_thread_regcache (current_thread, 1);
3170 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3171 }
3172
3173 /* We may have finished stepping over a breakpoint. If so,
3174 we've stopped and suspended all LWPs momentarily except the
3175 stepping one. This is where we resume them all again. We're
3176 going to keep waiting, so use proceed, which handles stepping
3177 over the next breakpoint. */
3178 if (debug_threads)
3179 debug_printf ("proceeding all threads.\n");
3180
3181 if (step_over_finished)
3182 unsuspend_all_lwps (event_child);
3183
3184 proceed_all_lwps ();
3185 return ignore_event (ourstatus);
3186 }
3187
3188 if (debug_threads)
3189 {
3190 if (current_thread->last_resume_kind == resume_step)
3191 {
3192 if (event_child->step_range_start == event_child->step_range_end)
3193 debug_printf ("GDB wanted to single-step, reporting event.\n");
3194 else if (!lwp_in_step_range (event_child))
3195 debug_printf ("Out of step range, reporting event.\n");
3196 }
3197 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3198 debug_printf ("Stopped by watchpoint.\n");
3199 else if (gdb_breakpoint_here (event_child->stop_pc))
3200 debug_printf ("Stopped by GDB breakpoint.\n");
3201 if (debug_threads)
3202 debug_printf ("Hit a non-gdbserver trap event.\n");
3203 }
3204
3205 /* Alright, we're going to report a stop. */
3206
3207 if (!stabilizing_threads)
3208 {
3209 /* In all-stop, stop all threads. */
3210 if (!non_stop)
3211 stop_all_lwps (0, NULL);
3212
3213 /* If we're not waiting for a specific LWP, choose an event LWP
3214 from among those that have had events. Giving equal priority
3215 to all LWPs that have had events helps prevent
3216 starvation. */
3217 if (ptid_equal (ptid, minus_one_ptid))
3218 {
3219 event_child->status_pending_p = 1;
3220 event_child->status_pending = w;
3221
3222 select_event_lwp (&event_child);
3223
3224 /* current_thread and event_child must stay in sync. */
3225 current_thread = get_lwp_thread (event_child);
3226
3227 event_child->status_pending_p = 0;
3228 w = event_child->status_pending;
3229 }
3230
3231 if (step_over_finished)
3232 {
3233 if (!non_stop)
3234 {
3235 /* If we were doing a step-over, all other threads but
3236 the stepping one had been paused in start_step_over,
3237 with their suspend counts incremented. We don't want
3238 to do a full unstop/unpause, because we're in
3239 all-stop mode (so we want threads stopped), but we
3240 still need to unsuspend the other threads, to
3241 decrement their `suspended' count back. */
3242 unsuspend_all_lwps (event_child);
3243 }
3244 else
3245 {
3246 /* If we just finished a step-over, then all threads had
3247 been momentarily paused. In all-stop, that's fine,
3248 we want threads stopped by now anyway. In non-stop,
3249 we need to re-resume threads that GDB wanted to be
3250 running. */
3251 unstop_all_lwps (1, event_child);
3252 }
3253 }
3254
3255 /* Stabilize threads (move out of jump pads). */
3256 if (!non_stop)
3257 stabilize_threads ();
3258 }
3259 else
3260 {
3261 /* If we just finished a step-over, then all threads had been
3262 momentarily paused. In all-stop, that's fine, we want
3263 threads stopped by now anyway. In non-stop, we need to
3264 re-resume threads that GDB wanted to be running. */
3265 if (step_over_finished)
3266 unstop_all_lwps (1, event_child);
3267 }
3268
3269 if (extended_event_reported (&event_child->waitstatus))
3270 {
3271 /* If the reported event is a fork, vfork or exec, let GDB know. */
3272 ourstatus->kind = event_child->waitstatus.kind;
3273 ourstatus->value = event_child->waitstatus.value;
3274
3275 /* Clear the event lwp's waitstatus since we handled it already. */
3276 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3277 }
3278 else
3279 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3280
3281 /* Now that we've selected our final event LWP, un-adjust its PC if
3282 it was a software breakpoint, and the client doesn't know we can
3283 adjust the breakpoint ourselves. */
3284 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3285 && !swbreak_feature)
3286 {
3287 int decr_pc = the_low_target.decr_pc_after_break;
3288
3289 if (decr_pc != 0)
3290 {
3291 struct regcache *regcache
3292 = get_thread_regcache (current_thread, 1);
3293 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3294 }
3295 }
3296
3297 if (current_thread->last_resume_kind == resume_stop
3298 && WSTOPSIG (w) == SIGSTOP)
3299 {
3300 /* GDB requested this thread to stop with vCont;t, and it stopped
3301 cleanly, so report it as stopped with SIG0. The use of
3302 SIGSTOP is an implementation detail. */
3303 ourstatus->value.sig = GDB_SIGNAL_0;
3304 }
3305 else if (current_thread->last_resume_kind == resume_stop
3306 && WSTOPSIG (w) != SIGSTOP)
3307 {
3308 /* GDB requested this thread to stop with vCont;t, but it
3309 stopped for other reasons. */
3310 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3311 }
3312 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3313 {
3314 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3315 }
3316
3317 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3318
3319 if (debug_threads)
3320 {
3321 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3322 target_pid_to_str (ptid_of (current_thread)),
3323 ourstatus->kind, ourstatus->value.sig);
3324 debug_exit ();
3325 }
3326
3327 return ptid_of (current_thread);
3328 }
3329
3330 /* Get rid of any pending event in the pipe. */
3331 static void
3332 async_file_flush (void)
3333 {
3334 int ret;
3335 char buf;
3336
3337 do
3338 ret = read (linux_event_pipe[0], &buf, 1);
3339 while (ret >= 0 || (ret == -1 && errno == EINTR));
3340 }
3341
3342 /* Put something in the pipe, so the event loop wakes up. */
3343 static void
3344 async_file_mark (void)
3345 {
3346 int ret;
3347
3348 async_file_flush ();
3349
3350 do
3351 ret = write (linux_event_pipe[1], "+", 1);
3352 while (ret == 0 || (ret == -1 && errno == EINTR));
3353
3354 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3355 be awakened anyway. */
3356 }
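
/* For illustration: the flush/mark pair above is the self-pipe
   trick, and it relies on both ends of linux_event_pipe being
   non-blocking -- otherwise the read loop in async_file_flush would
   hang on an empty pipe and async_file_mark could block on a full
   one.  A sketch of how such a pipe can be set up;
   example_make_event_pipe is a hypothetical name.  */
#if 0
static int
example_make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* O_NONBLOCK on both ends: reads fail with EAGAIN when the pipe
     is empty, writes fail with EAGAIN when it is full.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;

  return 0;
}
#endif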
3357
3358 static ptid_t
3359 linux_wait (ptid_t ptid,
3360 struct target_waitstatus *ourstatus, int target_options)
3361 {
3362 ptid_t event_ptid;
3363
3364 /* Flush the async file first. */
3365 if (target_is_async_p ())
3366 async_file_flush ();
3367
3368 do
3369 {
3370 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3371 }
3372 while ((target_options & TARGET_WNOHANG) == 0
3373 && ptid_equal (event_ptid, null_ptid)
3374 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3375
3376 /* If at least one stop was reported, there may be more. A single
3377 SIGCHLD can signal more than one child stop. */
3378 if (target_is_async_p ()
3379 && (target_options & TARGET_WNOHANG) != 0
3380 && !ptid_equal (event_ptid, null_ptid))
3381 async_file_mark ();
3382
3383 return event_ptid;
3384 }
3385
3386 /* Send a signal to an LWP. */
3387
3388 static int
3389 kill_lwp (unsigned long lwpid, int signo)
3390 {
3391 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3392 fails, then we are not using nptl threads and we should be using kill. */
3393
3394 #ifdef __NR_tkill
3395 {
3396 static int tkill_failed;
3397
3398 if (!tkill_failed)
3399 {
3400 int ret;
3401
3402 errno = 0;
3403 ret = syscall (__NR_tkill, lwpid, signo);
3404 if (errno != ENOSYS)
3405 return ret;
3406 tkill_failed = 1;
3407 }
3408 }
3409 #endif
3410
3411 return kill (lwpid, signo);
3412 }
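
/* For illustration: tkill is thread-directed but can race with TID
   reuse.  Modern kernels also provide tgkill, which additionally
   takes the thread-group id and fails rather than signal an
   unrelated thread that recycled the TID.  A minimal sketch;
   example_tgkill is a hypothetical wrapper.  */
#if 0
static int
example_tgkill (int tgid, int tid, int signo)
{
#ifdef __NR_tgkill
  /* There is no glibc wrapper on older systems; use syscall.  */
  return syscall (__NR_tgkill, tgid, tid, signo);
#else
  return kill_lwp (tid, signo);
#endif
}
#endif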
3413
3414 void
3415 linux_stop_lwp (struct lwp_info *lwp)
3416 {
3417 send_sigstop (lwp);
3418 }
3419
3420 static void
3421 send_sigstop (struct lwp_info *lwp)
3422 {
3423 int pid;
3424
3425 pid = lwpid_of (get_lwp_thread (lwp));
3426
3427 /* If we already have a pending stop signal for this process, don't
3428 send another. */
3429 if (lwp->stop_expected)
3430 {
3431 if (debug_threads)
3432 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3433
3434 return;
3435 }
3436
3437 if (debug_threads)
3438 debug_printf ("Sending sigstop to lwp %d\n", pid);
3439
3440 lwp->stop_expected = 1;
3441 kill_lwp (pid, SIGSTOP);
3442 }
3443
3444 static int
3445 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3446 {
3447 struct thread_info *thread = (struct thread_info *) entry;
3448 struct lwp_info *lwp = get_thread_lwp (thread);
3449
3450 /* Ignore EXCEPT. */
3451 if (lwp == except)
3452 return 0;
3453
3454 if (lwp->stopped)
3455 return 0;
3456
3457 send_sigstop (lwp);
3458 return 0;
3459 }
3460
3461 /* Increment the suspend count of an LWP, and stop it, if not stopped
3462 yet. */
3463 static int
3464 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3465 void *except)
3466 {
3467 struct thread_info *thread = (struct thread_info *) entry;
3468 struct lwp_info *lwp = get_thread_lwp (thread);
3469
3470 /* Ignore EXCEPT. */
3471 if (lwp == except)
3472 return 0;
3473
3474 lwp->suspended++;
3475
3476 return send_sigstop_callback (entry, except);
3477 }
3478
3479 static void
3480 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3481 {
3482 /* It's dead, really. */
3483 lwp->dead = 1;
3484
3485 /* Store the exit status for later. */
3486 lwp->status_pending_p = 1;
3487 lwp->status_pending = wstat;
3488
3489 /* Prevent trying to stop it. */
3490 lwp->stopped = 1;
3491
3492 /* No further stops are expected from a dead lwp. */
3493 lwp->stop_expected = 0;
3494 }
3495
3496 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3497
3498 static void
3499 wait_for_sigstop (void)
3500 {
3501 struct thread_info *saved_thread;
3502 ptid_t saved_tid;
3503 int wstat;
3504 int ret;
3505
3506 saved_thread = current_thread;
3507 if (saved_thread != NULL)
3508 saved_tid = saved_thread->entry.id;
3509 else
3510 saved_tid = null_ptid; /* avoid bogus unused warning */
3511
3512 if (debug_threads)
3513 debug_printf ("wait_for_sigstop: pulling events\n");
3514
3515 /* Passing NULL_PTID as filter indicates we want all events to be
3516 left pending. Eventually this returns when there are no
3517 unwaited-for children left. */
3518 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3519 &wstat, __WALL);
3520 gdb_assert (ret == -1);
3521
3522 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3523 current_thread = saved_thread;
3524 else
3525 {
3526 if (debug_threads)
3527 debug_printf ("Previously current thread died.\n");
3528
3529 if (non_stop)
3530 {
3531 /* We can't change the current inferior behind GDB's back,
3532 otherwise, a subsequent command may apply to the wrong
3533 process. */
3534 current_thread = NULL;
3535 }
3536 else
3537 {
3538 /* Set a valid thread as current. */
3539 set_desired_thread (0);
3540 }
3541 }
3542 }
3543
3544 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3545 move it out, because we need to report the stop event to GDB. For
3546 example, if the user puts a breakpoint in the jump pad, it's
3547 because she wants to debug it. */
3548
3549 static int
3550 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3551 {
3552 struct thread_info *thread = (struct thread_info *) entry;
3553 struct lwp_info *lwp = get_thread_lwp (thread);
3554
3555 gdb_assert (lwp->suspended == 0);
3556 gdb_assert (lwp->stopped);
3557
3558 /* Allow debugging the jump pad, gdb_collect, etc.. */
3559 return (supports_fast_tracepoints ()
3560 && agent_loaded_p ()
3561 && (gdb_breakpoint_here (lwp->stop_pc)
3562 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3563 || thread->last_resume_kind == resume_step)
3564 && linux_fast_tracepoint_collecting (lwp, NULL));
3565 }
3566
3567 static void
3568 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3569 {
3570 struct thread_info *thread = (struct thread_info *) entry;
3571 struct lwp_info *lwp = get_thread_lwp (thread);
3572 int *wstat;
3573
3574 gdb_assert (lwp->suspended == 0);
3575 gdb_assert (lwp->stopped);
3576
3577 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3578
3579 /* Allow debugging the jump pad, gdb_collect, etc. */
3580 if (!gdb_breakpoint_here (lwp->stop_pc)
3581 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3582 && thread->last_resume_kind != resume_step
3583 && maybe_move_out_of_jump_pad (lwp, wstat))
3584 {
3585 if (debug_threads)
3586 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3587 lwpid_of (thread));
3588
3589 if (wstat)
3590 {
3591 lwp->status_pending_p = 0;
3592 enqueue_one_deferred_signal (lwp, wstat);
3593
3594 if (debug_threads)
3595 debug_printf ("Signal %d for LWP %ld deferred "
3596 "(in jump pad)\n",
3597 WSTOPSIG (*wstat), lwpid_of (thread));
3598 }
3599
3600 linux_resume_one_lwp (lwp, 0, 0, NULL);
3601 }
3602 else
3603 lwp->suspended++;
3604 }
3605
3606 static int
3607 lwp_running (struct inferior_list_entry *entry, void *data)
3608 {
3609 struct thread_info *thread = (struct thread_info *) entry;
3610 struct lwp_info *lwp = get_thread_lwp (thread);
3611
3612 if (lwp->dead)
3613 return 0;
3614 if (lwp->stopped)
3615 return 0;
3616 return 1;
3617 }
3618
3619 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3620 If SUSPEND, then also increase the suspend count of every LWP,
3621 except EXCEPT. */
3622
3623 static void
3624 stop_all_lwps (int suspend, struct lwp_info *except)
3625 {
3626 /* Should not be called recursively. */
3627 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3628
3629 if (debug_threads)
3630 {
3631 debug_enter ();
3632 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3633 suspend ? "stop-and-suspend" : "stop",
3634 except != NULL
3635 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3636 : "none");
3637 }
3638
3639 stopping_threads = (suspend
3640 ? STOPPING_AND_SUSPENDING_THREADS
3641 : STOPPING_THREADS);
3642
3643 if (suspend)
3644 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3645 else
3646 find_inferior (&all_threads, send_sigstop_callback, except);
3647 wait_for_sigstop ();
3648 stopping_threads = NOT_STOPPING_THREADS;
3649
3650 if (debug_threads)
3651 {
3652 debug_printf ("stop_all_lwps done, setting stopping_threads "
3653 "back to !stopping\n");
3654 debug_exit ();
3655 }
3656 }
3657
3658 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3659 SIGNAL is nonzero, give it that signal. */
3660
3661 static void
3662 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3663 int step, int signal, siginfo_t *info)
3664 {
3665 struct thread_info *thread = get_lwp_thread (lwp);
3666 struct thread_info *saved_thread;
3667 int fast_tp_collecting;
3668 struct process_info *proc = get_thread_process (thread);
3669
3670 /* Note that the target description may not be initialised
3671 (proc->tdesc == NULL) at this point because the program hasn't
3672 stopped at the first instruction yet. This means GDBserver skips
3673 the extra traps from the wrapper program (see option --wrapper).
3674 Code in this function that requires register access should be
3675 guarded by a check on proc->tdesc == NULL or similar. */
3676
3677 if (lwp->stopped == 0)
3678 return;
3679
3680 fast_tp_collecting = lwp->collecting_fast_tracepoint;
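/* COLLECTING_FAST_TRACEPOINT is a tri-state: 0 means not
   collecting; 1 means stopped in the jump pad before the relocated
   original instruction has run (we leave via the exit-jump-pad
   breakpoint); 2 means at or past the relocated instruction (we
   leave by single-stepping), matching the fast_tp_collecting == 1
   and == 2 handling below.  */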
3681
3682 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3683
3684 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3685 user used the "jump" command, or "set $pc = foo"). */
3686 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3687 {
3688 /* Collecting 'while-stepping' actions doesn't make sense
3689 anymore. */
3690 release_while_stepping_state_list (thread);
3691 }
3692
3693 /* If we have pending signals or status, and a new signal, enqueue the
3694 signal. Also enqueue the signal if we are waiting to reinsert a
3695 breakpoint; it will be picked up again below. */
3696 if (signal != 0
3697 && (lwp->status_pending_p
3698 || lwp->pending_signals != NULL
3699 || lwp->bp_reinsert != 0
3700 || fast_tp_collecting))
3701 {
3702 struct pending_signals *p_sig;
3703 p_sig = xmalloc (sizeof (*p_sig));
3704 p_sig->prev = lwp->pending_signals;
3705 p_sig->signal = signal;
3706 if (info == NULL)
3707 memset (&p_sig->info, 0, sizeof (siginfo_t));
3708 else
3709 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3710 lwp->pending_signals = p_sig;
3711 }
3712
3713 if (lwp->status_pending_p)
3714 {
3715 if (debug_threads)
3716 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3717 " has pending status\n",
3718 lwpid_of (thread), step ? "step" : "continue", signal,
3719 lwp->stop_expected ? "expected" : "not expected");
3720 return;
3721 }
3722
3723 saved_thread = current_thread;
3724 current_thread = thread;
3725
3726 if (debug_threads)
3727 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3728 lwpid_of (thread), step ? "step" : "continue", signal,
3729 lwp->stop_expected ? "expected" : "not expected");
3730
3731 /* This bit needs some thinking about. If we get a signal that
3732 we must report while a single-step reinsert is still pending,
3733 we often end up resuming the thread. It might be better to
3734 (ew) allow a stack of pending events; then we could be sure that
3735 the reinsert happened right away and not lose any signals.
3736
3737 Making this stack would also shrink the window in which breakpoints are
3738 uninserted (see comment in linux_wait_for_lwp) but not enough for
3739 complete correctness, so it won't solve that problem. It may be
3740 worthwhile just to solve this one, however. */
3741 if (lwp->bp_reinsert != 0)
3742 {
3743 if (debug_threads)
3744 debug_printf (" pending reinsert at 0x%s\n",
3745 paddress (lwp->bp_reinsert));
3746
3747 if (can_hardware_single_step ())
3748 {
3749 if (fast_tp_collecting == 0)
3750 {
3751 if (step == 0)
3752 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3753 if (lwp->suspended)
3754 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3755 lwp->suspended);
3756 }
3757
3758 step = 1;
3759 }
3760
3761 /* Postpone any pending signal. It was enqueued above. */
3762 signal = 0;
3763 }
3764
3765 if (fast_tp_collecting == 1)
3766 {
3767 if (debug_threads)
3768 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3769 " (exit-jump-pad-bkpt)\n",
3770 lwpid_of (thread));
3771
3772 /* Postpone any pending signal. It was enqueued above. */
3773 signal = 0;
3774 }
3775 else if (fast_tp_collecting == 2)
3776 {
3777 if (debug_threads)
3778 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3779 " single-stepping\n",
3780 lwpid_of (thread));
3781
3782 if (can_hardware_single_step ())
3783 step = 1;
3784 else
3785 {
3786 internal_error (__FILE__, __LINE__,
3787 "moving out of jump pad single-stepping"
3788 " not implemented on this target");
3789 }
3790
3791 /* Postpone any pending signal. It was enqueued above. */
3792 signal = 0;
3793 }
3794
3795   /* If we have while-stepping actions in this thread, set it stepping.
3796      If we have a signal to deliver, it may or may not be set to
3797      SIG_IGN; we don't know.  Assume it will be delivered, and allow
3798      collecting while-stepping into a signal handler.  A smarter
3799      approach would be to set an internal breakpoint at the signal
3800      return address, continue, and only carry on catching this
3801      while-stepping action once that breakpoint is hit.  A future
3802      enhancement.  */
3803 if (thread->while_stepping != NULL
3804 && can_hardware_single_step ())
3805 {
3806 if (debug_threads)
3807 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3808 lwpid_of (thread));
3809 step = 1;
3810 }
3811
3812 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3813 {
3814 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3815
3816 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3817
3818 if (debug_threads)
3819 {
3820 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3821 (long) lwp->stop_pc);
3822 }
3823 }
3824
3825 /* If we have pending signals, consume one unless we are trying to
3826 reinsert a breakpoint or we're trying to finish a fast tracepoint
3827 collect. */
3828 if (lwp->pending_signals != NULL
3829 && lwp->bp_reinsert == 0
3830 && fast_tp_collecting == 0)
3831 {
3832 struct pending_signals **p_sig;
3833
3834 p_sig = &lwp->pending_signals;
3835 while ((*p_sig)->prev != NULL)
3836 p_sig = &(*p_sig)->prev;
3837
3838 signal = (*p_sig)->signal;
3839 if ((*p_sig)->info.si_signo != 0)
3840 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3841 &(*p_sig)->info);
3842
3843 free (*p_sig);
3844 *p_sig = NULL;
3845 }
3846
3847 if (the_low_target.prepare_to_resume != NULL)
3848 the_low_target.prepare_to_resume (lwp);
3849
3850 regcache_invalidate_thread (thread);
3851 errno = 0;
3852 lwp->stepping = step;
3853 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3854 (PTRACE_TYPE_ARG3) 0,
3855 /* Coerce to a uintptr_t first to avoid potential gcc warning
3856 of coercing an 8 byte integer to a 4 byte pointer. */
3857 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3858
3859 current_thread = saved_thread;
3860 if (errno)
3861 perror_with_name ("resuming thread");
3862
3863 /* Successfully resumed. Clear state that no longer makes sense,
3864 and mark the LWP as running. Must not do this before resuming
3865 otherwise if that fails other code will be confused. E.g., we'd
3866 later try to stop the LWP and hang forever waiting for a stop
3867 status. Note that we must not throw after this is cleared,
3868 otherwise handle_zombie_lwp_error would get confused. */
3869 lwp->stopped = 0;
3870 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3871 }
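/* Editorial sketch, not part of the original source: the
   lwp->pending_signals list used above is pushed at the head but
   consumed from the tail, which re-delivers signals in the order they
   were intercepted (FIFO).  A stand-alone illustration of that
   discipline with a simplified node type of our own.  */
#if 0
#include <stdlib.h>

struct example_sig { struct example_sig *prev; int signal; };

static void
example_enqueue (struct example_sig **list, int sig)
{
  struct example_sig *p = malloc (sizeof *p);

  p->prev = *list;		/* The newest entry becomes the head.  */
  p->signal = sig;
  *list = p;
}

static int
example_dequeue (struct example_sig **list)
{
  struct example_sig **p = list;
  int sig;

  while ((*p)->prev != NULL)	/* Walk to the tail: the oldest entry.  */
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif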
3872
3873 /* Called when we try to resume a stopped LWP and that errors out.  If
3874    the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3875    or about to become one), discard the error, clear any pending status
3876    the LWP may have, and return true (we'll collect the exit status
3877    soon enough).  Otherwise, return false.  */
3878
3879 static int
3880 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3881 {
3882 struct thread_info *thread = get_lwp_thread (lp);
3883
3884 /* If we get an error after resuming the LWP successfully, we'd
3885 confuse !T state for the LWP being gone. */
3886 gdb_assert (lp->stopped);
3887
3888 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3889 because even if ptrace failed with ESRCH, the tracee may be "not
3890 yet fully dead", but already refusing ptrace requests. In that
3891 case the tracee has 'R (Running)' state for a little bit
3892 (observed in Linux 3.18). See also the note on ESRCH in the
3893 ptrace(2) man page. Instead, check whether the LWP has any state
3894 other than ptrace-stopped. */
3895
3896 /* Don't assume anything if /proc/PID/status can't be read. */
3897 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3898 {
3899 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3900 lp->status_pending_p = 0;
3901 return 1;
3902 }
3903 return 0;
3904 }
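/* Editorial sketch, not part of the original source: the
   linux_proc_pid_is_trace_stopped_nowarn check used above boils down
   to reading the "State:" line of /proc/PID/status and looking for
   "tracing stop" ('t' on current kernels, historically spelled
   'T (tracing stop)').  A minimal stand-alone version, with no
   caching and -1 meaning "could not tell".  */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int stopped = -1;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	stopped = (strstr (line, "tracing stop") != NULL);
	break;
      }
  fclose (f);
  return stopped;
}
#endif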
3905
3906 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3907 disappears while we try to resume it. */
3908
3909 static void
3910 linux_resume_one_lwp (struct lwp_info *lwp,
3911 int step, int signal, siginfo_t *info)
3912 {
3913 TRY
3914 {
3915 linux_resume_one_lwp_throw (lwp, step, signal, info);
3916 }
3917 CATCH (ex, RETURN_MASK_ERROR)
3918 {
3919 if (!check_ptrace_stopped_lwp_gone (lwp))
3920 throw_exception (ex);
3921 }
3922 END_CATCH
3923 }
3924
3925 struct thread_resume_array
3926 {
3927 struct thread_resume *resume;
3928 size_t n;
3929 };
3930
3931 /* This function is called once per thread via find_inferior.
3932 ARG is a pointer to a thread_resume_array struct.
3933 We look up the thread specified by ENTRY in ARG, and mark the thread
3934 with a pointer to the appropriate resume request.
3935
3936 This algorithm is O(threads * resume elements), but resume elements
3937 is small (and will remain small at least until GDB supports thread
3938 suspension). */
3939
3940 static int
3941 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3942 {
3943 struct thread_info *thread = (struct thread_info *) entry;
3944 struct lwp_info *lwp = get_thread_lwp (thread);
3945 int ndx;
3946 struct thread_resume_array *r;
3947
3948 r = arg;
3949
3950 for (ndx = 0; ndx < r->n; ndx++)
3951 {
3952 ptid_t ptid = r->resume[ndx].thread;
3953 if (ptid_equal (ptid, minus_one_ptid)
3954 || ptid_equal (ptid, entry->id)
3955 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3956 of PID'. */
3957 || (ptid_get_pid (ptid) == pid_of (thread)
3958 && (ptid_is_pid (ptid)
3959 || ptid_get_lwp (ptid) == -1)))
3960 {
3961 if (r->resume[ndx].kind == resume_stop
3962 && thread->last_resume_kind == resume_stop)
3963 {
3964 if (debug_threads)
3965 debug_printf ("already %s LWP %ld at GDB's request\n",
3966 (thread->last_status.kind
3967 == TARGET_WAITKIND_STOPPED)
3968 ? "stopped"
3969 : "stopping",
3970 lwpid_of (thread));
3971
3972 continue;
3973 }
3974
3975 lwp->resume = &r->resume[ndx];
3976 thread->last_resume_kind = lwp->resume->kind;
3977
3978 lwp->step_range_start = lwp->resume->step_range_start;
3979 lwp->step_range_end = lwp->resume->step_range_end;
3980
3981 /* If we had a deferred signal to report, dequeue one now.
3982 This can happen if LWP gets more than one signal while
3983 trying to get out of a jump pad. */
3984 if (lwp->stopped
3985 && !lwp->status_pending_p
3986 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3987 {
3988 lwp->status_pending_p = 1;
3989
3990 if (debug_threads)
3991 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3992 "leaving status pending.\n",
3993 WSTOPSIG (lwp->status_pending),
3994 lwpid_of (thread));
3995 }
3996
3997 return 0;
3998 }
3999 }
4000
4001 /* No resume action for this thread. */
4002 lwp->resume = NULL;
4003
4004 return 0;
4005 }
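/* Editorial sketch, not part of the original source: the ptid
   matching above accepts three spellings of "this thread": the -1
   wildcard, an exact ptid, and a process-wide ptid ('pPID' or
   'pPID.-1').  The same rules distilled into a stand-alone predicate
   over a simplified ptid type of our own, assuming lwp == 0 encodes
   a plain 'pPID'.  */
#if 0
struct example_ptid { int pid; long lwp; };

static int
example_resume_matches (struct example_ptid req, struct example_ptid thr)
{
  if (req.pid == -1)		/* minus_one_ptid: every thread.  */
    return 1;
  if (req.pid == thr.pid && req.lwp == thr.lwp)	/* Exact thread.  */
    return 1;
  /* 'pPID' (lwp 0) and 'pPID.-1' both mean all threads of PID.  */
  return req.pid == thr.pid && (req.lwp == 0 || req.lwp == -1);
}
#endif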
4006
4007 /* find_inferior callback for linux_resume.
4008 Set *FLAG_P if this lwp has an interesting status pending. */
4009
4010 static int
4011 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4012 {
4013 struct thread_info *thread = (struct thread_info *) entry;
4014 struct lwp_info *lwp = get_thread_lwp (thread);
4015
4016 /* LWPs which will not be resumed are not interesting, because
4017 we might not wait for them next time through linux_wait. */
4018 if (lwp->resume == NULL)
4019 return 0;
4020
4021 if (thread_still_has_status_pending_p (thread))
4022 * (int *) flag_p = 1;
4023
4024 return 0;
4025 }
4026
4027 /* Return 1 if this lwp that GDB wants running is stopped at an
4028 internal breakpoint that we need to step over. It assumes that any
4029 required STOP_PC adjustment has already been propagated to the
4030 inferior's regcache. */
4031
4032 static int
4033 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4034 {
4035 struct thread_info *thread = (struct thread_info *) entry;
4036 struct lwp_info *lwp = get_thread_lwp (thread);
4037 struct thread_info *saved_thread;
4038 CORE_ADDR pc;
4039 struct process_info *proc = get_thread_process (thread);
4040
4041   /* GDBserver is still skipping the extra traps from the wrapper
4042      program, so there is no need to step over anything yet.  */
4043 if (proc->tdesc == NULL)
4044 return 0;
4045
4046 /* LWPs which will not be resumed are not interesting, because we
4047 might not wait for them next time through linux_wait. */
4048
4049 if (!lwp->stopped)
4050 {
4051 if (debug_threads)
4052 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4053 lwpid_of (thread));
4054 return 0;
4055 }
4056
4057 if (thread->last_resume_kind == resume_stop)
4058 {
4059 if (debug_threads)
4060 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4061 " stopped\n",
4062 lwpid_of (thread));
4063 return 0;
4064 }
4065
4066 gdb_assert (lwp->suspended >= 0);
4067
4068 if (lwp->suspended)
4069 {
4070 if (debug_threads)
4071 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4072 lwpid_of (thread));
4073 return 0;
4074 }
4075
4076 if (!lwp->need_step_over)
4077 {
4078 if (debug_threads)
4079 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4080 }
4081
4082 if (lwp->status_pending_p)
4083 {
4084 if (debug_threads)
4085 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4086 " status.\n",
4087 lwpid_of (thread));
4088 return 0;
4089 }
4090
4091 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4092 or we have. */
4093 pc = get_pc (lwp);
4094
4095   /* If the PC has changed since we stopped, then don't do anything,
4096      and let the breakpoint/tracepoint be hit.  This happens if, for
4097      instance, GDB handled the decr_pc_after_break subtraction itself,
4098      GDB is OOL stepping this thread, the user has issued a "jump"
4099      command, or the user has poked the thread's registers directly.  */
4100 if (pc != lwp->stop_pc)
4101 {
4102 if (debug_threads)
4103 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4104 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4105 lwpid_of (thread),
4106 paddress (lwp->stop_pc), paddress (pc));
4107
4108 lwp->need_step_over = 0;
4109 return 0;
4110 }
4111
4112 saved_thread = current_thread;
4113 current_thread = thread;
4114
4115 /* We can only step over breakpoints we know about. */
4116 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4117 {
4118       /* Don't step over a breakpoint that GDB expects to hit,
4119 	 though.  If the condition is being evaluated on the target's side
4120 	 and it evaluates to false, step over this breakpoint as well.  */
4121 if (gdb_breakpoint_here (pc)
4122 && gdb_condition_true_at_breakpoint (pc)
4123 && gdb_no_commands_at_breakpoint (pc))
4124 {
4125 if (debug_threads)
4126 debug_printf ("Need step over [LWP %ld]? yes, but found"
4127 " GDB breakpoint at 0x%s; skipping step over\n",
4128 lwpid_of (thread), paddress (pc));
4129
4130 current_thread = saved_thread;
4131 return 0;
4132 }
4133 else
4134 {
4135 if (debug_threads)
4136 debug_printf ("Need step over [LWP %ld]? yes, "
4137 "found breakpoint at 0x%s\n",
4138 lwpid_of (thread), paddress (pc));
4139
4140 /* We've found an lwp that needs stepping over --- return 1 so
4141 that find_inferior stops looking. */
4142 current_thread = saved_thread;
4143
4144 /* If the step over is cancelled, this is set again. */
4145 lwp->need_step_over = 0;
4146 return 1;
4147 }
4148 }
4149
4150 current_thread = saved_thread;
4151
4152 if (debug_threads)
4153 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4154 " at 0x%s\n",
4155 lwpid_of (thread), paddress (pc));
4156
4157 return 0;
4158 }
4159
4160 /* Start a step-over operation on LWP.  When LWP has stopped at a
4161    breakpoint, we need to move the breakpoint out of the way to make
4162    progress.  If we let other threads run while we do that, they may
4163    pass by the breakpoint location and miss hitting it.  To avoid
4164    that, a step-over momentarily stops all threads, then single-steps
4165    LWP with the breakpoint temporarily uninserted from the
4166    inferior.  When the single-step finishes, we reinsert the
4167    breakpoint and let all threads that are supposed to be running
4168    run again.
4169
4170    On targets that don't support hardware single-step, we don't
4171    currently support full software single-stepping.  Instead, we only
4172    support stepping over the thread event breakpoint, by asking the
4173    low target where to place a reinsert breakpoint.  Since this
4174    routine assumes the breakpoint being stepped over is a thread event
4175    breakpoint, it takes the return address of the current function as
4176    a good enough place to set the reinsert breakpoint.  */
4177
4178 static int
4179 start_step_over (struct lwp_info *lwp)
4180 {
4181 struct thread_info *thread = get_lwp_thread (lwp);
4182 struct thread_info *saved_thread;
4183 CORE_ADDR pc;
4184 int step;
4185
4186 if (debug_threads)
4187 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4188 lwpid_of (thread));
4189
4190 stop_all_lwps (1, lwp);
4191 gdb_assert (lwp->suspended == 0);
4192
4193 if (debug_threads)
4194 debug_printf ("Done stopping all threads for step-over.\n");
4195
4196 /* Note, we should always reach here with an already adjusted PC,
4197 either by GDB (if we're resuming due to GDB's request), or by our
4198 caller, if we just finished handling an internal breakpoint GDB
4199 shouldn't care about. */
4200 pc = get_pc (lwp);
4201
4202 saved_thread = current_thread;
4203 current_thread = thread;
4204
4205 lwp->bp_reinsert = pc;
4206 uninsert_breakpoints_at (pc);
4207 uninsert_fast_tracepoint_jumps_at (pc);
4208
4209 if (can_hardware_single_step ())
4210 {
4211 step = 1;
4212 }
4213 else
4214 {
4215 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4216 set_reinsert_breakpoint (raddr);
4217 step = 0;
4218 }
4219
4220 current_thread = saved_thread;
4221
4222 linux_resume_one_lwp (lwp, step, 0, NULL);
4223
4224 /* Require next event from this LWP. */
4225 step_over_bkpt = thread->entry.id;
4226 return 1;
4227 }
4228
4229 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
4230    start_step_over, if still there, and delete any reinsert
4231    breakpoints we've set on targets without hardware single-step.  */
4232
4233 static int
4234 finish_step_over (struct lwp_info *lwp)
4235 {
4236 if (lwp->bp_reinsert != 0)
4237 {
4238 if (debug_threads)
4239 debug_printf ("Finished step over.\n");
4240
4241 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4242 may be no breakpoint to reinsert there by now. */
4243 reinsert_breakpoints_at (lwp->bp_reinsert);
4244 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4245
4246 lwp->bp_reinsert = 0;
4247
4248 /* Delete any software-single-step reinsert breakpoints. No
4249 longer needed. We don't have to worry about other threads
4250 hitting this trap, and later not being able to explain it,
4251 because we were stepping over a breakpoint, and we hold all
4252 threads but LWP stopped while doing that. */
4253 if (!can_hardware_single_step ())
4254 delete_reinsert_breakpoints ();
4255
4256 step_over_bkpt = null_ptid;
4257 return 1;
4258 }
4259 else
4260 return 0;
4261 }
4262
4263 /* This function is called once per thread. We check the thread's resume
4264 request, which will tell us whether to resume, step, or leave the thread
4265 stopped; and what signal, if any, it should be sent.
4266
4267 For threads which we aren't explicitly told otherwise, we preserve
4268 the stepping flag; this is used for stepping over gdbserver-placed
4269 breakpoints.
4270
4271    If a pending status was set in any thread, we queue any needed
4272 signals, since we won't actually resume. We already have a pending
4273 event to report, so we don't need to preserve any step requests;
4274 they should be re-issued if necessary. */
4275
4276 static int
4277 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4278 {
4279 struct thread_info *thread = (struct thread_info *) entry;
4280 struct lwp_info *lwp = get_thread_lwp (thread);
4281 int step;
4282 int leave_all_stopped = * (int *) arg;
4283 int leave_pending;
4284
4285 if (lwp->resume == NULL)
4286 return 0;
4287
4288 if (lwp->resume->kind == resume_stop)
4289 {
4290 if (debug_threads)
4291 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4292
4293 if (!lwp->stopped)
4294 {
4295 if (debug_threads)
4296 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4297
4298 /* Stop the thread, and wait for the event asynchronously,
4299 through the event loop. */
4300 send_sigstop (lwp);
4301 }
4302 else
4303 {
4304 if (debug_threads)
4305 debug_printf ("already stopped LWP %ld\n",
4306 lwpid_of (thread));
4307
4308 /* The LWP may have been stopped in an internal event that
4309 was not meant to be notified back to GDB (e.g., gdbserver
4310 breakpoint), so we should be reporting a stop event in
4311 this case too. */
4312
4313 /* If the thread already has a pending SIGSTOP, this is a
4314 no-op. Otherwise, something later will presumably resume
4315 the thread and this will cause it to cancel any pending
4316 operation, due to last_resume_kind == resume_stop. If
4317 the thread already has a pending status to report, we
4318 will still report it the next time we wait - see
4319 status_pending_p_callback. */
4320
4321 /* If we already have a pending signal to report, then
4322 there's no need to queue a SIGSTOP, as this means we're
4323 midway through moving the LWP out of the jumppad, and we
4324 will report the pending signal as soon as that is
4325 finished. */
4326 if (lwp->pending_signals_to_report == NULL)
4327 send_sigstop (lwp);
4328 }
4329
4330 /* For stop requests, we're done. */
4331 lwp->resume = NULL;
4332 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4333 return 0;
4334 }
4335
4336   /* If this thread, which is about to be resumed, has a pending
4337      status, then don't resume any threads - we can just report the
4338      pending status.  Make sure to queue any signals that would
4339      otherwise be sent.  In all-stop mode, we make this decision based
4340      on whether *any* thread has a pending status.  If there's a thread
4341      that needs the step-over-breakpoint dance, then don't resume any
4342      other thread but that particular one.  */
4343 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4344
4345 if (!leave_pending)
4346 {
4347 if (debug_threads)
4348 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4349
4350 step = (lwp->resume->kind == resume_step);
4351 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4352 }
4353 else
4354 {
4355 if (debug_threads)
4356 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4357
4358 /* If we have a new signal, enqueue the signal. */
4359 if (lwp->resume->sig != 0)
4360 {
4361 struct pending_signals *p_sig;
4362 p_sig = xmalloc (sizeof (*p_sig));
4363 p_sig->prev = lwp->pending_signals;
4364 p_sig->signal = lwp->resume->sig;
4365 memset (&p_sig->info, 0, sizeof (siginfo_t));
4366
4367 /* If this is the same signal we were previously stopped by,
4368 make sure to queue its siginfo. We can ignore the return
4369 value of ptrace; if it fails, we'll skip
4370 PTRACE_SETSIGINFO. */
4371 if (WIFSTOPPED (lwp->last_status)
4372 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4373 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4374 &p_sig->info);
4375
4376 lwp->pending_signals = p_sig;
4377 }
4378 }
4379
4380 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4381 lwp->resume = NULL;
4382 return 0;
4383 }
4384
4385 static void
4386 linux_resume (struct thread_resume *resume_info, size_t n)
4387 {
4388 struct thread_resume_array array = { resume_info, n };
4389 struct thread_info *need_step_over = NULL;
4390 int any_pending;
4391 int leave_all_stopped;
4392
4393 if (debug_threads)
4394 {
4395 debug_enter ();
4396 debug_printf ("linux_resume:\n");
4397 }
4398
4399 find_inferior (&all_threads, linux_set_resume_request, &array);
4400
4401   /* If there is a thread which would otherwise be resumed, and which
4402      has a pending status, then don't resume any threads - we can just
4403      report the pending status.  Make sure to queue any signals that
4404      would otherwise be sent.  In non-stop mode, we apply this logic
4405      to each thread individually.  We consume all pending events
4406      before considering whether to start a step-over (in all-stop).  */
4407 any_pending = 0;
4408 if (!non_stop)
4409 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4410
4411 /* If there is a thread which would otherwise be resumed, which is
4412 stopped at a breakpoint that needs stepping over, then don't
4413 resume any threads - have it step over the breakpoint with all
4414 other threads stopped, then resume all threads again. Make sure
4415 to queue any signals that would otherwise be delivered or
4416 queued. */
4417 if (!any_pending && supports_breakpoints ())
4418 need_step_over
4419 = (struct thread_info *) find_inferior (&all_threads,
4420 need_step_over_p, NULL);
4421
4422 leave_all_stopped = (need_step_over != NULL || any_pending);
4423
4424 if (debug_threads)
4425 {
4426 if (need_step_over != NULL)
4427 debug_printf ("Not resuming all, need step over\n");
4428 else if (any_pending)
4429 debug_printf ("Not resuming, all-stop and found "
4430 "an LWP with pending status\n");
4431 else
4432 debug_printf ("Resuming, no pending status or step over needed\n");
4433 }
4434
4435 /* Even if we're leaving threads stopped, queue all signals we'd
4436 otherwise deliver. */
4437 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4438
4439 if (need_step_over)
4440 start_step_over (get_thread_lwp (need_step_over));
4441
4442 if (debug_threads)
4443 {
4444 debug_printf ("linux_resume done\n");
4445 debug_exit ();
4446 }
4447 }
4448
4449 /* This function is called once per thread. We check the thread's
4450 last resume request, which will tell us whether to resume, step, or
4451 leave the thread stopped. Any signal the client requested to be
4452 delivered has already been enqueued at this point.
4453
4454 If any thread that GDB wants running is stopped at an internal
4455 breakpoint that needs stepping over, we start a step-over operation
4456 on that particular thread, and leave all others stopped. */
4457
4458 static int
4459 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4460 {
4461 struct thread_info *thread = (struct thread_info *) entry;
4462 struct lwp_info *lwp = get_thread_lwp (thread);
4463 int step;
4464
4465 if (lwp == except)
4466 return 0;
4467
4468 if (debug_threads)
4469 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4470
4471 if (!lwp->stopped)
4472 {
4473 if (debug_threads)
4474 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4475 return 0;
4476 }
4477
4478 if (thread->last_resume_kind == resume_stop
4479 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4480 {
4481 if (debug_threads)
4482 	debug_printf ("   client wants LWP %ld to remain stopped\n",
4483 lwpid_of (thread));
4484 return 0;
4485 }
4486
4487 if (lwp->status_pending_p)
4488 {
4489 if (debug_threads)
4490 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4491 lwpid_of (thread));
4492 return 0;
4493 }
4494
4495 gdb_assert (lwp->suspended >= 0);
4496
4497 if (lwp->suspended)
4498 {
4499 if (debug_threads)
4500 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4501 return 0;
4502 }
4503
4504 if (thread->last_resume_kind == resume_stop
4505 && lwp->pending_signals_to_report == NULL
4506 && lwp->collecting_fast_tracepoint == 0)
4507 {
4508       /* We haven't reported this LWP as stopped yet (otherwise, the
4509 	 last_status.kind check above would catch it, and we wouldn't
4510 	 reach here).  This LWP may have been momentarily paused by a
4511 	 stop_all_lwps call while handling, for example, another LWP's
4512 step-over. In that case, the pending expected SIGSTOP signal
4513 that was queued at vCont;t handling time will have already
4514 been consumed by wait_for_sigstop, and so we need to requeue
4515 another one here. Note that if the LWP already has a SIGSTOP
4516 pending, this is a no-op. */
4517
4518 if (debug_threads)
4519 debug_printf ("Client wants LWP %ld to stop. "
4520 "Making sure it has a SIGSTOP pending\n",
4521 lwpid_of (thread));
4522
4523 send_sigstop (lwp);
4524 }
4525
4526 step = thread->last_resume_kind == resume_step;
4527 linux_resume_one_lwp (lwp, step, 0, NULL);
4528 return 0;
4529 }
4530
4531 static int
4532 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4533 {
4534 struct thread_info *thread = (struct thread_info *) entry;
4535 struct lwp_info *lwp = get_thread_lwp (thread);
4536
4537 if (lwp == except)
4538 return 0;
4539
4540 lwp->suspended--;
4541 gdb_assert (lwp->suspended >= 0);
4542
4543 return proceed_one_lwp (entry, except);
4544 }
4545
4546 /* When we finish a step-over, set threads running again. If there's
4547 another thread that may need a step-over, now's the time to start
4548 it. Eventually, we'll move all threads past their breakpoints. */
4549
4550 static void
4551 proceed_all_lwps (void)
4552 {
4553 struct thread_info *need_step_over;
4554
4555 /* If there is a thread which would otherwise be resumed, which is
4556 stopped at a breakpoint that needs stepping over, then don't
4557 resume any threads - have it step over the breakpoint with all
4558 other threads stopped, then resume all threads again. */
4559
4560 if (supports_breakpoints ())
4561 {
4562 need_step_over
4563 = (struct thread_info *) find_inferior (&all_threads,
4564 need_step_over_p, NULL);
4565
4566 if (need_step_over != NULL)
4567 {
4568 if (debug_threads)
4569 debug_printf ("proceed_all_lwps: found "
4570 "thread %ld needing a step-over\n",
4571 lwpid_of (need_step_over));
4572
4573 start_step_over (get_thread_lwp (need_step_over));
4574 return;
4575 }
4576 }
4577
4578 if (debug_threads)
4579 debug_printf ("Proceeding, no step-over needed\n");
4580
4581 find_inferior (&all_threads, proceed_one_lwp, NULL);
4582 }
4583
4584 /* Stopped LWPs that the client wanted to be running, that don't have
4585 pending statuses, are set to run again, except for EXCEPT, if not
4586 NULL. This undoes a stop_all_lwps call. */
4587
4588 static void
4589 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4590 {
4591 if (debug_threads)
4592 {
4593 debug_enter ();
4594 if (except)
4595 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4596 lwpid_of (get_lwp_thread (except)));
4597 else
4598 debug_printf ("unstopping all lwps\n");
4599 }
4600
4601 if (unsuspend)
4602 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4603 else
4604 find_inferior (&all_threads, proceed_one_lwp, except);
4605
4606 if (debug_threads)
4607 {
4608 debug_printf ("unstop_all_lwps done\n");
4609 debug_exit ();
4610 }
4611 }
4612
4613
4614 #ifdef HAVE_LINUX_REGSETS
4615
4616 #define use_linux_regsets 1
4617
4618 /* Returns true if REGSET has been disabled. */
4619
4620 static int
4621 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4622 {
4623 return (info->disabled_regsets != NULL
4624 && info->disabled_regsets[regset - info->regsets]);
4625 }
4626
4627 /* Disable REGSET. */
4628
4629 static void
4630 disable_regset (struct regsets_info *info, struct regset_info *regset)
4631 {
4632 int dr_offset;
4633
4634 dr_offset = regset - info->regsets;
4635 if (info->disabled_regsets == NULL)
4636 info->disabled_regsets = xcalloc (1, info->num_regsets);
4637 info->disabled_regsets[dr_offset] = 1;
4638 }
4639
4640 static int
4641 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4642 struct regcache *regcache)
4643 {
4644 struct regset_info *regset;
4645 int saw_general_regs = 0;
4646 int pid;
4647 struct iovec iov;
4648
4649 pid = lwpid_of (current_thread);
4650 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4651 {
4652 void *buf, *data;
4653 int nt_type, res;
4654
4655 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4656 continue;
4657
4658 buf = xmalloc (regset->size);
4659
4660 nt_type = regset->nt_type;
4661 if (nt_type)
4662 {
4663 iov.iov_base = buf;
4664 iov.iov_len = regset->size;
4665 data = (void *) &iov;
4666 }
4667 else
4668 data = buf;
4669
4670 #ifndef __sparc__
4671 res = ptrace (regset->get_request, pid,
4672 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4673 #else
4674 res = ptrace (regset->get_request, pid, data, nt_type);
4675 #endif
4676 if (res < 0)
4677 {
4678 if (errno == EIO)
4679 {
4680 /* If we get EIO on a regset, do not try it again for
4681 this process mode. */
4682 disable_regset (regsets_info, regset);
4683 }
4684 else if (errno == ENODATA)
4685 {
4686 /* ENODATA may be returned if the regset is currently
4687 not "active". This can happen in normal operation,
4688 so suppress the warning in this case. */
4689 }
4690 else
4691 {
4692 char s[256];
4693 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4694 pid);
4695 perror (s);
4696 }
4697 }
4698 else
4699 {
4700 if (regset->type == GENERAL_REGS)
4701 saw_general_regs = 1;
4702 regset->store_function (regcache, buf);
4703 }
4704 free (buf);
4705 }
4706 if (saw_general_regs)
4707 return 0;
4708 else
4709 return 1;
4710 }
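/* Editorial sketch, not part of the original source: when a regset
   carries an NT_ note type, the transfer above goes through
   PTRACE_GETREGSET, which takes the note type in the "address" slot
   and a struct iovec describing the buffer.  A minimal stand-alone
   fetch of the general registers (NT_PRSTATUS); TID stands for an
   already-traced, stopped thread.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
example_fetch_gregs (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);	/* The kernel trims this to what it wrote.  */
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif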
4711
4712 static int
4713 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4714 struct regcache *regcache)
4715 {
4716 struct regset_info *regset;
4717 int saw_general_regs = 0;
4718 int pid;
4719 struct iovec iov;
4720
4721 pid = lwpid_of (current_thread);
4722 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4723 {
4724 void *buf, *data;
4725 int nt_type, res;
4726
4727 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4728 || regset->fill_function == NULL)
4729 continue;
4730
4731 buf = xmalloc (regset->size);
4732
4733 /* First fill the buffer with the current register set contents,
4734 in case there are any items in the kernel's regset that are
4735 not in gdbserver's regcache. */
4736
4737 nt_type = regset->nt_type;
4738 if (nt_type)
4739 {
4740 iov.iov_base = buf;
4741 iov.iov_len = regset->size;
4742 data = (void *) &iov;
4743 }
4744 else
4745 data = buf;
4746
4747 #ifndef __sparc__
4748 res = ptrace (regset->get_request, pid,
4749 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4750 #else
4751 res = ptrace (regset->get_request, pid, data, nt_type);
4752 #endif
4753
4754 if (res == 0)
4755 {
4756 /* Then overlay our cached registers on that. */
4757 regset->fill_function (regcache, buf);
4758
4759 /* Only now do we write the register set. */
4760 #ifndef __sparc__
4761 res = ptrace (regset->set_request, pid,
4762 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4763 #else
4764 res = ptrace (regset->set_request, pid, data, nt_type);
4765 #endif
4766 }
4767
4768 if (res < 0)
4769 {
4770 if (errno == EIO)
4771 {
4772 /* If we get EIO on a regset, do not try it again for
4773 this process mode. */
4774 disable_regset (regsets_info, regset);
4775 }
4776 else if (errno == ESRCH)
4777 {
4778 /* At this point, ESRCH should mean the process is
4779 already gone, in which case we simply ignore attempts
4780 to change its registers. See also the related
4781 comment in linux_resume_one_lwp. */
4782 free (buf);
4783 return 0;
4784 }
4785 else
4786 {
4787 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4788 }
4789 }
4790 else if (regset->type == GENERAL_REGS)
4791 saw_general_regs = 1;
4792 free (buf);
4793 }
4794 if (saw_general_regs)
4795 return 0;
4796 else
4797 return 1;
4798 }
4799
4800 #else /* !HAVE_LINUX_REGSETS */
4801
4802 #define use_linux_regsets 0
4803 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4804 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4805
4806 #endif
4807
4808 /* Return 1 if register REGNO is supported by one of the regset ptrace
4809 calls or 0 if it has to be transferred individually. */
4810
4811 static int
4812 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4813 {
4814 unsigned char mask = 1 << (regno % 8);
4815 size_t index = regno / 8;
4816
4817 return (use_linux_regsets
4818 && (regs_info->regset_bitmap == NULL
4819 || (regs_info->regset_bitmap[index] & mask) != 0));
4820 }
4821
4822 #ifdef HAVE_LINUX_USRREGS
4823
4824 int
4825 register_addr (const struct usrregs_info *usrregs, int regnum)
4826 {
4827 int addr;
4828
4829 if (regnum < 0 || regnum >= usrregs->num_regs)
4830 error ("Invalid register number %d.", regnum);
4831
4832 addr = usrregs->regmap[regnum];
4833
4834 return addr;
4835 }
4836
4837 /* Fetch one register. */
4838 static void
4839 fetch_register (const struct usrregs_info *usrregs,
4840 struct regcache *regcache, int regno)
4841 {
4842 CORE_ADDR regaddr;
4843 int i, size;
4844 char *buf;
4845 int pid;
4846
4847 if (regno >= usrregs->num_regs)
4848 return;
4849 if ((*the_low_target.cannot_fetch_register) (regno))
4850 return;
4851
4852 regaddr = register_addr (usrregs, regno);
4853 if (regaddr == -1)
4854 return;
4855
4856 size = ((register_size (regcache->tdesc, regno)
4857 + sizeof (PTRACE_XFER_TYPE) - 1)
4858 & -sizeof (PTRACE_XFER_TYPE));
4859 buf = alloca (size);
4860
4861 pid = lwpid_of (current_thread);
4862 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4863 {
4864 errno = 0;
4865 *(PTRACE_XFER_TYPE *) (buf + i) =
4866 ptrace (PTRACE_PEEKUSER, pid,
4867 /* Coerce to a uintptr_t first to avoid potential gcc warning
4868 of coercing an 8 byte integer to a 4 byte pointer. */
4869 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4870 regaddr += sizeof (PTRACE_XFER_TYPE);
4871 if (errno != 0)
4872 error ("reading register %d: %s", regno, strerror (errno));
4873 }
4874
4875 if (the_low_target.supply_ptrace_register)
4876 the_low_target.supply_ptrace_register (regcache, regno, buf);
4877 else
4878 supply_register (regcache, regno, buf);
4879 }
4880
4881 /* Store one register. */
4882 static void
4883 store_register (const struct usrregs_info *usrregs,
4884 struct regcache *regcache, int regno)
4885 {
4886 CORE_ADDR regaddr;
4887 int i, size;
4888 char *buf;
4889 int pid;
4890
4891 if (regno >= usrregs->num_regs)
4892 return;
4893 if ((*the_low_target.cannot_store_register) (regno))
4894 return;
4895
4896 regaddr = register_addr (usrregs, regno);
4897 if (regaddr == -1)
4898 return;
4899
4900 size = ((register_size (regcache->tdesc, regno)
4901 + sizeof (PTRACE_XFER_TYPE) - 1)
4902 & -sizeof (PTRACE_XFER_TYPE));
4903 buf = alloca (size);
4904 memset (buf, 0, size);
4905
4906 if (the_low_target.collect_ptrace_register)
4907 the_low_target.collect_ptrace_register (regcache, regno, buf);
4908 else
4909 collect_register (regcache, regno, buf);
4910
4911 pid = lwpid_of (current_thread);
4912 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4913 {
4914 errno = 0;
4915 ptrace (PTRACE_POKEUSER, pid,
4916 /* Coerce to a uintptr_t first to avoid potential gcc warning
4917 about coercing an 8 byte integer to a 4 byte pointer. */
4918 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4919 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4920 if (errno != 0)
4921 {
4922 /* At this point, ESRCH should mean the process is
4923 already gone, in which case we simply ignore attempts
4924 to change its registers. See also the related
4925 comment in linux_resume_one_lwp. */
4926 if (errno == ESRCH)
4927 return;
4928
4929 if ((*the_low_target.cannot_store_register) (regno) == 0)
4930 error ("writing register %d: %s", regno, strerror (errno));
4931 }
4932 regaddr += sizeof (PTRACE_XFER_TYPE);
4933 }
4934 }
4935
4936 /* Fetch all registers, or just one, from the child process.
4937 If REGNO is -1, do this for all registers, skipping any that are
4938 assumed to have been retrieved by regsets_fetch_inferior_registers,
4939 unless ALL is non-zero.
4940 Otherwise, REGNO specifies which register (so we can save time). */
4941 static void
4942 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4943 struct regcache *regcache, int regno, int all)
4944 {
4945 struct usrregs_info *usr = regs_info->usrregs;
4946
4947 if (regno == -1)
4948 {
4949 for (regno = 0; regno < usr->num_regs; regno++)
4950 if (all || !linux_register_in_regsets (regs_info, regno))
4951 fetch_register (usr, regcache, regno);
4952 }
4953 else
4954 fetch_register (usr, regcache, regno);
4955 }
4956
4957 /* Store our register values back into the inferior.
4958 If REGNO is -1, do this for all registers, skipping any that are
4959 assumed to have been saved by regsets_store_inferior_registers,
4960 unless ALL is non-zero.
4961 Otherwise, REGNO specifies which register (so we can save time). */
4962 static void
4963 usr_store_inferior_registers (const struct regs_info *regs_info,
4964 struct regcache *regcache, int regno, int all)
4965 {
4966 struct usrregs_info *usr = regs_info->usrregs;
4967
4968 if (regno == -1)
4969 {
4970 for (regno = 0; regno < usr->num_regs; regno++)
4971 if (all || !linux_register_in_regsets (regs_info, regno))
4972 store_register (usr, regcache, regno);
4973 }
4974 else
4975 store_register (usr, regcache, regno);
4976 }
4977
4978 #else /* !HAVE_LINUX_USRREGS */
4979
4980 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4981 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4982
4983 #endif
4984
4985
4986 void
4987 linux_fetch_registers (struct regcache *regcache, int regno)
4988 {
4989 int use_regsets;
4990 int all = 0;
4991 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4992
4993 if (regno == -1)
4994 {
4995 if (the_low_target.fetch_register != NULL
4996 && regs_info->usrregs != NULL)
4997 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4998 (*the_low_target.fetch_register) (regcache, regno);
4999
5000 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5001 if (regs_info->usrregs != NULL)
5002 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5003 }
5004 else
5005 {
5006 if (the_low_target.fetch_register != NULL
5007 && (*the_low_target.fetch_register) (regcache, regno))
5008 return;
5009
5010 use_regsets = linux_register_in_regsets (regs_info, regno);
5011 if (use_regsets)
5012 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5013 regcache);
5014 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5015 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5016 }
5017 }
5018
5019 void
5020 linux_store_registers (struct regcache *regcache, int regno)
5021 {
5022 int use_regsets;
5023 int all = 0;
5024 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5025
5026 if (regno == -1)
5027 {
5028 all = regsets_store_inferior_registers (regs_info->regsets_info,
5029 regcache);
5030 if (regs_info->usrregs != NULL)
5031 usr_store_inferior_registers (regs_info, regcache, regno, all);
5032 }
5033 else
5034 {
5035 use_regsets = linux_register_in_regsets (regs_info, regno);
5036 if (use_regsets)
5037 all = regsets_store_inferior_registers (regs_info->regsets_info,
5038 regcache);
5039 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5040 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5041 }
5042 }
5043
5044
5045 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5046 to debugger memory starting at MYADDR. */
5047
5048 static int
5049 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5050 {
5051 int pid = lwpid_of (current_thread);
5052 register PTRACE_XFER_TYPE *buffer;
5053 register CORE_ADDR addr;
5054 register int count;
5055 char filename[64];
5056 register int i;
5057 int ret;
5058 int fd;
5059
5060 /* Try using /proc. Don't bother for one word. */
5061 if (len >= 3 * sizeof (long))
5062 {
5063 int bytes;
5064
5065 /* We could keep this file open and cache it - possibly one per
5066 thread. That requires some juggling, but is even faster. */
5067 sprintf (filename, "/proc/%d/mem", pid);
5068 fd = open (filename, O_RDONLY | O_LARGEFILE);
5069 if (fd == -1)
5070 goto no_proc;
5071
5072 /* If pread64 is available, use it. It's faster if the kernel
5073 supports it (only one syscall), and it's 64-bit safe even on
5074 32-bit platforms (for instance, SPARC debugging a SPARC64
5075 application). */
5076 #ifdef HAVE_PREAD64
5077 bytes = pread64 (fd, myaddr, len, memaddr);
5078 #else
5079 bytes = -1;
5080 if (lseek (fd, memaddr, SEEK_SET) != -1)
5081 bytes = read (fd, myaddr, len);
5082 #endif
5083
5084 close (fd);
5085 if (bytes == len)
5086 return 0;
5087
5088 /* Some data was read, we'll try to get the rest with ptrace. */
5089 if (bytes > 0)
5090 {
5091 memaddr += bytes;
5092 myaddr += bytes;
5093 len -= bytes;
5094 }
5095 }
5096
5097 no_proc:
5098 /* Round starting address down to longword boundary. */
5099 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5100 /* Round ending address up; get number of longwords that makes. */
5101 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5102 / sizeof (PTRACE_XFER_TYPE));
5103 /* Allocate buffer of that many longwords. */
5104 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5105
5106   /* Read all the longwords.  */
5107 errno = 0;
5108 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5109 {
5110 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5111 about coercing an 8 byte integer to a 4 byte pointer. */
5112 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5113 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5114 (PTRACE_TYPE_ARG4) 0);
5115 if (errno)
5116 break;
5117 }
5118 ret = errno;
5119
5120 /* Copy appropriate bytes out of the buffer. */
5121 if (i > 0)
5122 {
5123 i *= sizeof (PTRACE_XFER_TYPE);
5124 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5125 memcpy (myaddr,
5126 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5127 i < len ? i : len);
5128 }
5129
5130 return ret;
5131 }
5132
5133 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5134 memory at MEMADDR. On failure (cannot write to the inferior)
5135 returns the value of errno. Always succeeds if LEN is zero. */
5136
5137 static int
5138 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5139 {
5140 register int i;
5141 /* Round starting address down to longword boundary. */
5142 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5143 /* Round ending address up; get number of longwords that makes. */
5144 register int count
5145 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5146 / sizeof (PTRACE_XFER_TYPE);
5147
5148 /* Allocate buffer of that many longwords. */
5149 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5150 alloca (count * sizeof (PTRACE_XFER_TYPE));
5151
5152 int pid = lwpid_of (current_thread);
5153
5154 if (len == 0)
5155 {
5156 /* Zero length write always succeeds. */
5157 return 0;
5158 }
5159
5160 if (debug_threads)
5161 {
5162 /* Dump up to four bytes. */
5163       unsigned int val = 0;
5164
5165       if (len >= 4)
5166 	val = * (unsigned int *) myaddr;
5167       else
5168 	/* Don't read past the end of MYADDR for short writes.  */
5169 	memcpy (&val, myaddr, len);
5170 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5171 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5172 }
5173
5174 /* Fill start and end extra bytes of buffer with existing memory data. */
5175
5176 errno = 0;
5177 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5178 about coercing an 8 byte integer to a 4 byte pointer. */
5179 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5180 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5181 (PTRACE_TYPE_ARG4) 0);
5182 if (errno)
5183 return errno;
5184
5185 if (count > 1)
5186 {
5187 errno = 0;
5188 buffer[count - 1]
5189 = ptrace (PTRACE_PEEKTEXT, pid,
5190 /* Coerce to a uintptr_t first to avoid potential gcc warning
5191 about coercing an 8 byte integer to a 4 byte pointer. */
5192 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5193 * sizeof (PTRACE_XFER_TYPE)),
5194 (PTRACE_TYPE_ARG4) 0);
5195 if (errno)
5196 return errno;
5197 }
5198
5199 /* Copy data to be written over corresponding part of buffer. */
5200
5201 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5202 myaddr, len);
5203
5204 /* Write the entire buffer. */
5205
5206 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5207 {
5208 errno = 0;
5209 ptrace (PTRACE_POKETEXT, pid,
5210 /* Coerce to a uintptr_t first to avoid potential gcc warning
5211 about coercing an 8 byte integer to a 4 byte pointer. */
5212 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5213 (PTRACE_TYPE_ARG4) buffer[i]);
5214 if (errno)
5215 return errno;
5216 }
5217
5218 return 0;
5219 }
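/* Editorial sketch, not part of the original source: a worked
   instance of the rounding done by linux_write_memory above.  With an
   8-byte PTRACE_XFER_TYPE, writing LEN = 6 bytes at MEMADDR = 0x1003
   touches two words: ADDR rounds down to 0x1000 and COUNT rounds up
   to 2, so both words are peeked first and only the six interior
   bytes are replaced before the words are poked back.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
example_round_to_words (void)
{
  uint64_t memaddr = 0x1003, len = 6, xfer = 8;
  uint64_t addr = memaddr & -xfer;			/* 0x1000 */
  uint64_t count = ((memaddr + len) - addr + xfer - 1) / xfer;

  assert (addr == 0x1000 && count == 2);
}
#endif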
5220
5221 static void
5222 linux_look_up_symbols (void)
5223 {
5224 #ifdef USE_THREAD_DB
5225 struct process_info *proc = current_process ();
5226
5227 if (proc->priv->thread_db != NULL)
5228 return;
5229
5230 /* If the kernel supports tracing clones, then we don't need to
5231 use the magic thread event breakpoint to learn about
5232 threads. */
5233 thread_db_init (!linux_supports_traceclone ());
5234 #endif
5235 }
5236
5237 static void
5238 linux_request_interrupt (void)
5239 {
5240 extern unsigned long signal_pid;
5241
5242 /* Send a SIGINT to the process group. This acts just like the user
5243 typed a ^C on the controlling terminal. */
5244 kill (-signal_pid, SIGINT);
5245 }
5246
5247 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5248 to debugger memory starting at MYADDR. */
5249
5250 static int
5251 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5252 {
5253 char filename[PATH_MAX];
5254 int fd, n;
5255 int pid = lwpid_of (current_thread);
5256
5257 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5258
5259 fd = open (filename, O_RDONLY);
5260 if (fd < 0)
5261 return -1;
5262
5263 if (offset != (CORE_ADDR) 0
5264 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5265 n = -1;
5266 else
5267 n = read (fd, myaddr, len);
5268
5269 close (fd);
5270
5271 return n;
5272 }
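/* Editorial sketch, not part of the original source: the bytes
   returned above are the raw auxiliary vector, an array of
   (type, value) pairs terminated by AT_NULL.  A minimal consumer that
   scans a buffer read from /proc/PID/auxv; note that the ElfW layout
   chosen here is the reader's own, which only matches an inferior of
   the same word size.  */
#if 0
#include <elf.h>
#include <link.h>
#include <stddef.h>

static unsigned long
example_auxv_lookup (const ElfW(auxv_t) *av, size_t n, unsigned long type)
{
  size_t i;

  for (i = 0; i < n && av[i].a_type != AT_NULL; i++)
    if (av[i].a_type == type)	/* E.g. AT_ENTRY or AT_PHDR.  */
      return av[i].a_un.a_val;
  return 0;
}
#endif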
5273
5274 /* These breakpoint and watchpoint related wrapper functions simply
5275 pass on the function call if the target has registered a
5276 corresponding function. */
5277
5278 static int
5279 linux_supports_z_point_type (char z_type)
5280 {
5281 return (the_low_target.supports_z_point_type != NULL
5282 && the_low_target.supports_z_point_type (z_type));
5283 }
5284
5285 static int
5286 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5287 int size, struct raw_breakpoint *bp)
5288 {
5289 if (type == raw_bkpt_type_sw)
5290 return insert_memory_breakpoint (bp);
5291 else if (the_low_target.insert_point != NULL)
5292 return the_low_target.insert_point (type, addr, size, bp);
5293 else
5294 /* Unsupported (see target.h). */
5295 return 1;
5296 }
5297
5298 static int
5299 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5300 int size, struct raw_breakpoint *bp)
5301 {
5302 if (type == raw_bkpt_type_sw)
5303 return remove_memory_breakpoint (bp);
5304 else if (the_low_target.remove_point != NULL)
5305 return the_low_target.remove_point (type, addr, size, bp);
5306 else
5307 /* Unsupported (see target.h). */
5308 return 1;
5309 }
5310
5311 /* Implement the to_stopped_by_sw_breakpoint target_ops
5312 method. */
5313
5314 static int
5315 linux_stopped_by_sw_breakpoint (void)
5316 {
5317 struct lwp_info *lwp = get_thread_lwp (current_thread);
5318
5319 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5320 }
5321
5322 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5323 method. */
5324
5325 static int
5326 linux_supports_stopped_by_sw_breakpoint (void)
5327 {
5328 return USE_SIGTRAP_SIGINFO;
5329 }
5330
5331 /* Implement the to_stopped_by_hw_breakpoint target_ops
5332 method. */
5333
5334 static int
5335 linux_stopped_by_hw_breakpoint (void)
5336 {
5337 struct lwp_info *lwp = get_thread_lwp (current_thread);
5338
5339 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5340 }
5341
5342 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5343 method. */
5344
5345 static int
5346 linux_supports_stopped_by_hw_breakpoint (void)
5347 {
5348 return USE_SIGTRAP_SIGINFO;
5349 }
5350
5351 /* Implement the supports_conditional_breakpoints target_ops
5352 method. */
5353
5354 static int
5355 linux_supports_conditional_breakpoints (void)
5356 {
5357   /* GDBserver needs to step over the breakpoint if the condition is
5358      false.  GDBserver's software single-step support is too simple, so
5359      disable conditional breakpoints if the target doesn't have hardware
5360      single-step.  */
5361 return can_hardware_single_step ();
5362 }
5363
5364 static int
5365 linux_stopped_by_watchpoint (void)
5366 {
5367 struct lwp_info *lwp = get_thread_lwp (current_thread);
5368
5369 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5370 }
5371
5372 static CORE_ADDR
5373 linux_stopped_data_address (void)
5374 {
5375 struct lwp_info *lwp = get_thread_lwp (current_thread);
5376
5377 return lwp->stopped_data_address;
5378 }
5379
5380 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5381 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5382 && defined(PT_TEXT_END_ADDR)
5383
5384 /* This is only used for targets that define PT_TEXT_ADDR,
5385    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5386    target is assumed to have other ways of acquiring this information,
5387    such as loadmaps.  */
5388
5389 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5390 to tell gdb about. */
5391
5392 static int
5393 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5394 {
5395 unsigned long text, text_end, data;
5396 int pid = lwpid_of (current_thread);
5397
5398 errno = 0;
5399
5400 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5401 (PTRACE_TYPE_ARG4) 0);
5402 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5403 (PTRACE_TYPE_ARG4) 0);
5404 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5405 (PTRACE_TYPE_ARG4) 0);
5406
5407 if (errno == 0)
5408 {
5409 /* Both text and data offsets produced at compile-time (and so
5410 used by gdb) are relative to the beginning of the program,
5411 with the data segment immediately following the text segment.
5412 However, the actual runtime layout in memory may put the data
5413 somewhere else, so when we send gdb a data base-address, we
5414 use the real data base address and subtract the compile-time
5415 data base-address from it (which is just the length of the
5416 text segment). BSS immediately follows data in both
5417 cases. */
5418 *text_p = text;
5419 *data_p = data - (text_end - text);
5420
5421 return 1;
5422 }
5423 return 0;
5424 }
5425 #endif
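/* Editorial note, not part of the original source: a numeric instance
   of the adjustment above.  If the text segment runs from 0x10000 to
   0x18000 but the data segment was actually loaded at 0x40000, the
   data base reported to GDB is 0x40000 - (0x18000 - 0x10000) =
   0x38000, so that compile-time data offsets (which assume data
   immediately follows text) relocate to the right addresses.  */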
5426
5427 static int
5428 linux_qxfer_osdata (const char *annex,
5429 unsigned char *readbuf, unsigned const char *writebuf,
5430 CORE_ADDR offset, int len)
5431 {
5432 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5433 }
5434
5435 /* Convert a native/host siginfo object, into/from the siginfo in the
5436 layout of the inferiors' architecture. */
5437
5438 static void
5439 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5440 {
5441 int done = 0;
5442
5443 if (the_low_target.siginfo_fixup != NULL)
5444 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5445
5446 /* If there was no callback, or the callback didn't do anything,
5447 then just do a straight memcpy. */
5448 if (!done)
5449 {
5450 if (direction == 1)
5451 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5452 else
5453 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5454 }
5455 }
5456
5457 static int
5458 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5459 unsigned const char *writebuf, CORE_ADDR offset, int len)
5460 {
5461 int pid;
5462 siginfo_t siginfo;
5463 char inf_siginfo[sizeof (siginfo_t)];
5464
5465 if (current_thread == NULL)
5466 return -1;
5467
5468 pid = lwpid_of (current_thread);
5469
5470 if (debug_threads)
5471 debug_printf ("%s siginfo for lwp %d.\n",
5472 readbuf != NULL ? "Reading" : "Writing",
5473 pid);
5474
5475 if (offset >= sizeof (siginfo))
5476 return -1;
5477
5478 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5479 return -1;
5480
5481 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5482 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5483 inferior with a 64-bit GDBSERVER should look the same as debugging it
5484 with a 32-bit GDBSERVER, we need to convert it. */
5485 siginfo_fixup (&siginfo, inf_siginfo, 0);
5486
5487 if (offset + len > sizeof (siginfo))
5488 len = sizeof (siginfo) - offset;
5489
5490 if (readbuf != NULL)
5491 memcpy (readbuf, inf_siginfo + offset, len);
5492 else
5493 {
5494 memcpy (inf_siginfo + offset, writebuf, len);
5495
5496 /* Convert back to ptrace layout before flushing it out. */
5497 siginfo_fixup (&siginfo, inf_siginfo, 1);
5498
5499 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5500 return -1;
5501 }
5502
5503 return len;
5504 }
5505
5506 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5507    it makes us notice when children change state; and it acts as the
5508    handler for the sigsuspend in my_waitpid.  */
5509
5510 static void
5511 sigchld_handler (int signo)
5512 {
5513 int old_errno = errno;
5514
5515 if (debug_threads)
5516 {
5517 do
5518 {
5519 /* fprintf is not async-signal-safe, so call write
5520 directly. */
5521 if (write (2, "sigchld_handler\n",
5522 sizeof ("sigchld_handler\n") - 1) < 0)
5523 break; /* just ignore */
5524 } while (0);
5525 }
5526
5527 if (target_is_async_p ())
5528 async_file_mark (); /* trigger a linux_wait */
5529
5530 errno = old_errno;
5531 }
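/* The handler relies on the self-pipe idiom: async_file_mark (defined
   elsewhere in this file) makes the event-loop pipe readable so the
   main loop wakes up.  A minimal, hypothetical sketch of that idiom,
   assuming a pipe set up with O_NONBLOCK as in linux_async below:  */

static int example_event_pipe[2];

static void
example_async_file_mark (void)
{
  /* One byte is enough; the event loop only polls the read end for
     readability.  O_NONBLOCK keeps a full pipe from blocking us
     inside a signal handler.  The result is deliberately ignored.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    ;
}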
5532
5533 static int
5534 linux_supports_non_stop (void)
5535 {
5536 return 1;
5537 }
5538
5539 static int
5540 linux_async (int enable)
5541 {
5542 int previous = target_is_async_p ();
5543
5544 if (debug_threads)
5545 debug_printf ("linux_async (%d), previous=%d\n",
5546 enable, previous);
5547
5548 if (previous != enable)
5549 {
5550 sigset_t mask;
5551 sigemptyset (&mask);
5552 sigaddset (&mask, SIGCHLD);
5553
5554 sigprocmask (SIG_BLOCK, &mask, NULL);
5555
5556 if (enable)
5557 {
5558 if (pipe (linux_event_pipe) == -1)
5559 {
5560 linux_event_pipe[0] = -1;
5561 linux_event_pipe[1] = -1;
5562 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5563
5564 warning ("creating event pipe failed.");
5565 return previous;
5566 }
5567
5568 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5569 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5570
5571 /* Register the event loop handler. */
5572 add_file_handler (linux_event_pipe[0],
5573 handle_target_event, NULL);
5574
5575 /* Always trigger a linux_wait. */
5576 async_file_mark ();
5577 }
5578 else
5579 {
5580 delete_file_handler (linux_event_pipe[0]);
5581
5582 close (linux_event_pipe[0]);
5583 close (linux_event_pipe[1]);
5584 linux_event_pipe[0] = -1;
5585 linux_event_pipe[1] = -1;
5586 }
5587
5588 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5589 }
5590
5591 return previous;
5592 }
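/* Usage sketch (hypothetical caller): since linux_async returns the
   previous state, a temporary mode switch can be undone exactly:

     int prev = linux_async (1);
     ... work that needs SIGCHLD-driven wakeups ...
     linux_async (prev);
*/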
5593
5594 static int
5595 linux_start_non_stop (int nonstop)
5596 {
5597 /* Register or unregister from event-loop accordingly. */
5598 linux_async (nonstop);
5599
5600 if (target_is_async_p () != (nonstop != 0))
5601 return -1;
5602
5603 return 0;
5604 }
5605
5606 static int
5607 linux_supports_multi_process (void)
5608 {
5609 return 1;
5610 }
5611
5612 /* Check if fork events are supported. */
5613
5614 static int
5615 linux_supports_fork_events (void)
5616 {
5617 return linux_supports_tracefork ();
5618 }
5619
5620 /* Check if vfork events are supported. */
5621
5622 static int
5623 linux_supports_vfork_events (void)
5624 {
5625 return linux_supports_tracefork ();
5626 }
5627
5628 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5629 options for the specified lwp. */
5630
5631 static int
5632 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5633 void *args)
5634 {
5635 struct thread_info *thread = (struct thread_info *) entry;
5636 struct lwp_info *lwp = get_thread_lwp (thread);
5637
5638 if (!lwp->stopped)
5639 {
5640 /* Stop the lwp so we can modify its ptrace options. */
5641 lwp->must_set_ptrace_flags = 1;
5642 linux_stop_lwp (lwp);
5643 }
5644 else
5645 {
5646 /* Already stopped; go ahead and set the ptrace options. */
5647 struct process_info *proc = find_process_pid (pid_of (thread));
5648 int options = linux_low_ptrace_options (proc->attached);
5649
5650 linux_enable_event_reporting (lwpid_of (thread), options);
5651 lwp->must_set_ptrace_flags = 0;
5652 }
5653
5654 return 0;
5655 }
5656
5657 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5658 ptrace flags for all inferiors. This is in case the new GDB connection
5659 doesn't support the same set of events that the previous one did. */
5660
5661 static void
5662 linux_handle_new_gdb_connection (void)
5663 {
5664 pid_t pid;
5665
5666 /* Request that all the lwps reset their ptrace options. */
5667 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5668 }
5669
5670 static int
5671 linux_supports_disable_randomization (void)
5672 {
5673 #ifdef HAVE_PERSONALITY
5674 return 1;
5675 #else
5676 return 0;
5677 #endif
5678 }
5679
5680 static int
5681 linux_supports_agent (void)
5682 {
5683 return 1;
5684 }
5685
5686 static int
5687 linux_supports_range_stepping (void)
5688 {
5689 if (*the_low_target.supports_range_stepping == NULL)
5690 return 0;
5691
5692 return (*the_low_target.supports_range_stepping) ();
5693 }
5694
5695 /* Enumerate spufs IDs for process PID. */
5696 static int
5697 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5698 {
5699 int pos = 0;
5700 int written = 0;
5701 char path[128];
5702 DIR *dir;
5703 struct dirent *entry;
5704
5705 sprintf (path, "/proc/%ld/fd", pid);
5706 dir = opendir (path);
5707 if (!dir)
5708 return -1;
5709
5710 rewinddir (dir);
5711 while ((entry = readdir (dir)) != NULL)
5712 {
5713 struct stat st;
5714 struct statfs stfs;
5715 int fd;
5716
5717 fd = atoi (entry->d_name);
5718 if (!fd)
5719 continue;
5720
5721 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5722 if (stat (path, &st) != 0)
5723 continue;
5724 if (!S_ISDIR (st.st_mode))
5725 continue;
5726
5727 if (statfs (path, &stfs) != 0)
5728 continue;
5729 if (stfs.f_type != SPUFS_MAGIC)
5730 continue;
5731
5732 if (pos >= offset && pos + 4 <= offset + len)
5733 {
5734 *(unsigned int *)(buf + pos - offset) = fd;
5735 written += 4;
5736 }
5737 pos += 4;
5738 }
5739
5740 closedir (dir);
5741 return written;
5742 }
5743
5744 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5745 object type, using the /proc file system. */
5746 static int
5747 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5748 unsigned const char *writebuf,
5749 CORE_ADDR offset, int len)
5750 {
5751 long pid = lwpid_of (current_thread);
5752 char buf[128];
5753 int fd = 0;
5754 int ret = 0;
5755
5756 if (!writebuf && !readbuf)
5757 return -1;
5758
5759 if (!*annex)
5760 {
5761 if (!readbuf)
5762 return -1;
5763 else
5764 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5765 }
5766
5767 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5768 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5769 if (fd <= 0)
5770 return -1;
5771
5772 if (offset != 0
5773 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5774 {
5775 close (fd);
5776 return 0;
5777 }
5778
5779 if (writebuf)
5780 ret = write (fd, writebuf, (size_t) len);
5781 else
5782 ret = read (fd, readbuf, (size_t) len);
5783
5784 close (fd);
5785 return ret;
5786 }
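/* Example with hypothetical values: for pid 1234 and annex "7/regs",
   the code above opens /proc/1234/fd/7/regs, i.e. the "regs" file of
   the spufs context that the inferior holds open on file descriptor
   7; an empty annex instead enumerates the context file descriptors
   via spu_enumerate_spu_ids.  */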
5787
5788 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5789 struct target_loadseg
5790 {
5791 /* Core address to which the segment is mapped. */
5792 Elf32_Addr addr;
5793 /* VMA recorded in the program header. */
5794 Elf32_Addr p_vaddr;
5795 /* Size of this segment in memory. */
5796 Elf32_Word p_memsz;
5797 };
5798
5799 # if defined PT_GETDSBT
5800 struct target_loadmap
5801 {
5802 /* Protocol version number, must be zero. */
5803 Elf32_Word version;
5804 /* Pointer to the DSBT table, its size, and the DSBT index. */
5805 unsigned *dsbt_table;
5806 unsigned dsbt_size, dsbt_index;
5807 /* Number of segments in this map. */
5808 Elf32_Word nsegs;
5809 /* The actual memory map. */
5810 struct target_loadseg segs[/*nsegs*/];
5811 };
5812 # define LINUX_LOADMAP PT_GETDSBT
5813 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5814 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5815 # else
5816 struct target_loadmap
5817 {
5818 /* Protocol version number, must be zero. */
5819 Elf32_Half version;
5820 /* Number of segments in this map. */
5821 Elf32_Half nsegs;
5822 /* The actual memory map. */
5823 struct target_loadseg segs[/*nsegs*/];
5824 };
5825 # define LINUX_LOADMAP PTRACE_GETFDPIC
5826 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5827 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5828 # endif
5829
5830 static int
5831 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5832 unsigned char *myaddr, unsigned int len)
5833 {
5834 int pid = lwpid_of (current_thread);
5835 int addr = -1;
5836 struct target_loadmap *data = NULL;
5837 unsigned int actual_length, copy_length;
5838
5839 if (strcmp (annex, "exec") == 0)
5840 addr = (int) LINUX_LOADMAP_EXEC;
5841 else if (strcmp (annex, "interp") == 0)
5842 addr = (int) LINUX_LOADMAP_INTERP;
5843 else
5844 return -1;
5845
5846 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5847 return -1;
5848
5849 if (data == NULL)
5850 return -1;
5851
5852 actual_length = sizeof (struct target_loadmap)
5853 + sizeof (struct target_loadseg) * data->nsegs;
5854
5855 if (offset > actual_length)
5856 return -1;
5857
5858 copy_length = actual_length - offset < len ? actual_length - offset : len;
5859 memcpy (myaddr, (char *) data + offset, copy_length);
5860 return copy_length;
5861 }
5862 #else
5863 # define linux_read_loadmap NULL
5864 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5865
5866 static void
5867 linux_process_qsupported (const char *query)
5868 {
5869 if (the_low_target.process_qsupported != NULL)
5870 the_low_target.process_qsupported (query);
5871 }
5872
5873 static int
5874 linux_supports_tracepoints (void)
5875 {
5876 if (*the_low_target.supports_tracepoints == NULL)
5877 return 0;
5878
5879 return (*the_low_target.supports_tracepoints) ();
5880 }
5881
5882 static CORE_ADDR
5883 linux_read_pc (struct regcache *regcache)
5884 {
5885 if (the_low_target.get_pc == NULL)
5886 return 0;
5887
5888 return (*the_low_target.get_pc) (regcache);
5889 }
5890
5891 static void
5892 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5893 {
5894 gdb_assert (the_low_target.set_pc != NULL);
5895
5896 (*the_low_target.set_pc) (regcache, pc);
5897 }
5898
5899 static int
5900 linux_thread_stopped (struct thread_info *thread)
5901 {
5902 return get_thread_lwp (thread)->stopped;
5903 }
5904
5905 /* This exposes stop-all-threads functionality to other modules. */
5906
5907 static void
5908 linux_pause_all (int freeze)
5909 {
5910 stop_all_lwps (freeze, NULL);
5911 }
5912
5913 /* This exposes unstop-all-threads functionality to other gdbserver
5914 modules. */
5915
5916 static void
5917 linux_unpause_all (int unfreeze)
5918 {
5919 unstop_all_lwps (unfreeze, NULL);
5920 }
5921
5922 static int
5923 linux_prepare_to_access_memory (void)
5924 {
5925 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5926 running LWP. */
5927 if (non_stop)
5928 linux_pause_all (1);
5929 return 0;
5930 }
5931
5932 static void
5933 linux_done_accessing_memory (void)
5934 {
5935 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5936 running LWP. */
5937 if (non_stop)
5938 linux_unpause_all (1);
5939 }
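/* Usage sketch: callers bracket inferior memory accesses with these
   two hooks, e.g.

     linux_prepare_to_access_memory ();
     ... linux_read_memory / linux_write_memory ...
     linux_done_accessing_memory ();

   so that in non-stop mode every LWP is frozen for the duration of
   the access.  */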
5940
5941 static int
5942 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5943 CORE_ADDR collector,
5944 CORE_ADDR lockaddr,
5945 ULONGEST orig_size,
5946 CORE_ADDR *jump_entry,
5947 CORE_ADDR *trampoline,
5948 ULONGEST *trampoline_size,
5949 unsigned char *jjump_pad_insn,
5950 ULONGEST *jjump_pad_insn_size,
5951 CORE_ADDR *adjusted_insn_addr,
5952 CORE_ADDR *adjusted_insn_addr_end,
5953 char *err)
5954 {
5955 return (*the_low_target.install_fast_tracepoint_jump_pad)
5956 (tpoint, tpaddr, collector, lockaddr, orig_size,
5957 jump_entry, trampoline, trampoline_size,
5958 jjump_pad_insn, jjump_pad_insn_size,
5959 adjusted_insn_addr, adjusted_insn_addr_end,
5960 err);
5961 }
5962
5963 static struct emit_ops *
5964 linux_emit_ops (void)
5965 {
5966 if (the_low_target.emit_ops != NULL)
5967 return (*the_low_target.emit_ops) ();
5968 else
5969 return NULL;
5970 }
5971
5972 static int
5973 linux_get_min_fast_tracepoint_insn_len (void)
5974 {
5975 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5976 }
5977
5978 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5979
5980 static int
5981 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5982 CORE_ADDR *phdr_memaddr, int *num_phdr)
5983 {
5984 char filename[PATH_MAX];
5985 int fd;
5986 const int auxv_size = is_elf64
5987 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5988 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5989
5990 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5991
5992 fd = open (filename, O_RDONLY);
5993 if (fd < 0)
5994 return 1;
5995
5996 *phdr_memaddr = 0;
5997 *num_phdr = 0;
5998 while (read (fd, buf, auxv_size) == auxv_size
5999 && (*phdr_memaddr == 0 || *num_phdr == 0))
6000 {
6001 if (is_elf64)
6002 {
6003 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6004
6005 switch (aux->a_type)
6006 {
6007 case AT_PHDR:
6008 *phdr_memaddr = aux->a_un.a_val;
6009 break;
6010 case AT_PHNUM:
6011 *num_phdr = aux->a_un.a_val;
6012 break;
6013 }
6014 }
6015 else
6016 {
6017 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6018
6019 switch (aux->a_type)
6020 {
6021 case AT_PHDR:
6022 *phdr_memaddr = aux->a_un.a_val;
6023 break;
6024 case AT_PHNUM:
6025 *num_phdr = aux->a_un.a_val;
6026 break;
6027 }
6028 }
6029 }
6030
6031 close (fd);
6032
6033 if (*phdr_memaddr == 0 || *num_phdr == 0)
6034 {
6035 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6036 "phdr_memaddr = %ld, phdr_num = %d",
6037 (long) *phdr_memaddr, *num_phdr);
6038 return 2;
6039 }
6040
6041 return 0;
6042 }
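/* Example: for a 64-bit inferior each auxv record is an Elf64_auxv_t
   (a_type followed by a_un.a_val, 16 bytes total), so the loop above
   consumes 16-byte records until it has seen both AT_PHDR (the
   run-time address of the program header table) and AT_PHNUM (the
   number of entries in it).  */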
6043
6044 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6045
6046 static CORE_ADDR
6047 get_dynamic (const int pid, const int is_elf64)
6048 {
6049 CORE_ADDR phdr_memaddr, relocation;
6050 int num_phdr, i;
6051 unsigned char *phdr_buf;
6052 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6053
6054 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6055 return 0;
6056
6057 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6058 phdr_buf = alloca (num_phdr * phdr_size);
6059
6060 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6061 return 0;
6062
6063 /* Compute relocation: it is expected to be 0 for "regular" executables,
6064 non-zero for PIE ones. */
6065 relocation = -1;
6066 for (i = 0; relocation == -1 && i < num_phdr; i++)
6067 if (is_elf64)
6068 {
6069 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6070
6071 if (p->p_type == PT_PHDR)
6072 relocation = phdr_memaddr - p->p_vaddr;
6073 }
6074 else
6075 {
6076 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6077
6078 if (p->p_type == PT_PHDR)
6079 relocation = phdr_memaddr - p->p_vaddr;
6080 }
6081
6082 if (relocation == -1)
6083 {
6084 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6085 all real-world executables, including PIE ones, always have PT_PHDR
6086 present.  PT_PHDR is absent from some shared libraries and from fpc
6087 (Free Pascal 2.4) binaries, but neither of those needs or provides
6088 DT_DEBUG anyway (fpc binaries are statically linked).
6089
6090 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6091
6092 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6093
6094 return 0;
6095 }
6096
6097 for (i = 0; i < num_phdr; i++)
6098 {
6099 if (is_elf64)
6100 {
6101 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6102
6103 if (p->p_type == PT_DYNAMIC)
6104 return p->p_vaddr + relocation;
6105 }
6106 else
6107 {
6108 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6109
6110 if (p->p_type == PT_DYNAMIC)
6111 return p->p_vaddr + relocation;
6112 }
6113 }
6114
6115 return 0;
6116 }
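/* Worked example with hypothetical addresses: a PIE whose PT_PHDR has
   p_vaddr 0x40 but whose auxv reports AT_PHDR = 0x7f0000000040 gives
   relocation = 0x7f0000000040 - 0x40 = 0x7f0000000000; adding that to
   PT_DYNAMIC's p_vaddr yields the run-time address of _DYNAMIC.  For
   a non-PIE executable the two values match and the relocation is 0.  */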
6117
6118 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6119 can be 0 if the inferior does not yet have the library list initialized.
6120 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6121 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6122
6123 static CORE_ADDR
6124 get_r_debug (const int pid, const int is_elf64)
6125 {
6126 CORE_ADDR dynamic_memaddr;
6127 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6128 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6129 CORE_ADDR map = -1;
6130
6131 dynamic_memaddr = get_dynamic (pid, is_elf64);
6132 if (dynamic_memaddr == 0)
6133 return map;
6134
6135 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6136 {
6137 if (is_elf64)
6138 {
6139 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6140 #ifdef DT_MIPS_RLD_MAP
6141 union
6142 {
6143 Elf64_Xword map;
6144 unsigned char buf[sizeof (Elf64_Xword)];
6145 }
6146 rld_map;
6147
6148 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6149 {
6150 if (linux_read_memory (dyn->d_un.d_val,
6151 rld_map.buf, sizeof (rld_map.buf)) == 0)
6152 return rld_map.map;
6153 else
6154 break;
6155 }
6156 #endif /* DT_MIPS_RLD_MAP */
6157
6158 if (dyn->d_tag == DT_DEBUG && map == -1)
6159 map = dyn->d_un.d_val;
6160
6161 if (dyn->d_tag == DT_NULL)
6162 break;
6163 }
6164 else
6165 {
6166 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6167 #ifdef DT_MIPS_RLD_MAP
6168 union
6169 {
6170 Elf32_Word map;
6171 unsigned char buf[sizeof (Elf32_Word)];
6172 }
6173 rld_map;
6174
6175 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6176 {
6177 if (linux_read_memory (dyn->d_un.d_val,
6178 rld_map.buf, sizeof (rld_map.buf)) == 0)
6179 return rld_map.map;
6180 else
6181 break;
6182 }
6183 #endif /* DT_MIPS_RLD_MAP */
6184
6185 if (dyn->d_tag == DT_DEBUG && map == -1)
6186 map = dyn->d_un.d_val;
6187
6188 if (dyn->d_tag == DT_NULL)
6189 break;
6190 }
6191
6192 dynamic_memaddr += dyn_size;
6193 }
6194
6195 return map;
6196 }
6197
6198 /* Read one pointer from MEMADDR in the inferior. */
6199
6200 static int
6201 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6202 {
6203 int ret;
6204
6205 /* Go through a union so this works on either big or little endian
6206 hosts, when the inferior's pointer size is smaller than the size
6207 of CORE_ADDR.  The inferior's endianness is assumed to be the
6208 same as the superior's.  */
6209 union
6210 {
6211 CORE_ADDR core_addr;
6212 unsigned int ui;
6213 unsigned char uc;
6214 } addr;
6215
6216 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6217 if (ret == 0)
6218 {
6219 if (ptr_size == sizeof (CORE_ADDR))
6220 *ptr = addr.core_addr;
6221 else if (ptr_size == sizeof (unsigned int))
6222 *ptr = addr.ui;
6223 else
6224 gdb_assert_not_reached ("unhandled pointer size");
6225 }
6226 return ret;
6227 }
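/* Example: debugging a 32-bit inferior from a 64-bit gdbserver makes
   ptr_size 4, so the four bytes land at the start of the union and
   ADDR.UI reinterprets them in host byte order; reading
   ADDR.CORE_ADDR instead would yield the wrong value on a big-endian
   host, where those bytes occupy the high-order end of the value.  */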
6228
6229 struct link_map_offsets
6230 {
6231 /* Offset and size of r_debug.r_version. */
6232 int r_version_offset;
6233
6234 /* Offset and size of r_debug.r_map. */
6235 int r_map_offset;
6236
6237 /* Offset to l_addr field in struct link_map. */
6238 int l_addr_offset;
6239
6240 /* Offset to l_name field in struct link_map. */
6241 int l_name_offset;
6242
6243 /* Offset to l_ld field in struct link_map. */
6244 int l_ld_offset;
6245
6246 /* Offset to l_next field in struct link_map. */
6247 int l_next_offset;
6248
6249 /* Offset to l_prev field in struct link_map. */
6250 int l_prev_offset;
6251 };
6252
6253 /* Construct qXfer:libraries-svr4:read reply. */
6254
6255 static int
6256 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6257 unsigned const char *writebuf,
6258 CORE_ADDR offset, int len)
6259 {
6260 char *document;
6261 unsigned document_len;
6262 struct process_info_private *const priv = current_process ()->priv;
6263 char filename[PATH_MAX];
6264 int pid, is_elf64;
6265
6266 static const struct link_map_offsets lmo_32bit_offsets =
6267 {
6268 0, /* r_version offset. */
6269 4, /* r_debug.r_map offset. */
6270 0, /* l_addr offset in link_map. */
6271 4, /* l_name offset in link_map. */
6272 8, /* l_ld offset in link_map. */
6273 12, /* l_next offset in link_map. */
6274 16 /* l_prev offset in link_map. */
6275 };
6276
6277 static const struct link_map_offsets lmo_64bit_offsets =
6278 {
6279 0, /* r_version offset. */
6280 8, /* r_debug.r_map offset. */
6281 0, /* l_addr offset in link_map. */
6282 8, /* l_name offset in link_map. */
6283 16, /* l_ld offset in link_map. */
6284 24, /* l_next offset in link_map. */
6285 32 /* l_prev offset in link_map. */
6286 };
6287 const struct link_map_offsets *lmo;
6288 unsigned int machine;
6289 int ptr_size;
6290 CORE_ADDR lm_addr = 0, lm_prev = 0;
6291 int allocated = 1024;
6292 char *p;
6293 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6294 int header_done = 0;
6295
6296 if (writebuf != NULL)
6297 return -2;
6298 if (readbuf == NULL)
6299 return -1;
6300
6301 pid = lwpid_of (current_thread);
6302 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6303 is_elf64 = elf_64_file_p (filename, &machine);
6304 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6305 ptr_size = is_elf64 ? 8 : 4;
6306
6307 while (annex[0] != '\0')
6308 {
6309 const char *sep;
6310 CORE_ADDR *addrp;
6311 int len;
6312
6313 sep = strchr (annex, '=');
6314 if (sep == NULL)
6315 break;
6316
6317 len = sep - annex;
6318 if (len == 5 && startswith (annex, "start"))
6319 addrp = &lm_addr;
6320 else if (len == 4 && startswith (annex, "prev"))
6321 addrp = &lm_prev;
6322 else
6323 {
6324 annex = strchr (sep, ';');
6325 if (annex == NULL)
6326 break;
6327 annex++;
6328 continue;
6329 }
6330
6331 annex = decode_address_to_semicolon (addrp, sep + 1);
6332 }
6333
6334 if (lm_addr == 0)
6335 {
6336 int r_version = 0;
6337
6338 if (priv->r_debug == 0)
6339 priv->r_debug = get_r_debug (pid, is_elf64);
6340
6341 /* We failed to find DT_DEBUG.  This situation will not change
6342 for this inferior, so do not retry.  Report it to GDB as
6343 E01; see GDB's solib-svr4.c for the reasons.  */
6344 if (priv->r_debug == (CORE_ADDR) -1)
6345 return -1;
6346
6347 if (priv->r_debug != 0)
6348 {
6349 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6350 (unsigned char *) &r_version,
6351 sizeof (r_version)) != 0
6352 || r_version != 1)
6353 {
6354 warning ("unexpected r_debug version %d", r_version);
6355 }
6356 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6357 &lm_addr, ptr_size) != 0)
6358 {
6359 warning ("unable to read r_map from 0x%lx",
6360 (long) priv->r_debug + lmo->r_map_offset);
6361 }
6362 }
6363 }
6364
6365 document = xmalloc (allocated);
6366 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6367 p = document + strlen (document);
6368
6369 while (lm_addr
6370 && read_one_ptr (lm_addr + lmo->l_name_offset,
6371 &l_name, ptr_size) == 0
6372 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6373 &l_addr, ptr_size) == 0
6374 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6375 &l_ld, ptr_size) == 0
6376 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6377 &l_prev, ptr_size) == 0
6378 && read_one_ptr (lm_addr + lmo->l_next_offset,
6379 &l_next, ptr_size) == 0)
6380 {
6381 unsigned char libname[PATH_MAX];
6382
6383 if (lm_prev != l_prev)
6384 {
6385 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6386 (long) lm_prev, (long) l_prev);
6387 break;
6388 }
6389
6390 /* Ignore the first entry even if it has a valid name, as it
6391 corresponds to the main executable.  The first entry should not be
6392 skipped if the dynamic loader was loaded late by a static executable
6393 (see the solib-svr4.c parameter ignore_first), but in that case the
6394 main executable has no PT_DYNAMIC and this function has already
6395 returned above because get_r_debug failed.  */
6396 if (lm_prev == 0)
6397 {
6398 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6399 p = p + strlen (p);
6400 }
6401 else
6402 {
6403 /* Not checking for error because reading may stop before
6404 we've got PATH_MAX worth of characters. */
6405 libname[0] = '\0';
6406 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6407 libname[sizeof (libname) - 1] = '\0';
6408 if (libname[0] != '\0')
6409 {
6410 /* 6x the size for xml_escape_text below. */
6411 size_t len = 6 * strlen ((char *) libname);
6412 char *name;
6413
6414 if (!header_done)
6415 {
6416 /* Terminate `<library-list-svr4'. */
6417 *p++ = '>';
6418 header_done = 1;
6419 }
6420
6421 while (allocated < p - document + len + 200)
6422 {
6423 /* Expand to guarantee sufficient storage. */
6424 uintptr_t document_len = p - document;
6425
6426 document = xrealloc (document, 2 * allocated);
6427 allocated *= 2;
6428 p = document + document_len;
6429 }
6430
6431 name = xml_escape_text ((char *) libname);
6432 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6433 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6434 name, (unsigned long) lm_addr,
6435 (unsigned long) l_addr, (unsigned long) l_ld);
6436 free (name);
6437 }
6438 }
6439
6440 lm_prev = lm_addr;
6441 lm_addr = l_next;
6442 }
6443
6444 if (!header_done)
6445 {
6446 /* Empty list; terminate `<library-list-svr4'. */
6447 strcpy (p, "/>");
6448 }
6449 else
6450 strcpy (p, "</library-list-svr4>");
6451
6452 document_len = strlen (document);
6453 if (offset < document_len)
6454 document_len -= offset;
6455 else
6456 document_len = 0;
6457 if (len > document_len)
6458 len = document_len;
6459
6460 memcpy (readbuf, document + offset, len);
6461 xfree (document);
6462
6463 return len;
6464 }
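/* Example reply with hypothetical addresses, in the form consumed by
   GDB's solib-svr4.c:

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fc3700"
            l_addr="0x7ffff7800000" l_ld="0x7ffff7bb9b80"/>
   </library-list-svr4>
*/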
6465
6466 #ifdef HAVE_LINUX_BTRACE
6467
6468 /* See to_enable_btrace target method. */
6469
6470 static struct btrace_target_info *
6471 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6472 {
6473 struct btrace_target_info *tinfo;
6474
6475 tinfo = linux_enable_btrace (ptid, conf);
6476
6477 if (tinfo != NULL && tinfo->ptr_bits == 0)
6478 {
6479 struct thread_info *thread = find_thread_ptid (ptid);
6480 struct regcache *regcache = get_thread_regcache (thread, 0);
6481
6482 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6483 }
6484
6485 return tinfo;
6486 }
6487
6488 /* See to_disable_btrace target method. */
6489
6490 static int
6491 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6492 {
6493 enum btrace_error err;
6494
6495 err = linux_disable_btrace (tinfo);
6496 return (err == BTRACE_ERR_NONE ? 0 : -1);
6497 }
6498
6499 /* Encode an Intel(R) Processor Trace configuration. */
6500
6501 static void
6502 linux_low_encode_pt_config (struct buffer *buffer,
6503 const struct btrace_data_pt_config *config)
6504 {
6505 buffer_grow_str (buffer, "<pt-config>\n");
6506
6507 switch (config->cpu.vendor)
6508 {
6509 case CV_INTEL:
6510 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6511 "model=\"%u\" stepping=\"%u\"/>\n",
6512 config->cpu.family, config->cpu.model,
6513 config->cpu.stepping);
6514 break;
6515
6516 default:
6517 break;
6518 }
6519
6520 buffer_grow_str (buffer, "</pt-config>\n");
6521 }
6522
6523 /* Encode a raw buffer. */
6524
6525 static void
6526 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6527 unsigned int size)
6528 {
6529 if (size == 0)
6530 return;
6531
6532 /* We use hex encoding - see common/rsp-low.h. */
6533 buffer_grow_str (buffer, "<raw>\n");
6534
6535 while (size-- > 0)
6536 {
6537 char elem[2];
6538
6539 elem[0] = tohex ((*data >> 4) & 0xf);
6540 elem[1] = tohex (*data++ & 0xf);
6541
6542 buffer_grow (buffer, elem, 2);
6543 }
6544
6545 buffer_grow_str (buffer, "</raw>\n");
6546 }
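/* Example: the three raw bytes 0x01 0xab 0xff are emitted as the six
   characters "01abff" between the <raw> tags.  */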
6547
6548 /* See to_read_btrace target method. */
6549
6550 static int
6551 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6552 int type)
6553 {
6554 struct btrace_data btrace;
6555 struct btrace_block *block;
6556 enum btrace_error err;
6557 int i;
6558
6559 btrace_data_init (&btrace);
6560
6561 err = linux_read_btrace (&btrace, tinfo, type);
6562 if (err != BTRACE_ERR_NONE)
6563 {
6564 if (err == BTRACE_ERR_OVERFLOW)
6565 buffer_grow_str0 (buffer, "E.Overflow.");
6566 else
6567 buffer_grow_str0 (buffer, "E.Generic Error.");
6568
6569 goto err;
6570 }
6571
6572 switch (btrace.format)
6573 {
6574 case BTRACE_FORMAT_NONE:
6575 buffer_grow_str0 (buffer, "E.No Trace.");
6576 goto err;
6577
6578 case BTRACE_FORMAT_BTS:
6579 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6580 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6581
6582 for (i = 0;
6583 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6584 i++)
6585 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6586 paddress (block->begin), paddress (block->end));
6587
6588 buffer_grow_str0 (buffer, "</btrace>\n");
6589 break;
6590
6591 case BTRACE_FORMAT_PT:
6592 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6593 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6594 buffer_grow_str (buffer, "<pt>\n");
6595
6596 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6597
6598 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6599 btrace.variant.pt.size);
6600
6601 buffer_grow_str (buffer, "</pt>\n");
6602 buffer_grow_str0 (buffer, "</btrace>\n");
6603 break;
6604
6605 default:
6606 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6607 goto err;
6608 }
6609
6610 btrace_data_fini (&btrace);
6611 return 0;
6612
6613 err:
6614 btrace_data_fini (&btrace);
6615 return -1;
6616 }
6617
6618 /* See to_btrace_conf target method. */
6619
6620 static int
6621 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6622 struct buffer *buffer)
6623 {
6624 const struct btrace_config *conf;
6625
6626 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6627 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6628
6629 conf = linux_btrace_conf (tinfo);
6630 if (conf != NULL)
6631 {
6632 switch (conf->format)
6633 {
6634 case BTRACE_FORMAT_NONE:
6635 break;
6636
6637 case BTRACE_FORMAT_BTS:
6638 buffer_xml_printf (buffer, "<bts");
6639 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6640 buffer_xml_printf (buffer, " />\n");
6641 break;
6642
6643 case BTRACE_FORMAT_PT:
6644 buffer_xml_printf (buffer, "<pt");
6645 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6646 buffer_xml_printf (buffer, "/>\n");
6647 break;
6648 }
6649 }
6650
6651 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6652 return 0;
6653 }
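/* Example reply for a BTS configuration with a hypothetical 64 KiB
   ring buffer:

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>
*/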
6654 #endif /* HAVE_LINUX_BTRACE */
6655
6656 /* See nat/linux-nat.h. */
6657
6658 ptid_t
6659 current_lwp_ptid (void)
6660 {
6661 return ptid_of (current_thread);
6662 }
6663
6664 static struct target_ops linux_target_ops = {
6665 linux_create_inferior,
6666 linux_arch_setup,
6667 linux_attach,
6668 linux_kill,
6669 linux_detach,
6670 linux_mourn,
6671 linux_join,
6672 linux_thread_alive,
6673 linux_resume,
6674 linux_wait,
6675 linux_fetch_registers,
6676 linux_store_registers,
6677 linux_prepare_to_access_memory,
6678 linux_done_accessing_memory,
6679 linux_read_memory,
6680 linux_write_memory,
6681 linux_look_up_symbols,
6682 linux_request_interrupt,
6683 linux_read_auxv,
6684 linux_supports_z_point_type,
6685 linux_insert_point,
6686 linux_remove_point,
6687 linux_stopped_by_sw_breakpoint,
6688 linux_supports_stopped_by_sw_breakpoint,
6689 linux_stopped_by_hw_breakpoint,
6690 linux_supports_stopped_by_hw_breakpoint,
6691 linux_supports_conditional_breakpoints,
6692 linux_stopped_by_watchpoint,
6693 linux_stopped_data_address,
6694 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6695 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6696 && defined(PT_TEXT_END_ADDR)
6697 linux_read_offsets,
6698 #else
6699 NULL,
6700 #endif
6701 #ifdef USE_THREAD_DB
6702 thread_db_get_tls_address,
6703 #else
6704 NULL,
6705 #endif
6706 linux_qxfer_spu,
6707 hostio_last_error_from_errno,
6708 linux_qxfer_osdata,
6709 linux_xfer_siginfo,
6710 linux_supports_non_stop,
6711 linux_async,
6712 linux_start_non_stop,
6713 linux_supports_multi_process,
6714 linux_supports_fork_events,
6715 linux_supports_vfork_events,
6716 linux_handle_new_gdb_connection,
6717 #ifdef USE_THREAD_DB
6718 thread_db_handle_monitor_command,
6719 #else
6720 NULL,
6721 #endif
6722 linux_common_core_of_thread,
6723 linux_read_loadmap,
6724 linux_process_qsupported,
6725 linux_supports_tracepoints,
6726 linux_read_pc,
6727 linux_write_pc,
6728 linux_thread_stopped,
6729 NULL,
6730 linux_pause_all,
6731 linux_unpause_all,
6732 linux_stabilize_threads,
6733 linux_install_fast_tracepoint_jump_pad,
6734 linux_emit_ops,
6735 linux_supports_disable_randomization,
6736 linux_get_min_fast_tracepoint_insn_len,
6737 linux_qxfer_libraries_svr4,
6738 linux_supports_agent,
6739 #ifdef HAVE_LINUX_BTRACE
6740 linux_supports_btrace,
6741 linux_low_enable_btrace,
6742 linux_low_disable_btrace,
6743 linux_low_read_btrace,
6744 linux_low_btrace_conf,
6745 #else
6746 NULL,
6747 NULL,
6748 NULL,
6749 NULL,
6750 NULL,
6751 #endif
6752 linux_supports_range_stepping,
6753 linux_proc_pid_to_exec_file,
6754 linux_mntns_open_cloexec,
6755 linux_mntns_unlink,
6756 linux_mntns_readlink,
6757 };
6758
6759 static void
6760 linux_init_signals (void)
6761 {
6762 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6763 to find what the cancel signal actually is. */
6764 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6765 signal (__SIGRTMIN+1, SIG_IGN);
6766 #endif
6767 }
6768
6769 #ifdef HAVE_LINUX_REGSETS
6770 void
6771 initialize_regsets_info (struct regsets_info *info)
6772 {
6773 for (info->num_regsets = 0;
6774 info->regsets[info->num_regsets].size >= 0;
6775 info->num_regsets++)
6776 ;
6777 }
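/* Example: an arch's regsets[] table ends with a sentinel entry whose
   size field is negative, so the loop above counts the real entries
   up to, but not including, that sentinel.  */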
6778 #endif
6779
6780 void
6781 initialize_low (void)
6782 {
6783 struct sigaction sigchld_action;
6784 memset (&sigchld_action, 0, sizeof (sigchld_action));
6785 set_target_ops (&linux_target_ops);
6786 set_breakpoint_data (the_low_target.breakpoint,
6787 the_low_target.breakpoint_len);
6788 linux_init_signals ();
6789 linux_ptrace_init_warnings ();
6790
6791 sigchld_action.sa_handler = sigchld_handler;
6792 sigemptyset (&sigchld_action.sa_mask);
6793 sigchld_action.sa_flags = SA_RESTART;
6794 sigaction (SIGCHLD, &sigchld_action, NULL);
6795
6796 initialize_low_arch ();
6797
6798 linux_check_ptrace_features ();
6799 }