/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

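/* Remove the first entry for PID from *LISTP, storing its waitpid
   status in *STATUSP.  Return 1 if PID was found, 0 otherwise.  */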
static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if HEADER is not an ELF file at all.  On
   success store the ELF machine type in *MACHINE; otherwise store
   EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

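/* Remove the thread associated with LWP from our tables, and free
   LWP itself, including its arch-specific data.  */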
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we add the new LWP to our list and return 1 so the caller
   does not report the trap to higher layers.  For events GDB should
   see (fork, vfork, vfork-done), we return 0 so they are reported.  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

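/* Allocate and zero-initialize a new lwp_info for PTID, let the low
   target set up its arch-specific data, and register a new thread
   for it.  */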
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the arch_setup target_ops method.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

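/* Helpers to check whether a process has more than one thread: the
   callback below counts the threads of COUNTER->pid, and stops the
   iteration as soon as a second one is found.  */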
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

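/* Return non-zero if PID currently has exactly one known thread (the
   leader).  */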
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait statuses on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

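/* Kill process PID and all of its LWPs: stop them all, kill every
   LWP except the leader, then kill the leader itself and mourn the
   process.  Return 0 on success, or -1 if PID is unknown.  */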
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

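/* Callback for find_inferior.  Detach from the LWP of ENTRY if it
   belongs to the process whose pid ARGS points to: discard any
   pending SIGSTOP, flush its registers, and pass on any other
   pending signal with PTRACE_DETACH.  */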
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

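/* Detach from process PID: stop all of its LWPs, detach from each
   one, and mourn the process.  Return 0 on success, or -1 if PID is
   unknown.  */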
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

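/* Clean up after PROCESS exits or is detached: delete all of its
   LWPs, free its private data, and remove it from the process
   list.  */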
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

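/* Wait until process PID is reaped, ignoring any intermediate stop
   statuses.  */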
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that has since been
   removed), return 0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

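/* Callback for find_inferior.  Return 1 if ENTRY's lwpid matches the
   lwp field of the ptid DATA points to, falling back to its pid when
   the lwp field is zero.  */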
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

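/* Return the lwp_info whose lwpid matches PTID's lwp (or PTID's pid,
   when PTID has no lwp component), or NULL if no such LWP is
   known.  */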
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */
1757
1758 static int
1759 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1760 {
1761 struct thread_info *saved_thread;
1762
1763 saved_thread = current_thread;
1764 current_thread = get_lwp_thread (lwp);
1765
1766 if ((wstat == NULL
1767 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1768 && supports_fast_tracepoints ()
1769 && agent_loaded_p ())
1770 {
1771 struct fast_tpoint_collect_status status;
1772 int r;
1773
1774 if (debug_threads)
1775 debug_printf ("Checking whether LWP %ld needs to move out of the "
1776 "jump pad.\n",
1777 lwpid_of (current_thread));
1778
1779 r = linux_fast_tracepoint_collecting (lwp, &status);
1780
1781 if (wstat == NULL
1782 || (WSTOPSIG (*wstat) != SIGILL
1783 && WSTOPSIG (*wstat) != SIGFPE
1784 && WSTOPSIG (*wstat) != SIGSEGV
1785 && WSTOPSIG (*wstat) != SIGBUS))
1786 {
1787 lwp->collecting_fast_tracepoint = r;
1788
1789 if (r != 0)
1790 {
1791 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1792 {
1793 /* Haven't executed the original instruction yet.
1794 Set breakpoint there, and wait till it's hit,
1795 then single-step until exiting the jump pad. */
1796 lwp->exit_jump_pad_bkpt
1797 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1798 }
1799
1800 if (debug_threads)
1801 debug_printf ("Checking whether LWP %ld needs to move out of "
1802 "the jump pad...it does\n",
1803 lwpid_of (current_thread));
1804 current_thread = saved_thread;
1805
1806 return 1;
1807 }
1808 }
1809 else
1810 {
1811 /* If we get a synchronous signal while collecting, *and*
1812 while executing the (relocated) original instruction,
1813 reset the PC to point at the tpoint address, before
1814 reporting to GDB. Otherwise, it's an IPA lib bug: just
1815 report the signal to GDB, and pray for the best. */
1816
1817 lwp->collecting_fast_tracepoint = 0;
1818
1819 if (r != 0
1820 && (status.adjusted_insn_addr <= lwp->stop_pc
1821 && lwp->stop_pc < status.adjusted_insn_addr_end))
1822 {
1823 siginfo_t info;
1824 struct regcache *regcache;
1825
1826 /* The si_addr on a few signals references the address
1827 of the faulting instruction. Adjust that as
1828 well. */
1829 if ((WSTOPSIG (*wstat) == SIGILL
1830 || WSTOPSIG (*wstat) == SIGFPE
1831 || WSTOPSIG (*wstat) == SIGBUS
1832 || WSTOPSIG (*wstat) == SIGSEGV)
1833 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1834 (PTRACE_TYPE_ARG3) 0, &info) == 0
1835 /* Final check just to make sure we don't clobber
1836 the siginfo of non-kernel-sent signals. */
1837 && (uintptr_t) info.si_addr == lwp->stop_pc)
1838 {
1839 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1840 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1841 (PTRACE_TYPE_ARG3) 0, &info);
1842 }
1843
1844 regcache = get_thread_regcache (current_thread, 1);
1845 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1846 lwp->stop_pc = status.tpoint_addr;
1847
1848 /* Cancel any fast tracepoint lock this thread was
1849 holding. */
1850 force_unlock_trace_buffer ();
1851 }
1852
1853 if (lwp->exit_jump_pad_bkpt != NULL)
1854 {
1855 if (debug_threads)
1856 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1857 "stopping all threads momentarily.\n");
1858
1859 stop_all_lwps (1, lwp);
1860
1861 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1862 lwp->exit_jump_pad_bkpt = NULL;
1863
1864 unstop_all_lwps (1, lwp);
1865
1866 gdb_assert (lwp->suspended >= 0);
1867 }
1868 }
1869 }
1870
1871 if (debug_threads)
1872 debug_printf ("Checking whether LWP %ld needs to move out of the "
1873 "jump pad...no\n",
1874 lwpid_of (current_thread));
1875
1876 current_thread = saved_thread;
1877 return 0;
1878 }
1879
1880 /* Enqueue one signal in the "signals to report later when out of the
1881 jump pad" list. */
1882
1883 static void
1884 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1885 {
1886 struct pending_signals *p_sig;
1887 struct thread_info *thread = get_lwp_thread (lwp);
1888
1889 if (debug_threads)
1890 debug_printf ("Deferring signal %d for LWP %ld.\n",
1891 WSTOPSIG (*wstat), lwpid_of (thread));
1892
1893 if (debug_threads)
1894 {
1895 struct pending_signals *sig;
1896
1897 for (sig = lwp->pending_signals_to_report;
1898 sig != NULL;
1899 sig = sig->prev)
1900 debug_printf (" Already queued %d\n",
1901 sig->signal);
1902
1903 debug_printf (" (no more currently queued signals)\n");
1904 }
1905
1906 /* Don't enqueue non-RT signals if they are already in the deferred
1907 queue. (SIGSTOP is the easiest signal to see ending up here
1908 twice.) */
1909 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1910 {
1911 struct pending_signals *sig;
1912
1913 for (sig = lwp->pending_signals_to_report;
1914 sig != NULL;
1915 sig = sig->prev)
1916 {
1917 if (sig->signal == WSTOPSIG (*wstat))
1918 {
1919 if (debug_threads)
1920 debug_printf ("Not requeuing already queued non-RT signal %d"
1921 " for LWP %ld\n",
1922 sig->signal,
1923 lwpid_of (thread));
1924 return;
1925 }
1926 }
1927 }
1928
1929 p_sig = xmalloc (sizeof (*p_sig));
1930 p_sig->prev = lwp->pending_signals_to_report;
1931 p_sig->signal = WSTOPSIG (*wstat);
1932 memset (&p_sig->info, 0, sizeof (siginfo_t));
1933 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1934 &p_sig->info);
1935
1936 lwp->pending_signals_to_report = p_sig;
1937 }
1938
1939 /* Dequeue one signal from the "signals to report later when out of
1940 the jump pad" list. */
1941
1942 static int
1943 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1944 {
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
1947 if (lwp->pending_signals_to_report != NULL)
1948 {
1949 struct pending_signals **p_sig;
1950
1951 p_sig = &lwp->pending_signals_to_report;
1952 while ((*p_sig)->prev != NULL)
1953 p_sig = &(*p_sig)->prev;
1954
1955 *wstat = W_STOPCODE ((*p_sig)->signal);
1956 if ((*p_sig)->info.si_signo != 0)
1957 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1958 &(*p_sig)->info);
1959 free (*p_sig);
1960 *p_sig = NULL;
1961
1962 if (debug_threads)
1963 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1964 WSTOPSIG (*wstat), lwpid_of (thread));
1965
1966 if (debug_threads)
1967 {
1968 struct pending_signals *sig;
1969
1970 for (sig = lwp->pending_signals_to_report;
1971 sig != NULL;
1972 sig = sig->prev)
1973 debug_printf (" Still queued %d\n",
1974 sig->signal);
1975
1976 debug_printf (" (no more queued signals)\n");
1977 }
1978
1979 return 1;
1980 }
1981
1982 return 0;
1983 }
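/* Illustrative sketch, not part of the original source: the deferred
   signal is re-packaged above with W_STOPCODE so that the usual
   WIFSTOPPED/WSTOPSIG accessors keep working on the synthesized
   status.  Assuming the usual Linux encoding (0x7f in the low byte
   marks a stop; the stopping signal sits in the next byte), the round
   trip looks like this (guarded out, as it is only an example):  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
w_stopcode_roundtrip_example (void)
{
  /* W_STOPCODE comes from this file's fallback definition when the
     libc doesn't provide one.  */
  int wstat = W_STOPCODE (SIGUSR1);

  /* The low byte marks the status as "stopped"...  */
  assert (WIFSTOPPED (wstat));
  /* ... and the next byte carries the stopping signal.  */
  assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif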
1984
1985 /* Fetch the possibly triggered data watchpoint info and store it in
1986 CHILD.
1987
1988 On some archs, like x86, that use debug registers to set
1989 watchpoints, the way to know which watched address trapped is to
1990 check the register that is used to select which address to watch.
1991 The problem is that between setting the watchpoint and reading
1992 back which data address trapped, the user may change the set of
1993 watchpoints, and, as a consequence, GDB changes the debug
1994 registers in the inferior. To avoid reading back a stale
1995 stopped-data-address when that happens, we cache in CHILD the fact
1996 that a watchpoint trapped, and the corresponding data address, as
1997 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1998 registers meanwhile, we have the cached data we can rely on. */
1999
2000 static int
2001 check_stopped_by_watchpoint (struct lwp_info *child)
2002 {
2003 if (the_low_target.stopped_by_watchpoint != NULL)
2004 {
2005 struct thread_info *saved_thread;
2006
2007 saved_thread = current_thread;
2008 current_thread = get_lwp_thread (child);
2009
2010 if (the_low_target.stopped_by_watchpoint ())
2011 {
2012 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2013
2014 if (the_low_target.stopped_data_address != NULL)
2015 child->stopped_data_address
2016 = the_low_target.stopped_data_address ();
2017 else
2018 child->stopped_data_address = 0;
2019 }
2020
2021 current_thread = saved_thread;
2022 }
2023
2024 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2025 }
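/* Illustrative timeline, not part of the original source, of the race
   the cache above guards against:

     1. a watchpoint on ADDR1 fires; the LWP stops with a SIGTRAP;
     2. before gdbserver reads the debug registers, GDB replaces the
        watchpoint set (say, it is now watching ADDR2 instead);
     3. a late stopped_data_address () call would report ADDR2.

   Recording stopped_data_address at step 1 keeps the report correctly
   pinned to ADDR1.  */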
2026
2027 /* Return the ptrace options that we want to try to enable. */
2028
2029 static int
2030 linux_low_ptrace_options (int attached)
2031 {
2032 int options = 0;
2033
2034 if (!attached)
2035 options |= PTRACE_O_EXITKILL;
2036
2037 if (report_fork_events)
2038 options |= PTRACE_O_TRACEFORK;
2039
2040 if (report_vfork_events)
2041 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2042
2043 return options;
2044 }
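/* Illustrative sketch, not part of the original source: the option
   mask built above is applied below via linux_enable_event_reporting
   (see linux_low_filter_event), which presumably boils down to a
   PTRACE_SETOPTIONS call on the stopped tracee, roughly like the
   following (the helper name is an assumption for the example):  */
#if 0
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
apply_ptrace_options_example (pid_t lwpid, int options)
{
  errno = 0;
  if (ptrace (PTRACE_SETOPTIONS, lwpid, (void *) 0,
	      (void *) (uintptr_t) options) == -1)
    return -errno;	/* E.g. -ESRCH if the tracee vanished.  */
  return 0;
}
#endif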
2045
2046 /* Do low-level handling of the event, and check if we should go on
2047 and pass it to caller code. Return the affected LWP if we should,
2048 or NULL otherwise. */
2049
2050 static struct lwp_info *
2051 linux_low_filter_event (int lwpid, int wstat)
2052 {
2053 struct lwp_info *child;
2054 struct thread_info *thread;
2055 int have_stop_pc = 0;
2056
2057 child = find_lwp_pid (pid_to_ptid (lwpid));
2058
2059 /* If we didn't find a process, one of two things presumably happened:
2060 - A process we started and then detached from has exited. Ignore it.
2061 - A process we are controlling has forked and the new child's stop
2062 was reported to us by the kernel. Save its PID. */
2063 if (child == NULL && WIFSTOPPED (wstat))
2064 {
2065 add_to_pid_list (&stopped_pids, lwpid, wstat);
2066 return NULL;
2067 }
2068 else if (child == NULL)
2069 return NULL;
2070
2071 thread = get_lwp_thread (child);
2072
2073 child->stopped = 1;
2074
2075 child->last_status = wstat;
2076
2077 /* Check if the thread has exited. */
2078 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2079 {
2080 if (debug_threads)
2081 debug_printf ("LLFE: %d exited.\n", lwpid);
2082 if (num_lwps (pid_of (thread)) > 1)
2083 {
2084
2085 /* If there is at least one more LWP, then the exit signal was
2086 not the end of the debugged application and should be
2087 ignored. */
2088 delete_lwp (child);
2089 return NULL;
2090 }
2091 else
2092 {
2093 /* This was the last lwp in the process. Since events are
2094 serialized to the GDB core, we can't report this one
2095 right now; but the GDB core and the other target layers
2096 will want to be notified about the exit code/signal, so
2097 leave the status pending for the next time we're able
2098 to report it. */
2099 mark_lwp_dead (child, wstat);
2100 return child;
2101 }
2102 }
2103
2104 gdb_assert (WIFSTOPPED (wstat));
2105
2106 if (WIFSTOPPED (wstat))
2107 {
2108 struct process_info *proc;
2109
2110 /* Architecture-specific setup after inferior is running. */
2111 proc = find_process_pid (pid_of (thread));
2112 if (proc->tdesc == NULL)
2113 {
2114 if (proc->attached)
2115 {
2116 struct thread_info *saved_thread;
2117
2118 /* This needs to happen after we have attached to the
2119 inferior and it is stopped for the first time, but
2120 before we access any inferior registers. */
2121 saved_thread = current_thread;
2122 current_thread = thread;
2123
2124 the_low_target.arch_setup ();
2125
2126 current_thread = saved_thread;
2127 }
2128 else
2129 {
2130 /* The process is started, but GDBserver will do
2131 architecture-specific setup after the program stops at
2132 the first instruction. */
2133 child->status_pending_p = 1;
2134 child->status_pending = wstat;
2135 return child;
2136 }
2137 }
2138 }
2139
2140 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2141 {
2142 struct process_info *proc = find_process_pid (pid_of (thread));
2143 int options = linux_low_ptrace_options (proc->attached);
2144
2145 linux_enable_event_reporting (lwpid, options);
2146 child->must_set_ptrace_flags = 0;
2147 }
2148
2149 /* Be careful not to overwrite stop_pc until
2150 check_stopped_by_breakpoint is called. */
2151 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2152 && linux_is_extended_waitstatus (wstat))
2153 {
2154 child->stop_pc = get_pc (child);
2155 if (handle_extended_wait (child, wstat))
2156 {
2157 /* The event has been handled, so just return without
2158 reporting it. */
2159 return NULL;
2160 }
2161 }
2162
2163 /* Check first whether this was a SW/HW breakpoint before checking
2164 watchpoints, because at least s390 can't tell the data address of
2165 hardware watchpoint hits, and returns stopped-by-watchpoint as
2166 long as there's a watchpoint set. */
2167 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2168 {
2169 if (check_stopped_by_breakpoint (child))
2170 have_stop_pc = 1;
2171 }
2172
2173 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2174 or hardware watchpoint. Check which is which if we got
2175 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2176 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2177 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2178 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2179 check_stopped_by_watchpoint (child);
2180
2181 if (!have_stop_pc)
2182 child->stop_pc = get_pc (child);
2183
2184 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2185 && child->stop_expected)
2186 {
2187 if (debug_threads)
2188 debug_printf ("Expected stop.\n");
2189 child->stop_expected = 0;
2190
2191 if (thread->last_resume_kind == resume_stop)
2192 {
2193 /* We want to report the stop to the core. Treat the
2194 SIGSTOP as a normal event. */
2195 if (debug_threads)
2196 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2197 target_pid_to_str (ptid_of (thread)));
2198 }
2199 else if (stopping_threads != NOT_STOPPING_THREADS)
2200 {
2201 /* Stopping threads. We don't want this SIGSTOP to end up
2202 pending. */
2203 if (debug_threads)
2204 debug_printf ("LLW: SIGSTOP caught for %s "
2205 "while stopping threads.\n",
2206 target_pid_to_str (ptid_of (thread)));
2207 return NULL;
2208 }
2209 else
2210 {
2211 /* This is a delayed SIGSTOP. Filter out the event. */
2212 if (debug_threads)
2213 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2214 child->stepping ? "step" : "continue",
2215 target_pid_to_str (ptid_of (thread)));
2216
2217 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2218 return NULL;
2219 }
2220 }
2221
2222 child->status_pending_p = 1;
2223 child->status_pending = wstat;
2224 return child;
2225 }
2226
2227 /* Resume LWPs that are currently stopped without any pending status
2228 to report, but are resumed from the core's perspective. */
2229
2230 static void
2231 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2232 {
2233 struct thread_info *thread = (struct thread_info *) entry;
2234 struct lwp_info *lp = get_thread_lwp (thread);
2235
2236 if (lp->stopped
2237 && !lp->status_pending_p
2238 && thread->last_resume_kind != resume_stop
2239 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2240 {
2241 int step = thread->last_resume_kind == resume_step;
2242
2243 if (debug_threads)
2244 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2245 target_pid_to_str (ptid_of (thread)),
2246 paddress (lp->stop_pc),
2247 step);
2248
2249 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2250 }
2251 }
2252
2253 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2254 match FILTER_PTID (leaving others pending). The PTIDs can be:
2255 minus_one_ptid, to specify any child; a pid PTID, specifying all
2256 lwps of a thread group; or a PTID representing a single lwp. Store
2257 the stop status through the status pointer WSTAT. OPTIONS is
2258 passed to the waitpid call. Return 0 if no event was found and
2259 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2260 were found. Return the PID of the stopped child otherwise. */
2261
2262 static int
2263 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2264 int *wstatp, int options)
2265 {
2266 struct thread_info *event_thread;
2267 struct lwp_info *event_child, *requested_child;
2268 sigset_t block_mask, prev_mask;
2269
2270 retry:
2271 /* N.B. event_thread points to the thread_info struct that contains
2272 event_child. Keep them in sync. */
2273 event_thread = NULL;
2274 event_child = NULL;
2275 requested_child = NULL;
2276
2277 /* Check for a lwp with a pending status. */
2278
2279 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2280 {
2281 event_thread = (struct thread_info *)
2282 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2283 if (event_thread != NULL)
2284 event_child = get_thread_lwp (event_thread);
2285 if (debug_threads && event_thread)
2286 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2287 }
2288 else if (!ptid_equal (filter_ptid, null_ptid))
2289 {
2290 requested_child = find_lwp_pid (filter_ptid);
2291
2292 if (stopping_threads == NOT_STOPPING_THREADS
2293 && requested_child->status_pending_p
2294 && requested_child->collecting_fast_tracepoint)
2295 {
2296 enqueue_one_deferred_signal (requested_child,
2297 &requested_child->status_pending);
2298 requested_child->status_pending_p = 0;
2299 requested_child->status_pending = 0;
2300 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2301 }
2302
2303 if (requested_child->suspended
2304 && requested_child->status_pending_p)
2305 {
2306 internal_error (__FILE__, __LINE__,
2307 "requesting an event out of a"
2308 " suspended child?");
2309 }
2310
2311 if (requested_child->status_pending_p)
2312 {
2313 event_child = requested_child;
2314 event_thread = get_lwp_thread (event_child);
2315 }
2316 }
2317
2318 if (event_child != NULL)
2319 {
2320 if (debug_threads)
2321 debug_printf ("Got an event from pending child %ld (%04x)\n",
2322 lwpid_of (event_thread), event_child->status_pending);
2323 *wstatp = event_child->status_pending;
2324 event_child->status_pending_p = 0;
2325 event_child->status_pending = 0;
2326 current_thread = event_thread;
2327 return lwpid_of (event_thread);
2328 }
2329
2330 /* But if we don't find a pending event, we'll have to wait.
2331
2332 We only enter this loop if no process has a pending wait status.
2333 Thus any action taken in response to a wait status inside this
2334 loop is responding as soon as we detect the status, not after any
2335 pending events. */
2336
2337 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2338 all signals while here. */
2339 sigfillset (&block_mask);
2340 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2341
2342 /* Always pull all events out of the kernel. We'll randomly select
2343 an event LWP out of all that have events, to prevent
2344 starvation. */
2345 while (event_child == NULL)
2346 {
2347 pid_t ret = 0;
2348
2349 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2350 quirks:
2351
2352 - If the thread group leader exits while other threads in the
2353 thread group still exist, waitpid(TGID, ...) hangs. That
2354 waitpid won't return an exit status until the other threads
2355 in the group are reaped.
2356
2357 - When a non-leader thread execs, that thread just vanishes
2358 without reporting an exit (so we'd hang if we waited for it
2359 explicitly in that case). The exec event is reported to
2360 the TGID pid (although we don't currently enable exec
2361 events). */
2362 errno = 0;
2363 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2364
2365 if (debug_threads)
2366 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2367 ret, errno ? strerror (errno) : "ERRNO-OK");
2368
2369 if (ret > 0)
2370 {
2371 if (debug_threads)
2372 {
2373 debug_printf ("LLW: waitpid %ld received %s\n",
2374 (long) ret, status_to_str (*wstatp));
2375 }
2376
2377 /* Filter all events. IOW, leave all events pending. We'll
2378 randomly select an event LWP out of all that have events
2379 below. */
2380 linux_low_filter_event (ret, *wstatp);
2381 /* Retry until nothing comes out of waitpid. A single
2382 SIGCHLD can indicate more than one child stopped. */
2383 continue;
2384 }
2385
2386 /* Now that we've pulled all events out of the kernel, resume
2387 LWPs that don't have an interesting event to report. */
2388 if (stopping_threads == NOT_STOPPING_THREADS)
2389 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2390
2391 /* ... and find an LWP with a status to report to the core, if
2392 any. */
2393 event_thread = (struct thread_info *)
2394 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2395 if (event_thread != NULL)
2396 {
2397 event_child = get_thread_lwp (event_thread);
2398 *wstatp = event_child->status_pending;
2399 event_child->status_pending_p = 0;
2400 event_child->status_pending = 0;
2401 break;
2402 }
2403
2404 /* Check for zombie thread group leaders. Those can't be reaped
2405 until all other threads in the thread group are. */
2406 check_zombie_leaders ();
2407
2408 /* If there are no resumed children left in the set of LWPs we
2409 want to wait for, bail. We can't just block in
2410 waitpid/sigsuspend, because lwps might have been left stopped
2411 in trace-stop state, and we'd be stuck forever waiting for
2412 their status to change (which would only happen if we resumed
2413 them). Even if WNOHANG is set, this return code is preferred
2414 over 0 (below), as it is more detailed. */
2415 if ((find_inferior (&all_threads,
2416 not_stopped_callback,
2417 &wait_ptid) == NULL))
2418 {
2419 if (debug_threads)
2420 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2421 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2422 return -1;
2423 }
2424
2425 /* No interesting event to report to the caller. */
2426 if ((options & WNOHANG))
2427 {
2428 if (debug_threads)
2429 debug_printf ("WNOHANG set, no event found\n");
2430
2431 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2432 return 0;
2433 }
2434
2435 /* Block until we get an event reported with SIGCHLD. */
2436 if (debug_threads)
2437 debug_printf ("sigsuspend'ing\n");
2438
2439 sigsuspend (&prev_mask);
2440 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2441 goto retry;
2442 }
2443
2444 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2445
2446 current_thread = event_thread;
2447
2448 /* Check for thread exit. */
2449 if (! WIFSTOPPED (*wstatp))
2450 {
2451 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2452
2453 if (debug_threads)
2454 debug_printf ("LWP %d is the last lwp of process. "
2455 "Process %ld exiting.\n",
2456 pid_of (event_thread), lwpid_of (event_thread));
2457 return lwpid_of (event_thread);
2458 }
2459
2460 return lwpid_of (event_thread);
2461 }
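/* Illustrative note, not part of the original source: the
   block-signals / WNOHANG-drain / sigsuspend sequence above is the
   classic race-free child-wait pattern.  SIGCHLD is blocked before
   the final pending-status check, so if a child changes state between
   that check and the sigsuspend call, the SIGCHLD stays pending, and
   sigsuspend (which atomically unblocks it) returns immediately
   instead of sleeping until some unrelated wakeup.  */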
2462
2463 /* Wait for an event from child(ren) PTID. PTIDs can be:
2464 minus_one_ptid, to specify any child; a pid PTID, specifying all
2465 lwps of a thread group; or a PTID representing a single lwp. Store
2466 the stop status through the status pointer WSTAT. OPTIONS is
2467 passed to the waitpid call. Return 0 if no event was found and
2468 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2469 were found. Return the PID of the stopped child otherwise. */
2470
2471 static int
2472 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2473 {
2474 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2475 }
2476
2477 /* Count the LWPs that have had events. */
2478
2479 static int
2480 count_events_callback (struct inferior_list_entry *entry, void *data)
2481 {
2482 struct thread_info *thread = (struct thread_info *) entry;
2483 struct lwp_info *lp = get_thread_lwp (thread);
2484 int *count = data;
2485
2486 gdb_assert (count != NULL);
2487
2488 /* Count only resumed LWPs that have an event pending. */
2489 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2490 && lp->status_pending_p)
2491 (*count)++;
2492
2493 return 0;
2494 }
2495
2496 /* Select the LWP (if any) that is currently being single-stepped. */
2497
2498 static int
2499 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2500 {
2501 struct thread_info *thread = (struct thread_info *) entry;
2502 struct lwp_info *lp = get_thread_lwp (thread);
2503
2504 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2505 && thread->last_resume_kind == resume_step
2506 && lp->status_pending_p)
2507 return 1;
2508 else
2509 return 0;
2510 }
2511
2512 /* Select the Nth LWP that has had an event. */
2513
2514 static int
2515 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2516 {
2517 struct thread_info *thread = (struct thread_info *) entry;
2518 struct lwp_info *lp = get_thread_lwp (thread);
2519 int *selector = data;
2520
2521 gdb_assert (selector != NULL);
2522
2523 /* Select only resumed LWPs that have an event pending. */
2524 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2525 && lp->status_pending_p)
2526 if ((*selector)-- == 0)
2527 return 1;
2528
2529 return 0;
2530 }
2531
2532 /* Select one LWP out of those that have events pending. */
2533
2534 static void
2535 select_event_lwp (struct lwp_info **orig_lp)
2536 {
2537 int num_events = 0;
2538 int random_selector;
2539 struct thread_info *event_thread = NULL;
2540
2541 /* In all-stop, give preference to the LWP that is being
2542 single-stepped. There will be at most one, and it's the LWP that
2543 the core is most interested in. If we didn't do this, then we'd
2544 have to handle pending step SIGTRAPs somehow in case the core
2545 later continues the previously-stepped thread; otherwise we'd
2546 report the pending SIGTRAP, and the core, not having stepped the
2547 thread, wouldn't understand what the trap was for, and therefore
2548 would report it to the user as a random signal. */
2549 if (!non_stop)
2550 {
2551 event_thread
2552 = (struct thread_info *) find_inferior (&all_threads,
2553 select_singlestep_lwp_callback,
2554 NULL);
2555 if (event_thread != NULL)
2556 {
2557 if (debug_threads)
2558 debug_printf ("SEL: Select single-step %s\n",
2559 target_pid_to_str (ptid_of (event_thread)));
2560 }
2561 }
2562 if (event_thread == NULL)
2563 {
2564 /* No single-stepping LWP. Select one at random, out of those
2565 which have had events. */
2566
2567 /* First see how many events we have. */
2568 find_inferior (&all_threads, count_events_callback, &num_events);
2569 gdb_assert (num_events > 0);
2570
2571 /* Now randomly pick a LWP out of those that have had
2572 events. */
2573 random_selector = (int)
2574 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2575
2576 if (debug_threads && num_events > 1)
2577 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2578 num_events, random_selector);
2579
2580 event_thread
2581 = (struct thread_info *) find_inferior (&all_threads,
2582 select_event_lwp_callback,
2583 &random_selector);
2584 }
2585
2586 if (event_thread != NULL)
2587 {
2588 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2589
2590 /* Switch the event LWP. */
2591 *orig_lp = event_lp;
2592 }
2593 }
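/* Worked example, not part of the original source, of the selector
   arithmetic above: with num_events == 3, the expression maps
   rand ()'s range [0, RAND_MAX] uniformly onto {0, 1, 2}.  Dividing
   by RAND_MAX + 1.0 (rather than RAND_MAX) keeps the result strictly
   below num_events even when rand () returns RAND_MAX itself:  */
#if 0
#include <stdlib.h>

static int
pick_uniform_example (int num_events)
{
  /* Same formula as in select_event_lwp; yields 0 .. num_events - 1.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif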
2594
2595 /* Decrement the suspend count of an LWP. */
2596
2597 static int
2598 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2599 {
2600 struct thread_info *thread = (struct thread_info *) entry;
2601 struct lwp_info *lwp = get_thread_lwp (thread);
2602
2603 /* Ignore EXCEPT. */
2604 if (lwp == except)
2605 return 0;
2606
2607 lwp->suspended--;
2608
2609 gdb_assert (lwp->suspended >= 0);
2610 return 0;
2611 }
2612
2613 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2614 non-NULL. */
2615
2616 static void
2617 unsuspend_all_lwps (struct lwp_info *except)
2618 {
2619 find_inferior (&all_threads, unsuspend_one_lwp, except);
2620 }
2621
2622 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2623 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2624 void *data);
2625 static int lwp_running (struct inferior_list_entry *entry, void *data);
2626 static ptid_t linux_wait_1 (ptid_t ptid,
2627 struct target_waitstatus *ourstatus,
2628 int target_options);
2629
2630 /* Stabilize threads (move out of jump pads).
2631
2632 If a thread is midway collecting a fast tracepoint, we need to
2633 finish the collection and move it out of the jump pad before
2634 reporting the signal.
2635
2636 This avoids recursion while collecting (when a signal arrives
2637 midway, and the signal handler itself collects), which would trash
2638 the trace buffer. In case the user set a breakpoint in a signal
2639 handler, this avoids the backtrace showing the jump pad, etc.
2640 Most importantly, there are certain things we can't do safely if
2641 threads are stopped in a jump pad (or in its callee's). For
2642 example:
2643
2644 - starting a new trace run. A thread still collecting from the
2645 previous run could trash the trace buffer when resumed. The trace
2646 buffer control structures would have been reset but the thread had
2647 no way to tell. The thread could even be midway through memcpy'ing
2648 into the buffer, which would mean that when resumed, it would
2649 clobber the trace buffer that had been set up for the new run.
2650
2651 - we can't rewrite/reuse the jump pads for new tracepoints
2652 safely. Say you do tstart while a thread is stopped midway
2653 through collecting. When the thread is later resumed, it finishes
2654 the collection, and returns to the jump pad, to execute the
2655 original instruction that was under the tracepoint jump at the
2656 time the older run had been started. If the jump pad has since
2657 been rewritten for something else in the new run, the thread would
2658 now execute wrong/random instructions. */
2659
2660 static void
2661 linux_stabilize_threads (void)
2662 {
2663 struct thread_info *saved_thread;
2664 struct thread_info *thread_stuck;
2665
2666 thread_stuck
2667 = (struct thread_info *) find_inferior (&all_threads,
2668 stuck_in_jump_pad_callback,
2669 NULL);
2670 if (thread_stuck != NULL)
2671 {
2672 if (debug_threads)
2673 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2674 lwpid_of (thread_stuck));
2675 return;
2676 }
2677
2678 saved_thread = current_thread;
2679
2680 stabilizing_threads = 1;
2681
2682 /* Kick 'em all. */
2683 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2684
2685 /* Loop until all are stopped out of the jump pads. */
2686 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2687 {
2688 struct target_waitstatus ourstatus;
2689 struct lwp_info *lwp;
2690 int wstat;
2691
2692 /* Note that we go through the full wait event loop. While
2693 moving threads out of the jump pad, we need to be able to step
2694 over internal breakpoints and such. */
2695 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2696
2697 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2698 {
2699 lwp = get_thread_lwp (current_thread);
2700
2701 /* Lock it. */
2702 lwp->suspended++;
2703
2704 if (ourstatus.value.sig != GDB_SIGNAL_0
2705 || current_thread->last_resume_kind == resume_stop)
2706 {
2707 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2708 enqueue_one_deferred_signal (lwp, &wstat);
2709 }
2710 }
2711 }
2712
2713 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2714
2715 stabilizing_threads = 0;
2716
2717 current_thread = saved_thread;
2718
2719 if (debug_threads)
2720 {
2721 thread_stuck
2722 = (struct thread_info *) find_inferior (&all_threads,
2723 stuck_in_jump_pad_callback,
2724 NULL);
2725 if (thread_stuck != NULL)
2726 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2727 lwpid_of (thread_stuck));
2728 }
2729 }
2730
2731 static void async_file_mark (void);
2732
2733 /* Convenience function that is called when the kernel reports an
2734 event that is not passed out to GDB. */
2735
2736 static ptid_t
2737 ignore_event (struct target_waitstatus *ourstatus)
2738 {
2739 /* If we got an event, there may still be others, as a single
2740 SIGCHLD can indicate more than one child stopped. This forces
2741 another target_wait call. */
2742 async_file_mark ();
2743
2744 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2745 return null_ptid;
2746 }
2747
2748 /* Return non-zero if WAITSTATUS reflects an extended linux
2749 event. Otherwise, return zero. */
2750
2751 static int
2752 extended_event_reported (const struct target_waitstatus *waitstatus)
2753 {
2754 if (waitstatus == NULL)
2755 return 0;
2756
2757 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2758 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2759 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2760 }
2761
2762 /* Wait for a process event; returns its status. */
2763
2764 static ptid_t
2765 linux_wait_1 (ptid_t ptid,
2766 struct target_waitstatus *ourstatus, int target_options)
2767 {
2768 int w;
2769 struct lwp_info *event_child;
2770 int options;
2771 int pid;
2772 int step_over_finished;
2773 int bp_explains_trap;
2774 int maybe_internal_trap;
2775 int report_to_gdb;
2776 int trace_event;
2777 int in_step_range;
2778
2779 if (debug_threads)
2780 {
2781 debug_enter ();
2782 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2783 }
2784
2785 /* Translate generic target options into linux options. */
2786 options = __WALL;
2787 if (target_options & TARGET_WNOHANG)
2788 options |= WNOHANG;
2789
2790 bp_explains_trap = 0;
2791 trace_event = 0;
2792 in_step_range = 0;
2793 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2794
2795 if (ptid_equal (step_over_bkpt, null_ptid))
2796 pid = linux_wait_for_event (ptid, &w, options);
2797 else
2798 {
2799 if (debug_threads)
2800 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2801 target_pid_to_str (step_over_bkpt));
2802 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2803 }
2804
2805 if (pid == 0)
2806 {
2807 gdb_assert (target_options & TARGET_WNOHANG);
2808
2809 if (debug_threads)
2810 {
2811 debug_printf ("linux_wait_1 ret = null_ptid, "
2812 "TARGET_WAITKIND_IGNORE\n");
2813 debug_exit ();
2814 }
2815
2816 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2817 return null_ptid;
2818 }
2819 else if (pid == -1)
2820 {
2821 if (debug_threads)
2822 {
2823 debug_printf ("linux_wait_1 ret = null_ptid, "
2824 "TARGET_WAITKIND_NO_RESUMED\n");
2825 debug_exit ();
2826 }
2827
2828 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2829 return null_ptid;
2830 }
2831
2832 event_child = get_thread_lwp (current_thread);
2833
2834 /* linux_wait_for_event only returns an exit status for the last
2835 child of a process. Report it. */
2836 if (WIFEXITED (w) || WIFSIGNALED (w))
2837 {
2838 if (WIFEXITED (w))
2839 {
2840 ourstatus->kind = TARGET_WAITKIND_EXITED;
2841 ourstatus->value.integer = WEXITSTATUS (w);
2842
2843 if (debug_threads)
2844 {
2845 debug_printf ("linux_wait_1 ret = %s, exited with "
2846 "retcode %d\n",
2847 target_pid_to_str (ptid_of (current_thread)),
2848 WEXITSTATUS (w));
2849 debug_exit ();
2850 }
2851 }
2852 else
2853 {
2854 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2855 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2856
2857 if (debug_threads)
2858 {
2859 debug_printf ("linux_wait_1 ret = %s, terminated with "
2860 "signal %d\n",
2861 target_pid_to_str (ptid_of (current_thread)),
2862 WTERMSIG (w));
2863 debug_exit ();
2864 }
2865 }
2866
2867 return ptid_of (current_thread);
2868 }
2869
2870 /* If step-over executes a breakpoint instruction, it means a
2871 gdb/gdbserver breakpoint had been planted on top of a permanent
2872 breakpoint. The PC has been adjusted by
2873 check_stopped_by_breakpoint to point at the breakpoint address.
2874 Advance the PC manually past the breakpoint, otherwise the
2875 program would keep trapping the permanent breakpoint forever. */
2876 if (!ptid_equal (step_over_bkpt, null_ptid)
2877 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2878 {
2879 unsigned int increment_pc = the_low_target.breakpoint_len;
2880
2881 if (debug_threads)
2882 {
2883 debug_printf ("step-over for %s executed software breakpoint\n",
2884 target_pid_to_str (ptid_of (current_thread)));
2885 }
2886
2887 if (increment_pc != 0)
2888 {
2889 struct regcache *regcache
2890 = get_thread_regcache (current_thread, 1);
2891
2892 event_child->stop_pc += increment_pc;
2893 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2894
2895 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2896 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2897 }
2898 }
2899
2900 /* If this event was not handled before, and is not a SIGTRAP, we
2901 report it. SIGILL and SIGSEGV are also treated as traps in case
2902 a breakpoint is inserted at the current PC. If this target does
2903 not support internal breakpoints at all, we also report the
2904 SIGTRAP without further processing; it's of no concern to us. */
2905 maybe_internal_trap
2906 = (supports_breakpoints ()
2907 && (WSTOPSIG (w) == SIGTRAP
2908 || ((WSTOPSIG (w) == SIGILL
2909 || WSTOPSIG (w) == SIGSEGV)
2910 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2911
2912 if (maybe_internal_trap)
2913 {
2914 /* Handle anything that requires bookkeeping before deciding to
2915 report the event or continue waiting. */
2916
2917 /* First check if we can explain the SIGTRAP with an internal
2918 breakpoint, or if we should possibly report the event to GDB.
2919 Do this before anything that may remove or insert a
2920 breakpoint. */
2921 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2922
2923 /* We have a SIGTRAP, possibly a step-over dance has just
2924 finished. If so, tweak the state machine accordingly,
2925 reinsert breakpoints and delete any reinsert (software
2926 single-step) breakpoints. */
2927 step_over_finished = finish_step_over (event_child);
2928
2929 /* Now invoke the callbacks of any internal breakpoints there. */
2930 check_breakpoints (event_child->stop_pc);
2931
2932 /* Handle tracepoint data collecting. This may overflow the
2933 trace buffer, and cause a tracing stop, removing
2934 breakpoints. */
2935 trace_event = handle_tracepoints (event_child);
2936
2937 if (bp_explains_trap)
2938 {
2939 /* If we stepped or ran into an internal breakpoint, we've
2940 already handled it. So next time we resume (from this
2941 PC), we should step over it. */
2942 if (debug_threads)
2943 debug_printf ("Hit a gdbserver breakpoint.\n");
2944
2945 if (breakpoint_here (event_child->stop_pc))
2946 event_child->need_step_over = 1;
2947 }
2948 }
2949 else
2950 {
2951 /* We have some other signal, possibly a step-over dance was in
2952 progress, and it should be cancelled too. */
2953 step_over_finished = finish_step_over (event_child);
2954 }
2955
2956 /* We have all the data we need. Either report the event to GDB, or
2957 resume threads and keep waiting for more. */
2958
2959 /* If we're collecting a fast tracepoint, finish the collection and
2960 move out of the jump pad before delivering a signal. See
2961 linux_stabilize_threads. */
2962
2963 if (WIFSTOPPED (w)
2964 && WSTOPSIG (w) != SIGTRAP
2965 && supports_fast_tracepoints ()
2966 && agent_loaded_p ())
2967 {
2968 if (debug_threads)
2969 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2970 "to defer or adjust it.\n",
2971 WSTOPSIG (w), lwpid_of (current_thread));
2972
2973 /* Allow debugging the jump pad itself. */
2974 if (current_thread->last_resume_kind != resume_step
2975 && maybe_move_out_of_jump_pad (event_child, &w))
2976 {
2977 enqueue_one_deferred_signal (event_child, &w);
2978
2979 if (debug_threads)
2980 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2981 WSTOPSIG (w), lwpid_of (current_thread));
2982
2983 linux_resume_one_lwp (event_child, 0, 0, NULL);
2984
2985 return ignore_event (ourstatus);
2986 }
2987 }
2988
2989 if (event_child->collecting_fast_tracepoint)
2990 {
2991 if (debug_threads)
2992 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2993 "Check if we're already there.\n",
2994 lwpid_of (current_thread),
2995 event_child->collecting_fast_tracepoint);
2996
2997 trace_event = 1;
2998
2999 event_child->collecting_fast_tracepoint
3000 = linux_fast_tracepoint_collecting (event_child, NULL);
3001
3002 if (event_child->collecting_fast_tracepoint != 1)
3003 {
3004 /* No longer need this breakpoint. */
3005 if (event_child->exit_jump_pad_bkpt != NULL)
3006 {
3007 if (debug_threads)
3008 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3009 "stopping all threads momentarily.\n");
3010
3011 /* Other running threads could hit this breakpoint.
3012 We don't handle moribund locations like GDB does;
3013 instead we always pause all threads when removing
3014 breakpoints, so that any step-over or
3015 decr_pc_after_break adjustment is always taken
3016 care of while the breakpoint is still
3017 inserted. */
3018 stop_all_lwps (1, event_child);
3019
3020 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3021 event_child->exit_jump_pad_bkpt = NULL;
3022
3023 unstop_all_lwps (1, event_child);
3024
3025 gdb_assert (event_child->suspended >= 0);
3026 }
3027 }
3028
3029 if (event_child->collecting_fast_tracepoint == 0)
3030 {
3031 if (debug_threads)
3032 debug_printf ("fast tracepoint finished "
3033 "collecting successfully.\n");
3034
3035 /* We may have a deferred signal to report. */
3036 if (dequeue_one_deferred_signal (event_child, &w))
3037 {
3038 if (debug_threads)
3039 debug_printf ("dequeued one signal.\n");
3040 }
3041 else
3042 {
3043 if (debug_threads)
3044 debug_printf ("no deferred signals.\n");
3045
3046 if (stabilizing_threads)
3047 {
3048 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3049 ourstatus->value.sig = GDB_SIGNAL_0;
3050
3051 if (debug_threads)
3052 {
3053 debug_printf ("linux_wait_1 ret = %s, stopped "
3054 "while stabilizing threads\n",
3055 target_pid_to_str (ptid_of (current_thread)));
3056 debug_exit ();
3057 }
3058
3059 return ptid_of (current_thread);
3060 }
3061 }
3062 }
3063 }
3064
3065 /* Check whether GDB would be interested in this event. */
3066
3067 /* If GDB is not interested in this signal, don't stop other
3068 threads, and don't report it to GDB. Just resume the inferior
3069 right away. We do this for threading-related signals as well as
3070 any that GDB specifically requested we ignore. But never ignore
3071 SIGSTOP if we sent it ourselves, and do not ignore signals when
3072 stepping - they may require special handling to skip the signal
3073 handler. Also never ignore signals that could be caused by a
3074 breakpoint. */
3075 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3076 thread library? */
3077 if (WIFSTOPPED (w)
3078 && current_thread->last_resume_kind != resume_step
3079 && (
3080 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3081 (current_process ()->priv->thread_db != NULL
3082 && (WSTOPSIG (w) == __SIGRTMIN
3083 || WSTOPSIG (w) == __SIGRTMIN + 1))
3084 ||
3085 #endif
3086 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3087 && !(WSTOPSIG (w) == SIGSTOP
3088 && current_thread->last_resume_kind == resume_stop)
3089 && !linux_wstatus_maybe_breakpoint (w))))
3090 {
3091 siginfo_t info, *info_p;
3092
3093 if (debug_threads)
3094 debug_printf ("Ignored signal %d for LWP %ld.\n",
3095 WSTOPSIG (w), lwpid_of (current_thread));
3096
3097 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3098 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3099 info_p = &info;
3100 else
3101 info_p = NULL;
3102 linux_resume_one_lwp (event_child, event_child->stepping,
3103 WSTOPSIG (w), info_p);
3104 return ignore_event (ourstatus);
3105 }
3106
3107 /* Note that all addresses are always "out of the step range" when
3108 there's no range to begin with. */
3109 in_step_range = lwp_in_step_range (event_child);
3110
3111 /* If GDB wanted this thread to single step, and the thread is out
3112 of the step range, we always want to report the SIGTRAP, and let
3113 GDB handle it. Watchpoints should always be reported. So should
3114 signals we can't explain. A SIGTRAP we can't explain could be a
3115 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3116 we do, we'd be able to handle GDB breakpoints on top of internal
3117 breakpoints, by handling the internal breakpoint and still
3118 reporting the event to GDB. If we don't, we're out of luck; GDB
3119 won't see the breakpoint hit. */
3120 report_to_gdb = (!maybe_internal_trap
3121 || (current_thread->last_resume_kind == resume_step
3122 && !in_step_range)
3123 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3124 || (!step_over_finished && !in_step_range
3125 && !bp_explains_trap && !trace_event)
3126 || (gdb_breakpoint_here (event_child->stop_pc)
3127 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3128 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3129 || extended_event_reported (&event_child->waitstatus));
3130
3131 run_breakpoint_commands (event_child->stop_pc);
3132
3133 /* We found no reason GDB would want us to stop. We either hit one
3134 of our own breakpoints, or finished an internal step GDB
3135 shouldn't know about. */
3136 if (!report_to_gdb)
3137 {
3138 if (debug_threads)
3139 {
3140 if (bp_explains_trap)
3141 debug_printf ("Hit a gdbserver breakpoint.\n");
3142 if (step_over_finished)
3143 debug_printf ("Step-over finished.\n");
3144 if (trace_event)
3145 debug_printf ("Tracepoint event.\n");
3146 if (lwp_in_step_range (event_child))
3147 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3148 paddress (event_child->stop_pc),
3149 paddress (event_child->step_range_start),
3150 paddress (event_child->step_range_end));
3151 if (extended_event_reported (&event_child->waitstatus))
3152 {
3153 char *str = target_waitstatus_to_string (ourstatus);
3154 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3155 lwpid_of (get_lwp_thread (event_child)), str);
3156 xfree (str);
3157 }
3158 }
3159
3160 /* We're not reporting this breakpoint to GDB, so apply the
3161 decr_pc_after_break adjustment to the inferior's regcache
3162 ourselves. */
3163
3164 if (the_low_target.set_pc != NULL)
3165 {
3166 struct regcache *regcache
3167 = get_thread_regcache (current_thread, 1);
3168 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3169 }
3170
3171 /* We may have finished stepping over a breakpoint. If so,
3172 we've stopped and suspended all LWPs momentarily except the
3173 stepping one. This is where we resume them all again. We're
3174 going to keep waiting, so use proceed, which handles stepping
3175 over the next breakpoint. */
3176 if (debug_threads)
3177 debug_printf ("proceeding all threads.\n");
3178
3179 if (step_over_finished)
3180 unsuspend_all_lwps (event_child);
3181
3182 proceed_all_lwps ();
3183 return ignore_event (ourstatus);
3184 }
3185
3186 if (debug_threads)
3187 {
3188 if (current_thread->last_resume_kind == resume_step)
3189 {
3190 if (event_child->step_range_start == event_child->step_range_end)
3191 debug_printf ("GDB wanted to single-step, reporting event.\n");
3192 else if (!lwp_in_step_range (event_child))
3193 debug_printf ("Out of step range, reporting event.\n");
3194 }
3195 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3196 debug_printf ("Stopped by watchpoint.\n");
3197 else if (gdb_breakpoint_here (event_child->stop_pc))
3198 debug_printf ("Stopped by GDB breakpoint.\n");
3200 debug_printf ("Hit a non-gdbserver trap event.\n");
3201 }
3202
3203 /* Alright, we're going to report a stop. */
3204
3205 if (!stabilizing_threads)
3206 {
3207 /* In all-stop, stop all threads. */
3208 if (!non_stop)
3209 stop_all_lwps (0, NULL);
3210
3211 /* If we're not waiting for a specific LWP, choose an event LWP
3212 from among those that have had events. Giving equal priority
3213 to all LWPs that have had events helps prevent
3214 starvation. */
3215 if (ptid_equal (ptid, minus_one_ptid))
3216 {
3217 event_child->status_pending_p = 1;
3218 event_child->status_pending = w;
3219
3220 select_event_lwp (&event_child);
3221
3222 /* current_thread and event_child must stay in sync. */
3223 current_thread = get_lwp_thread (event_child);
3224
3225 event_child->status_pending_p = 0;
3226 w = event_child->status_pending;
3227 }
3228
3229 if (step_over_finished)
3230 {
3231 if (!non_stop)
3232 {
3233 /* If we were doing a step-over, all other threads but
3234 the stepping one had been paused in start_step_over,
3235 with their suspend counts incremented. We don't want
3236 to do a full unstop/unpause, because we're in
3237 all-stop mode (so we want threads stopped), but we
3238 still need to unsuspend the other threads, to
3239 decrement their `suspended' count back. */
3240 unsuspend_all_lwps (event_child);
3241 }
3242 else
3243 {
3244 /* If we just finished a step-over, then all threads had
3245 been momentarily paused. In all-stop, that's fine,
3246 we want threads stopped by now anyway. In non-stop,
3247 we need to re-resume threads that GDB wanted to be
3248 running. */
3249 unstop_all_lwps (1, event_child);
3250 }
3251 }
3252
3253 /* Stabilize threads (move out of jump pads). */
3254 if (!non_stop)
3255 stabilize_threads ();
3256 }
3257 else
3258 {
3259 /* If we just finished a step-over, then all threads had been
3260 momentarily paused. In all-stop, that's fine, we want
3261 threads stopped by now anyway. In non-stop, we need to
3262 re-resume threads that GDB wanted to be running. */
3263 if (step_over_finished)
3264 unstop_all_lwps (1, event_child);
3265 }
3266
3267 if (extended_event_reported (&event_child->waitstatus))
3268 {
3269 /* If the reported event is a fork, vfork or exec, let GDB know. */
3270 ourstatus->kind = event_child->waitstatus.kind;
3271 ourstatus->value = event_child->waitstatus.value;
3272
3273 /* Clear the event lwp's waitstatus since we handled it already. */
3274 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3275 }
3276 else
3277 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3278
3279 /* Now that we've selected our final event LWP, un-adjust its PC if
3280 it was a software breakpoint, and the client doesn't know we can
3281 adjust the breakpoint ourselves. */
3282 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3283 && !swbreak_feature)
3284 {
3285 int decr_pc = the_low_target.decr_pc_after_break;
3286
3287 if (decr_pc != 0)
3288 {
3289 struct regcache *regcache
3290 = get_thread_regcache (current_thread, 1);
3291 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3292 }
3293 }
3294
3295 if (current_thread->last_resume_kind == resume_stop
3296 && WSTOPSIG (w) == SIGSTOP)
3297 {
3298 /* The thread was requested to stop by GDB with vCont;t and it
3299 stopped cleanly, so report it as SIG0. The use of SIGSTOP
3300 is an implementation detail. */
3301 ourstatus->value.sig = GDB_SIGNAL_0;
3302 }
3303 else if (current_thread->last_resume_kind == resume_stop
3304 && WSTOPSIG (w) != SIGSTOP)
3305 {
3306 /* The thread was requested to stop by GDB with vCont;t, but
3307 it stopped for other reasons. */
3308 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3309 }
3310 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3311 {
3312 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3313 }
3314
3315 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3316
3317 if (debug_threads)
3318 {
3319 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3320 target_pid_to_str (ptid_of (current_thread)),
3321 ourstatus->kind, ourstatus->value.sig);
3322 debug_exit ();
3323 }
3324
3325 return ptid_of (current_thread);
3326 }
3327
3328 /* Get rid of any pending event in the pipe. */
3329 static void
3330 async_file_flush (void)
3331 {
3332 int ret;
3333 char buf;
3334
3335 do
3336 ret = read (linux_event_pipe[0], &buf, 1);
3337 while (ret >= 0 || (ret == -1 && errno == EINTR));
3338 }
3339
3340 /* Put something in the pipe, so the event loop wakes up. */
3341 static void
3342 async_file_mark (void)
3343 {
3344 int ret;
3345
3346 async_file_flush ();
3347
3348 do
3349 ret = write (linux_event_pipe[1], "+", 1);
3350 while (ret == 0 || (ret == -1 && errno == EINTR));
3351
3352 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3353 be awakened anyway. */
3354 }
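/* Illustrative sketch, not part of the original source: the self-pipe
   trick above assumes both ends of linux_event_pipe were made
   non-blocking when the pipe was created (presumably elsewhere in
   this file); otherwise async_file_flush would block once the pipe
   drains.  A minimal setup (the helper name is an assumption) could
   look like:  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
make_event_pipe_example (int fds[2])
{
  if (pipe (fds) == -1)
    return -1;

  /* Non-blocking on both ends, so the flush/mark loops above spin
     until EAGAIN instead of stalling.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    return -1;

  return 0;
}
#endif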
3355
3356 static ptid_t
3357 linux_wait (ptid_t ptid,
3358 struct target_waitstatus *ourstatus, int target_options)
3359 {
3360 ptid_t event_ptid;
3361
3362 /* Flush the async file first. */
3363 if (target_is_async_p ())
3364 async_file_flush ();
3365
3366 do
3367 {
3368 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3369 }
3370 while ((target_options & TARGET_WNOHANG) == 0
3371 && ptid_equal (event_ptid, null_ptid)
3372 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3373
3374 /* If at least one stop was reported, there may be more. A single
3375 SIGCHLD can signal more than one child stop. */
3376 if (target_is_async_p ()
3377 && (target_options & TARGET_WNOHANG) != 0
3378 && !ptid_equal (event_ptid, null_ptid))
3379 async_file_mark ();
3380
3381 return event_ptid;
3382 }
3383
3384 /* Send a signal to an LWP. */
3385
3386 static int
3387 kill_lwp (unsigned long lwpid, int signo)
3388 {
3389 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3390 fails, then we are not using nptl threads and we should be using kill. */
3391
3392 #ifdef __NR_tkill
3393 {
3394 static int tkill_failed;
3395
3396 if (!tkill_failed)
3397 {
3398 int ret;
3399
3400 errno = 0;
3401 ret = syscall (__NR_tkill, lwpid, signo);
3402 if (errno != ENOSYS)
3403 return ret;
3404 tkill_failed = 1;
3405 }
3406 }
3407 #endif
3408
3409 return kill (lwpid, signo);
3410 }
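/* Illustrative note, not part of the original source: the distinction
   matters because kill (PID, SIG) addresses the whole thread group and
   lets the kernel deliver SIG to any eligible thread in it, whereas
   tkill (LWPID, SIG) targets exactly one kernel thread.  The per-LWP
   SIGSTOPs sent by send_sigstop below rely on that precision.  */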
3411
3412 void
3413 linux_stop_lwp (struct lwp_info *lwp)
3414 {
3415 send_sigstop (lwp);
3416 }
3417
3418 static void
3419 send_sigstop (struct lwp_info *lwp)
3420 {
3421 int pid;
3422
3423 pid = lwpid_of (get_lwp_thread (lwp));
3424
3425 /* If we already have a pending stop signal for this process, don't
3426 send another. */
3427 if (lwp->stop_expected)
3428 {
3429 if (debug_threads)
3430 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3431
3432 return;
3433 }
3434
3435 if (debug_threads)
3436 debug_printf ("Sending sigstop to lwp %d\n", pid);
3437
3438 lwp->stop_expected = 1;
3439 kill_lwp (pid, SIGSTOP);
3440 }
3441
3442 static int
3443 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3444 {
3445 struct thread_info *thread = (struct thread_info *) entry;
3446 struct lwp_info *lwp = get_thread_lwp (thread);
3447
3448 /* Ignore EXCEPT. */
3449 if (lwp == except)
3450 return 0;
3451
3452 if (lwp->stopped)
3453 return 0;
3454
3455 send_sigstop (lwp);
3456 return 0;
3457 }
3458
3459 /* Increment the suspend count of an LWP, and stop it, if not stopped
3460 yet. */
3461 static int
3462 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3463 void *except)
3464 {
3465 struct thread_info *thread = (struct thread_info *) entry;
3466 struct lwp_info *lwp = get_thread_lwp (thread);
3467
3468 /* Ignore EXCEPT. */
3469 if (lwp == except)
3470 return 0;
3471
3472 lwp->suspended++;
3473
3474 return send_sigstop_callback (entry, except);
3475 }
3476
3477 static void
3478 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3479 {
3480 /* It's dead, really. */
3481 lwp->dead = 1;
3482
3483 /* Store the exit status for later. */
3484 lwp->status_pending_p = 1;
3485 lwp->status_pending = wstat;
3486
3487 /* Prevent trying to stop it. */
3488 lwp->stopped = 1;
3489
3490 /* No further stops are expected from a dead lwp. */
3491 lwp->stop_expected = 0;
3492 }
3493
3494 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3495
3496 static void
3497 wait_for_sigstop (void)
3498 {
3499 struct thread_info *saved_thread;
3500 ptid_t saved_tid;
3501 int wstat;
3502 int ret;
3503
3504 saved_thread = current_thread;
3505 if (saved_thread != NULL)
3506 saved_tid = saved_thread->entry.id;
3507 else
3508 saved_tid = null_ptid; /* avoid bogus unused warning */
3509
3510 if (debug_threads)
3511 debug_printf ("wait_for_sigstop: pulling events\n");
3512
3513 /* Passing NULL_PTID as filter indicates we want all events to be
3514 left pending. Eventually this returns when there are no
3515 unwaited-for children left. */
3516 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3517 &wstat, __WALL);
3518 gdb_assert (ret == -1);
3519
3520 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3521 current_thread = saved_thread;
3522 else
3523 {
3524 if (debug_threads)
3525 debug_printf ("Previously current thread died.\n");
3526
3527 if (non_stop)
3528 {
3529 /* We can't change the current inferior behind GDB's back,
3530 otherwise, a subsequent command may apply to the wrong
3531 process. */
3532 current_thread = NULL;
3533 }
3534 else
3535 {
3536 /* Set a valid thread as current. */
3537 set_desired_thread (0);
3538 }
3539 }
3540 }
3541
3542 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3543 move it out, because we need to report the stop event to GDB. For
3544 example, if the user puts a breakpoint in the jump pad, it's
3545 because she wants to debug it. */
3546
3547 static int
3548 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3549 {
3550 struct thread_info *thread = (struct thread_info *) entry;
3551 struct lwp_info *lwp = get_thread_lwp (thread);
3552
3553 gdb_assert (lwp->suspended == 0);
3554 gdb_assert (lwp->stopped);
3555
3556 /* Allow debugging the jump pad, gdb_collect, etc.. */
3557 return (supports_fast_tracepoints ()
3558 && agent_loaded_p ()
3559 && (gdb_breakpoint_here (lwp->stop_pc)
3560 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3561 || thread->last_resume_kind == resume_step)
3562 && linux_fast_tracepoint_collecting (lwp, NULL));
3563 }
3564
3565 static void
3566 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3567 {
3568 struct thread_info *thread = (struct thread_info *) entry;
3569 struct lwp_info *lwp = get_thread_lwp (thread);
3570 int *wstat;
3571
3572 gdb_assert (lwp->suspended == 0);
3573 gdb_assert (lwp->stopped);
3574
3575 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3576
3577 /* Allow debugging the jump pad, gdb_collect, etc. */
3578 if (!gdb_breakpoint_here (lwp->stop_pc)
3579 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3580 && thread->last_resume_kind != resume_step
3581 && maybe_move_out_of_jump_pad (lwp, wstat))
3582 {
3583 if (debug_threads)
3584 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3585 lwpid_of (thread));
3586
3587 if (wstat)
3588 {
3589 lwp->status_pending_p = 0;
3590 enqueue_one_deferred_signal (lwp, wstat);
3591
3592 if (debug_threads)
3593 debug_printf ("Signal %d for LWP %ld deferred "
3594 "(in jump pad)\n",
3595 WSTOPSIG (*wstat), lwpid_of (thread));
3596 }
3597
3598 linux_resume_one_lwp (lwp, 0, 0, NULL);
3599 }
3600 else
3601 lwp->suspended++;
3602 }
3603
3604 static int
3605 lwp_running (struct inferior_list_entry *entry, void *data)
3606 {
3607 struct thread_info *thread = (struct thread_info *) entry;
3608 struct lwp_info *lwp = get_thread_lwp (thread);
3609
3610 if (lwp->dead)
3611 return 0;
3612 if (lwp->stopped)
3613 return 0;
3614 return 1;
3615 }
3616
3617 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3618 If SUSPEND, then also increase the suspend count of every LWP,
3619 except EXCEPT. */
3620
3621 static void
3622 stop_all_lwps (int suspend, struct lwp_info *except)
3623 {
3624 /* Should not be called recursively. */
3625 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3626
3627 if (debug_threads)
3628 {
3629 debug_enter ();
3630 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3631 suspend ? "stop-and-suspend" : "stop",
3632 except != NULL
3633 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3634 : "none");
3635 }
3636
3637 stopping_threads = (suspend
3638 ? STOPPING_AND_SUSPENDING_THREADS
3639 : STOPPING_THREADS);
3640
3641 if (suspend)
3642 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3643 else
3644 find_inferior (&all_threads, send_sigstop_callback, except);
3645 wait_for_sigstop ();
3646 stopping_threads = NOT_STOPPING_THREADS;
3647
3648 if (debug_threads)
3649 {
3650 debug_printf ("stop_all_lwps done, setting stopping_threads "
3651 "back to !stopping\n");
3652 debug_exit ();
3653 }
3654 }
3655
3656 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3657 SIGNAL is nonzero, give it that signal. */
3658
3659 static void
3660 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3661 int step, int signal, siginfo_t *info)
3662 {
3663 struct thread_info *thread = get_lwp_thread (lwp);
3664 struct thread_info *saved_thread;
3665 int fast_tp_collecting;
3666 struct process_info *proc = get_thread_process (thread);
3667
3668 /* Note that the target description may not be initialised
3669 (proc->tdesc == NULL) at this point, because the program hasn't
3670 stopped at the first instruction yet. That means GDBserver skips
3671 the extra traps from the wrapper program (see option --wrapper).
3672 Code in this function that requires register access should be
3673 guarded by a check on proc->tdesc or something equivalent. */
3674
3675 if (lwp->stopped == 0)
3676 return;
3677
3678 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3679
3680 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3681
3682 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3683 user used the "jump" command, or "set $pc = foo"). */
3684 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3685 {
3686 /* Collecting 'while-stepping' actions doesn't make sense
3687 anymore. */
3688 release_while_stepping_state_list (thread);
3689 }
3690
3691 /* If we have pending signals or status, and a new signal, enqueue the
3692 signal. Also enqueue the signal if we are waiting to reinsert a
3693 breakpoint; it will be picked up again below. */
3694 if (signal != 0
3695 && (lwp->status_pending_p
3696 || lwp->pending_signals != NULL
3697 || lwp->bp_reinsert != 0
3698 || fast_tp_collecting))
3699 {
3700 struct pending_signals *p_sig;
3701 p_sig = xmalloc (sizeof (*p_sig));
3702 p_sig->prev = lwp->pending_signals;
3703 p_sig->signal = signal;
3704 if (info == NULL)
3705 memset (&p_sig->info, 0, sizeof (siginfo_t));
3706 else
3707 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3708 lwp->pending_signals = p_sig;
3709 }
3710
3711 if (lwp->status_pending_p)
3712 {
3713 if (debug_threads)
3714 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3715 " has pending status\n",
3716 lwpid_of (thread), step ? "step" : "continue", signal,
3717 lwp->stop_expected ? "expected" : "not expected");
3718 return;
3719 }
3720
3721 saved_thread = current_thread;
3722 current_thread = thread;
3723
3724 if (debug_threads)
3725 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3726 lwpid_of (thread), step ? "step" : "continue", signal,
3727 lwp->stop_expected ? "expected" : "not expected");
3728
3729 /* This bit needs some thinking about. If we get a signal that
3730 we must report while a single-step reinsert is still pending,
3731 we often end up resuming the thread. It might be better to
3732 (ew) allow a stack of pending events; then we could be sure that
3733 the reinsert happened right away and not lose any signals.
3734
3735 Making this stack would also shrink the window in which breakpoints are
3736 uninserted (see comment in linux_wait_for_lwp) but not enough for
3737 complete correctness, so it won't solve that problem. It may be
3738 worthwhile just to solve this one, however. */
3739 if (lwp->bp_reinsert != 0)
3740 {
3741 if (debug_threads)
3742 debug_printf (" pending reinsert at 0x%s\n",
3743 paddress (lwp->bp_reinsert));
3744
3745 if (can_hardware_single_step ())
3746 {
3747 if (fast_tp_collecting == 0)
3748 {
3749 if (step == 0)
3750 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3751 if (lwp->suspended)
3752 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3753 lwp->suspended);
3754 }
3755
3756 step = 1;
3757 }
3758
3759 /* Postpone any pending signal. It was enqueued above. */
3760 signal = 0;
3761 }
3762
3763 if (fast_tp_collecting == 1)
3764 {
3765 if (debug_threads)
3766 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3767 " (exit-jump-pad-bkpt)\n",
3768 lwpid_of (thread));
3769
3770 /* Postpone any pending signal. It was enqueued above. */
3771 signal = 0;
3772 }
3773 else if (fast_tp_collecting == 2)
3774 {
3775 if (debug_threads)
3776 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3777 " single-stepping\n",
3778 lwpid_of (thread));
3779
3780 if (can_hardware_single_step ())
3781 step = 1;
3782 else
3783 {
3784 internal_error (__FILE__, __LINE__,
3785 "moving out of jump pad single-stepping"
3786 " not implemented on this target");
3787 }
3788
3789 /* Postpone any pending signal. It was enqueued above. */
3790 signal = 0;
3791 }
3792
3793 /* If we have while-stepping actions in this thread, set it stepping.
3794 If we have a signal to deliver, it may or may not be set to
3795 SIG_IGN; we don't know. Assume so, and allow collecting
3796 while-stepping into a signal handler. A possible smart thing to
3797 do would be to set an internal breakpoint at the signal return
3798 address, continue, and carry on catching this while-stepping
3799 action only when that breakpoint is hit. A future
3800 enhancement. */
3801 if (thread->while_stepping != NULL
3802 && can_hardware_single_step ())
3803 {
3804 if (debug_threads)
3805 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3806 lwpid_of (thread));
3807 step = 1;
3808 }
3809
3810 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3811 {
3812 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3813
3814 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3815
3816 if (debug_threads)
3817 {
3818 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3819 (long) lwp->stop_pc);
3820 }
3821 }
3822
3823 /* If we have pending signals, consume one unless we are trying to
3824 reinsert a breakpoint or we're trying to finish a fast tracepoint
3825 collect. */
3826 if (lwp->pending_signals != NULL
3827 && lwp->bp_reinsert == 0
3828 && fast_tp_collecting == 0)
3829 {
3830 struct pending_signals **p_sig;
3831
3832 p_sig = &lwp->pending_signals;
3833 while ((*p_sig)->prev != NULL)
3834 p_sig = &(*p_sig)->prev;
3835
3836 signal = (*p_sig)->signal;
3837 if ((*p_sig)->info.si_signo != 0)
3838 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3839 &(*p_sig)->info);
3840
3841 free (*p_sig);
3842 *p_sig = NULL;
3843 }
3844
3845 if (the_low_target.prepare_to_resume != NULL)
3846 the_low_target.prepare_to_resume (lwp);
3847
3848 regcache_invalidate_thread (thread);
3849 errno = 0;
3850 lwp->stepping = step;
3851 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3852 (PTRACE_TYPE_ARG3) 0,
3853 /* Coerce to a uintptr_t first to avoid potential gcc warning
3854 of coercing an 8 byte integer to a 4 byte pointer. */
3855 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3856
3857 current_thread = saved_thread;
3858 if (errno)
3859 perror_with_name ("resuming thread");
3860
3861 /* Successfully resumed. Clear state that no longer makes sense,
3862 and mark the LWP as running. Must not do this before resuming
3863 otherwise if that fails other code will be confused. E.g., we'd
3864 later try to stop the LWP and hang forever waiting for a stop
3865 status. Note that we must not throw after this is cleared,
3866 otherwise handle_zombie_lwp_error would get confused. */
3867 lwp->stopped = 0;
3868 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3869 }
3870
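/* Illustrative sketch (not part of this file): lwp->pending_signals
   above is a list linked through PREV with the newest entry at the
   head, so delivering the *oldest* signal first means walking to the
   tail, exactly as linux_resume_one_lwp_throw does.  The node type
   below is a hypothetical stand-in for struct pending_signals.  */

struct sketch_sig { struct sketch_sig *prev; int signal; };

static int
sketch_dequeue_oldest (struct sketch_sig **head)
{
  struct sketch_sig **p = head;
  int sig;

  if (*p == NULL)
    return 0;			/* Caller checks for a non-empty list.  */

  /* The last node on the PREV chain is the oldest one enqueued.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}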
3871 /* Called when we try to resume a stopped LWP and that errors out. If
3872 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3873 or about to become one), discard the error, clear any pending status
3874 the LWP may have, and return true (we'll collect the exit status
3875 soon enough). Otherwise, return false. */
3876
3877 static int
3878 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3879 {
3880 struct thread_info *thread = get_lwp_thread (lp);
3881
3882 /* If we get an error after resuming the LWP successfully, we'd
3883 mistake the LWP's not-stopped ('!T') state for it being gone. */
3884 gdb_assert (lp->stopped);
3885
3886 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3887 because even if ptrace failed with ESRCH, the tracee may be "not
3888 yet fully dead", but already refusing ptrace requests. In that
3889 case the tracee has 'R (Running)' state for a little bit
3890 (observed in Linux 3.18). See also the note on ESRCH in the
3891 ptrace(2) man page. Instead, check whether the LWP has any state
3892 other than ptrace-stopped. */
3893
3894 /* Don't assume anything if /proc/PID/status can't be read. */
3895 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3896 {
3897 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3898 lp->status_pending_p = 0;
3899 return 1;
3900 }
3901 return 0;
3902 }
3903
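/* Illustrative sketch (not part of this file): a rough equivalent of
   the check linux_proc_pid_is_trace_stopped_nowarn performs (the real
   helper lives in nat/linux-procfs.c and is more careful).  Trace
   stop shows up in /proc/PID/status as "State:\tt (tracing stop)";
   very old kernels reported 'T' for both job-control and trace
   stops.  */

static int
sketch_pid_is_trace_stopped (pid_t pid)
{
  char path[64], line[128];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	/* Skip the whitespace after the tag and look at the state
	   letter itself.  */
	while (*p == ' ' || *p == '\t')
	  p++;
	stopped = (*p == 't');
	break;
      }

  fclose (f);
  return stopped;
}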
3904 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3905 disappears while we try to resume it. */
3906
3907 static void
3908 linux_resume_one_lwp (struct lwp_info *lwp,
3909 int step, int signal, siginfo_t *info)
3910 {
3911 TRY
3912 {
3913 linux_resume_one_lwp_throw (lwp, step, signal, info);
3914 }
3915 CATCH (ex, RETURN_MASK_ERROR)
3916 {
3917 if (!check_ptrace_stopped_lwp_gone (lwp))
3918 throw_exception (ex);
3919 }
3920 END_CATCH
3921 }
3922
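/* Illustrative sketch (not part of this file): stripped of the
   TRY/CATCH machinery, the "tolerate vanishing tracees" policy of
   linux_resume_one_lwp amounts to treating ESRCH from ptrace as
   benign -- the LWP's exit status will be collected by a later
   waitpid.  */

static int
sketch_resume_tolerant (pid_t lwpid, int sig)
{
  errno = 0;
  if (ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (uintptr_t) sig) == 0)
    return 0;

  if (errno == ESRCH)
    return 0;			/* LWP is gone or becoming a zombie.  */

  return -1;
}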
3923 struct thread_resume_array
3924 {
3925 struct thread_resume *resume;
3926 size_t n;
3927 };
3928
3929 /* This function is called once per thread via find_inferior.
3930 ARG is a pointer to a thread_resume_array struct.
3931 We look up the thread specified by ENTRY in ARG, and mark the thread
3932 with a pointer to the appropriate resume request.
3933
3934 This algorithm is O(threads * resume elements), but resume elements
3935 is small (and will remain small at least until GDB supports thread
3936 suspension). */
3937
3938 static int
3939 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3940 {
3941 struct thread_info *thread = (struct thread_info *) entry;
3942 struct lwp_info *lwp = get_thread_lwp (thread);
3943 int ndx;
3944 struct thread_resume_array *r;
3945
3946 r = arg;
3947
3948 for (ndx = 0; ndx < r->n; ndx++)
3949 {
3950 ptid_t ptid = r->resume[ndx].thread;
3951 if (ptid_equal (ptid, minus_one_ptid)
3952 || ptid_equal (ptid, entry->id)
3953 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3954 of PID'. */
3955 || (ptid_get_pid (ptid) == pid_of (thread)
3956 && (ptid_is_pid (ptid)
3957 || ptid_get_lwp (ptid) == -1)))
3958 {
3959 if (r->resume[ndx].kind == resume_stop
3960 && thread->last_resume_kind == resume_stop)
3961 {
3962 if (debug_threads)
3963 debug_printf ("already %s LWP %ld at GDB's request\n",
3964 (thread->last_status.kind
3965 == TARGET_WAITKIND_STOPPED)
3966 ? "stopped"
3967 : "stopping",
3968 lwpid_of (thread));
3969
3970 continue;
3971 }
3972
3973 lwp->resume = &r->resume[ndx];
3974 thread->last_resume_kind = lwp->resume->kind;
3975
3976 lwp->step_range_start = lwp->resume->step_range_start;
3977 lwp->step_range_end = lwp->resume->step_range_end;
3978
3979 /* If we had a deferred signal to report, dequeue one now.
3980 This can happen if LWP gets more than one signal while
3981 trying to get out of a jump pad. */
3982 if (lwp->stopped
3983 && !lwp->status_pending_p
3984 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3985 {
3986 lwp->status_pending_p = 1;
3987
3988 if (debug_threads)
3989 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3990 "leaving status pending.\n",
3991 WSTOPSIG (lwp->status_pending),
3992 lwpid_of (thread));
3993 }
3994
3995 return 0;
3996 }
3997 }
3998
3999 /* No resume action for this thread. */
4000 lwp->resume = NULL;
4001
4002 return 0;
4003 }
4004
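/* Illustrative sketch (not part of this file): the ptid matching
   rules applied above, pulled out as a standalone predicate.  A
   resume request selects a thread if it is the -1 wildcard, an exact
   match, or a process-wide request ('pPID' or 'pPID.-1' in vCont
   terms).  */

static int
sketch_ptid_matches (ptid_t resume_ptid, ptid_t thread_ptid)
{
  if (ptid_equal (resume_ptid, minus_one_ptid)
      || ptid_equal (resume_ptid, thread_ptid))
    return 1;

  return (ptid_get_pid (resume_ptid) == ptid_get_pid (thread_ptid)
	  && (ptid_is_pid (resume_ptid)
	      || ptid_get_lwp (resume_ptid) == -1));
}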
4005 /* find_inferior callback for linux_resume.
4006 Set *FLAG_P if this lwp has an interesting status pending. */
4007
4008 static int
4009 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4010 {
4011 struct thread_info *thread = (struct thread_info *) entry;
4012 struct lwp_info *lwp = get_thread_lwp (thread);
4013
4014 /* LWPs which will not be resumed are not interesting, because
4015 we might not wait for them next time through linux_wait. */
4016 if (lwp->resume == NULL)
4017 return 0;
4018
4019 if (thread_still_has_status_pending_p (thread))
4020 * (int *) flag_p = 1;
4021
4022 return 0;
4023 }
4024
4025 /* Return 1 if this lwp that GDB wants running is stopped at an
4026 internal breakpoint that we need to step over. It assumes that any
4027 required STOP_PC adjustment has already been propagated to the
4028 inferior's regcache. */
4029
4030 static int
4031 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4032 {
4033 struct thread_info *thread = (struct thread_info *) entry;
4034 struct lwp_info *lwp = get_thread_lwp (thread);
4035 struct thread_info *saved_thread;
4036 CORE_ADDR pc;
4037 struct process_info *proc = get_thread_process (thread);
4038
4039 /* GDBserver is skipping the extra traps from the wrapper program,
4040 so there is no need to do a step over. */
4041 if (proc->tdesc == NULL)
4042 return 0;
4043
4044 /* LWPs which will not be resumed are not interesting, because we
4045 might not wait for them next time through linux_wait. */
4046
4047 if (!lwp->stopped)
4048 {
4049 if (debug_threads)
4050 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4051 lwpid_of (thread));
4052 return 0;
4053 }
4054
4055 if (thread->last_resume_kind == resume_stop)
4056 {
4057 if (debug_threads)
4058 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4059 " stopped\n",
4060 lwpid_of (thread));
4061 return 0;
4062 }
4063
4064 gdb_assert (lwp->suspended >= 0);
4065
4066 if (lwp->suspended)
4067 {
4068 if (debug_threads)
4069 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4070 lwpid_of (thread));
4071 return 0;
4072 }
4073
4074 if (!lwp->need_step_over)
4075 {
4076 if (debug_threads)
4077 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4078 }
4079
4080 if (lwp->status_pending_p)
4081 {
4082 if (debug_threads)
4083 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4084 " status.\n",
4085 lwpid_of (thread));
4086 return 0;
4087 }
4088
4089 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4090 or we have. */
4091 pc = get_pc (lwp);
4092
4093 /* If the PC has changed since we stopped, then don't do anything,
4094 and let the breakpoint/tracepoint be hit. This happens if, for
4095 instance, GDB handled the decr_pc_after_break subtraction itself,
4096 GDB is OOL (out-of-line) stepping this thread, or the user has
4097 issued a "jump" command, or poked the thread's registers herself. */
4098 if (pc != lwp->stop_pc)
4099 {
4100 if (debug_threads)
4101 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4102 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4103 lwpid_of (thread),
4104 paddress (lwp->stop_pc), paddress (pc));
4105
4106 lwp->need_step_over = 0;
4107 return 0;
4108 }
4109
4110 saved_thread = current_thread;
4111 current_thread = thread;
4112
4113 /* We can only step over breakpoints we know about. */
4114 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4115 {
4116 /* Don't step over a breakpoint that GDB expects to hit
4117 though. If the condition is being evaluated on the target's side
4118 and it evaluates to false, step over this breakpoint as well. */
4119 if (gdb_breakpoint_here (pc)
4120 && gdb_condition_true_at_breakpoint (pc)
4121 && gdb_no_commands_at_breakpoint (pc))
4122 {
4123 if (debug_threads)
4124 debug_printf ("Need step over [LWP %ld]? yes, but found"
4125 " GDB breakpoint at 0x%s; skipping step over\n",
4126 lwpid_of (thread), paddress (pc));
4127
4128 current_thread = saved_thread;
4129 return 0;
4130 }
4131 else
4132 {
4133 if (debug_threads)
4134 debug_printf ("Need step over [LWP %ld]? yes, "
4135 "found breakpoint at 0x%s\n",
4136 lwpid_of (thread), paddress (pc));
4137
4138 /* We've found an lwp that needs stepping over --- return 1 so
4139 that find_inferior stops looking. */
4140 current_thread = saved_thread;
4141
4142 /* If the step over is cancelled, this is set again. */
4143 lwp->need_step_over = 0;
4144 return 1;
4145 }
4146 }
4147
4148 current_thread = saved_thread;
4149
4150 if (debug_threads)
4151 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4152 " at 0x%s\n",
4153 lwpid_of (thread), paddress (pc));
4154
4155 return 0;
4156 }
4157
4158 /* Start a step-over operation on LWP. When LWP is stopped at a
4159 breakpoint, to make progress, we need to move the breakpoint out
4160 of the way. If we let other threads run while we do that, they may
4161 pass by the breakpoint location and miss hitting it. To avoid
4162 that, a step-over momentarily stops all threads while LWP is
4163 single-stepped with the breakpoint temporarily uninserted from
4164 the inferior. When the single-step finishes, we reinsert the
4165 breakpoint, and let all threads that are supposed to be running
4166 run again.
4167
4168 On targets that don't support hardware single-step, we don't
4169 currently support full software single-stepping. Instead, we only
4170 support stepping over the thread event breakpoint, by asking the
4171 low target where to place a reinsert breakpoint. Since this
4172 routine assumes the breakpoint being stepped over is a thread event
4173 breakpoint, it usually assumes the return address of the current
4174 function is a good enough place to set the reinsert breakpoint. */
4175
4176 static int
4177 start_step_over (struct lwp_info *lwp)
4178 {
4179 struct thread_info *thread = get_lwp_thread (lwp);
4180 struct thread_info *saved_thread;
4181 CORE_ADDR pc;
4182 int step;
4183
4184 if (debug_threads)
4185 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4186 lwpid_of (thread));
4187
4188 stop_all_lwps (1, lwp);
4189 gdb_assert (lwp->suspended == 0);
4190
4191 if (debug_threads)
4192 debug_printf ("Done stopping all threads for step-over.\n");
4193
4194 /* Note, we should always reach here with an already adjusted PC,
4195 either by GDB (if we're resuming due to GDB's request), or by our
4196 caller, if we just finished handling an internal breakpoint GDB
4197 shouldn't care about. */
4198 pc = get_pc (lwp);
4199
4200 saved_thread = current_thread;
4201 current_thread = thread;
4202
4203 lwp->bp_reinsert = pc;
4204 uninsert_breakpoints_at (pc);
4205 uninsert_fast_tracepoint_jumps_at (pc);
4206
4207 if (can_hardware_single_step ())
4208 {
4209 step = 1;
4210 }
4211 else
4212 {
4213 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4214 set_reinsert_breakpoint (raddr);
4215 step = 0;
4216 }
4217
4218 current_thread = saved_thread;
4219
4220 linux_resume_one_lwp (lwp, step, 0, NULL);
4221
4222 /* Require next event from this LWP. */
4223 step_over_bkpt = thread->entry.id;
4224 return 1;
4225 }
4226
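/* Illustrative sketch (not part of this file): the step-over dance
   above, flattened into one sequence for a hardware-single-step
   target with a single tracee.  sketch_bp_remove and sketch_bp_insert
   are hypothetical stand-ins for gdbserver's breakpoint bookkeeping
   (uninsert_breakpoints_at / reinsert_breakpoints_at).  */

static void sketch_bp_remove (CORE_ADDR pc) { (void) pc; }
static void sketch_bp_insert (CORE_ADDR pc) { (void) pc; }

static int
sketch_step_over (pid_t lwpid, CORE_ADDR pc)
{
  int status;

  sketch_bp_remove (pc);

  if (ptrace (PTRACE_SINGLESTEP, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return -1;
  if (waitpid (lwpid, &status, __WALL) != lwpid
      || !WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP)
    return -1;

  /* The breakpoint goes back in only after the step completed, so no
     other thread could have sailed past it in the meantime.  */
  sketch_bp_insert (pc);
  return 0;
}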
4227 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4228 start_step_over, if still there, and delete any reinsert
4229 breakpoints we've set, on non-hardware single-step targets. */
4230
4231 static int
4232 finish_step_over (struct lwp_info *lwp)
4233 {
4234 if (lwp->bp_reinsert != 0)
4235 {
4236 if (debug_threads)
4237 debug_printf ("Finished step over.\n");
4238
4239 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4240 may be no breakpoint to reinsert there by now. */
4241 reinsert_breakpoints_at (lwp->bp_reinsert);
4242 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4243
4244 lwp->bp_reinsert = 0;
4245
4246 /* Delete any software-single-step reinsert breakpoints. No
4247 longer needed. We don't have to worry about other threads
4248 hitting this trap, and later not being able to explain it,
4249 because we were stepping over a breakpoint, and we hold all
4250 threads but LWP stopped while doing that. */
4251 if (!can_hardware_single_step ())
4252 delete_reinsert_breakpoints ();
4253
4254 step_over_bkpt = null_ptid;
4255 return 1;
4256 }
4257 else
4258 return 0;
4259 }
4260
4261 /* This function is called once per thread. We check the thread's resume
4262 request, which will tell us whether to resume, step, or leave the thread
4263 stopped; and what signal, if any, it should be sent.
4264
4265 For threads which we aren't explicitly told otherwise, we preserve
4266 the stepping flag; this is used for stepping over gdbserver-placed
4267 breakpoints.
4268
4269 If pending_flags was set in any thread, we queue any needed
4270 signals, since we won't actually resume. We already have a pending
4271 event to report, so we don't need to preserve any step requests;
4272 they should be re-issued if necessary. */
4273
4274 static int
4275 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4276 {
4277 struct thread_info *thread = (struct thread_info *) entry;
4278 struct lwp_info *lwp = get_thread_lwp (thread);
4279 int step;
4280 int leave_all_stopped = * (int *) arg;
4281 int leave_pending;
4282
4283 if (lwp->resume == NULL)
4284 return 0;
4285
4286 if (lwp->resume->kind == resume_stop)
4287 {
4288 if (debug_threads)
4289 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4290
4291 if (!lwp->stopped)
4292 {
4293 if (debug_threads)
4294 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4295
4296 /* Stop the thread, and wait for the event asynchronously,
4297 through the event loop. */
4298 send_sigstop (lwp);
4299 }
4300 else
4301 {
4302 if (debug_threads)
4303 debug_printf ("already stopped LWP %ld\n",
4304 lwpid_of (thread));
4305
4306 /* The LWP may have been stopped in an internal event that
4307 was not meant to be notified back to GDB (e.g., gdbserver
4308 breakpoint), so we should be reporting a stop event in
4309 this case too. */
4310
4311 /* If the thread already has a pending SIGSTOP, this is a
4312 no-op. Otherwise, something later will presumably resume
4313 the thread and this will cause it to cancel any pending
4314 operation, due to last_resume_kind == resume_stop. If
4315 the thread already has a pending status to report, we
4316 will still report it the next time we wait - see
4317 status_pending_p_callback. */
4318
4319 /* If we already have a pending signal to report, then
4320 there's no need to queue a SIGSTOP, as this means we're
4321 midway through moving the LWP out of the jumppad, and we
4322 will report the pending signal as soon as that is
4323 finished. */
4324 if (lwp->pending_signals_to_report == NULL)
4325 send_sigstop (lwp);
4326 }
4327
4328 /* For stop requests, we're done. */
4329 lwp->resume = NULL;
4330 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4331 return 0;
4332 }
4333
4334 /* If this thread which is about to be resumed has a pending status,
4335 then don't resume any threads - we can just report the pending
4336 status. Make sure to queue any signals that would otherwise be
4337 sent. In all-stop mode, we base this decision on whether *any*
4338 thread has a pending status. If there's a thread that needs the
4339 step-over-breakpoint dance, then don't resume any other thread
4340 but that particular one. */
4341 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4342
4343 if (!leave_pending)
4344 {
4345 if (debug_threads)
4346 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4347
4348 step = (lwp->resume->kind == resume_step);
4349 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4350 }
4351 else
4352 {
4353 if (debug_threads)
4354 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4355
4356 /* If we have a new signal, enqueue the signal. */
4357 if (lwp->resume->sig != 0)
4358 {
4359 struct pending_signals *p_sig;
4360 p_sig = xmalloc (sizeof (*p_sig));
4361 p_sig->prev = lwp->pending_signals;
4362 p_sig->signal = lwp->resume->sig;
4363 memset (&p_sig->info, 0, sizeof (siginfo_t));
4364
4365 /* If this is the same signal we were previously stopped by,
4366 make sure to queue its siginfo. We can ignore the return
4367 value of ptrace; if it fails, we'll skip
4368 PTRACE_SETSIGINFO. */
4369 if (WIFSTOPPED (lwp->last_status)
4370 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4371 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4372 &p_sig->info);
4373
4374 lwp->pending_signals = p_sig;
4375 }
4376 }
4377
4378 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4379 lwp->resume = NULL;
4380 return 0;
4381 }
4382
4383 static void
4384 linux_resume (struct thread_resume *resume_info, size_t n)
4385 {
4386 struct thread_resume_array array = { resume_info, n };
4387 struct thread_info *need_step_over = NULL;
4388 int any_pending;
4389 int leave_all_stopped;
4390
4391 if (debug_threads)
4392 {
4393 debug_enter ();
4394 debug_printf ("linux_resume:\n");
4395 }
4396
4397 find_inferior (&all_threads, linux_set_resume_request, &array);
4398
4399 /* If there is a thread which would otherwise be resumed, which has
4400 a pending status, then don't resume any threads - we can just
4401 report the pending status. Make sure to queue any signals that
4402 would otherwise be sent. In non-stop mode, we'll apply this
4403 logic to each thread individually. We consume all pending events
4404 before considering starting a step-over (in all-stop). */
4405 any_pending = 0;
4406 if (!non_stop)
4407 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4408
4409 /* If there is a thread which would otherwise be resumed, which is
4410 stopped at a breakpoint that needs stepping over, then don't
4411 resume any threads - have it step over the breakpoint with all
4412 other threads stopped, then resume all threads again. Make sure
4413 to queue any signals that would otherwise be delivered or
4414 queued. */
4415 if (!any_pending && supports_breakpoints ())
4416 need_step_over
4417 = (struct thread_info *) find_inferior (&all_threads,
4418 need_step_over_p, NULL);
4419
4420 leave_all_stopped = (need_step_over != NULL || any_pending);
4421
4422 if (debug_threads)
4423 {
4424 if (need_step_over != NULL)
4425 debug_printf ("Not resuming all, need step over\n");
4426 else if (any_pending)
4427 debug_printf ("Not resuming, all-stop and found "
4428 "an LWP with pending status\n");
4429 else
4430 debug_printf ("Resuming, no pending status or step over needed\n");
4431 }
4432
4433 /* Even if we're leaving threads stopped, queue all signals we'd
4434 otherwise deliver. */
4435 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4436
4437 if (need_step_over)
4438 start_step_over (get_thread_lwp (need_step_over));
4439
4440 if (debug_threads)
4441 {
4442 debug_printf ("linux_resume done\n");
4443 debug_exit ();
4444 }
4445 }
4446
4447 /* This function is called once per thread. We check the thread's
4448 last resume request, which will tell us whether to resume, step, or
4449 leave the thread stopped. Any signal the client requested to be
4450 delivered has already been enqueued at this point.
4451
4452 If any thread that GDB wants running is stopped at an internal
4453 breakpoint that needs stepping over, we start a step-over operation
4454 on that particular thread, and leave all others stopped. */
4455
4456 static int
4457 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4458 {
4459 struct thread_info *thread = (struct thread_info *) entry;
4460 struct lwp_info *lwp = get_thread_lwp (thread);
4461 int step;
4462
4463 if (lwp == except)
4464 return 0;
4465
4466 if (debug_threads)
4467 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4468
4469 if (!lwp->stopped)
4470 {
4471 if (debug_threads)
4472 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4473 return 0;
4474 }
4475
4476 if (thread->last_resume_kind == resume_stop
4477 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4478 {
4479 if (debug_threads)
4480 debug_printf (" client wants LWP to remain %ld stopped\n",
4481 lwpid_of (thread));
4482 return 0;
4483 }
4484
4485 if (lwp->status_pending_p)
4486 {
4487 if (debug_threads)
4488 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4489 lwpid_of (thread));
4490 return 0;
4491 }
4492
4493 gdb_assert (lwp->suspended >= 0);
4494
4495 if (lwp->suspended)
4496 {
4497 if (debug_threads)
4498 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4499 return 0;
4500 }
4501
4502 if (thread->last_resume_kind == resume_stop
4503 && lwp->pending_signals_to_report == NULL
4504 && lwp->collecting_fast_tracepoint == 0)
4505 {
4506 /* We haven't reported this LWP as stopped yet (otherwise, the
4507 last_status.kind check above would catch it, and we wouldn't
4508 reach here). This LWP may have been momentarily paused by a
4509 stop_all_lwps call while handling, for example, another LWP's
4510 step-over. In that case, the pending expected SIGSTOP signal
4511 that was queued at vCont;t handling time will have already
4512 been consumed by wait_for_sigstop, and so we need to requeue
4513 another one here. Note that if the LWP already has a SIGSTOP
4514 pending, this is a no-op. */
4515
4516 if (debug_threads)
4517 debug_printf ("Client wants LWP %ld to stop. "
4518 "Making sure it has a SIGSTOP pending\n",
4519 lwpid_of (thread));
4520
4521 send_sigstop (lwp);
4522 }
4523
4524 step = thread->last_resume_kind == resume_step;
4525 linux_resume_one_lwp (lwp, step, 0, NULL);
4526 return 0;
4527 }
4528
4529 static int
4530 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4531 {
4532 struct thread_info *thread = (struct thread_info *) entry;
4533 struct lwp_info *lwp = get_thread_lwp (thread);
4534
4535 if (lwp == except)
4536 return 0;
4537
4538 lwp->suspended--;
4539 gdb_assert (lwp->suspended >= 0);
4540
4541 return proceed_one_lwp (entry, except);
4542 }
4543
4544 /* When we finish a step-over, set threads running again. If there's
4545 another thread that may need a step-over, now's the time to start
4546 it. Eventually, we'll move all threads past their breakpoints. */
4547
4548 static void
4549 proceed_all_lwps (void)
4550 {
4551 struct thread_info *need_step_over;
4552
4553 /* If there is a thread which would otherwise be resumed, which is
4554 stopped at a breakpoint that needs stepping over, then don't
4555 resume any threads - have it step over the breakpoint with all
4556 other threads stopped, then resume all threads again. */
4557
4558 if (supports_breakpoints ())
4559 {
4560 need_step_over
4561 = (struct thread_info *) find_inferior (&all_threads,
4562 need_step_over_p, NULL);
4563
4564 if (need_step_over != NULL)
4565 {
4566 if (debug_threads)
4567 debug_printf ("proceed_all_lwps: found "
4568 "thread %ld needing a step-over\n",
4569 lwpid_of (need_step_over));
4570
4571 start_step_over (get_thread_lwp (need_step_over));
4572 return;
4573 }
4574 }
4575
4576 if (debug_threads)
4577 debug_printf ("Proceeding, no step-over needed\n");
4578
4579 find_inferior (&all_threads, proceed_one_lwp, NULL);
4580 }
4581
4582 /* Stopped LWPs that the client wanted to be running, that don't have
4583 pending statuses, are set to run again, except for EXCEPT, if not
4584 NULL. This undoes a stop_all_lwps call. */
4585
4586 static void
4587 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4588 {
4589 if (debug_threads)
4590 {
4591 debug_enter ();
4592 if (except)
4593 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4594 lwpid_of (get_lwp_thread (except)));
4595 else
4596 debug_printf ("unstopping all lwps\n");
4597 }
4598
4599 if (unsuspend)
4600 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4601 else
4602 find_inferior (&all_threads, proceed_one_lwp, except);
4603
4604 if (debug_threads)
4605 {
4606 debug_printf ("unstop_all_lwps done\n");
4607 debug_exit ();
4608 }
4609 }
4610
4611
4612 #ifdef HAVE_LINUX_REGSETS
4613
4614 #define use_linux_regsets 1
4615
4616 /* Returns true if REGSET has been disabled. */
4617
4618 static int
4619 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4620 {
4621 return (info->disabled_regsets != NULL
4622 && info->disabled_regsets[regset - info->regsets]);
4623 }
4624
4625 /* Disable REGSET. */
4626
4627 static void
4628 disable_regset (struct regsets_info *info, struct regset_info *regset)
4629 {
4630 int dr_offset;
4631
4632 dr_offset = regset - info->regsets;
4633 if (info->disabled_regsets == NULL)
4634 info->disabled_regsets = xcalloc (1, info->num_regsets);
4635 info->disabled_regsets[dr_offset] = 1;
4636 }
4637
4638 static int
4639 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4640 struct regcache *regcache)
4641 {
4642 struct regset_info *regset;
4643 int saw_general_regs = 0;
4644 int pid;
4645 struct iovec iov;
4646
4647 pid = lwpid_of (current_thread);
4648 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4649 {
4650 void *buf, *data;
4651 int nt_type, res;
4652
4653 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4654 continue;
4655
4656 buf = xmalloc (regset->size);
4657
4658 nt_type = regset->nt_type;
4659 if (nt_type)
4660 {
4661 iov.iov_base = buf;
4662 iov.iov_len = regset->size;
4663 data = (void *) &iov;
4664 }
4665 else
4666 data = buf;
4667
4668 #ifndef __sparc__
4669 res = ptrace (regset->get_request, pid,
4670 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4671 #else
4672 res = ptrace (regset->get_request, pid, data, nt_type);
4673 #endif
4674 if (res < 0)
4675 {
4676 if (errno == EIO)
4677 {
4678 /* If we get EIO on a regset, do not try it again for
4679 this process mode. */
4680 disable_regset (regsets_info, regset);
4681 }
4682 else if (errno == ENODATA)
4683 {
4684 /* ENODATA may be returned if the regset is currently
4685 not "active". This can happen in normal operation,
4686 so suppress the warning in this case. */
4687 }
4688 else
4689 {
4690 char s[256];
4691 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4692 pid);
4693 perror (s);
4694 }
4695 }
4696 else
4697 {
4698 if (regset->type == GENERAL_REGS)
4699 saw_general_regs = 1;
4700 regset->store_function (regcache, buf);
4701 }
4702 free (buf);
4703 }
4704 if (saw_general_regs)
4705 return 0;
4706 else
4707 return 1;
4708 }
4709
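/* Illustrative sketch (not part of this file): the iovec-based
   transfer used above in its simplest form.  Assuming a kernel and
   libc that define PTRACE_GETREGSET (Linux >= 2.6.34), the request
   takes an ELF note number such as NT_PRSTATUS and a struct iovec;
   on success the kernel trims iov_len to the number of bytes it
   actually wrote.  */

static int
sketch_fetch_gregs (pid_t pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;

  if (ptrace (PTRACE_GETREGSET, pid, (PTRACE_TYPE_ARG3) (long) NT_PRSTATUS,
	      &iov) < 0)
    return -1;

  /* On success, iov.iov_len holds the number of bytes written.  */
  return (int) iov.iov_len;
}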
4710 static int
4711 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4712 struct regcache *regcache)
4713 {
4714 struct regset_info *regset;
4715 int saw_general_regs = 0;
4716 int pid;
4717 struct iovec iov;
4718
4719 pid = lwpid_of (current_thread);
4720 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4721 {
4722 void *buf, *data;
4723 int nt_type, res;
4724
4725 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4726 || regset->fill_function == NULL)
4727 continue;
4728
4729 buf = xmalloc (regset->size);
4730
4731 /* First fill the buffer with the current register set contents,
4732 in case there are any items in the kernel's regset that are
4733 not in gdbserver's regcache. */
4734
4735 nt_type = regset->nt_type;
4736 if (nt_type)
4737 {
4738 iov.iov_base = buf;
4739 iov.iov_len = regset->size;
4740 data = (void *) &iov;
4741 }
4742 else
4743 data = buf;
4744
4745 #ifndef __sparc__
4746 res = ptrace (regset->get_request, pid,
4747 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4748 #else
4749 res = ptrace (regset->get_request, pid, data, nt_type);
4750 #endif
4751
4752 if (res == 0)
4753 {
4754 /* Then overlay our cached registers on that. */
4755 regset->fill_function (regcache, buf);
4756
4757 /* Only now do we write the register set. */
4758 #ifndef __sparc__
4759 res = ptrace (regset->set_request, pid,
4760 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4761 #else
4762 res = ptrace (regset->set_request, pid, data, nt_type);
4763 #endif
4764 }
4765
4766 if (res < 0)
4767 {
4768 if (errno == EIO)
4769 {
4770 /* If we get EIO on a regset, do not try it again for
4771 this process mode. */
4772 disable_regset (regsets_info, regset);
4773 }
4774 else if (errno == ESRCH)
4775 {
4776 /* At this point, ESRCH should mean the process is
4777 already gone, in which case we simply ignore attempts
4778 to change its registers. See also the related
4779 comment in linux_resume_one_lwp. */
4780 free (buf);
4781 return 0;
4782 }
4783 else
4784 {
4785 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4786 }
4787 }
4788 else if (regset->type == GENERAL_REGS)
4789 saw_general_regs = 1;
4790 free (buf);
4791 }
4792 if (saw_general_regs)
4793 return 0;
4794 else
4795 return 1;
4796 }
4797
4798 #else /* !HAVE_LINUX_REGSETS */
4799
4800 #define use_linux_regsets 0
4801 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4802 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4803
4804 #endif
4805
4806 /* Return 1 if register REGNO is supported by one of the regset ptrace
4807 calls or 0 if it has to be transferred individually. */
4808
4809 static int
4810 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4811 {
4812 unsigned char mask = 1 << (regno % 8);
4813 size_t index = regno / 8;
4814
4815 return (use_linux_regsets
4816 && (regs_info->regset_bitmap == NULL
4817 || (regs_info->regset_bitmap[index] & mask) != 0));
4818 }
4819
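/* Illustrative sketch (not part of this file): the bitmap test above
   with concrete numbers.  Register 10 lives in byte 10 / 8 = 1, bit
   10 % 8 = 2, so a two-byte bitmap of { 0x00, 0x04 } marks exactly
   register 10.  */

static int
sketch_in_bitmap (const unsigned char *bitmap, int regno)
{
  return (bitmap[regno / 8] & (1 << (regno % 8))) != 0;
}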
4820 #ifdef HAVE_LINUX_USRREGS
4821
4822 int
4823 register_addr (const struct usrregs_info *usrregs, int regnum)
4824 {
4825 int addr;
4826
4827 if (regnum < 0 || regnum >= usrregs->num_regs)
4828 error ("Invalid register number %d.", regnum);
4829
4830 addr = usrregs->regmap[regnum];
4831
4832 return addr;
4833 }
4834
4835 /* Fetch one register. */
4836 static void
4837 fetch_register (const struct usrregs_info *usrregs,
4838 struct regcache *regcache, int regno)
4839 {
4840 CORE_ADDR regaddr;
4841 int i, size;
4842 char *buf;
4843 int pid;
4844
4845 if (regno >= usrregs->num_regs)
4846 return;
4847 if ((*the_low_target.cannot_fetch_register) (regno))
4848 return;
4849
4850 regaddr = register_addr (usrregs, regno);
4851 if (regaddr == -1)
4852 return;
4853
4854 size = ((register_size (regcache->tdesc, regno)
4855 + sizeof (PTRACE_XFER_TYPE) - 1)
4856 & -sizeof (PTRACE_XFER_TYPE));
4857 buf = alloca (size);
4858
4859 pid = lwpid_of (current_thread);
4860 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4861 {
4862 errno = 0;
4863 *(PTRACE_XFER_TYPE *) (buf + i) =
4864 ptrace (PTRACE_PEEKUSER, pid,
4865 /* Coerce to a uintptr_t first to avoid potential gcc warning
4866 of coercing an 8 byte integer to a 4 byte pointer. */
4867 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4868 regaddr += sizeof (PTRACE_XFER_TYPE);
4869 if (errno != 0)
4870 error ("reading register %d: %s", regno, strerror (errno));
4871 }
4872
4873 if (the_low_target.supply_ptrace_register)
4874 the_low_target.supply_ptrace_register (regcache, regno, buf);
4875 else
4876 supply_register (regcache, regno, buf);
4877 }
4878
4879 /* Store one register. */
4880 static void
4881 store_register (const struct usrregs_info *usrregs,
4882 struct regcache *regcache, int regno)
4883 {
4884 CORE_ADDR regaddr;
4885 int i, size;
4886 char *buf;
4887 int pid;
4888
4889 if (regno >= usrregs->num_regs)
4890 return;
4891 if ((*the_low_target.cannot_store_register) (regno))
4892 return;
4893
4894 regaddr = register_addr (usrregs, regno);
4895 if (regaddr == -1)
4896 return;
4897
4898 size = ((register_size (regcache->tdesc, regno)
4899 + sizeof (PTRACE_XFER_TYPE) - 1)
4900 & -sizeof (PTRACE_XFER_TYPE));
4901 buf = alloca (size);
4902 memset (buf, 0, size);
4903
4904 if (the_low_target.collect_ptrace_register)
4905 the_low_target.collect_ptrace_register (regcache, regno, buf);
4906 else
4907 collect_register (regcache, regno, buf);
4908
4909 pid = lwpid_of (current_thread);
4910 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4911 {
4912 errno = 0;
4913 ptrace (PTRACE_POKEUSER, pid,
4914 /* Coerce to a uintptr_t first to avoid potential gcc warning
4915 about coercing an 8 byte integer to a 4 byte pointer. */
4916 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4917 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4918 if (errno != 0)
4919 {
4920 /* At this point, ESRCH should mean the process is
4921 already gone, in which case we simply ignore attempts
4922 to change its registers. See also the related
4923 comment in linux_resume_one_lwp. */
4924 if (errno == ESRCH)
4925 return;
4926
4927 if ((*the_low_target.cannot_store_register) (regno) == 0)
4928 error ("writing register %d: %s", regno, strerror (errno));
4929 }
4930 regaddr += sizeof (PTRACE_XFER_TYPE);
4931 }
4932 }
4933
4934 /* Fetch all registers, or just one, from the child process.
4935 If REGNO is -1, do this for all registers, skipping any that are
4936 assumed to have been retrieved by regsets_fetch_inferior_registers,
4937 unless ALL is non-zero.
4938 Otherwise, REGNO specifies which register (so we can save time). */
4939 static void
4940 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4941 struct regcache *regcache, int regno, int all)
4942 {
4943 struct usrregs_info *usr = regs_info->usrregs;
4944
4945 if (regno == -1)
4946 {
4947 for (regno = 0; regno < usr->num_regs; regno++)
4948 if (all || !linux_register_in_regsets (regs_info, regno))
4949 fetch_register (usr, regcache, regno);
4950 }
4951 else
4952 fetch_register (usr, regcache, regno);
4953 }
4954
4955 /* Store our register values back into the inferior.
4956 If REGNO is -1, do this for all registers, skipping any that are
4957 assumed to have been saved by regsets_store_inferior_registers,
4958 unless ALL is non-zero.
4959 Otherwise, REGNO specifies which register (so we can save time). */
4960 static void
4961 usr_store_inferior_registers (const struct regs_info *regs_info,
4962 struct regcache *regcache, int regno, int all)
4963 {
4964 struct usrregs_info *usr = regs_info->usrregs;
4965
4966 if (regno == -1)
4967 {
4968 for (regno = 0; regno < usr->num_regs; regno++)
4969 if (all || !linux_register_in_regsets (regs_info, regno))
4970 store_register (usr, regcache, regno);
4971 }
4972 else
4973 store_register (usr, regcache, regno);
4974 }
4975
4976 #else /* !HAVE_LINUX_USRREGS */
4977
4978 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4979 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4980
4981 #endif
4982
4983
4984 void
4985 linux_fetch_registers (struct regcache *regcache, int regno)
4986 {
4987 int use_regsets;
4988 int all = 0;
4989 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4990
4991 if (regno == -1)
4992 {
4993 if (the_low_target.fetch_register != NULL
4994 && regs_info->usrregs != NULL)
4995 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4996 (*the_low_target.fetch_register) (regcache, regno);
4997
4998 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4999 if (regs_info->usrregs != NULL)
5000 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5001 }
5002 else
5003 {
5004 if (the_low_target.fetch_register != NULL
5005 && (*the_low_target.fetch_register) (regcache, regno))
5006 return;
5007
5008 use_regsets = linux_register_in_regsets (regs_info, regno);
5009 if (use_regsets)
5010 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5011 regcache);
5012 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5013 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5014 }
5015 }
5016
5017 void
5018 linux_store_registers (struct regcache *regcache, int regno)
5019 {
5020 int use_regsets;
5021 int all = 0;
5022 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5023
5024 if (regno == -1)
5025 {
5026 all = regsets_store_inferior_registers (regs_info->regsets_info,
5027 regcache);
5028 if (regs_info->usrregs != NULL)
5029 usr_store_inferior_registers (regs_info, regcache, regno, all);
5030 }
5031 else
5032 {
5033 use_regsets = linux_register_in_regsets (regs_info, regno);
5034 if (use_regsets)
5035 all = regsets_store_inferior_registers (regs_info->regsets_info,
5036 regcache);
5037 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5038 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5039 }
5040 }
5041
5042
5043 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5044 to debugger memory starting at MYADDR. */
5045
5046 static int
5047 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5048 {
5049 int pid = lwpid_of (current_thread);
5050 register PTRACE_XFER_TYPE *buffer;
5051 register CORE_ADDR addr;
5052 register int count;
5053 char filename[64];
5054 register int i;
5055 int ret;
5056 int fd;
5057
5058 /* Try using /proc. Don't bother for one word. */
5059 if (len >= 3 * sizeof (long))
5060 {
5061 int bytes;
5062
5063 /* We could keep this file open and cache it - possibly one per
5064 thread. That requires some juggling, but is even faster. */
5065 sprintf (filename, "/proc/%d/mem", pid);
5066 fd = open (filename, O_RDONLY | O_LARGEFILE);
5067 if (fd == -1)
5068 goto no_proc;
5069
5070 /* If pread64 is available, use it. It's faster if the kernel
5071 supports it (only one syscall), and it's 64-bit safe even on
5072 32-bit platforms (for instance, SPARC debugging a SPARC64
5073 application). */
5074 #ifdef HAVE_PREAD64
5075 bytes = pread64 (fd, myaddr, len, memaddr);
5076 #else
5077 bytes = -1;
5078 if (lseek (fd, memaddr, SEEK_SET) != -1)
5079 bytes = read (fd, myaddr, len);
5080 #endif
5081
5082 close (fd);
5083 if (bytes == len)
5084 return 0;
5085
5086 /* Some data was read, we'll try to get the rest with ptrace. */
5087 if (bytes > 0)
5088 {
5089 memaddr += bytes;
5090 myaddr += bytes;
5091 len -= bytes;
5092 }
5093 }
5094
5095 no_proc:
5096 /* Round starting address down to longword boundary. */
5097 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5098 /* Round ending address up; get number of longwords that makes. */
5099 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5100 / sizeof (PTRACE_XFER_TYPE));
5101 /* Allocate buffer of that many longwords. */
5102 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5103
5104 /* Read all the longwords. */
5105 errno = 0;
5106 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5107 {
5108 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5109 about coercing an 8 byte integer to a 4 byte pointer. */
5110 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5111 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5112 (PTRACE_TYPE_ARG4) 0);
5113 if (errno)
5114 break;
5115 }
5116 ret = errno;
5117
5118 /* Copy appropriate bytes out of the buffer. */
5119 if (i > 0)
5120 {
5121 i *= sizeof (PTRACE_XFER_TYPE);
5122 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5123 memcpy (myaddr,
5124 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5125 i < len ? i : len);
5126 }
5127
5128 return ret;
5129 }
5130
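/* Illustrative sketch (not part of this file): on Linux >= 3.2 with
   glibc >= 2.15, the same transfer can be done in a single syscall
   with process_vm_readv (declared in <sys/uio.h>, which this file
   already includes).  The tracee does not even need to be stopped,
   though the usual ptrace permission rules still apply.  This sketch
   assumes the inferior address fits in a host pointer.  */

static int
sketch_read_memory (pid_t pid, CORE_ADDR memaddr,
		    unsigned char *myaddr, size_t len)
{
  struct iovec local, remote;

  local.iov_base = myaddr;
  local.iov_len = len;
  remote.iov_base = (void *) (uintptr_t) memaddr;
  remote.iov_len = len;

  /* process_vm_readv returns the number of bytes transferred, or -1
     on error.  */
  return (process_vm_readv (pid, &local, 1, &remote, 1, 0)
	  == (ssize_t) len) ? 0 : -1;
}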
5131 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5132 memory at MEMADDR. On failure (cannot write to the inferior)
5133 returns the value of errno. Always succeeds if LEN is zero. */
5134
5135 static int
5136 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5137 {
5138 register int i;
5139 /* Round starting address down to longword boundary. */
5140 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5141 /* Round ending address up; get number of longwords that makes. */
5142 register int count
5143 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5144 / sizeof (PTRACE_XFER_TYPE);
5145
5146 /* Allocate buffer of that many longwords. */
5147 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5148 alloca (count * sizeof (PTRACE_XFER_TYPE));
5149
5150 int pid = lwpid_of (current_thread);
5151
5152 if (len == 0)
5153 {
5154 /* Zero length write always succeeds. */
5155 return 0;
5156 }
5157
5158 if (debug_threads)
5159 {
5160 /* Dump up to four bytes. */
5161 unsigned int val = * (unsigned int *) myaddr;
5162 if (len == 1)
5163 val = val & 0xff;
5164 else if (len == 2)
5165 val = val & 0xffff;
5166 else if (len == 3)
5167 val = val & 0xffffff;
5168 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5169 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5170 }
5171
5172 /* Fill start and end extra bytes of buffer with existing memory data. */
5173
5174 errno = 0;
5175 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5176 about coercing an 8 byte integer to a 4 byte pointer. */
5177 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5178 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5179 (PTRACE_TYPE_ARG4) 0);
5180 if (errno)
5181 return errno;
5182
5183 if (count > 1)
5184 {
5185 errno = 0;
5186 buffer[count - 1]
5187 = ptrace (PTRACE_PEEKTEXT, pid,
5188 /* Coerce to a uintptr_t first to avoid potential gcc warning
5189 about coercing an 8 byte integer to a 4 byte pointer. */
5190 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5191 * sizeof (PTRACE_XFER_TYPE)),
5192 (PTRACE_TYPE_ARG4) 0);
5193 if (errno)
5194 return errno;
5195 }
5196
5197 /* Copy data to be written over corresponding part of buffer. */
5198
5199 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5200 myaddr, len);
5201
5202 /* Write the entire buffer. */
5203
5204 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5205 {
5206 errno = 0;
5207 ptrace (PTRACE_POKETEXT, pid,
5208 /* Coerce to a uintptr_t first to avoid potential gcc warning
5209 about coercing an 8 byte integer to a 4 byte pointer. */
5210 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5211 (PTRACE_TYPE_ARG4) buffer[i]);
5212 if (errno)
5213 return errno;
5214 }
5215
5216 return 0;
5217 }
5218
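/* Illustrative sketch (not part of this file): the rounding
   arithmetic above, worked through.  With an 8-byte PTRACE_XFER_TYPE,
   a write of LEN = 6 bytes at MEMADDR = 0x1003 gives

     addr  = 0x1003 & ~7                   = 0x1000
     count = ((0x1009 - 0x1000) + 7) / 8   = 2

   i.e. the span straddles two words, and both must be peeked first so
   the untouched head and tail bytes survive the poke.  */

static void
sketch_poke_span (CORE_ADDR memaddr, int len)
{
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  int count = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	      / sizeof (PTRACE_XFER_TYPE);

  debug_printf ("write [0x%lx,+%d) -> %d word(s) starting at 0x%lx\n",
		(long) memaddr, len, count, (long) addr);
}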
5219 static void
5220 linux_look_up_symbols (void)
5221 {
5222 #ifdef USE_THREAD_DB
5223 struct process_info *proc = current_process ();
5224
5225 if (proc->priv->thread_db != NULL)
5226 return;
5227
5228 /* If the kernel supports tracing clones, then we don't need to
5229 use the magic thread event breakpoint to learn about
5230 threads. */
5231 thread_db_init (!linux_supports_traceclone ());
5232 #endif
5233 }
5234
5235 static void
5236 linux_request_interrupt (void)
5237 {
5238 extern unsigned long signal_pid;
5239
5240 /* Send a SIGINT to the process group. This acts just like the user
5241 typed a ^C on the controlling terminal. */
5242 kill (-signal_pid, SIGINT);
5243 }
5244
5245 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5246 to debugger memory starting at MYADDR. */
5247
5248 static int
5249 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5250 {
5251 char filename[PATH_MAX];
5252 int fd, n;
5253 int pid = lwpid_of (current_thread);
5254
5255 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5256
5257 fd = open (filename, O_RDONLY);
5258 if (fd < 0)
5259 return -1;
5260
5261 if (offset != (CORE_ADDR) 0
5262 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5263 n = -1;
5264 else
5265 n = read (fd, myaddr, len);
5266
5267 close (fd);
5268
5269 return n;
5270 }
5271
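/* Illustrative sketch (not part of this file): the auxv data read
   above is an array of (a_type, a_val) pairs terminated by AT_NULL
   (tags such as AT_PHDR come from <elf.h>); e.g.
   sketch_auxv_find (buf, n, AT_PHDR).  This sketch assumes the
   inferior and gdbserver share a word size; the real consumers must
   also handle a 32-bit inferior under a 64-bit server.  */

static unsigned long
sketch_auxv_find (const unsigned char *auxv, size_t len,
		  unsigned long type)
{
  const unsigned long *p = (const unsigned long *) auxv;
  const unsigned long *end = (const unsigned long *) (auxv + len);

  for (; p + 2 <= end && p[0] != AT_NULL; p += 2)
    if (p[0] == type)
      return p[1];

  return 0;
}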
5272 /* These breakpoint and watchpoint related wrapper functions simply
5273 pass on the function call if the target has registered a
5274 corresponding function. */
5275
5276 static int
5277 linux_supports_z_point_type (char z_type)
5278 {
5279 return (the_low_target.supports_z_point_type != NULL
5280 && the_low_target.supports_z_point_type (z_type));
5281 }
5282
5283 static int
5284 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5285 int size, struct raw_breakpoint *bp)
5286 {
5287 if (type == raw_bkpt_type_sw)
5288 return insert_memory_breakpoint (bp);
5289 else if (the_low_target.insert_point != NULL)
5290 return the_low_target.insert_point (type, addr, size, bp);
5291 else
5292 /* Unsupported (see target.h). */
5293 return 1;
5294 }
5295
5296 static int
5297 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5298 int size, struct raw_breakpoint *bp)
5299 {
5300 if (type == raw_bkpt_type_sw)
5301 return remove_memory_breakpoint (bp);
5302 else if (the_low_target.remove_point != NULL)
5303 return the_low_target.remove_point (type, addr, size, bp);
5304 else
5305 /* Unsupported (see target.h). */
5306 return 1;
5307 }
5308
5309 /* Implement the to_stopped_by_sw_breakpoint target_ops
5310 method. */
5311
5312 static int
5313 linux_stopped_by_sw_breakpoint (void)
5314 {
5315 struct lwp_info *lwp = get_thread_lwp (current_thread);
5316
5317 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5318 }
5319
5320 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5321 method. */
5322
5323 static int
5324 linux_supports_stopped_by_sw_breakpoint (void)
5325 {
5326 return USE_SIGTRAP_SIGINFO;
5327 }
5328
5329 /* Implement the to_stopped_by_hw_breakpoint target_ops
5330 method. */
5331
5332 static int
5333 linux_stopped_by_hw_breakpoint (void)
5334 {
5335 struct lwp_info *lwp = get_thread_lwp (current_thread);
5336
5337 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5338 }
5339
5340 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5341 method. */
5342
5343 static int
5344 linux_supports_stopped_by_hw_breakpoint (void)
5345 {
5346 return USE_SIGTRAP_SIGINFO;
5347 }
5348
5349 /* Implement the supports_conditional_breakpoints target_ops
5350 method. */
5351
5352 static int
5353 linux_supports_conditional_breakpoints (void)
5354 {
5355 /* GDBserver needs to step over the breakpoint if the condition is
5356 false. GDBserver software single step is too simple, so disable
5357 conditional breakpoints if the target doesn't have hardware single
5358 step. */
5359 return can_hardware_single_step ();
5360 }
5361
5362 static int
5363 linux_stopped_by_watchpoint (void)
5364 {
5365 struct lwp_info *lwp = get_thread_lwp (current_thread);
5366
5367 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5368 }
5369
5370 static CORE_ADDR
5371 linux_stopped_data_address (void)
5372 {
5373 struct lwp_info *lwp = get_thread_lwp (current_thread);
5374
5375 return lwp->stopped_data_address;
5376 }
5377
5378 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5379 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5380 && defined(PT_TEXT_END_ADDR)
5381
5382 /* This is only used for targets that define PT_TEXT_ADDR,
5383 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5384 the target has different ways of acquiring this information, like
5385 loadmaps. */
5386
5387 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5388 to tell gdb about. */
5389
5390 static int
5391 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5392 {
5393 unsigned long text, text_end, data;
5394 int pid = lwpid_of (current_thread);
5395
5396 errno = 0;
5397
5398 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5399 (PTRACE_TYPE_ARG4) 0);
5400 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5401 (PTRACE_TYPE_ARG4) 0);
5402 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5403 (PTRACE_TYPE_ARG4) 0);
5404
5405 if (errno == 0)
5406 {
5407 /* Both text and data offsets produced at compile-time (and so
5408 used by gdb) are relative to the beginning of the program,
5409 with the data segment immediately following the text segment.
5410 However, the actual runtime layout in memory may put the data
5411 somewhere else, so when we send gdb a data base-address, we
5412 use the real data base address and subtract the compile-time
5413 data base-address from it (which is just the length of the
5414 text segment). BSS immediately follows data in both
5415 cases. */
5416 *text_p = text;
5417 *data_p = data - (text_end - text);
5418
5419 return 1;
5420 }
5421 return 0;
5422 }
5423 #endif
5424
5425 static int
5426 linux_qxfer_osdata (const char *annex,
5427 unsigned char *readbuf, unsigned const char *writebuf,
5428 CORE_ADDR offset, int len)
5429 {
5430 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5431 }
5432
5433 /* Convert a native/host siginfo object, into/from the siginfo in the
5434 layout of the inferiors' architecture. */
5435
5436 static void
5437 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5438 {
5439 int done = 0;
5440
5441 if (the_low_target.siginfo_fixup != NULL)
5442 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5443
5444 /* If there was no callback, or the callback didn't do anything,
5445 then just do a straight memcpy. */
5446 if (!done)
5447 {
5448 if (direction == 1)
5449 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5450 else
5451 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5452 }
5453 }
5454
5455 static int
5456 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5457 unsigned const char *writebuf, CORE_ADDR offset, int len)
5458 {
5459 int pid;
5460 siginfo_t siginfo;
5461 char inf_siginfo[sizeof (siginfo_t)];
5462
5463 if (current_thread == NULL)
5464 return -1;
5465
5466 pid = lwpid_of (current_thread);
5467
5468 if (debug_threads)
5469 debug_printf ("%s siginfo for lwp %d.\n",
5470 readbuf != NULL ? "Reading" : "Writing",
5471 pid);
5472
5473 if (offset >= sizeof (siginfo))
5474 return -1;
5475
5476 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5477 return -1;
5478
5479 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5480 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5481 inferior with a 64-bit GDBSERVER should look the same as debugging it
5482 with a 32-bit GDBSERVER, we need to convert it. */
5483 siginfo_fixup (&siginfo, inf_siginfo, 0);
5484
5485 if (offset + len > sizeof (siginfo))
5486 len = sizeof (siginfo) - offset;
5487
5488 if (readbuf != NULL)
5489 memcpy (readbuf, inf_siginfo + offset, len);
5490 else
5491 {
5492 memcpy (inf_siginfo + offset, writebuf, len);
5493
5494 /* Convert back to ptrace layout before flushing it out. */
5495 siginfo_fixup (&siginfo, inf_siginfo, 1);
5496
5497 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5498 return -1;
5499 }
5500
5501 return len;
5502 }
5503
5504 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5505 it marks the event pipe so we notice when children change state;
5506 and it acts as the handler for the sigsuspend in my_waitpid. */
5507
5508 static void
5509 sigchld_handler (int signo)
5510 {
5511 int old_errno = errno;
5512
5513 if (debug_threads)
5514 {
5515 do
5516 {
5517 /* fprintf is not async-signal-safe, so call write
5518 directly. */
5519 if (write (2, "sigchld_handler\n",
5520 sizeof ("sigchld_handler\n") - 1) < 0)
5521 break; /* just ignore */
5522 } while (0);
5523 }
5524
5525 if (target_is_async_p ())
5526 async_file_mark (); /* trigger a linux_wait */
5527
5528 errno = old_errno;
5529 }
5530
5531 static int
5532 linux_supports_non_stop (void)
5533 {
5534 return 1;
5535 }
5536
5537 static int
5538 linux_async (int enable)
5539 {
5540 int previous = target_is_async_p ();
5541
5542 if (debug_threads)
5543 debug_printf ("linux_async (%d), previous=%d\n",
5544 enable, previous);
5545
5546 if (previous != enable)
5547 {
5548 sigset_t mask;
5549 sigemptyset (&mask);
5550 sigaddset (&mask, SIGCHLD);
5551
5552 sigprocmask (SIG_BLOCK, &mask, NULL);
5553
5554 if (enable)
5555 {
5556 if (pipe (linux_event_pipe) == -1)
5557 {
5558 linux_event_pipe[0] = -1;
5559 linux_event_pipe[1] = -1;
5560 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5561
5562 warning ("creating event pipe failed.");
5563 return previous;
5564 }
5565
5566 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5567 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5568
5569 /* Register the event loop handler. */
5570 add_file_handler (linux_event_pipe[0],
5571 handle_target_event, NULL);
5572
5573 /* Always trigger a linux_wait. */
5574 async_file_mark ();
5575 }
5576 else
5577 {
5578 delete_file_handler (linux_event_pipe[0]);
5579
5580 close (linux_event_pipe[0]);
5581 close (linux_event_pipe[1]);
5582 linux_event_pipe[0] = -1;
5583 linux_event_pipe[1] = -1;
5584 }
5585
5586 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5587 }
5588
5589 return previous;
5590 }
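/* The pipe set up in linux_async implements the classic self-pipe
   trick: the async-signal-safe SIGCHLD handler wakes the event loop
   by making the pipe's read end readable.  A minimal sketch of the
   marking side, assuming async_file_mark (defined earlier in this
   file) follows this pattern -- illustrative only:

     static void
     async_file_mark (void)
     {
       write (linux_event_pipe[1], "+", 1);
     }

   handle_target_event then drains the pipe and ends up calling
   linux_wait to collect the status change.  */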
5591
5592 static int
5593 linux_start_non_stop (int nonstop)
5594 {
5595 /* Register or unregister from event-loop accordingly. */
5596 linux_async (nonstop);
5597
5598 if (target_is_async_p () != (nonstop != 0))
5599 return -1;
5600
5601 return 0;
5602 }
5603
5604 static int
5605 linux_supports_multi_process (void)
5606 {
5607 return 1;
5608 }
5609
5610 /* Check if fork events are supported. */
5611
5612 static int
5613 linux_supports_fork_events (void)
5614 {
5615 return linux_supports_tracefork ();
5616 }
5617
5618 /* Check if vfork events are supported. */
5619
5620 static int
5621 linux_supports_vfork_events (void)
5622 {
5623 return linux_supports_tracefork ();
5624 }
5625
5626 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5627 options for the specified lwp. */
5628
5629 static int
5630 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5631 void *args)
5632 {
5633 struct thread_info *thread = (struct thread_info *) entry;
5634 struct lwp_info *lwp = get_thread_lwp (thread);
5635
5636 if (!lwp->stopped)
5637 {
5638 /* Stop the lwp so we can modify its ptrace options. */
5639 lwp->must_set_ptrace_flags = 1;
5640 linux_stop_lwp (lwp);
5641 }
5642 else
5643 {
5644 /* Already stopped; go ahead and set the ptrace options. */
5645 struct process_info *proc = find_process_pid (pid_of (thread));
5646 int options = linux_low_ptrace_options (proc->attached);
5647
5648 linux_enable_event_reporting (lwpid_of (thread), options);
5649 lwp->must_set_ptrace_flags = 0;
5650 }
5651
5652 return 0;
5653 }
5654
5655 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5656 ptrace flags for all inferiors. This is in case the new GDB connection
5657 doesn't support the same set of events that the previous one did. */
5658
5659 static void
5660 linux_handle_new_gdb_connection (void)
5661 {
5662 pid_t pid;
5663
5664 /* Request that all the lwps reset their ptrace options. */
5665 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5666 }
5667
5668 static int
5669 linux_supports_disable_randomization (void)
5670 {
5671 #ifdef HAVE_PERSONALITY
5672 return 1;
5673 #else
5674 return 0;
5675 #endif
5676 }
5677
5678 static int
5679 linux_supports_agent (void)
5680 {
5681 return 1;
5682 }
5683
5684 static int
5685 linux_supports_range_stepping (void)
5686 {
5687 if (the_low_target.supports_range_stepping == NULL)
5688 return 0;
5689
5690 return (*the_low_target.supports_range_stepping) ();
5691 }
5692
5693 /* Enumerate spufs IDs for process PID. */
5694 static int
5695 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5696 {
5697 int pos = 0;
5698 int written = 0;
5699 char path[128];
5700 DIR *dir;
5701 struct dirent *entry;
5702
5703 sprintf (path, "/proc/%ld/fd", pid);
5704 dir = opendir (path);
5705 if (!dir)
5706 return -1;
5707
5708 rewinddir (dir);
5709 while ((entry = readdir (dir)) != NULL)
5710 {
5711 struct stat st;
5712 struct statfs stfs;
5713 int fd;
5714
5715 fd = atoi (entry->d_name);
5716 if (!fd)
5717 continue;
5718
5719 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5720 if (stat (path, &st) != 0)
5721 continue;
5722 if (!S_ISDIR (st.st_mode))
5723 continue;
5724
5725 if (statfs (path, &stfs) != 0)
5726 continue;
5727 if (stfs.f_type != SPUFS_MAGIC)
5728 continue;
5729
5730 if (pos >= offset && pos + 4 <= offset + len)
5731 {
5732 *(unsigned int *)(buf + pos - offset) = fd;
5733 written += 4;
5734 }
5735 pos += 4;
5736 }
5737
5738 closedir (dir);
5739 return written;
5740 }
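/* A worked example of the windowing above (numbers illustrative): if
   three SPU context fds exist, the complete object is 12 bytes, three
   4-byte IDs at positions 0, 4 and 8.  A request with OFFSET = 4 and
   LEN = 8 copies only the second and third IDs into BUF and returns
   WRITTEN = 8.  An ID that would only partly fit is skipped entirely,
   because the "pos + 4 <= offset + len" test requires the whole
   4-byte ID to fall inside the window.  */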
5741
5742 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5743 object type, using the /proc file system. */
5744 static int
5745 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5746 unsigned const char *writebuf,
5747 CORE_ADDR offset, int len)
5748 {
5749 long pid = lwpid_of (current_thread);
5750 char buf[128];
5751 int fd = 0;
5752 int ret = 0;
5753
5754 if (!writebuf && !readbuf)
5755 return -1;
5756
5757 if (!*annex)
5758 {
5759 if (!readbuf)
5760 return -1;
5761 else
5762 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5763 }
5764
5765 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5766 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5767 if (fd <= 0)
5768 return -1;
5769
5770 if (offset != 0
5771 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5772 {
5773 close (fd);
5774 return 0;
5775 }
5776
5777 if (writebuf)
5778 ret = write (fd, writebuf, (size_t) len);
5779 else
5780 ret = read (fd, readbuf, (size_t) len);
5781
5782 close (fd);
5783 return ret;
5784 }
5785
5786 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5787 struct target_loadseg
5788 {
5789 /* Core address to which the segment is mapped. */
5790 Elf32_Addr addr;
5791 /* VMA recorded in the program header. */
5792 Elf32_Addr p_vaddr;
5793 /* Size of this segment in memory. */
5794 Elf32_Word p_memsz;
5795 };
5796
5797 # if defined PT_GETDSBT
5798 struct target_loadmap
5799 {
5800 /* Protocol version number, must be zero. */
5801 Elf32_Word version;
5802 /* Pointer to the DSBT table, its size, and the DSBT index. */
5803 unsigned *dsbt_table;
5804 unsigned dsbt_size, dsbt_index;
5805 /* Number of segments in this map. */
5806 Elf32_Word nsegs;
5807 /* The actual memory map. */
5808 struct target_loadseg segs[/*nsegs*/];
5809 };
5810 # define LINUX_LOADMAP PT_GETDSBT
5811 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5812 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5813 # else
5814 struct target_loadmap
5815 {
5816 /* Protocol version number, must be zero. */
5817 Elf32_Half version;
5818 /* Number of segments in this map. */
5819 Elf32_Half nsegs;
5820 /* The actual memory map. */
5821 struct target_loadseg segs[/*nsegs*/];
5822 };
5823 # define LINUX_LOADMAP PTRACE_GETFDPIC
5824 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5825 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5826 # endif
5827
5828 static int
5829 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5830 unsigned char *myaddr, unsigned int len)
5831 {
5832 int pid = lwpid_of (current_thread);
5833 int addr = -1;
5834 struct target_loadmap *data = NULL;
5835 unsigned int actual_length, copy_length;
5836
5837 if (strcmp (annex, "exec") == 0)
5838 addr = (int) LINUX_LOADMAP_EXEC;
5839 else if (strcmp (annex, "interp") == 0)
5840 addr = (int) LINUX_LOADMAP_INTERP;
5841 else
5842 return -1;
5843
5844 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5845 return -1;
5846
5847 if (data == NULL)
5848 return -1;
5849
5850 actual_length = sizeof (struct target_loadmap)
5851 + sizeof (struct target_loadseg) * data->nsegs;
5852
5853 if (offset < 0 || offset > actual_length)
5854 return -1;
5855
5856 copy_length = actual_length - offset < len ? actual_length - offset : len;
5857 memcpy (myaddr, (char *) data + offset, copy_length);
5858 return copy_length;
5859 }
5860 #else
5861 # define linux_read_loadmap NULL
5862 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
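/* A sketch of how a consumer applies the loadmap returned by
   linux_read_loadmap (hypothetical helper; it assumes the FDPIC/DSBT
   convention that every segment is relocated independently): to map
   a link-time address VMA to its runtime address, find the segment
   whose [p_vaddr, p_vaddr + p_memsz) range contains VMA and add that
   segment's displacement:

     static Elf32_Addr
     translate_vma (const struct target_loadmap *map, Elf32_Addr vma)
     {
       Elf32_Word i;

       for (i = 0; i < map->nsegs; i++)
         if (vma >= map->segs[i].p_vaddr
             && vma - map->segs[i].p_vaddr < map->segs[i].p_memsz)
           return map->segs[i].addr + (vma - map->segs[i].p_vaddr);

       return vma;  (not covered by any segment: return it unchanged)
     }
   */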
5863
5864 static void
5865 linux_process_qsupported (const char *query)
5866 {
5867 if (the_low_target.process_qsupported != NULL)
5868 the_low_target.process_qsupported (query);
5869 }
5870
5871 static int
5872 linux_supports_tracepoints (void)
5873 {
5874 if (the_low_target.supports_tracepoints == NULL)
5875 return 0;
5876
5877 return (*the_low_target.supports_tracepoints) ();
5878 }
5879
5880 static CORE_ADDR
5881 linux_read_pc (struct regcache *regcache)
5882 {
5883 if (the_low_target.get_pc == NULL)
5884 return 0;
5885
5886 return (*the_low_target.get_pc) (regcache);
5887 }
5888
5889 static void
5890 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5891 {
5892 gdb_assert (the_low_target.set_pc != NULL);
5893
5894 (*the_low_target.set_pc) (regcache, pc);
5895 }
5896
5897 static int
5898 linux_thread_stopped (struct thread_info *thread)
5899 {
5900 return get_thread_lwp (thread)->stopped;
5901 }
5902
5903 /* This exposes stop-all-threads functionality to other modules. */
5904
5905 static void
5906 linux_pause_all (int freeze)
5907 {
5908 stop_all_lwps (freeze, NULL);
5909 }
5910
5911 /* This exposes unstop-all-threads functionality to other gdbserver
5912 modules. */
5913
5914 static void
5915 linux_unpause_all (int unfreeze)
5916 {
5917 unstop_all_lwps (unfreeze, NULL);
5918 }
5919
5920 static int
5921 linux_prepare_to_access_memory (void)
5922 {
5923 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5924 running LWP. */
5925 if (non_stop)
5926 linux_pause_all (1);
5927 return 0;
5928 }
5929
5930 static void
5931 linux_done_accessing_memory (void)
5932 {
5933 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5934 running LWP. */
5935 if (non_stop)
5936 linux_unpause_all (1);
5937 }
5938
5939 static int
5940 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5941 CORE_ADDR collector,
5942 CORE_ADDR lockaddr,
5943 ULONGEST orig_size,
5944 CORE_ADDR *jump_entry,
5945 CORE_ADDR *trampoline,
5946 ULONGEST *trampoline_size,
5947 unsigned char *jjump_pad_insn,
5948 ULONGEST *jjump_pad_insn_size,
5949 CORE_ADDR *adjusted_insn_addr,
5950 CORE_ADDR *adjusted_insn_addr_end,
5951 char *err)
5952 {
5953 return (*the_low_target.install_fast_tracepoint_jump_pad)
5954 (tpoint, tpaddr, collector, lockaddr, orig_size,
5955 jump_entry, trampoline, trampoline_size,
5956 jjump_pad_insn, jjump_pad_insn_size,
5957 adjusted_insn_addr, adjusted_insn_addr_end,
5958 err);
5959 }
5960
5961 static struct emit_ops *
5962 linux_emit_ops (void)
5963 {
5964 if (the_low_target.emit_ops != NULL)
5965 return (*the_low_target.emit_ops) ();
5966 else
5967 return NULL;
5968 }
5969
5970 static int
5971 linux_get_min_fast_tracepoint_insn_len (void)
5972 {
5973 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5974 }
5975
5976 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5977
5978 static int
5979 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5980 CORE_ADDR *phdr_memaddr, int *num_phdr)
5981 {
5982 char filename[PATH_MAX];
5983 int fd;
5984 const int auxv_size = is_elf64
5985 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5986 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5987
5988 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5989
5990 fd = open (filename, O_RDONLY);
5991 if (fd < 0)
5992 return 1;
5993
5994 *phdr_memaddr = 0;
5995 *num_phdr = 0;
5996 while (read (fd, buf, auxv_size) == auxv_size
5997 && (*phdr_memaddr == 0 || *num_phdr == 0))
5998 {
5999 if (is_elf64)
6000 {
6001 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6002
6003 switch (aux->a_type)
6004 {
6005 case AT_PHDR:
6006 *phdr_memaddr = aux->a_un.a_val;
6007 break;
6008 case AT_PHNUM:
6009 *num_phdr = aux->a_un.a_val;
6010 break;
6011 }
6012 }
6013 else
6014 {
6015 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6016
6017 switch (aux->a_type)
6018 {
6019 case AT_PHDR:
6020 *phdr_memaddr = aux->a_un.a_val;
6021 break;
6022 case AT_PHNUM:
6023 *num_phdr = aux->a_un.a_val;
6024 break;
6025 }
6026 }
6027 }
6028
6029 close (fd);
6030
6031 if (*phdr_memaddr == 0 || *num_phdr == 0)
6032 {
6033 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6034 "phdr_memaddr = %ld, phdr_num = %d",
6035 (long) *phdr_memaddr, *num_phdr);
6036 return 2;
6037 }
6038
6039 return 0;
6040 }
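/* For reference, the auxv parsed above is a flat array of
   (a_type, a_un.a_val) pairs terminated by AT_NULL.  A 64-bit example
   (values illustrative):

     AT_PHDR  (3)    0x555555554040    runtime address of the phdrs
     AT_PHNUM (5)    11                number of program headers
     ...
     AT_NULL  (0)    0

   The loop exits as soon as both AT_PHDR and AT_PHNUM have been seen;
   no particular order is assumed.  */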
6041
6042 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6043
6044 static CORE_ADDR
6045 get_dynamic (const int pid, const int is_elf64)
6046 {
6047 CORE_ADDR phdr_memaddr, relocation;
6048 int num_phdr, i;
6049 unsigned char *phdr_buf;
6050 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6051
6052 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6053 return 0;
6054
6055 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6056 phdr_buf = alloca (num_phdr * phdr_size);
6057
6058 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6059 return 0;
6060
6061 /* Compute relocation: it is expected to be 0 for "regular" executables,
6062 non-zero for PIE ones. */
6063 relocation = -1;
6064 for (i = 0; relocation == -1 && i < num_phdr; i++)
6065 if (is_elf64)
6066 {
6067 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6068
6069 if (p->p_type == PT_PHDR)
6070 relocation = phdr_memaddr - p->p_vaddr;
6071 }
6072 else
6073 {
6074 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6075
6076 if (p->p_type == PT_PHDR)
6077 relocation = phdr_memaddr - p->p_vaddr;
6078 }
6079
6080 if (relocation == -1)
6081 {
6082 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6083 all real-world executables, including PIE executables, always have
6084 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6085 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6086 provides DT_DEBUG anyway (fpc binaries are statically linked).
6087
6088 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6089
6090 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6091
6092 return 0;
6093 }
6094
6095 for (i = 0; i < num_phdr; i++)
6096 {
6097 if (is_elf64)
6098 {
6099 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6100
6101 if (p->p_type == PT_DYNAMIC)
6102 return p->p_vaddr + relocation;
6103 }
6104 else
6105 {
6106 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6107
6108 if (p->p_type == PT_DYNAMIC)
6109 return p->p_vaddr + relocation;
6110 }
6111 }
6112
6113 return 0;
6114 }
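/* A worked example of the relocation computed above (addresses
   illustrative): for a PIE whose program headers record
   PT_PHDR.p_vaddr = 0x40 while the auxv reports
   AT_PHDR = 0x555555554040, the load bias is

     relocation = 0x555555554040 - 0x40 = 0x555555554000,

   so a PT_DYNAMIC with p_vaddr = 0x2d80 yields
   &_DYNAMIC = 0x555555556d80.  For a non-PIE executable the
   link-time and runtime addresses coincide and RELOCATION is 0.  */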
6115
6116 /* Return &_r_debug in the inferior, or -1 if not present. The return
6117 value can be 0 if the inferior does not yet have the library list
6118 initialized. We look for DT_MIPS_RLD_MAP first; MIPS executables use it
6119 instead of DT_DEBUG, although they sometimes contain an unused one too. */
6120
6121 static CORE_ADDR
6122 get_r_debug (const int pid, const int is_elf64)
6123 {
6124 CORE_ADDR dynamic_memaddr;
6125 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6126 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6127 CORE_ADDR map = -1;
6128
6129 dynamic_memaddr = get_dynamic (pid, is_elf64);
6130 if (dynamic_memaddr == 0)
6131 return map;
6132
6133 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6134 {
6135 if (is_elf64)
6136 {
6137 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6138 #ifdef DT_MIPS_RLD_MAP
6139 union
6140 {
6141 Elf64_Xword map;
6142 unsigned char buf[sizeof (Elf64_Xword)];
6143 }
6144 rld_map;
6145
6146 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6147 {
6148 if (linux_read_memory (dyn->d_un.d_val,
6149 rld_map.buf, sizeof (rld_map.buf)) == 0)
6150 return rld_map.map;
6151 else
6152 break;
6153 }
6154 #endif /* DT_MIPS_RLD_MAP */
6155
6156 if (dyn->d_tag == DT_DEBUG && map == -1)
6157 map = dyn->d_un.d_val;
6158
6159 if (dyn->d_tag == DT_NULL)
6160 break;
6161 }
6162 else
6163 {
6164 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6165 #ifdef DT_MIPS_RLD_MAP
6166 union
6167 {
6168 Elf32_Word map;
6169 unsigned char buf[sizeof (Elf32_Word)];
6170 }
6171 rld_map;
6172
6173 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6174 {
6175 if (linux_read_memory (dyn->d_un.d_val,
6176 rld_map.buf, sizeof (rld_map.buf)) == 0)
6177 return rld_map.map;
6178 else
6179 break;
6180 }
6181 #endif /* DT_MIPS_RLD_MAP */
6182
6183 if (dyn->d_tag == DT_DEBUG && map == -1)
6184 map = dyn->d_un.d_val;
6185
6186 if (dyn->d_tag == DT_NULL)
6187 break;
6188 }
6189
6190 dynamic_memaddr += dyn_size;
6191 }
6192
6193 return map;
6194 }
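/* The scan above walks the inferior's .dynamic array in place.  A
   typical 64-bit layout (values illustrative):

     d_tag        d_un.d_val
     DT_NEEDED    ...
     DT_DEBUG     0x7ffff7ffe190    &_r_debug, filled in by ld.so
     ...
     DT_NULL      0

   DT_DEBUG's value stays 0 until the dynamic loader has initialized
   _r_debug, which is why callers must tolerate a 0 return here.  */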
6195
6196 /* Read one pointer from MEMADDR in the inferior. */
6197
6198 static int
6199 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6200 {
6201 int ret;
6202
6203 /* Go through a union so this works on either big or little endian
6204 hosts, when the inferior's pointer size is smaller than the size
6205 of CORE_ADDR. It is assumed the inferior's endianness is the
6206 same as the superior's. */
6207 union
6208 {
6209 CORE_ADDR core_addr;
6210 unsigned int ui;
6211 unsigned char uc;
6212 } addr;
6213
6214 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6215 if (ret == 0)
6216 {
6217 if (ptr_size == sizeof (CORE_ADDR))
6218 *ptr = addr.core_addr;
6219 else if (ptr_size == sizeof (unsigned int))
6220 *ptr = addr.ui;
6221 else
6222 gdb_assert_not_reached ("unhandled pointer size");
6223 }
6224 return ret;
6225 }
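/* A concrete example of why the union above matters: on a big-endian
   host with an 8-byte CORE_ADDR, reading a 4-byte inferior pointer
   deposits those 4 bytes at the start of the union, where the
   high-order bytes of addr.core_addr live.  The value therefore has
   to be fetched back through addr.ui, the matching 4-byte view.  On
   a little-endian host the two views agree for values that fit in 32
   bits, which is why skipping the union can appear to work there yet
   break on big-endian hosts.  */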
6226
6227 struct link_map_offsets
6228 {
6229 /* Offset and size of r_debug.r_version. */
6230 int r_version_offset;
6231
6232 /* Offset and size of r_debug.r_map. */
6233 int r_map_offset;
6234
6235 /* Offset to l_addr field in struct link_map. */
6236 int l_addr_offset;
6237
6238 /* Offset to l_name field in struct link_map. */
6239 int l_name_offset;
6240
6241 /* Offset to l_ld field in struct link_map. */
6242 int l_ld_offset;
6243
6244 /* Offset to l_next field in struct link_map. */
6245 int l_next_offset;
6246
6247 /* Offset to l_prev field in struct link_map. */
6248 int l_prev_offset;
6249 };
6250
6251 /* Construct qXfer:libraries-svr4:read reply. */
6252
6253 static int
6254 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6255 unsigned const char *writebuf,
6256 CORE_ADDR offset, int len)
6257 {
6258 char *document;
6259 unsigned document_len;
6260 struct process_info_private *const priv = current_process ()->priv;
6261 char filename[PATH_MAX];
6262 int pid, is_elf64;
6263
6264 static const struct link_map_offsets lmo_32bit_offsets =
6265 {
6266 0, /* r_version offset. */
6267 4, /* r_debug.r_map offset. */
6268 0, /* l_addr offset in link_map. */
6269 4, /* l_name offset in link_map. */
6270 8, /* l_ld offset in link_map. */
6271 12, /* l_next offset in link_map. */
6272 16 /* l_prev offset in link_map. */
6273 };
6274
6275 static const struct link_map_offsets lmo_64bit_offsets =
6276 {
6277 0, /* r_version offset. */
6278 8, /* r_debug.r_map offset. */
6279 0, /* l_addr offset in link_map. */
6280 8, /* l_name offset in link_map. */
6281 16, /* l_ld offset in link_map. */
6282 24, /* l_next offset in link_map. */
6283 32 /* l_prev offset in link_map. */
6284 };
6285 const struct link_map_offsets *lmo;
6286 unsigned int machine;
6287 int ptr_size;
6288 CORE_ADDR lm_addr = 0, lm_prev = 0;
6289 int allocated = 1024;
6290 char *p;
6291 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6292 int header_done = 0;
6293
6294 if (writebuf != NULL)
6295 return -2;
6296 if (readbuf == NULL)
6297 return -1;
6298
6299 pid = lwpid_of (current_thread);
6300 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6301 is_elf64 = elf_64_file_p (filename, &machine);
6302 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6303 ptr_size = is_elf64 ? 8 : 4;
6304
6305 while (annex[0] != '\0')
6306 {
6307 const char *sep;
6308 CORE_ADDR *addrp;
6309 int len;
6310
6311 sep = strchr (annex, '=');
6312 if (sep == NULL)
6313 break;
6314
6315 len = sep - annex;
6316 if (len == 5 && startswith (annex, "start"))
6317 addrp = &lm_addr;
6318 else if (len == 4 && startswith (annex, "prev"))
6319 addrp = &lm_prev;
6320 else
6321 {
6322 annex = strchr (sep, ';');
6323 if (annex == NULL)
6324 break;
6325 annex++;
6326 continue;
6327 }
6328
6329 annex = decode_address_to_semicolon (addrp, sep + 1);
6330 }
6331
6332 if (lm_addr == 0)
6333 {
6334 int r_version = 0;
6335
6336 if (priv->r_debug == 0)
6337 priv->r_debug = get_r_debug (pid, is_elf64);
6338
6339 /* We failed to find DT_DEBUG. This situation will not change for
6340 this inferior, so do not retry. Report it to GDB as E01; see
6341 GDB's solib-svr4.c for the reasons. */
6342 if (priv->r_debug == (CORE_ADDR) -1)
6343 return -1;
6344
6345 if (priv->r_debug != 0)
6346 {
6347 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6348 (unsigned char *) &r_version,
6349 sizeof (r_version)) != 0
6350 || r_version != 1)
6351 {
6352 warning ("unexpected r_debug version %d", r_version);
6353 }
6354 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6355 &lm_addr, ptr_size) != 0)
6356 {
6357 warning ("unable to read r_map from 0x%lx",
6358 (long) priv->r_debug + lmo->r_map_offset);
6359 }
6360 }
6361 }
6362
6363 document = xmalloc (allocated);
6364 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6365 p = document + strlen (document);
6366
6367 while (lm_addr
6368 && read_one_ptr (lm_addr + lmo->l_name_offset,
6369 &l_name, ptr_size) == 0
6370 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6371 &l_addr, ptr_size) == 0
6372 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6373 &l_ld, ptr_size) == 0
6374 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6375 &l_prev, ptr_size) == 0
6376 && read_one_ptr (lm_addr + lmo->l_next_offset,
6377 &l_next, ptr_size) == 0)
6378 {
6379 unsigned char libname[PATH_MAX];
6380
6381 if (lm_prev != l_prev)
6382 {
6383 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6384 (long) lm_prev, (long) l_prev);
6385 break;
6386 }
6387
6388 /* Ignore the first entry even if it has a valid name, as it
6389 corresponds to the main executable. It should not be skipped if
6390 the dynamic loader was loaded late by a static executable (see
6391 the solib-svr4.c parameter ignore_first), but in that case the
6392 main executable does not have PT_DYNAMIC present, and this
6393 function has already returned above due to a failed get_r_debug. */
6394 if (lm_prev == 0)
6395 {
6396 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6397 p = p + strlen (p);
6398 }
6399 else
6400 {
6401 /* Not checking for error because reading may stop before
6402 we've got PATH_MAX worth of characters. */
6403 libname[0] = '\0';
6404 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6405 libname[sizeof (libname) - 1] = '\0';
6406 if (libname[0] != '\0')
6407 {
6408 /* 6x the size for xml_escape_text below. */
6409 size_t len = 6 * strlen ((char *) libname);
6410 char *name;
6411
6412 if (!header_done)
6413 {
6414 /* Terminate `<library-list-svr4'. */
6415 *p++ = '>';
6416 header_done = 1;
6417 }
6418
6419 while (allocated < p - document + len + 200)
6420 {
6421 /* Expand to guarantee sufficient storage. */
6422 uintptr_t document_len = p - document;
6423
6424 document = xrealloc (document, 2 * allocated);
6425 allocated *= 2;
6426 p = document + document_len;
6427 }
6428
6429 name = xml_escape_text ((char *) libname);
6430 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6431 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6432 name, (unsigned long) lm_addr,
6433 (unsigned long) l_addr, (unsigned long) l_ld);
6434 free (name);
6435 }
6436 }
6437
6438 lm_prev = lm_addr;
6439 lm_addr = l_next;
6440 }
6441
6442 if (!header_done)
6443 {
6444 /* Empty list; terminate `<library-list-svr4'. */
6445 strcpy (p, "/>");
6446 }
6447 else
6448 strcpy (p, "</library-list-svr4>");
6449
6450 document_len = strlen (document);
6451 if (offset < document_len)
6452 document_len -= offset;
6453 else
6454 document_len = 0;
6455 if (len > document_len)
6456 len = document_len;
6457
6458 memcpy (readbuf, document + offset, len);
6459 xfree (document);
6460
6461 return len;
6462 }
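/* For reference, a reply document built above has this shape
   (addresses illustrative; whitespace added here for readability,
   the code emits the elements back to back):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe2e0">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ff7000"
                l_addr="0x7ffff7a0d000" l_ld="0x7ffff7dcdb80"/>
     </library-list-svr4>

   An inferior with no shared libraries yields the self-closing form
   <library-list-svr4 version="1.0"/>.  */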
6463
6464 #ifdef HAVE_LINUX_BTRACE
6465
6466 /* See to_enable_btrace target method. */
6467
6468 static struct btrace_target_info *
6469 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6470 {
6471 struct btrace_target_info *tinfo;
6472
6473 tinfo = linux_enable_btrace (ptid, conf);
6474
6475 if (tinfo != NULL && tinfo->ptr_bits == 0)
6476 {
6477 struct thread_info *thread = find_thread_ptid (ptid);
6478 struct regcache *regcache = get_thread_regcache (thread, 0);
6479
6480 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6481 }
6482
6483 return tinfo;
6484 }
6485
6486 /* See to_disable_btrace target method. */
6487
6488 static int
6489 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6490 {
6491 enum btrace_error err;
6492
6493 err = linux_disable_btrace (tinfo);
6494 return (err == BTRACE_ERR_NONE ? 0 : -1);
6495 }
6496
6497 /* Encode an Intel(R) Processor Trace configuration. */
6498
6499 static void
6500 linux_low_encode_pt_config (struct buffer *buffer,
6501 const struct btrace_data_pt_config *config)
6502 {
6503 buffer_grow_str (buffer, "<pt-config>\n");
6504
6505 switch (config->cpu.vendor)
6506 {
6507 case CV_INTEL:
6508 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6509 "model=\"%u\" stepping=\"%u\"/>\n",
6510 config->cpu.family, config->cpu.model,
6511 config->cpu.stepping);
6512 break;
6513
6514 default:
6515 break;
6516 }
6517
6518 buffer_grow_str (buffer, "</pt-config>\n");
6519 }
6520
6521 /* Encode a raw buffer. */
6522
6523 static void
6524 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6525 unsigned int size)
6526 {
6527 if (size == 0)
6528 return;
6529
6530 /* We use hex encoding - see common/rsp-low.h. */
6531 buffer_grow_str (buffer, "<raw>\n");
6532
6533 while (size-- > 0)
6534 {
6535 char elem[2];
6536
6537 elem[0] = tohex ((*data >> 4) & 0xf);
6538 elem[1] = tohex (*data++ & 0xf);
6539
6540 buffer_grow (buffer, elem, 2);
6541 }
6542
6543 buffer_grow_str (buffer, "</raw>\n");
6544 }
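/* For example, the three bytes { 0x48, 0x89, 0xe5 } are encoded as
   the text "4889e5" inside the <raw> element: each byte becomes two
   lowercase hex digits, high nibble first, with no separators.  */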
6545
6546 /* See to_read_btrace target method. */
6547
6548 static int
6549 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6550 int type)
6551 {
6552 struct btrace_data btrace;
6553 struct btrace_block *block;
6554 enum btrace_error err;
6555 int i;
6556
6557 btrace_data_init (&btrace);
6558
6559 err = linux_read_btrace (&btrace, tinfo, type);
6560 if (err != BTRACE_ERR_NONE)
6561 {
6562 if (err == BTRACE_ERR_OVERFLOW)
6563 buffer_grow_str0 (buffer, "E.Overflow.");
6564 else
6565 buffer_grow_str0 (buffer, "E.Generic Error.");
6566
6567 goto err;
6568 }
6569
6570 switch (btrace.format)
6571 {
6572 case BTRACE_FORMAT_NONE:
6573 buffer_grow_str0 (buffer, "E.No Trace.");
6574 goto err;
6575
6576 case BTRACE_FORMAT_BTS:
6577 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6578 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6579
6580 for (i = 0;
6581 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6582 i++)
6583 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6584 paddress (block->begin), paddress (block->end));
6585
6586 buffer_grow_str0 (buffer, "</btrace>\n");
6587 break;
6588
6589 case BTRACE_FORMAT_PT:
6590 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6591 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6592 buffer_grow_str (buffer, "<pt>\n");
6593
6594 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6595
6596 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6597 btrace.variant.pt.size);
6598
6599 buffer_grow_str (buffer, "</pt>\n");
6600 buffer_grow_str0 (buffer, "</btrace>\n");
6601 break;
6602
6603 default:
6604 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6605 goto err;
6606 }
6607
6608 btrace_data_fini (&btrace);
6609 return 0;
6610
6611 err:
6612 btrace_data_fini (&btrace);
6613 return -1;
6614 }
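/* For reference, a BTS reply built above has this shape (addresses
   illustrative):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x400520"/>
     </btrace>

   Each <block> element describes one run of sequentially executed
   code, from its first to its last instruction address.  */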
6615
6616 /* See to_btrace_conf target method. */
6617
6618 static int
6619 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6620 struct buffer *buffer)
6621 {
6622 const struct btrace_config *conf;
6623
6624 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6625 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6626
6627 conf = linux_btrace_conf (tinfo);
6628 if (conf != NULL)
6629 {
6630 switch (conf->format)
6631 {
6632 case BTRACE_FORMAT_NONE:
6633 break;
6634
6635 case BTRACE_FORMAT_BTS:
6636 buffer_xml_printf (buffer, "<bts");
6637 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6638 buffer_xml_printf (buffer, " />\n");
6639 break;
6640
6641 case BTRACE_FORMAT_PT:
6642 buffer_xml_printf (buffer, "<pt");
6643 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6644 buffer_xml_printf (buffer, "/>\n");
6645 break;
6646 }
6647 }
6648
6649 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6650 return 0;
6651 }
6652 #endif /* HAVE_LINUX_BTRACE */
6653
6654 /* See nat/linux-nat.h. */
6655
6656 ptid_t
6657 current_lwp_ptid (void)
6658 {
6659 return ptid_of (current_thread);
6660 }
6661
6662 static struct target_ops linux_target_ops = {
6663 linux_create_inferior,
6664 linux_arch_setup,
6665 linux_attach,
6666 linux_kill,
6667 linux_detach,
6668 linux_mourn,
6669 linux_join,
6670 linux_thread_alive,
6671 linux_resume,
6672 linux_wait,
6673 linux_fetch_registers,
6674 linux_store_registers,
6675 linux_prepare_to_access_memory,
6676 linux_done_accessing_memory,
6677 linux_read_memory,
6678 linux_write_memory,
6679 linux_look_up_symbols,
6680 linux_request_interrupt,
6681 linux_read_auxv,
6682 linux_supports_z_point_type,
6683 linux_insert_point,
6684 linux_remove_point,
6685 linux_stopped_by_sw_breakpoint,
6686 linux_supports_stopped_by_sw_breakpoint,
6687 linux_stopped_by_hw_breakpoint,
6688 linux_supports_stopped_by_hw_breakpoint,
6689 linux_supports_conditional_breakpoints,
6690 linux_stopped_by_watchpoint,
6691 linux_stopped_data_address,
6692 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6693 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6694 && defined(PT_TEXT_END_ADDR)
6695 linux_read_offsets,
6696 #else
6697 NULL,
6698 #endif
6699 #ifdef USE_THREAD_DB
6700 thread_db_get_tls_address,
6701 #else
6702 NULL,
6703 #endif
6704 linux_qxfer_spu,
6705 hostio_last_error_from_errno,
6706 linux_qxfer_osdata,
6707 linux_xfer_siginfo,
6708 linux_supports_non_stop,
6709 linux_async,
6710 linux_start_non_stop,
6711 linux_supports_multi_process,
6712 linux_supports_fork_events,
6713 linux_supports_vfork_events,
6714 linux_handle_new_gdb_connection,
6715 #ifdef USE_THREAD_DB
6716 thread_db_handle_monitor_command,
6717 #else
6718 NULL,
6719 #endif
6720 linux_common_core_of_thread,
6721 linux_read_loadmap,
6722 linux_process_qsupported,
6723 linux_supports_tracepoints,
6724 linux_read_pc,
6725 linux_write_pc,
6726 linux_thread_stopped,
6727 NULL,
6728 linux_pause_all,
6729 linux_unpause_all,
6730 linux_stabilize_threads,
6731 linux_install_fast_tracepoint_jump_pad,
6732 linux_emit_ops,
6733 linux_supports_disable_randomization,
6734 linux_get_min_fast_tracepoint_insn_len,
6735 linux_qxfer_libraries_svr4,
6736 linux_supports_agent,
6737 #ifdef HAVE_LINUX_BTRACE
6738 linux_supports_btrace,
6739 linux_low_enable_btrace,
6740 linux_low_disable_btrace,
6741 linux_low_read_btrace,
6742 linux_low_btrace_conf,
6743 #else
6744 NULL,
6745 NULL,
6746 NULL,
6747 NULL,
6748 NULL,
6749 #endif
6750 linux_supports_range_stepping,
6751 linux_proc_pid_to_exec_file,
6752 linux_mntns_open_cloexec,
6753 linux_mntns_unlink,
6754 linux_mntns_readlink,
6755 };
6756
6757 static void
6758 linux_init_signals (void)
6759 {
6760 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6761 to find what the cancel signal actually is. */
6762 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6763 signal (__SIGRTMIN+1, SIG_IGN);
6764 #endif
6765 }
6766
6767 #ifdef HAVE_LINUX_REGSETS
6768 void
6769 initialize_regsets_info (struct regsets_info *info)
6770 {
6771 for (info->num_regsets = 0;
6772 info->regsets[info->num_regsets].size >= 0;
6773 info->num_regsets++)
6774 ;
6775 }
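/* A sketch of the table shape initialize_regsets_info expects (names
   hypothetical; field order per struct regset_info in linux-low.h).
   The scan above stops at the first entry with a negative size:

     static struct regset_info my_regsets[] =
       {
         { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
           GENERAL_REGS, my_fill_gregset, my_store_gregset },
         { 0, 0, 0, -1, -1, NULL, NULL }
       };
   */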
6776 #endif
6777
6778 void
6779 initialize_low (void)
6780 {
6781 struct sigaction sigchld_action;
6782 memset (&sigchld_action, 0, sizeof (sigchld_action));
6783 set_target_ops (&linux_target_ops);
6784 set_breakpoint_data (the_low_target.breakpoint,
6785 the_low_target.breakpoint_len);
6786 linux_init_signals ();
6787 linux_ptrace_init_warnings ();
6788
6789 sigchld_action.sa_handler = sigchld_handler;
6790 sigemptyset (&sigchld_action.sa_mask);
6791 sigchld_action.sa_flags = SA_RESTART;
6792 sigaction (SIGCHLD, &sigchld_action, NULL);
6793
6794 initialize_low_arch ();
6795
6796 linux_check_ptrace_features ();
6797 }