/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;  /* Entry type */
  union
    {
      uint32_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;  /* Entry type */
  union
    {
      uint64_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
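/* Head of the list described above.  */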
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
{
  /* Not stopping threads presently.  */
  NOT_STOPPING_THREADS,

  /* Stopping threads.  */
  STOPPING_THREADS,

  /* Stopping and suspending threads.  */
  STOPPING_AND_SUSPENDING_THREADS
};

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

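/* A signal to be delivered to an LWP later, together with its
   captured siginfo.  Chained through PREV.  */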
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if HEADER is not an ELF file at all.  On success,
   store the machine type in *MACHINE; otherwise store EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

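/* Remove LWP from our bookkeeping: delete its thread and free the
   lwp_info and its arch-private data.  */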
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = xmalloc (sizeof (struct target_desc));
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

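/* Allocate and zero-initialize an lwp_info for PTID, give the low
   target a chance to attach arch-private data, and register the
   corresponding thread.  */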
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

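/* Helper state for last_thread_of_process_p: the pid to match and
   the number of matching threads seen so far.  */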
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

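/* Return nonzero if fewer than two threads of process PID are known,
   i.e., the one at hand is the last.  */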
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

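/* Kill process PID and all of its LWPs, then mourn it.  Returns -1
   if the process is not known, 0 otherwise.  */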
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

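/* Callback for `find_inferior'.  Detach from one LWP of the process
   whose pid ARGS points to: discard any pending SIGSTOP, flush its
   registers, and detach with the signal it last stopped for.  */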
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

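/* Detach from process PID, stopping and stabilizing all of its
   threads first.  Returns -1 if the process is not known, 0
   otherwise.  */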
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

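/* Forget everything we knew about PROCESS: delete its LWPs, free its
   private data, and remove it from the process list.  */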
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

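/* Wait for process PID to exit or be killed, reaping its exit
   status.  */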
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

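/* Callback for `find_inferior'.  Match a thread whose lwp id equals
   the lwp id (or, failing that, the pid) of the ptid DATA points
   to.  */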
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

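/* Find the lwp_info whose lwp id matches PTID's lwp field, falling
   back to PTID's pid field if it has no lwp component.  Return NULL
   if none is found.  */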
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */
1752
1753 static int
1754 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1755 {
1756 struct thread_info *saved_thread;
1757
1758 saved_thread = current_thread;
1759 current_thread = get_lwp_thread (lwp);
1760
1761 if ((wstat == NULL
1762 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1763 && supports_fast_tracepoints ()
1764 && agent_loaded_p ())
1765 {
1766 struct fast_tpoint_collect_status status;
1767 int r;
1768
1769 if (debug_threads)
1770 debug_printf ("Checking whether LWP %ld needs to move out of the "
1771 "jump pad.\n",
1772 lwpid_of (current_thread));
1773
1774 r = linux_fast_tracepoint_collecting (lwp, &status);
1775
1776 if (wstat == NULL
1777 || (WSTOPSIG (*wstat) != SIGILL
1778 && WSTOPSIG (*wstat) != SIGFPE
1779 && WSTOPSIG (*wstat) != SIGSEGV
1780 && WSTOPSIG (*wstat) != SIGBUS))
1781 {
1782 lwp->collecting_fast_tracepoint = r;
1783
1784 if (r != 0)
1785 {
1786 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1787 {
1788 /* Haven't executed the original instruction yet.
1789 Set breakpoint there, and wait till it's hit,
1790 then single-step until exiting the jump pad. */
1791 lwp->exit_jump_pad_bkpt
1792 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1793 }
1794
1795 if (debug_threads)
1796 debug_printf ("Checking whether LWP %ld needs to move out of "
1797 "the jump pad...it does\n",
1798 lwpid_of (current_thread));
1799 current_thread = saved_thread;
1800
1801 return 1;
1802 }
1803 }
1804 else
1805 {
1806 /* If we get a synchronous signal while collecting, *and*
1807 while executing the (relocated) original instruction,
1808 reset the PC to point at the tpoint address, before
1809 reporting to GDB. Otherwise, it's an IPA lib bug: just
1810 report the signal to GDB, and pray for the best. */
1811
1812 lwp->collecting_fast_tracepoint = 0;
1813
1814 if (r != 0
1815 && (status.adjusted_insn_addr <= lwp->stop_pc
1816 && lwp->stop_pc < status.adjusted_insn_addr_end))
1817 {
1818 siginfo_t info;
1819 struct regcache *regcache;
1820
1821 /* The si_addr on a few signals references the address
1822 of the faulting instruction. Adjust that as
1823 well. */
1824 if ((WSTOPSIG (*wstat) == SIGILL
1825 || WSTOPSIG (*wstat) == SIGFPE
1826 || WSTOPSIG (*wstat) == SIGBUS
1827 || WSTOPSIG (*wstat) == SIGSEGV)
1828 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1829 (PTRACE_TYPE_ARG3) 0, &info) == 0
1830 /* Final check just to make sure we don't clobber
1831 the siginfo of non-kernel-sent signals. */
1832 && (uintptr_t) info.si_addr == lwp->stop_pc)
1833 {
1834 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1835 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1836 (PTRACE_TYPE_ARG3) 0, &info);
1837 }
1838
1839 regcache = get_thread_regcache (current_thread, 1);
1840 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1841 lwp->stop_pc = status.tpoint_addr;
1842
1843 /* Cancel any fast tracepoint lock this thread was
1844 holding. */
1845 force_unlock_trace_buffer ();
1846 }
1847
1848 if (lwp->exit_jump_pad_bkpt != NULL)
1849 {
1850 if (debug_threads)
1851 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1852 "stopping all threads momentarily.\n");
1853
1854 stop_all_lwps (1, lwp);
1855
1856 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1857 lwp->exit_jump_pad_bkpt = NULL;
1858
1859 unstop_all_lwps (1, lwp);
1860
1861 gdb_assert (lwp->suspended >= 0);
1862 }
1863 }
1864 }
1865
1866 if (debug_threads)
1867 debug_printf ("Checking whether LWP %ld needs to move out of the "
1868 "jump pad...no\n",
1869 lwpid_of (current_thread));
1870
1871 current_thread = saved_thread;
1872 return 0;
1873 }
1874
1875 /* Enqueue one signal in the "signals to report later when out of the
1876 jump pad" list. */
1877
1878 static void
1879 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1880 {
1881 struct pending_signals *p_sig;
1882 struct thread_info *thread = get_lwp_thread (lwp);
1883
1884 if (debug_threads)
1885 debug_printf ("Deferring signal %d for LWP %ld.\n",
1886 WSTOPSIG (*wstat), lwpid_of (thread));
1887
1888 if (debug_threads)
1889 {
1890 struct pending_signals *sig;
1891
1892 for (sig = lwp->pending_signals_to_report;
1893 sig != NULL;
1894 sig = sig->prev)
1895 debug_printf (" Already queued %d\n",
1896 sig->signal);
1897
1898 debug_printf (" (no more currently queued signals)\n");
1899 }
1900
1901 /* Don't enqueue non-RT signals if they are already in the deferred
1902 queue. (SIGSTOP is the easiest signal to see ending up here
1903 twice.) */
1904 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1905 {
1906 struct pending_signals *sig;
1907
1908 for (sig = lwp->pending_signals_to_report;
1909 sig != NULL;
1910 sig = sig->prev)
1911 {
1912 if (sig->signal == WSTOPSIG (*wstat))
1913 {
1914 if (debug_threads)
1915 debug_printf ("Not requeuing already queued non-RT signal %d"
1916 " for LWP %ld\n",
1917 sig->signal,
1918 lwpid_of (thread));
1919 return;
1920 }
1921 }
1922 }
1923
1924 p_sig = xmalloc (sizeof (*p_sig));
1925 p_sig->prev = lwp->pending_signals_to_report;
1926 p_sig->signal = WSTOPSIG (*wstat);
1927 memset (&p_sig->info, 0, sizeof (siginfo_t));
1928 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1929 &p_sig->info);
1930
1931 lwp->pending_signals_to_report = p_sig;
1932 }
1933
1934 /* Dequeue one signal from the "signals to report later when out of
1935 the jump pad" list. */
1936
1937 static int
1938 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1939 {
1940 struct thread_info *thread = get_lwp_thread (lwp);
1941
1942 if (lwp->pending_signals_to_report != NULL)
1943 {
1944 struct pending_signals **p_sig;
1945
1946 p_sig = &lwp->pending_signals_to_report;
1947 while ((*p_sig)->prev != NULL)
1948 p_sig = &(*p_sig)->prev;
1949
1950 *wstat = W_STOPCODE ((*p_sig)->signal);
1951 if ((*p_sig)->info.si_signo != 0)
1952 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1953 &(*p_sig)->info);
1954 free (*p_sig);
1955 *p_sig = NULL;
1956
1957 if (debug_threads)
1958 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1959 WSTOPSIG (*wstat), lwpid_of (thread));
1960
1961 if (debug_threads)
1962 {
1963 struct pending_signals *sig;
1964
1965 for (sig = lwp->pending_signals_to_report;
1966 sig != NULL;
1967 sig = sig->prev)
1968 debug_printf (" Still queued %d\n",
1969 sig->signal);
1970
1971 debug_printf (" (no more queued signals)\n");
1972 }
1973
1974 return 1;
1975 }
1976
1977 return 0;
1978 }
1979
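/* Editor's sketch (not gdbserver code; guarded with #if 0 so it cannot
   build into this file): the deferred-signal list above pushes new
   entries at the head but dequeue_one_deferred_signal pops from the
   tail, so the list behaves as a FIFO and signals are re-reported in
   arrival order. The standalone model below uses hypothetical names,
   and also shows the W_STOPCODE packing defined at the top of this
   file round-tripping through WIFSTOPPED/WSTOPSIG. */
#if 0
#include <assert.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) (((sig) << 8) | 0x7f)
#endif

struct sig_node { struct sig_node *prev; int signal; };
static struct sig_node *head;	/* newest entry */

static void
push (int sig)
{
  struct sig_node *n = malloc (sizeof *n);

  n->prev = head;
  n->signal = sig;
  head = n;
}

static int
pop_oldest (void)
{
  struct sig_node **p = &head;
  int sig;

  while ((*p)->prev != NULL)	/* walk to the tail, as dequeue does */
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}

int
main (void)
{
  int w = W_STOPCODE (SIGUSR2);	/* synthetic "stopped by SIGUSR2" */

  push (SIGUSR1);
  push (SIGUSR2);
  assert (pop_oldest () == SIGUSR1);	/* arrival order preserved */
  assert (WIFSTOPPED (w) && WSTOPSIG (w) == SIGUSR2);
  return 0;
}
#endif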
1980 /* Fetch the possibly triggered data watchpoint info and store it in
1981 CHILD.
1982
1983 On some archs, like x86, that use debug registers to set
1984 watchpoints, the way to know which watched address trapped is to
1985 check the register that is used to select which address to watch.
1986 The problem is that between setting the watchpoint and reading
1987 back which data address trapped, the user may change the set of
1988 watchpoints, and, as a consequence, GDB changes the debug
1989 registers in the inferior. To avoid reading back a stale
1990 stopped-data-address when that happens, we cache in CHILD the fact
1991 that a watchpoint trapped, and the corresponding data address, as
1992 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1993 registers meanwhile, we have the cached data to rely on. */
1994
1995 static int
1996 check_stopped_by_watchpoint (struct lwp_info *child)
1997 {
1998 if (the_low_target.stopped_by_watchpoint != NULL)
1999 {
2000 struct thread_info *saved_thread;
2001
2002 saved_thread = current_thread;
2003 current_thread = get_lwp_thread (child);
2004
2005 if (the_low_target.stopped_by_watchpoint ())
2006 {
2007 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2008
2009 if (the_low_target.stopped_data_address != NULL)
2010 child->stopped_data_address
2011 = the_low_target.stopped_data_address ();
2012 else
2013 child->stopped_data_address = 0;
2014 }
2015
2016 current_thread = saved_thread;
2017 }
2018
2019 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2020 }
2021
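/* Editor's sketch (assumption-heavy, #if 0'd out): one concrete way an
   x86 Linux backend could implement the stopped_data_address hook that
   check_stopped_by_watchpoint calls. On x86, DR6's low four bits
   record which of DR0-DR3 fired, and the matching DRn holds the
   watched address. The helper names are hypothetical, not the real
   the_low_target entries; error checking is omitted. */
#if 0
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static unsigned long
read_debug_reg (pid_t tid, int regnum)
{
  size_t offset = offsetof (struct user, u_debugreg[regnum]);

  return ptrace (PTRACE_PEEKUSER, tid, (void *) offset, NULL);
}

static unsigned long
stopped_data_address_sketch (pid_t tid)
{
  unsigned long dr6 = read_debug_reg (tid, 6);
  int i;

  for (i = 0; i < 4; i++)
    if (dr6 & (1ul << i))		/* Bi: debug register i hit. */
      return read_debug_reg (tid, i);	/* the trapped data address */
  return 0;
}
#endif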
2022 /* Return the ptrace options that we want to try to enable. */
2023
2024 static int
2025 linux_low_ptrace_options (int attached)
2026 {
2027 int options = 0;
2028
2029 if (!attached)
2030 options |= PTRACE_O_EXITKILL;
2031
2032 if (report_fork_events)
2033 options |= PTRACE_O_TRACEFORK;
2034
2035 if (report_vfork_events)
2036 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2037
2038 return options;
2039 }
2040
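/* Editor's sketch (#if 0, illustrative only): how an option mask like
   the one built above is typically applied, and how an enabled
   extended event then surfaces. A PTRACE_EVENT_FORK stop arrives as a
   SIGTRAP whose upper status bits carry the event number, and
   PTRACE_GETEVENTMSG yields the new child's PID. TID is assumed to be
   a stopped, traced LWP; this is not the gdbserver code path itself. */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
enable_and_decode (pid_t tid)
{
  long options = PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK;
  int status;

  ptrace (PTRACE_SETOPTIONS, tid, NULL, (void *) options);
  ptrace (PTRACE_CONT, tid, NULL, NULL);
  waitpid (tid, &status, 0);

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    {
      unsigned long new_pid;

      /* The new child's PID rides along in the event message. */
      ptrace (PTRACE_GETEVENTMSG, tid, NULL, &new_pid);
      printf ("fork created LWP %lu\n", new_pid);
    }
}
#endif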
2041 /* Do low-level handling of the event, and check if we should go on
2042 and pass it to caller code. Return the affected LWP if we should,
2043 or NULL otherwise. */
2044
2045 static struct lwp_info *
2046 linux_low_filter_event (int lwpid, int wstat)
2047 {
2048 struct lwp_info *child;
2049 struct thread_info *thread;
2050 int have_stop_pc = 0;
2051
2052 child = find_lwp_pid (pid_to_ptid (lwpid));
2053
2054 /* If we didn't find a process, one of two things presumably happened:
2055 - A process we started and then detached from has exited. Ignore it.
2056 - A process we are controlling has forked and the new child's stop
2057 was reported to us by the kernel. Save its PID. */
2058 if (child == NULL && WIFSTOPPED (wstat))
2059 {
2060 add_to_pid_list (&stopped_pids, lwpid, wstat);
2061 return NULL;
2062 }
2063 else if (child == NULL)
2064 return NULL;
2065
2066 thread = get_lwp_thread (child);
2067
2068 child->stopped = 1;
2069
2070 child->last_status = wstat;
2071
2072 /* Check if the thread has exited. */
2073 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2074 {
2075 if (debug_threads)
2076 debug_printf ("LLFE: %d exited.\n", lwpid);
2077 if (num_lwps (pid_of (thread)) > 1)
2078 {
2079
2080 /* If there is at least one more LWP, then the exit signal was
2081 not the end of the debugged application and should be
2082 ignored. */
2083 delete_lwp (child);
2084 return NULL;
2085 }
2086 else
2087 {
2088 /* This was the last lwp in the process. Events are
2089 serialized to the GDB core, and we can't report this one
2090 right now; since the core and the other target layers will
2091 want to be notified about the exit code/signal, leave the
2092 status pending for the next time we're able to report
2093 it. */
2094 mark_lwp_dead (child, wstat);
2095 return child;
2096 }
2097 }
2098
2099 gdb_assert (WIFSTOPPED (wstat));
2100
2101 if (WIFSTOPPED (wstat))
2102 {
2103 struct process_info *proc;
2104
2105 /* Architecture-specific setup after inferior is running. This
2106 needs to happen after we have attached to the inferior and it
2107 is stopped for the first time, but before we access any
2108 inferior registers. */
2109 proc = find_process_pid (pid_of (thread));
2110 if (proc->priv->new_inferior)
2111 {
2112 struct thread_info *saved_thread;
2113
2114 saved_thread = current_thread;
2115 current_thread = thread;
2116
2117 the_low_target.arch_setup ();
2118
2119 current_thread = saved_thread;
2120
2121 proc->priv->new_inferior = 0;
2122 }
2123 }
2124
2125 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2126 {
2127 struct process_info *proc = find_process_pid (pid_of (thread));
2128 int options = linux_low_ptrace_options (proc->attached);
2129
2130 linux_enable_event_reporting (lwpid, options);
2131 child->must_set_ptrace_flags = 0;
2132 }
2133
2134 /* Be careful to not overwrite stop_pc until
2135 check_stopped_by_breakpoint is called. */
2136 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2137 && linux_is_extended_waitstatus (wstat))
2138 {
2139 child->stop_pc = get_pc (child);
2140 if (handle_extended_wait (child, wstat))
2141 {
2142 /* The event has been handled, so just return without
2143 reporting it. */
2144 return NULL;
2145 }
2146 }
2147
2148 /* Check first whether this was a SW/HW breakpoint before checking
2149 watchpoints, because at least s390 can't tell the data address of
2150 hardware watchpoint hits, and returns stopped-by-watchpoint as
2151 long as there's a watchpoint set. */
2152 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2153 {
2154 if (check_stopped_by_breakpoint (child))
2155 have_stop_pc = 1;
2156 }
2157
2158 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2159 or hardware watchpoint. Check which is which if we got
2160 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2161 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2162 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2163 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2164 check_stopped_by_watchpoint (child);
2165
2166 if (!have_stop_pc)
2167 child->stop_pc = get_pc (child);
2168
2169 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2170 && child->stop_expected)
2171 {
2172 if (debug_threads)
2173 debug_printf ("Expected stop.\n");
2174 child->stop_expected = 0;
2175
2176 if (thread->last_resume_kind == resume_stop)
2177 {
2178 /* We want to report the stop to the core. Treat the
2179 SIGSTOP as a normal event. */
2180 if (debug_threads)
2181 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2182 target_pid_to_str (ptid_of (thread)));
2183 }
2184 else if (stopping_threads != NOT_STOPPING_THREADS)
2185 {
2186 /* Stopping threads. We don't want this SIGSTOP to end up
2187 pending. */
2188 if (debug_threads)
2189 debug_printf ("LLW: SIGSTOP caught for %s "
2190 "while stopping threads.\n",
2191 target_pid_to_str (ptid_of (thread)));
2192 return NULL;
2193 }
2194 else
2195 {
2196 /* This is a delayed SIGSTOP. Filter out the event. */
2197 if (debug_threads)
2198 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2199 child->stepping ? "step" : "continue",
2200 target_pid_to_str (ptid_of (thread)));
2201
2202 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2203 return NULL;
2204 }
2205 }
2206
2207 child->status_pending_p = 1;
2208 child->status_pending = wstat;
2209 return child;
2210 }
2211
2212 /* Resume LWPs that are currently stopped without any pending status
2213 to report, but are resumed from the core's perspective. */
2214
2215 static void
2216 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2217 {
2218 struct thread_info *thread = (struct thread_info *) entry;
2219 struct lwp_info *lp = get_thread_lwp (thread);
2220
2221 if (lp->stopped
2222 && !lp->status_pending_p
2223 && thread->last_resume_kind != resume_stop
2224 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2225 {
2226 int step = thread->last_resume_kind == resume_step;
2227
2228 if (debug_threads)
2229 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2230 target_pid_to_str (ptid_of (thread)),
2231 paddress (lp->stop_pc),
2232 step);
2233
2234 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2235 }
2236 }
2237
2238 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2239 match FILTER_PTID (leaving others pending). The PTIDs can be:
2240 minus_one_ptid, to specify any child; a pid PTID, specifying all
2241 lwps of a thread group; or a PTID representing a single lwp. Store
2242 the stop status through the status pointer WSTAT. OPTIONS is
2243 passed to the waitpid call. Return 0 if no event was found and
2244 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2245 were found. Return the PID of the stopped child otherwise. */
2246
2247 static int
2248 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2249 int *wstatp, int options)
2250 {
2251 struct thread_info *event_thread;
2252 struct lwp_info *event_child, *requested_child;
2253 sigset_t block_mask, prev_mask;
2254
2255 retry:
2256 /* N.B. event_thread points to the thread_info struct that contains
2257 event_child. Keep them in sync. */
2258 event_thread = NULL;
2259 event_child = NULL;
2260 requested_child = NULL;
2261
2262 /* Check for a lwp with a pending status. */
2263
2264 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2265 {
2266 event_thread = (struct thread_info *)
2267 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2268 if (event_thread != NULL)
2269 event_child = get_thread_lwp (event_thread);
2270 if (debug_threads && event_thread)
2271 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2272 }
2273 else if (!ptid_equal (filter_ptid, null_ptid))
2274 {
2275 requested_child = find_lwp_pid (filter_ptid);
2276
2277 if (stopping_threads == NOT_STOPPING_THREADS
2278 && requested_child->status_pending_p
2279 && requested_child->collecting_fast_tracepoint)
2280 {
2281 enqueue_one_deferred_signal (requested_child,
2282 &requested_child->status_pending);
2283 requested_child->status_pending_p = 0;
2284 requested_child->status_pending = 0;
2285 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2286 }
2287
2288 if (requested_child->suspended
2289 && requested_child->status_pending_p)
2290 {
2291 internal_error (__FILE__, __LINE__,
2292 "requesting an event out of a"
2293 " suspended child?");
2294 }
2295
2296 if (requested_child->status_pending_p)
2297 {
2298 event_child = requested_child;
2299 event_thread = get_lwp_thread (event_child);
2300 }
2301 }
2302
2303 if (event_child != NULL)
2304 {
2305 if (debug_threads)
2306 debug_printf ("Got an event from pending child %ld (%04x)\n",
2307 lwpid_of (event_thread), event_child->status_pending);
2308 *wstatp = event_child->status_pending;
2309 event_child->status_pending_p = 0;
2310 event_child->status_pending = 0;
2311 current_thread = event_thread;
2312 return lwpid_of (event_thread);
2313 }
2314
2315 /* But if we don't find a pending event, we'll have to wait.
2316
2317 We only enter this loop if no process has a pending wait status.
2318 Thus any action taken in response to a wait status inside this
2319 loop is responding as soon as we detect the status, not after any
2320 pending events. */
2321
2322 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2323 all signals while here. */
2324 sigfillset (&block_mask);
2325 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2326
2327 /* Always pull all events out of the kernel. We'll randomly select
2328 an event LWP out of all that have events, to prevent
2329 starvation. */
2330 while (event_child == NULL)
2331 {
2332 pid_t ret = 0;
2333
2334 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2335 quirks:
2336
2337 - If the thread group leader exits while other threads in the
2338 thread group still exist, waitpid(TGID, ...) hangs. That
2339 waitpid won't return an exit status until the other threads
2340 in the group are reaped.
2341
2342 - When a non-leader thread execs, that thread just vanishes
2343 without reporting an exit (so we'd hang if we waited for it
2344 explicitly in that case). The exec event is reported to
2345 the TGID pid (although we don't currently enable exec
2346 events). */
2347 errno = 0;
2348 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2349
2350 if (debug_threads)
2351 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2352 ret, errno ? strerror (errno) : "ERRNO-OK");
2353
2354 if (ret > 0)
2355 {
2356 if (debug_threads)
2357 {
2358 debug_printf ("LLW: waitpid %ld received %s\n",
2359 (long) ret, status_to_str (*wstatp));
2360 }
2361
2362 /* Filter all events. IOW, leave all events pending. We'll
2363 randomly select an event LWP out of all that have events
2364 below. */
2365 linux_low_filter_event (ret, *wstatp);
2366 /* Retry until nothing comes out of waitpid. A single
2367 SIGCHLD can indicate more than one child stopped. */
2368 continue;
2369 }
2370
2371 /* Now that we've pulled all events out of the kernel, resume
2372 LWPs that don't have an interesting event to report. */
2373 if (stopping_threads == NOT_STOPPING_THREADS)
2374 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2375
2376 /* ... and find an LWP with a status to report to the core, if
2377 any. */
2378 event_thread = (struct thread_info *)
2379 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2380 if (event_thread != NULL)
2381 {
2382 event_child = get_thread_lwp (event_thread);
2383 *wstatp = event_child->status_pending;
2384 event_child->status_pending_p = 0;
2385 event_child->status_pending = 0;
2386 break;
2387 }
2388
2389 /* Check for zombie thread group leaders. Those can't be reaped
2390 until all other threads in the thread group are. */
2391 check_zombie_leaders ();
2392
2393 /* If there are no resumed children left in the set of LWPs we
2394 want to wait for, bail. We can't just block in
2395 waitpid/sigsuspend, because lwps might have been left stopped
2396 in trace-stop state, and we'd be stuck forever waiting for
2397 their status to change (which would only happen if we resumed
2398 them). Even if WNOHANG is set, this return code is preferred
2399 over 0 (below), as it is more detailed. */
2400 if ((find_inferior (&all_threads,
2401 not_stopped_callback,
2402 &wait_ptid) == NULL))
2403 {
2404 if (debug_threads)
2405 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2406 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2407 return -1;
2408 }
2409
2410 /* No interesting event to report to the caller. */
2411 if ((options & WNOHANG))
2412 {
2413 if (debug_threads)
2414 debug_printf ("WNOHANG set, no event found\n");
2415
2416 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2417 return 0;
2418 }
2419
2420 /* Block until we get an event reported with SIGCHLD. */
2421 if (debug_threads)
2422 debug_printf ("sigsuspend'ing\n");
2423
2424 sigsuspend (&prev_mask);
2425 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2426 goto retry;
2427 }
2428
2429 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2430
2431 current_thread = event_thread;
2432
2433 /* Check for thread exit. */
2434 if (! WIFSTOPPED (*wstatp))
2435 {
2436 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2437
2438 if (debug_threads)
2439 debug_printf ("LWP %d is the last lwp of process. "
2440 "Process %ld exiting.\n",
2441 pid_of (event_thread), lwpid_of (event_thread));
2442 return lwpid_of (event_thread);
2443 }
2444
2445 return lwpid_of (event_thread);
2446 }
2447
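/* Editor's sketch (#if 0): the kernel quirks described above force the
   "drain everything with waitpid (-1, ..., WNOHANG), then sleep in
   sigsuspend" shape of the loop in linux_wait_for_event_filtered. A
   minimal standalone model, with RECORD standing in for
   linux_low_filter_event; one SIGCHLD may announce many stopped
   children, hence the inner loop. The real code also passes __WALL so
   clone LWPs are visible. */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
drain_then_sleep (void (*record) (pid_t, int))
{
  sigset_t block_mask, prev_mask;
  int status;
  pid_t pid;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      while ((pid = waitpid (-1, &status, WNOHANG)) > 0)
	record (pid, status);	/* leave each event pending */

      if (pid == -1 && errno == ECHILD)
	break;			/* no children left to wait for */

      sigsuspend (&prev_mask);	/* sleep until the next SIGCHLD */
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
}
#endif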
2448 /* Wait for an event from child(ren) PTID. PTIDs can be:
2449 minus_one_ptid, to specify any child; a pid PTID, specifying all
2450 lwps of a thread group; or a PTID representing a single lwp. Store
2451 the stop status through the status pointer WSTAT. OPTIONS is
2452 passed to the waitpid call. Return 0 if no event was found and
2453 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2454 were found. Return the PID of the stopped child otherwise. */
2455
2456 static int
2457 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2458 {
2459 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2460 }
2461
2462 /* Count the LWPs that have had events. */
2463
2464 static int
2465 count_events_callback (struct inferior_list_entry *entry, void *data)
2466 {
2467 struct thread_info *thread = (struct thread_info *) entry;
2468 struct lwp_info *lp = get_thread_lwp (thread);
2469 int *count = data;
2470
2471 gdb_assert (count != NULL);
2472
2473 /* Count only resumed LWPs that have an event pending. */
2474 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2475 && lp->status_pending_p)
2476 (*count)++;
2477
2478 return 0;
2479 }
2480
2481 /* Select the LWP (if any) that is currently being single-stepped. */
2482
2483 static int
2484 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2485 {
2486 struct thread_info *thread = (struct thread_info *) entry;
2487 struct lwp_info *lp = get_thread_lwp (thread);
2488
2489 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2490 && thread->last_resume_kind == resume_step
2491 && lp->status_pending_p)
2492 return 1;
2493 else
2494 return 0;
2495 }
2496
2497 /* Select the Nth LWP that has had an event. */
2498
2499 static int
2500 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2501 {
2502 struct thread_info *thread = (struct thread_info *) entry;
2503 struct lwp_info *lp = get_thread_lwp (thread);
2504 int *selector = data;
2505
2506 gdb_assert (selector != NULL);
2507
2508 /* Select only resumed LWPs that have an event pending. */
2509 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2510 && lp->status_pending_p)
2511 if ((*selector)-- == 0)
2512 return 1;
2513
2514 return 0;
2515 }
2516
2517 /* Select one LWP out of those that have events pending. */
2518
2519 static void
2520 select_event_lwp (struct lwp_info **orig_lp)
2521 {
2522 int num_events = 0;
2523 int random_selector;
2524 struct thread_info *event_thread = NULL;
2525
2526 /* In all-stop, give preference to the LWP that is being
2527 single-stepped. There will be at most one, and it's the LWP that
2528 the core is most interested in. If we didn't do this, then we'd
2529 have to handle pending step SIGTRAPs somehow in case the core
2530 later continues the previously-stepped thread, otherwise we'd
2531 report the pending SIGTRAP, and the core, not having stepped the
2532 thread, wouldn't understand what the trap was for, and therefore
2533 would report it to the user as a random signal. */
2534 if (!non_stop)
2535 {
2536 event_thread
2537 = (struct thread_info *) find_inferior (&all_threads,
2538 select_singlestep_lwp_callback,
2539 NULL);
2540 if (event_thread != NULL)
2541 {
2542 if (debug_threads)
2543 debug_printf ("SEL: Select single-step %s\n",
2544 target_pid_to_str (ptid_of (event_thread)));
2545 }
2546 }
2547 if (event_thread == NULL)
2548 {
2549 /* No single-stepping LWP. Select one at random, out of those
2550 which have had events. */
2551
2552 /* First see how many events we have. */
2553 find_inferior (&all_threads, count_events_callback, &num_events);
2554 gdb_assert (num_events > 0);
2555
2556 /* Now randomly pick a LWP out of those that have had
2557 events. */
2558 random_selector = (int)
2559 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2560
2561 if (debug_threads && num_events > 1)
2562 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2563 num_events, random_selector);
2564
2565 event_thread
2566 = (struct thread_info *) find_inferior (&all_threads,
2567 select_event_lwp_callback,
2568 &random_selector);
2569 }
2570
2571 if (event_thread != NULL)
2572 {
2573 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2574
2575 /* Switch the event LWP. */
2576 *orig_lp = event_lp;
2577 }
2578 }
2579
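/* Editor's note with a standalone check (#if 0): the scaled expression
   above, (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0)),
   maps rand ()'s full range onto [0, num_events) and, unlike the naive
   rand () % num_events, does not pile the remainder bias onto the
   smallest indices. The bounds are easy to verify: */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
pick (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}

int
main (void)
{
  int i;

  for (i = 0; i < 100000; i++)
    {
      int k = pick (7);

      assert (k >= 0 && k < 7);	/* never reaches n itself */
    }
  return 0;
}
#endif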
2580 /* Decrement the suspend count of an LWP. */
2581
2582 static int
2583 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2584 {
2585 struct thread_info *thread = (struct thread_info *) entry;
2586 struct lwp_info *lwp = get_thread_lwp (thread);
2587
2588 /* Ignore EXCEPT. */
2589 if (lwp == except)
2590 return 0;
2591
2592 lwp->suspended--;
2593
2594 gdb_assert (lwp->suspended >= 0);
2595 return 0;
2596 }
2597
2598 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2599 non-NULL. */
2600
2601 static void
2602 unsuspend_all_lwps (struct lwp_info *except)
2603 {
2604 find_inferior (&all_threads, unsuspend_one_lwp, except);
2605 }
2606
2607 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2608 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2609 void *data);
2610 static int lwp_running (struct inferior_list_entry *entry, void *data);
2611 static ptid_t linux_wait_1 (ptid_t ptid,
2612 struct target_waitstatus *ourstatus,
2613 int target_options);
2614
2615 /* Stabilize threads (move out of jump pads).
2616
2617 If a thread is midway through collecting a fast tracepoint, we
2618 need to finish the collection and move it out of the jump pad
2619 before reporting the signal.
2620
2621 This avoids recursion while collecting (when a signal arrives
2622 midway, and the signal handler itself collects), which would trash
2623 the trace buffer. In case the user set a breakpoint in a signal
2624 handler, this avoids the backtrace showing the jump pad, etc.
2625 Most importantly, there are certain things we can't do safely if
2626 threads are stopped in a jump pad (or in its callees). For
2627 example:
2628
2629 - starting a new trace run. A thread still collecting the
2630 previous run could trash the trace buffer when resumed. The trace
2631 buffer control structures would have been reset but the thread had
2632 no way to tell. The thread could even be midway through memcpy'ing
2633 to the buffer, which, when resumed, would clobber the trace buffer
2634 that had been set up for the new run.
2635
2636 - we can't rewrite/reuse the jump pads for new tracepoints
2637 safely. Say you do tstart while a thread is stopped midway through
2638 collecting. When the thread is later resumed, it finishes the
2639 collection and returns to the jump pad, to execute the original
2640 instruction that was under the tracepoint jump at the time the
2641 older run was started. If the jump pad has since been rewritten
2642 for something else in the new run, the thread would now execute
2643 the wrong / random instructions. */
2644
2645 static void
2646 linux_stabilize_threads (void)
2647 {
2648 struct thread_info *saved_thread;
2649 struct thread_info *thread_stuck;
2650
2651 thread_stuck
2652 = (struct thread_info *) find_inferior (&all_threads,
2653 stuck_in_jump_pad_callback,
2654 NULL);
2655 if (thread_stuck != NULL)
2656 {
2657 if (debug_threads)
2658 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2659 lwpid_of (thread_stuck));
2660 return;
2661 }
2662
2663 saved_thread = current_thread;
2664
2665 stabilizing_threads = 1;
2666
2667 /* Kick 'em all. */
2668 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2669
2670 /* Loop until all are stopped out of the jump pads. */
2671 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2672 {
2673 struct target_waitstatus ourstatus;
2674 struct lwp_info *lwp;
2675 int wstat;
2676
2677 /* Note that we go through the full wait event loop. While
2678 moving threads out of the jump pad, we need to be able to
2679 step over internal breakpoints and such. */
2680 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2681
2682 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2683 {
2684 lwp = get_thread_lwp (current_thread);
2685
2686 /* Lock it. */
2687 lwp->suspended++;
2688
2689 if (ourstatus.value.sig != GDB_SIGNAL_0
2690 || current_thread->last_resume_kind == resume_stop)
2691 {
2692 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2693 enqueue_one_deferred_signal (lwp, &wstat);
2694 }
2695 }
2696 }
2697
2698 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2699
2700 stabilizing_threads = 0;
2701
2702 current_thread = saved_thread;
2703
2704 if (debug_threads)
2705 {
2706 thread_stuck
2707 = (struct thread_info *) find_inferior (&all_threads,
2708 stuck_in_jump_pad_callback,
2709 NULL);
2710 if (thread_stuck != NULL)
2711 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2712 lwpid_of (thread_stuck));
2713 }
2714 }
2715
2716 static void async_file_mark (void);
2717
2718 /* Convenience function that is called when the kernel reports an
2719 event that is not passed out to GDB. */
2720
2721 static ptid_t
2722 ignore_event (struct target_waitstatus *ourstatus)
2723 {
2724 /* If we got an event, there may still be others, as a single
2725 SIGCHLD can indicate more than one child stopped. This forces
2726 another target_wait call. */
2727 async_file_mark ();
2728
2729 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2730 return null_ptid;
2731 }
2732
2733 /* Return non-zero if WAITSTATUS reflects an extended Linux
2734 event. Otherwise, return zero. */
2735
2736 static int
2737 extended_event_reported (const struct target_waitstatus *waitstatus)
2738 {
2739 if (waitstatus == NULL)
2740 return 0;
2741
2742 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2743 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2744 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2745 }
2746
2747 /* Wait for process, returns status. */
2748
2749 static ptid_t
2750 linux_wait_1 (ptid_t ptid,
2751 struct target_waitstatus *ourstatus, int target_options)
2752 {
2753 int w;
2754 struct lwp_info *event_child;
2755 int options;
2756 int pid;
2757 int step_over_finished;
2758 int bp_explains_trap;
2759 int maybe_internal_trap;
2760 int report_to_gdb;
2761 int trace_event;
2762 int in_step_range;
2763
2764 if (debug_threads)
2765 {
2766 debug_enter ();
2767 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2768 }
2769
2770 /* Translate generic target options into linux options. */
2771 options = __WALL;
2772 if (target_options & TARGET_WNOHANG)
2773 options |= WNOHANG;
2774
2775 bp_explains_trap = 0;
2776 trace_event = 0;
2777 in_step_range = 0;
2778 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2779
2780 if (ptid_equal (step_over_bkpt, null_ptid))
2781 pid = linux_wait_for_event (ptid, &w, options);
2782 else
2783 {
2784 if (debug_threads)
2785 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2786 target_pid_to_str (step_over_bkpt));
2787 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2788 }
2789
2790 if (pid == 0)
2791 {
2792 gdb_assert (target_options & TARGET_WNOHANG);
2793
2794 if (debug_threads)
2795 {
2796 debug_printf ("linux_wait_1 ret = null_ptid, "
2797 "TARGET_WAITKIND_IGNORE\n");
2798 debug_exit ();
2799 }
2800
2801 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2802 return null_ptid;
2803 }
2804 else if (pid == -1)
2805 {
2806 if (debug_threads)
2807 {
2808 debug_printf ("linux_wait_1 ret = null_ptid, "
2809 "TARGET_WAITKIND_NO_RESUMED\n");
2810 debug_exit ();
2811 }
2812
2813 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2814 return null_ptid;
2815 }
2816
2817 event_child = get_thread_lwp (current_thread);
2818
2819 /* linux_wait_for_event only returns an exit status for the last
2820 child of a process. Report it. */
2821 if (WIFEXITED (w) || WIFSIGNALED (w))
2822 {
2823 if (WIFEXITED (w))
2824 {
2825 ourstatus->kind = TARGET_WAITKIND_EXITED;
2826 ourstatus->value.integer = WEXITSTATUS (w);
2827
2828 if (debug_threads)
2829 {
2830 debug_printf ("linux_wait_1 ret = %s, exited with "
2831 "retcode %d\n",
2832 target_pid_to_str (ptid_of (current_thread)),
2833 WEXITSTATUS (w));
2834 debug_exit ();
2835 }
2836 }
2837 else
2838 {
2839 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2840 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2841
2842 if (debug_threads)
2843 {
2844 debug_printf ("linux_wait_1 ret = %s, terminated with "
2845 "signal %d\n",
2846 target_pid_to_str (ptid_of (current_thread)),
2847 WTERMSIG (w));
2848 debug_exit ();
2849 }
2850 }
2851
2852 return ptid_of (current_thread);
2853 }
2854
2855 /* If step-over executes a breakpoint instruction, it means a
2856 gdb/gdbserver breakpoint had been planted on top of a permanent
2857 breakpoint. The PC has been adjusted by
2858 check_stopped_by_breakpoint to point at the breakpoint address.
2859 Advance the PC manually past the breakpoint, otherwise the
2860 program would keep trapping the permanent breakpoint forever. */
2861 if (!ptid_equal (step_over_bkpt, null_ptid)
2862 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2863 {
2864 unsigned int increment_pc = the_low_target.breakpoint_len;
2865
2866 if (debug_threads)
2867 {
2868 debug_printf ("step-over for %s executed software breakpoint\n",
2869 target_pid_to_str (ptid_of (current_thread)));
2870 }
2871
2872 if (increment_pc != 0)
2873 {
2874 struct regcache *regcache
2875 = get_thread_regcache (current_thread, 1);
2876
2877 event_child->stop_pc += increment_pc;
2878 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2879
2880 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2881 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2882 }
2883 }
2884
2885 /* If this event was not handled before, and is not a SIGTRAP, we
2886 report it. SIGILL and SIGSEGV are also treated as traps in case
2887 a breakpoint is inserted at the current PC. If this target does
2888 not support internal breakpoints at all, we also report the
2889 SIGTRAP without further processing; it's of no concern to us. */
2890 maybe_internal_trap
2891 = (supports_breakpoints ()
2892 && (WSTOPSIG (w) == SIGTRAP
2893 || ((WSTOPSIG (w) == SIGILL
2894 || WSTOPSIG (w) == SIGSEGV)
2895 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2896
2897 if (maybe_internal_trap)
2898 {
2899 /* Handle anything that requires bookkeeping before deciding to
2900 report the event or continue waiting. */
2901
2902 /* First check if we can explain the SIGTRAP with an internal
2903 breakpoint, or if we should possibly report the event to GDB.
2904 Do this before anything that may remove or insert a
2905 breakpoint. */
2906 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2907
2908 /* We have a SIGTRAP, possibly a step-over dance has just
2909 finished. If so, tweak the state machine accordingly,
2910 reinsert breakpoints and delete any reinsert (software
2911 single-step) breakpoints. */
2912 step_over_finished = finish_step_over (event_child);
2913
2914 /* Now invoke the callbacks of any internal breakpoints there. */
2915 check_breakpoints (event_child->stop_pc);
2916
2917 /* Handle tracepoint data collecting. This may overflow the
2918 trace buffer, and cause a tracing stop, removing
2919 breakpoints. */
2920 trace_event = handle_tracepoints (event_child);
2921
2922 if (bp_explains_trap)
2923 {
2924 /* If we stepped or ran into an internal breakpoint, we've
2925 already handled it. So next time we resume (from this
2926 PC), we should step over it. */
2927 if (debug_threads)
2928 debug_printf ("Hit a gdbserver breakpoint.\n");
2929
2930 if (breakpoint_here (event_child->stop_pc))
2931 event_child->need_step_over = 1;
2932 }
2933 }
2934 else
2935 {
2936 /* We have some other signal, possibly a step-over dance was in
2937 progress, and it should be cancelled too. */
2938 step_over_finished = finish_step_over (event_child);
2939 }
2940
2941 /* We have all the data we need. Either report the event to GDB, or
2942 resume threads and keep waiting for more. */
2943
2944 /* If we're collecting a fast tracepoint, finish the collection and
2945 move out of the jump pad before delivering a signal. See
2946 linux_stabilize_threads. */
2947
2948 if (WIFSTOPPED (w)
2949 && WSTOPSIG (w) != SIGTRAP
2950 && supports_fast_tracepoints ()
2951 && agent_loaded_p ())
2952 {
2953 if (debug_threads)
2954 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2955 "to defer or adjust it.\n",
2956 WSTOPSIG (w), lwpid_of (current_thread));
2957
2958 /* Allow debugging the jump pad itself. */
2959 if (current_thread->last_resume_kind != resume_step
2960 && maybe_move_out_of_jump_pad (event_child, &w))
2961 {
2962 enqueue_one_deferred_signal (event_child, &w);
2963
2964 if (debug_threads)
2965 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2966 WSTOPSIG (w), lwpid_of (current_thread));
2967
2968 linux_resume_one_lwp (event_child, 0, 0, NULL);
2969
2970 return ignore_event (ourstatus);
2971 }
2972 }
2973
2974 if (event_child->collecting_fast_tracepoint)
2975 {
2976 if (debug_threads)
2977 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2978 "Check if we're already there.\n",
2979 lwpid_of (current_thread),
2980 event_child->collecting_fast_tracepoint);
2981
2982 trace_event = 1;
2983
2984 event_child->collecting_fast_tracepoint
2985 = linux_fast_tracepoint_collecting (event_child, NULL);
2986
2987 if (event_child->collecting_fast_tracepoint != 1)
2988 {
2989 /* No longer need this breakpoint. */
2990 if (event_child->exit_jump_pad_bkpt != NULL)
2991 {
2992 if (debug_threads)
2993 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2994 "stopping all threads momentarily.\n");
2995
2996 /* Other running threads could hit this breakpoint.
2997 We don't handle moribund locations like GDB does,
2998 instead we always pause all threads when removing
2999 breakpoints, so that any step-over or
3000 decr_pc_after_break adjustment is always taken
3001 care of while the breakpoint is still
3002 inserted. */
3003 stop_all_lwps (1, event_child);
3004
3005 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3006 event_child->exit_jump_pad_bkpt = NULL;
3007
3008 unstop_all_lwps (1, event_child);
3009
3010 gdb_assert (event_child->suspended >= 0);
3011 }
3012 }
3013
3014 if (event_child->collecting_fast_tracepoint == 0)
3015 {
3016 if (debug_threads)
3017 debug_printf ("fast tracepoint finished "
3018 "collecting successfully.\n");
3019
3020 /* We may have a deferred signal to report. */
3021 if (dequeue_one_deferred_signal (event_child, &w))
3022 {
3023 if (debug_threads)
3024 debug_printf ("dequeued one signal.\n");
3025 }
3026 else
3027 {
3028 if (debug_threads)
3029 debug_printf ("no deferred signals.\n");
3030
3031 if (stabilizing_threads)
3032 {
3033 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3034 ourstatus->value.sig = GDB_SIGNAL_0;
3035
3036 if (debug_threads)
3037 {
3038 debug_printf ("linux_wait_1 ret = %s, stopped "
3039 "while stabilizing threads\n",
3040 target_pid_to_str (ptid_of (current_thread)));
3041 debug_exit ();
3042 }
3043
3044 return ptid_of (current_thread);
3045 }
3046 }
3047 }
3048 }
3049
3050 /* Check whether GDB would be interested in this event. */
3051
3052 /* If GDB is not interested in this signal, don't stop other
3053 threads, and don't report it to GDB. Just resume the inferior
3054 right away. We do this for threading-related signals as well as
3055 any that GDB specifically requested we ignore. But never ignore
3056 SIGSTOP if we sent it ourselves, and do not ignore signals when
3057 stepping - they may require special handling to skip the signal
3058 handler. Also never ignore signals that could be caused by a
3059 breakpoint. */
3060 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3061 thread library? */
3062 if (WIFSTOPPED (w)
3063 && current_thread->last_resume_kind != resume_step
3064 && (
3065 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3066 (current_process ()->priv->thread_db != NULL
3067 && (WSTOPSIG (w) == __SIGRTMIN
3068 || WSTOPSIG (w) == __SIGRTMIN + 1))
3069 ||
3070 #endif
3071 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3072 && !(WSTOPSIG (w) == SIGSTOP
3073 && current_thread->last_resume_kind == resume_stop)
3074 && !linux_wstatus_maybe_breakpoint (w))))
3075 {
3076 siginfo_t info, *info_p;
3077
3078 if (debug_threads)
3079 debug_printf ("Ignored signal %d for LWP %ld.\n",
3080 WSTOPSIG (w), lwpid_of (current_thread));
3081
3082 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3083 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3084 info_p = &info;
3085 else
3086 info_p = NULL;
3087 linux_resume_one_lwp (event_child, event_child->stepping,
3088 WSTOPSIG (w), info_p);
3089 return ignore_event (ourstatus);
3090 }
3091
3092 /* Note that all addresses are always "out of the step range" when
3093 there's no range to begin with. */
3094 in_step_range = lwp_in_step_range (event_child);
3095
3096 /* If GDB wanted this thread to single step, and the thread is out
3097 of the step range, we always want to report the SIGTRAP, and let
3098 GDB handle it. Watchpoints should always be reported. So should
3099 signals we can't explain. A SIGTRAP we can't explain could be a
3100 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3101 we do, we'll be able to handle GDB breakpoints on top of internal
3102 breakpoints, by handling the internal breakpoint and still
3103 reporting the event to GDB. If we don't, we're out of luck; GDB
3104 won't see the breakpoint hit. */
3105 report_to_gdb = (!maybe_internal_trap
3106 || (current_thread->last_resume_kind == resume_step
3107 && !in_step_range)
3108 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3109 || (!step_over_finished && !in_step_range
3110 && !bp_explains_trap && !trace_event)
3111 || (gdb_breakpoint_here (event_child->stop_pc)
3112 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3113 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3114 || extended_event_reported (&event_child->waitstatus));
3115
3116 run_breakpoint_commands (event_child->stop_pc);
3117
3118 /* We found no reason GDB would want us to stop. We either hit one
3119 of our own breakpoints, or finished an internal step GDB
3120 shouldn't know about. */
3121 if (!report_to_gdb)
3122 {
3123 if (debug_threads)
3124 {
3125 if (bp_explains_trap)
3126 debug_printf ("Hit a gdbserver breakpoint.\n");
3127 if (step_over_finished)
3128 debug_printf ("Step-over finished.\n");
3129 if (trace_event)
3130 debug_printf ("Tracepoint event.\n");
3131 if (lwp_in_step_range (event_child))
3132 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3133 paddress (event_child->stop_pc),
3134 paddress (event_child->step_range_start),
3135 paddress (event_child->step_range_end));
3136 if (extended_event_reported (&event_child->waitstatus))
3137 {
3138 char *str = target_waitstatus_to_string (ourstatus);
3139 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3140 lwpid_of (get_lwp_thread (event_child)), str);
3141 xfree (str);
3142 }
3143 }
3144
3145 /* We're not reporting this breakpoint to GDB, so apply the
3146 decr_pc_after_break adjustment to the inferior's regcache
3147 ourselves. */
3148
3149 if (the_low_target.set_pc != NULL)
3150 {
3151 struct regcache *regcache
3152 = get_thread_regcache (current_thread, 1);
3153 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3154 }
3155
3156 /* We may have finished stepping over a breakpoint. If so,
3157 we've stopped and suspended all LWPs momentarily except the
3158 stepping one. This is where we resume them all again. We're
3159 going to keep waiting, so use proceed, which handles stepping
3160 over the next breakpoint. */
3161 if (debug_threads)
3162 debug_printf ("proceeding all threads.\n");
3163
3164 if (step_over_finished)
3165 unsuspend_all_lwps (event_child);
3166
3167 proceed_all_lwps ();
3168 return ignore_event (ourstatus);
3169 }
3170
3171 if (debug_threads)
3172 {
3173 if (current_thread->last_resume_kind == resume_step)
3174 {
3175 if (event_child->step_range_start == event_child->step_range_end)
3176 debug_printf ("GDB wanted to single-step, reporting event.\n");
3177 else if (!lwp_in_step_range (event_child))
3178 debug_printf ("Out of step range, reporting event.\n");
3179 }
3180 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3181 debug_printf ("Stopped by watchpoint.\n");
3182 else if (gdb_breakpoint_here (event_child->stop_pc))
3183 debug_printf ("Stopped by GDB breakpoint.\n");
3184 if (debug_threads)
3185 debug_printf ("Hit a non-gdbserver trap event.\n");
3186 }
3187
3188 /* Alright, we're going to report a stop. */
3189
3190 if (!stabilizing_threads)
3191 {
3192 /* In all-stop, stop all threads. */
3193 if (!non_stop)
3194 stop_all_lwps (0, NULL);
3195
3196 /* If we're not waiting for a specific LWP, choose an event LWP
3197 from among those that have had events. Giving equal priority
3198 to all LWPs that have had events helps prevent
3199 starvation. */
3200 if (ptid_equal (ptid, minus_one_ptid))
3201 {
3202 event_child->status_pending_p = 1;
3203 event_child->status_pending = w;
3204
3205 select_event_lwp (&event_child);
3206
3207 /* current_thread and event_child must stay in sync. */
3208 current_thread = get_lwp_thread (event_child);
3209
3210 event_child->status_pending_p = 0;
3211 w = event_child->status_pending;
3212 }
3213
3214 if (step_over_finished)
3215 {
3216 if (!non_stop)
3217 {
3218 /* If we were doing a step-over, all other threads but
3219 the stepping one had been paused in start_step_over,
3220 with their suspend counts incremented. We don't want
3221 to do a full unstop/unpause, because we're in
3222 all-stop mode (so we want threads stopped), but we
3223 still need to unsuspend the other threads, to
3224 decrement their `suspended' count back. */
3225 unsuspend_all_lwps (event_child);
3226 }
3227 else
3228 {
3229 /* If we just finished a step-over, then all threads had
3230 been momentarily paused. In all-stop, that's fine,
3231 we want threads stopped by now anyway. In non-stop,
3232 we need to re-resume threads that GDB wanted to be
3233 running. */
3234 unstop_all_lwps (1, event_child);
3235 }
3236 }
3237
3238 /* Stabilize threads (move out of jump pads). */
3239 if (!non_stop)
3240 stabilize_threads ();
3241 }
3242 else
3243 {
3244 /* If we just finished a step-over, then all threads had been
3245 momentarily paused. In all-stop, that's fine, we want
3246 threads stopped by now anyway. In non-stop, we need to
3247 re-resume threads that GDB wanted to be running. */
3248 if (step_over_finished)
3249 unstop_all_lwps (1, event_child);
3250 }
3251
3252 if (extended_event_reported (&event_child->waitstatus))
3253 {
3254 /* If the reported event is a fork, vfork or exec, let GDB know. */
3255 ourstatus->kind = event_child->waitstatus.kind;
3256 ourstatus->value = event_child->waitstatus.value;
3257
3258 /* Clear the event lwp's waitstatus since we handled it already. */
3259 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3260 }
3261 else
3262 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3263
3264 /* Now that we've selected our final event LWP, un-adjust its PC if
3265 it was a software breakpoint, and the client doesn't know we can
3266 adjust the breakpoint ourselves. */
3267 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3268 && !swbreak_feature)
3269 {
3270 int decr_pc = the_low_target.decr_pc_after_break;
3271
3272 if (decr_pc != 0)
3273 {
3274 struct regcache *regcache
3275 = get_thread_regcache (current_thread, 1);
3276 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3277 }
3278 }
3279
3280 if (current_thread->last_resume_kind == resume_stop
3281 && WSTOPSIG (w) == SIGSTOP)
3282 {
3283 /* A thread that GDB requested to stop with vCont;t stopped
3284 cleanly, so report it as SIG0. The use of SIGSTOP is an
3285 implementation detail. */
3286 ourstatus->value.sig = GDB_SIGNAL_0;
3287 }
3288 else if (current_thread->last_resume_kind == resume_stop
3289 && WSTOPSIG (w) != SIGSTOP)
3290 {
3291 /* A thread that GDB requested to stop with vCont;t, but which
3292 stopped for some other reason. */
3293 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3294 }
3295 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3296 {
3297 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3298 }
3299
3300 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3301
3302 if (debug_threads)
3303 {
3304 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3305 target_pid_to_str (ptid_of (current_thread)),
3306 ourstatus->kind, ourstatus->value.sig);
3307 debug_exit ();
3308 }
3309
3310 return ptid_of (current_thread);
3311 }
3312
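/* Editor's worked example (#if 0; hypothetical x86 numbers): the PC
   un-adjustment near the end of linux_wait_1. On x86 the breakpoint
   instruction (int3, 0xCC) is one byte, so after it traps the kernel
   reports PC = bp_addr + 1; check_stopped_by_breakpoint rewinds by
   decr_pc_after_break, and if the client did not advertise the
   swbreak feature, the raw PC must be restored before reporting,
   because the client will subtract decr_pc_after_break itself. */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned long bp_addr = 0x400500;	/* where the 0xCC byte lives */
  unsigned long pc_after_trap = bp_addr + 1;
  int decr_pc_after_break = 1;
  int swbreak_feature = 0;
  unsigned long stop_pc, reported_pc;

  /* What check_stopped_by_breakpoint does internally. */
  stop_pc = pc_after_trap - decr_pc_after_break;
  assert (stop_pc == bp_addr);

  /* What linux_wait_1 does for an old client. */
  reported_pc = (swbreak_feature
		 ? stop_pc
		 : stop_pc + decr_pc_after_break);
  assert (reported_pc == pc_after_trap);
  return 0;
}
#endif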
3313 /* Get rid of any pending event in the pipe. */
3314 static void
3315 async_file_flush (void)
3316 {
3317 int ret;
3318 char buf;
3319
3320 do
3321 ret = read (linux_event_pipe[0], &buf, 1);
3322 while (ret >= 0 || (ret == -1 && errno == EINTR));
3323 }
3324
3325 /* Put something in the pipe, so the event loop wakes up. */
3326 static void
3327 async_file_mark (void)
3328 {
3329 int ret;
3330
3331 async_file_flush ();
3332
3333 do
3334 ret = write (linux_event_pipe[1], "+", 1);
3335 while (ret == 0 || (ret == -1 && errno == EINTR));
3336
3337 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3338 be awakened anyway. */
3339 }
3340
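/* Editor's sketch (#if 0): async_file_flush/async_file_mark are an
   instance of the "self-pipe trick" -- a nonblocking pipe whose read
   end sits in the event loop's poll set; writing one byte from any
   context wakes the loop. The names below are illustrative, not the
   real linux_event_pipe plumbing. */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int event_pipe[2];

static int
event_pipe_init (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

static void
mark (void)
{
  /* EAGAIN (pipe already full) is harmless: the loop is due to wake. */
  (void) write (event_pipe[1], "+", 1);
}

static void
wait_and_flush (void)
{
  struct pollfd pfd = { event_pipe[0], POLLIN, 0 };
  char buf;

  poll (&pfd, 1, -1);
  while (read (event_pipe[0], &buf, 1) > 0)
    continue;			/* drain every queued wakeup byte */
}
#endif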
3341 static ptid_t
3342 linux_wait (ptid_t ptid,
3343 struct target_waitstatus *ourstatus, int target_options)
3344 {
3345 ptid_t event_ptid;
3346
3347 /* Flush the async file first. */
3348 if (target_is_async_p ())
3349 async_file_flush ();
3350
3351 do
3352 {
3353 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3354 }
3355 while ((target_options & TARGET_WNOHANG) == 0
3356 && ptid_equal (event_ptid, null_ptid)
3357 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3358
3359 /* If at least one stop was reported, there may be more. A single
3360 SIGCHLD can signal more than one child stop. */
3361 if (target_is_async_p ()
3362 && (target_options & TARGET_WNOHANG) != 0
3363 && !ptid_equal (event_ptid, null_ptid))
3364 async_file_mark ();
3365
3366 return event_ptid;
3367 }
3368
3369 /* Send a signal to an LWP. */
3370
3371 static int
3372 kill_lwp (unsigned long lwpid, int signo)
3373 {
3374 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3375 fails, then we are not using nptl threads and we should be using kill. */
3376
3377 #ifdef __NR_tkill
3378 {
3379 static int tkill_failed;
3380
3381 if (!tkill_failed)
3382 {
3383 int ret;
3384
3385 errno = 0;
3386 ret = syscall (__NR_tkill, lwpid, signo);
3387 if (errno != ENOSYS)
3388 return ret;
3389 tkill_failed = 1;
3390 }
3391 }
3392 #endif
3393
3394 return kill (lwpid, signo);
3395 }
3396
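/* Editor's note with a sketch (#if 0): kill () addresses the whole
   thread group, so the kernel may deliver the signal to any thread in
   the process, whereas tkill ()/tgkill () pin delivery to one kernel
   thread -- exactly what per-LWP SIGSTOP control needs. TGID/TID are
   assumed to come from the caller; tgkill additionally checks the
   thread-group id, guarding against TID reuse after thread exit. */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
stop_exactly_one_thread (pid_t tgid, pid_t tid)
{
#ifdef SYS_tgkill
  return syscall (SYS_tgkill, tgid, tid, SIGSTOP);
#else
  return syscall (SYS_tkill, tid, SIGSTOP);
#endif
}
#endif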
3397 void
3398 linux_stop_lwp (struct lwp_info *lwp)
3399 {
3400 send_sigstop (lwp);
3401 }
3402
3403 static void
3404 send_sigstop (struct lwp_info *lwp)
3405 {
3406 int pid;
3407
3408 pid = lwpid_of (get_lwp_thread (lwp));
3409
3410 /* If we already have a pending stop signal for this process, don't
3411 send another. */
3412 if (lwp->stop_expected)
3413 {
3414 if (debug_threads)
3415 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3416
3417 return;
3418 }
3419
3420 if (debug_threads)
3421 debug_printf ("Sending sigstop to lwp %d\n", pid);
3422
3423 lwp->stop_expected = 1;
3424 kill_lwp (pid, SIGSTOP);
3425 }
3426
3427 static int
3428 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3429 {
3430 struct thread_info *thread = (struct thread_info *) entry;
3431 struct lwp_info *lwp = get_thread_lwp (thread);
3432
3433 /* Ignore EXCEPT. */
3434 if (lwp == except)
3435 return 0;
3436
3437 if (lwp->stopped)
3438 return 0;
3439
3440 send_sigstop (lwp);
3441 return 0;
3442 }
3443
3444 /* Increment the suspend count of an LWP, and stop it, if not stopped
3445 yet. */
3446 static int
3447 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3448 void *except)
3449 {
3450 struct thread_info *thread = (struct thread_info *) entry;
3451 struct lwp_info *lwp = get_thread_lwp (thread);
3452
3453 /* Ignore EXCEPT. */
3454 if (lwp == except)
3455 return 0;
3456
3457 lwp->suspended++;
3458
3459 return send_sigstop_callback (entry, except);
3460 }
3461
3462 static void
3463 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3464 {
3465 /* It's dead, really. */
3466 lwp->dead = 1;
3467
3468 /* Store the exit status for later. */
3469 lwp->status_pending_p = 1;
3470 lwp->status_pending = wstat;
3471
3472 /* Prevent trying to stop it. */
3473 lwp->stopped = 1;
3474
3475 /* No further stops are expected from a dead lwp. */
3476 lwp->stop_expected = 0;
3477 }
3478
3479 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3480
3481 static void
3482 wait_for_sigstop (void)
3483 {
3484 struct thread_info *saved_thread;
3485 ptid_t saved_tid;
3486 int wstat;
3487 int ret;
3488
3489 saved_thread = current_thread;
3490 if (saved_thread != NULL)
3491 saved_tid = saved_thread->entry.id;
3492 else
3493 saved_tid = null_ptid; /* avoid bogus unused warning */
3494
3495 if (debug_threads)
3496 debug_printf ("wait_for_sigstop: pulling events\n");
3497
3498 /* Passing NULL_PTID as filter indicates we want all events to be
3499 left pending. Eventually this returns when there are no
3500 unwaited-for children left. */
3501 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3502 &wstat, __WALL);
3503 gdb_assert (ret == -1);
3504
3505 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3506 current_thread = saved_thread;
3507 else
3508 {
3509 if (debug_threads)
3510 debug_printf ("Previously current thread died.\n");
3511
3512 if (non_stop)
3513 {
3514 /* We can't change the current inferior behind GDB's back;
3515 otherwise, a subsequent command may apply to the wrong
3516 process. */
3517 current_thread = NULL;
3518 }
3519 else
3520 {
3521 /* Set a valid thread as current. */
3522 set_desired_thread (0);
3523 }
3524 }
3525 }
3526
3527 /* Returns true if LWP ENTRY is stopped in a jump pad and we can't
3528 move it out because we need to report the stop event to GDB. For
3529 example, if the user puts a breakpoint in the jump pad, it's
3530 because she wants to debug it. */
3531
3532 static int
3533 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3534 {
3535 struct thread_info *thread = (struct thread_info *) entry;
3536 struct lwp_info *lwp = get_thread_lwp (thread);
3537
3538 gdb_assert (lwp->suspended == 0);
3539 gdb_assert (lwp->stopped);
3540
3541 /* Allow debugging the jump pad, gdb_collect, etc. */
3542 return (supports_fast_tracepoints ()
3543 && agent_loaded_p ()
3544 && (gdb_breakpoint_here (lwp->stop_pc)
3545 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3546 || thread->last_resume_kind == resume_step)
3547 && linux_fast_tracepoint_collecting (lwp, NULL));
3548 }
3549
3550 static void
3551 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3552 {
3553 struct thread_info *thread = (struct thread_info *) entry;
3554 struct lwp_info *lwp = get_thread_lwp (thread);
3555 int *wstat;
3556
3557 gdb_assert (lwp->suspended == 0);
3558 gdb_assert (lwp->stopped);
3559
3560 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3561
3562 /* Allow debugging the jump pad, gdb_collect, etc. */
3563 if (!gdb_breakpoint_here (lwp->stop_pc)
3564 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3565 && thread->last_resume_kind != resume_step
3566 && maybe_move_out_of_jump_pad (lwp, wstat))
3567 {
3568 if (debug_threads)
3569 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3570 lwpid_of (thread));
3571
3572 if (wstat)
3573 {
3574 lwp->status_pending_p = 0;
3575 enqueue_one_deferred_signal (lwp, wstat);
3576
3577 if (debug_threads)
3578 debug_printf ("Signal %d for LWP %ld deferred "
3579 "(in jump pad)\n",
3580 WSTOPSIG (*wstat), lwpid_of (thread));
3581 }
3582
3583 linux_resume_one_lwp (lwp, 0, 0, NULL);
3584 }
3585 else
3586 lwp->suspended++;
3587 }
3588
3589 static int
3590 lwp_running (struct inferior_list_entry *entry, void *data)
3591 {
3592 struct thread_info *thread = (struct thread_info *) entry;
3593 struct lwp_info *lwp = get_thread_lwp (thread);
3594
3595 if (lwp->dead)
3596 return 0;
3597 if (lwp->stopped)
3598 return 0;
3599 return 1;
3600 }
3601
3602 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3603 If SUSPEND, then also increase the suspend count of every LWP,
3604 except EXCEPT. */
3605
3606 static void
3607 stop_all_lwps (int suspend, struct lwp_info *except)
3608 {
3609 /* Should not be called recursively. */
3610 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3611
3612 if (debug_threads)
3613 {
3614 debug_enter ();
3615 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3616 suspend ? "stop-and-suspend" : "stop",
3617 except != NULL
3618 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3619 : "none");
3620 }
3621
3622 stopping_threads = (suspend
3623 ? STOPPING_AND_SUSPENDING_THREADS
3624 : STOPPING_THREADS);
3625
3626 if (suspend)
3627 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3628 else
3629 find_inferior (&all_threads, send_sigstop_callback, except);
3630 wait_for_sigstop ();
3631 stopping_threads = NOT_STOPPING_THREADS;
3632
3633 if (debug_threads)
3634 {
3635 debug_printf ("stop_all_lwps done, setting stopping_threads "
3636 "back to !stopping\n");
3637 debug_exit ();
3638 }
3639 }
3640
3641 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3642 SIGNAL is nonzero, give it that signal. */
3643
3644 static void
3645 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3646 int step, int signal, siginfo_t *info)
3647 {
3648 struct thread_info *thread = get_lwp_thread (lwp);
3649 struct thread_info *saved_thread;
3650 int fast_tp_collecting;
3651
3652 if (lwp->stopped == 0)
3653 return;
3654
3655 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3656
3657 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3658
3659 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3660 user used the "jump" command, or "set $pc = foo"). */
3661 if (lwp->stop_pc != get_pc (lwp))
3662 {
3663 /* Collecting 'while-stepping' actions doesn't make sense
3664 anymore. */
3665 release_while_stepping_state_list (thread);
3666 }
3667
3668 /* If we have pending signals or status, and a new signal, enqueue the
3669 signal. Also enqueue the signal if we are waiting to reinsert a
3670 breakpoint; it will be picked up again below. */
3671 if (signal != 0
3672 && (lwp->status_pending_p
3673 || lwp->pending_signals != NULL
3674 || lwp->bp_reinsert != 0
3675 || fast_tp_collecting))
3676 {
3677 struct pending_signals *p_sig;
3678 p_sig = xmalloc (sizeof (*p_sig));
3679 p_sig->prev = lwp->pending_signals;
3680 p_sig->signal = signal;
3681 if (info == NULL)
3682 memset (&p_sig->info, 0, sizeof (siginfo_t));
3683 else
3684 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3685 lwp->pending_signals = p_sig;
3686 }
3687
3688 if (lwp->status_pending_p)
3689 {
3690 if (debug_threads)
3691 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3692 " has pending status\n",
3693 lwpid_of (thread), step ? "step" : "continue", signal,
3694 lwp->stop_expected ? "expected" : "not expected");
3695 return;
3696 }
3697
3698 saved_thread = current_thread;
3699 current_thread = thread;
3700
3701 if (debug_threads)
3702 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3703 lwpid_of (thread), step ? "step" : "continue", signal,
3704 lwp->stop_expected ? "expected" : "not expected");
3705
3706 /* This bit needs some thinking about. If we get a signal that
3707 we must report while a single-step reinsert is still pending,
3708 we often end up resuming the thread. It might be better to
3709 (ew) allow a stack of pending events; then we could be sure that
3710 the reinsert happened right away and not lose any signals.
3711
3712 Making this stack would also shrink the window in which breakpoints are
3713 uninserted (see comment in linux_wait_for_lwp) but not enough for
3714 complete correctness, so it won't solve that problem. It may be
3715 worthwhile just to solve this one, however. */
3716 if (lwp->bp_reinsert != 0)
3717 {
3718 if (debug_threads)
3719 debug_printf (" pending reinsert at 0x%s\n",
3720 paddress (lwp->bp_reinsert));
3721
3722 if (can_hardware_single_step ())
3723 {
3724 if (fast_tp_collecting == 0)
3725 {
3726 if (step == 0)
3727 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3728 if (lwp->suspended)
3729 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3730 lwp->suspended);
3731 }
3732
3733 step = 1;
3734 }
3735
3736 /* Postpone any pending signal. It was enqueued above. */
3737 signal = 0;
3738 }
3739
3740 if (fast_tp_collecting == 1)
3741 {
3742 if (debug_threads)
3743 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3744 " (exit-jump-pad-bkpt)\n",
3745 lwpid_of (thread));
3746
3747 /* Postpone any pending signal. It was enqueued above. */
3748 signal = 0;
3749 }
3750 else if (fast_tp_collecting == 2)
3751 {
3752 if (debug_threads)
3753 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3754 " single-stepping\n",
3755 lwpid_of (thread));
3756
3757 if (can_hardware_single_step ())
3758 step = 1;
3759 else
3760 {
3761 internal_error (__FILE__, __LINE__,
3762 "moving out of jump pad single-stepping"
3763 " not implemented on this target");
3764 }
3765
3766 /* Postpone any pending signal. It was enqueued above. */
3767 signal = 0;
3768 }
3769
3770 /* If we have while-stepping actions in this thread, set it stepping.
3771 If we have a signal to deliver, its handler may or may not be set to
3772 SIG_IGN; we don't know, so assume it will run, and allow collecting
3773 while-stepping into a signal handler. A possible smart thing to
3774 do would be to set an internal breakpoint at the signal return
3775 address, continue, and carry on catching this while-stepping
3776 action only when that breakpoint is hit. A future
3777 enhancement. */
3778 if (thread->while_stepping != NULL
3779 && can_hardware_single_step ())
3780 {
3781 if (debug_threads)
3782 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3783 lwpid_of (thread));
3784 step = 1;
3785 }
3786
3787 if (the_low_target.get_pc != NULL)
3788 {
3789 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3790
3791 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3792
3793 if (debug_threads)
3794 {
3795 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3796 (long) lwp->stop_pc);
3797 }
3798 }
3799
3800 /* If we have pending signals, consume one unless we are trying to
3801 reinsert a breakpoint or we're trying to finish a fast tracepoint
3802 collect. */
3803 if (lwp->pending_signals != NULL
3804 && lwp->bp_reinsert == 0
3805 && fast_tp_collecting == 0)
3806 {
3807 struct pending_signals **p_sig;
3808
3809 p_sig = &lwp->pending_signals;
3810 while ((*p_sig)->prev != NULL)
3811 p_sig = &(*p_sig)->prev;
3812
3813 signal = (*p_sig)->signal;
3814 if ((*p_sig)->info.si_signo != 0)
3815 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3816 &(*p_sig)->info);
3817
3818 free (*p_sig);
3819 *p_sig = NULL;
3820 }
3821
3822 if (the_low_target.prepare_to_resume != NULL)
3823 the_low_target.prepare_to_resume (lwp);
3824
3825 regcache_invalidate_thread (thread);
3826 errno = 0;
3827 lwp->stepping = step;
3828 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3829 (PTRACE_TYPE_ARG3) 0,
3830 /* Coerce to a uintptr_t first to avoid potential gcc warning
3831 of coercing an 8 byte integer to a 4 byte pointer. */
3832 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3833
3834 current_thread = saved_thread;
3835 if (errno)
3836 perror_with_name ("resuming thread");
3837
3838 /* Successfully resumed. Clear state that no longer makes sense,
3839 and mark the LWP as running. Must not do this before resuming
3840 otherwise if that fails other code will be confused. E.g., we'd
3841 later try to stop the LWP and hang forever waiting for a stop
3842 status. Note that we must not throw after this is cleared,
3843 otherwise handle_zombie_lwp_error would get confused. */
3844 lwp->stopped = 0;
3845 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3846 }
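
/* An illustration (only) of the pending_signals list discipline used
   above: enqueueing pushes at the head via the PREV link, and the
   consumer walks PREV to the tail before dequeueing, so signals are
   re-delivered in the order they were received:

     lwp->pending_signals -> newest -> ... -> oldest  (delivered first)
*/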
3847
3848 /* Called when we try to resume a stopped LWP and that errors out. If
3849 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3850 or about to become one), discard the error, clear any pending status
3851 the LWP may have, and return true (we'll collect the exit status
3852 soon enough). Otherwise, return false. */
3853
3854 static int
3855 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3856 {
3857 struct thread_info *thread = get_lwp_thread (lp);
3858
3859 /* If we get an error after resuming the LWP successfully, we'd
3860 confuse !T state for the LWP being gone. */
3861 gdb_assert (lp->stopped);
3862
3863 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3864 because even if ptrace failed with ESRCH, the tracee may be "not
3865 yet fully dead", but already refusing ptrace requests. In that
3866 case the tracee has 'R (Running)' state for a little bit
3867 (observed in Linux 3.18). See also the note on ESRCH in the
3868 ptrace(2) man page. Instead, check whether the LWP has any state
3869 other than ptrace-stopped. */
3870
3871 /* Don't assume anything if /proc/PID/status can't be read. */
3872 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3873 {
3874 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3875 lp->status_pending_p = 0;
3876 return 1;
3877 }
3878 return 0;
3879 }
3880
3881 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3882 disappears while we try to resume it. */
3883
3884 static void
3885 linux_resume_one_lwp (struct lwp_info *lwp,
3886 int step, int signal, siginfo_t *info)
3887 {
3888 TRY
3889 {
3890 linux_resume_one_lwp_throw (lwp, step, signal, info);
3891 }
3892 CATCH (ex, RETURN_MASK_ERROR)
3893 {
3894 if (!check_ptrace_stopped_lwp_gone (lwp))
3895 throw_exception (ex);
3896 }
3897 END_CATCH
3898 }
3899
3900 struct thread_resume_array
3901 {
3902 struct thread_resume *resume;
3903 size_t n;
3904 };
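
/* Illustrative only: how an RSP "vCont;s:p4d2.5;c" packet (all values
   hypothetical) could land in this array; field names follow struct
   thread_resume, and linux_set_resume_request below honors the first
   matching entry, so more specific actions must come first:

     resume[0].thread = ptid_build (0x4d2, 5, 0);
     resume[0].kind   = resume_step;
     resume[1].thread = minus_one_ptid;
     resume[1].kind   = resume_continue;
     n = 2;
*/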
3905
3906 /* This function is called once per thread via find_inferior.
3907 ARG is a pointer to a thread_resume_array struct.
3908 We look up the thread specified by ENTRY in ARG, and mark the thread
3909 with a pointer to the appropriate resume request.
3910
3911 This algorithm is O(threads * resume elements), but the number of
3912 resume elements is small (and will remain small at least until GDB
3913 supports thread suspension). */
3914
3915 static int
3916 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3917 {
3918 struct thread_info *thread = (struct thread_info *) entry;
3919 struct lwp_info *lwp = get_thread_lwp (thread);
3920 int ndx;
3921 struct thread_resume_array *r;
3922
3923 r = arg;
3924
3925 for (ndx = 0; ndx < r->n; ndx++)
3926 {
3927 ptid_t ptid = r->resume[ndx].thread;
3928 if (ptid_equal (ptid, minus_one_ptid)
3929 || ptid_equal (ptid, entry->id)
3930 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3931 of PID'. */
3932 || (ptid_get_pid (ptid) == pid_of (thread)
3933 && (ptid_is_pid (ptid)
3934 || ptid_get_lwp (ptid) == -1)))
3935 {
3936 if (r->resume[ndx].kind == resume_stop
3937 && thread->last_resume_kind == resume_stop)
3938 {
3939 if (debug_threads)
3940 debug_printf ("already %s LWP %ld at GDB's request\n",
3941 (thread->last_status.kind
3942 == TARGET_WAITKIND_STOPPED)
3943 ? "stopped"
3944 : "stopping",
3945 lwpid_of (thread));
3946
3947 continue;
3948 }
3949
3950 lwp->resume = &r->resume[ndx];
3951 thread->last_resume_kind = lwp->resume->kind;
3952
3953 lwp->step_range_start = lwp->resume->step_range_start;
3954 lwp->step_range_end = lwp->resume->step_range_end;
3955
3956 /* If we had a deferred signal to report, dequeue one now.
3957 This can happen if LWP gets more than one signal while
3958 trying to get out of a jump pad. */
3959 if (lwp->stopped
3960 && !lwp->status_pending_p
3961 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3962 {
3963 lwp->status_pending_p = 1;
3964
3965 if (debug_threads)
3966 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3967 "leaving status pending.\n",
3968 WSTOPSIG (lwp->status_pending),
3969 lwpid_of (thread));
3970 }
3971
3972 return 0;
3973 }
3974 }
3975
3976 /* No resume action for this thread. */
3977 lwp->resume = NULL;
3978
3979 return 0;
3980 }
3981
3982 /* find_inferior callback for linux_resume.
3983 Set *FLAG_P if this lwp has an interesting status pending. */
3984
3985 static int
3986 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3987 {
3988 struct thread_info *thread = (struct thread_info *) entry;
3989 struct lwp_info *lwp = get_thread_lwp (thread);
3990
3991 /* LWPs which will not be resumed are not interesting, because
3992 we might not wait for them next time through linux_wait. */
3993 if (lwp->resume == NULL)
3994 return 0;
3995
3996 if (thread_still_has_status_pending_p (thread))
3997 * (int *) flag_p = 1;
3998
3999 return 0;
4000 }
4001
4002 /* Return 1 if this lwp that GDB wants running is stopped at an
4003 internal breakpoint that we need to step over. It assumes that any
4004 required STOP_PC adjustment has already been propagated to the
4005 inferior's regcache. */
4006
4007 static int
4008 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4009 {
4010 struct thread_info *thread = (struct thread_info *) entry;
4011 struct lwp_info *lwp = get_thread_lwp (thread);
4012 struct thread_info *saved_thread;
4013 CORE_ADDR pc;
4014
4015 /* LWPs which will not be resumed are not interesting, because we
4016 might not wait for them next time through linux_wait. */
4017
4018 if (!lwp->stopped)
4019 {
4020 if (debug_threads)
4021 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4022 lwpid_of (thread));
4023 return 0;
4024 }
4025
4026 if (thread->last_resume_kind == resume_stop)
4027 {
4028 if (debug_threads)
4029 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4030 " stopped\n",
4031 lwpid_of (thread));
4032 return 0;
4033 }
4034
4035 gdb_assert (lwp->suspended >= 0);
4036
4037 if (lwp->suspended)
4038 {
4039 if (debug_threads)
4040 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4041 lwpid_of (thread));
4042 return 0;
4043 }
4044
4045 if (!lwp->need_step_over)
4046 {
4047 if (debug_threads)
4048 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4049 }
4050
4051 if (lwp->status_pending_p)
4052 {
4053 if (debug_threads)
4054 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4055 " status.\n",
4056 lwpid_of (thread));
4057 return 0;
4058 }
4059
4060 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4061 or we have. */
4062 pc = get_pc (lwp);
4063
4064 /* If the PC has changed since we stopped, then don't do anything,
4065 and let the breakpoint/tracepoint be hit. This happens if, for
4066 instance, GDB handled the decr_pc_after_break subtraction itself,
4067 GDB is OOL stepping this thread, or the user has issued a "jump"
4068 command, or poked thread's registers herself. */
4069 if (pc != lwp->stop_pc)
4070 {
4071 if (debug_threads)
4072 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4073 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4074 lwpid_of (thread),
4075 paddress (lwp->stop_pc), paddress (pc));
4076
4077 lwp->need_step_over = 0;
4078 return 0;
4079 }
4080
4081 saved_thread = current_thread;
4082 current_thread = thread;
4083
4084 /* We can only step over breakpoints we know about. */
4085 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4086 {
4087 /* Don't step over a breakpoint that GDB expects to hit
4088 though. If the condition is being evaluated on the target's side
4089 and it evaluates to false, step over this breakpoint as well. */
4090 if (gdb_breakpoint_here (pc)
4091 && gdb_condition_true_at_breakpoint (pc)
4092 && gdb_no_commands_at_breakpoint (pc))
4093 {
4094 if (debug_threads)
4095 debug_printf ("Need step over [LWP %ld]? yes, but found"
4096 " GDB breakpoint at 0x%s; skipping step over\n",
4097 lwpid_of (thread), paddress (pc));
4098
4099 current_thread = saved_thread;
4100 return 0;
4101 }
4102 else
4103 {
4104 if (debug_threads)
4105 debug_printf ("Need step over [LWP %ld]? yes, "
4106 "found breakpoint at 0x%s\n",
4107 lwpid_of (thread), paddress (pc));
4108
4109 /* We've found an lwp that needs stepping over --- return 1 so
4110 that find_inferior stops looking. */
4111 current_thread = saved_thread;
4112
4113 /* If the step over is cancelled, this is set again. */
4114 lwp->need_step_over = 0;
4115 return 1;
4116 }
4117 }
4118
4119 current_thread = saved_thread;
4120
4121 if (debug_threads)
4122 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4123 " at 0x%s\n",
4124 lwpid_of (thread), paddress (pc));
4125
4126 return 0;
4127 }
4128
4129 /* Start a step-over operation on LWP. When LWP is stopped at a
4130 breakpoint, to make progress, we need to move the breakpoint out
4131 of the way. If we let other threads run while we do that, they may
4132 pass by the breakpoint location and miss hitting it. To avoid
4133 that, a step-over momentarily stops all threads while LWP is
4134 single-stepped with the breakpoint temporarily uninserted from
4135 the inferior. When the single-step finishes, we reinsert the
4136 breakpoint, and let all threads that are supposed to be running
4137 run again.
4138
4139 On targets that don't support hardware single-step, we don't
4140 currently support full software single-stepping. Instead, we only
4141 support stepping over the thread event breakpoint, by asking the
4142 low target where to place a reinsert breakpoint. Since this
4143 routine assumes the breakpoint being stepped over is a thread event
4144 breakpoint, it takes the return address of the current function
4145 as a good enough place to set the reinsert breakpoint. */
4146
4147 static int
4148 start_step_over (struct lwp_info *lwp)
4149 {
4150 struct thread_info *thread = get_lwp_thread (lwp);
4151 struct thread_info *saved_thread;
4152 CORE_ADDR pc;
4153 int step;
4154
4155 if (debug_threads)
4156 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4157 lwpid_of (thread));
4158
4159 stop_all_lwps (1, lwp);
4160 gdb_assert (lwp->suspended == 0);
4161
4162 if (debug_threads)
4163 debug_printf ("Done stopping all threads for step-over.\n");
4164
4165 /* Note, we should always reach here with an already adjusted PC,
4166 either by GDB (if we're resuming due to GDB's request), or by our
4167 caller, if we just finished handling an internal breakpoint GDB
4168 shouldn't care about. */
4169 pc = get_pc (lwp);
4170
4171 saved_thread = current_thread;
4172 current_thread = thread;
4173
4174 lwp->bp_reinsert = pc;
4175 uninsert_breakpoints_at (pc);
4176 uninsert_fast_tracepoint_jumps_at (pc);
4177
4178 if (can_hardware_single_step ())
4179 {
4180 step = 1;
4181 }
4182 else
4183 {
4184 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4185 set_reinsert_breakpoint (raddr);
4186 step = 0;
4187 }
4188
4189 current_thread = saved_thread;
4190
4191 linux_resume_one_lwp (lwp, step, 0, NULL);
4192
4193 /* Require next event from this LWP. */
4194 step_over_bkpt = thread->entry.id;
4195 return 1;
4196 }
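
/* Timeline summary (illustration only) of the full step-over dance;
   the wait-side half lives in linux_wait_1/finish_step_over:

     start_step_over:  stop_all_lwps (1, lwp)
                       uninsert breakpoint/jump at PC, bp_reinsert = PC
                       single-step LWP (or set a reinsert breakpoint)
     ...LWP reports SIGTRAP; step_over_bkpt selects its event...
     finish_step_over: reinsert breakpoint/jump at bp_reinsert
                       delete any reinsert breakpoints
     afterwards:       proceed_all_lwps / unstop_all_lwps
*/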
4197
4198 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4199 start_step_over, if still there, and delete any reinsert
4200 breakpoints we've set, on non hardware single-step targets. */
4201
4202 static int
4203 finish_step_over (struct lwp_info *lwp)
4204 {
4205 if (lwp->bp_reinsert != 0)
4206 {
4207 if (debug_threads)
4208 debug_printf ("Finished step over.\n");
4209
4210 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4211 may be no breakpoint to reinsert there by now. */
4212 reinsert_breakpoints_at (lwp->bp_reinsert);
4213 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4214
4215 lwp->bp_reinsert = 0;
4216
4217 /* Delete any software-single-step reinsert breakpoints. No
4218 longer needed. We don't have to worry about other threads
4219 hitting this trap, and later not being able to explain it,
4220 because we were stepping over a breakpoint, and we hold all
4221 threads but LWP stopped while doing that. */
4222 if (!can_hardware_single_step ())
4223 delete_reinsert_breakpoints ();
4224
4225 step_over_bkpt = null_ptid;
4226 return 1;
4227 }
4228 else
4229 return 0;
4230 }
4231
4232 /* This function is called once per thread. We check the thread's resume
4233 request, which will tell us whether to resume, step, or leave the thread
4234 stopped; and what signal, if any, it should be sent.
4235
4236 For threads which we aren't explicitly told otherwise, we preserve
4237 the stepping flag; this is used for stepping over gdbserver-placed
4238 breakpoints.
4239
4240 If a pending status was set in any thread, we queue any needed
4241 signals, since we won't actually resume. We already have a pending
4242 event to report, so we don't need to preserve any step requests;
4243 they should be re-issued if necessary. */
4244
4245 static int
4246 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4247 {
4248 struct thread_info *thread = (struct thread_info *) entry;
4249 struct lwp_info *lwp = get_thread_lwp (thread);
4250 int step;
4251 int leave_all_stopped = * (int *) arg;
4252 int leave_pending;
4253
4254 if (lwp->resume == NULL)
4255 return 0;
4256
4257 if (lwp->resume->kind == resume_stop)
4258 {
4259 if (debug_threads)
4260 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4261
4262 if (!lwp->stopped)
4263 {
4264 if (debug_threads)
4265 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4266
4267 /* Stop the thread, and wait for the event asynchronously,
4268 through the event loop. */
4269 send_sigstop (lwp);
4270 }
4271 else
4272 {
4273 if (debug_threads)
4274 debug_printf ("already stopped LWP %ld\n",
4275 lwpid_of (thread));
4276
4277 /* The LWP may have been stopped in an internal event that
4278 was not meant to be notified back to GDB (e.g., gdbserver
4279 breakpoint), so we should be reporting a stop event in
4280 this case too. */
4281
4282 /* If the thread already has a pending SIGSTOP, this is a
4283 no-op. Otherwise, something later will presumably resume
4284 the thread and this will cause it to cancel any pending
4285 operation, due to last_resume_kind == resume_stop. If
4286 the thread already has a pending status to report, we
4287 will still report it the next time we wait - see
4288 status_pending_p_callback. */
4289
4290 /* If we already have a pending signal to report, then
4291 there's no need to queue a SIGSTOP, as this means we're
4292 midway through moving the LWP out of the jumppad, and we
4293 will report the pending signal as soon as that is
4294 finished. */
4295 if (lwp->pending_signals_to_report == NULL)
4296 send_sigstop (lwp);
4297 }
4298
4299 /* For stop requests, we're done. */
4300 lwp->resume = NULL;
4301 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4302 return 0;
4303 }
4304
4305 /* If this thread, which is about to be resumed, has a pending status,
4306 then don't resume any threads - we can just report the pending
4307 status. Make sure to queue any signals that would otherwise be
4308 sent. In all-stop mode, we make this decision based on whether *any*
4309 thread has a pending status. If there's a thread that needs the
4310 step-over-breakpoint dance, then don't resume any other thread
4311 but that particular one. */
4312 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4313
4314 if (!leave_pending)
4315 {
4316 if (debug_threads)
4317 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4318
4319 step = (lwp->resume->kind == resume_step);
4320 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4321 }
4322 else
4323 {
4324 if (debug_threads)
4325 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4326
4327 /* If we have a new signal, enqueue the signal. */
4328 if (lwp->resume->sig != 0)
4329 {
4330 struct pending_signals *p_sig;
4331 p_sig = xmalloc (sizeof (*p_sig));
4332 p_sig->prev = lwp->pending_signals;
4333 p_sig->signal = lwp->resume->sig;
4334 memset (&p_sig->info, 0, sizeof (siginfo_t));
4335
4336 /* If this is the same signal we were previously stopped by,
4337 make sure to queue its siginfo. We can ignore the return
4338 value of ptrace; if it fails, we'll skip
4339 PTRACE_SETSIGINFO. */
4340 if (WIFSTOPPED (lwp->last_status)
4341 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4342 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4343 &p_sig->info);
4344
4345 lwp->pending_signals = p_sig;
4346 }
4347 }
4348
4349 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4350 lwp->resume = NULL;
4351 return 0;
4352 }
4353
4354 static void
4355 linux_resume (struct thread_resume *resume_info, size_t n)
4356 {
4357 struct thread_resume_array array = { resume_info, n };
4358 struct thread_info *need_step_over = NULL;
4359 int any_pending;
4360 int leave_all_stopped;
4361
4362 if (debug_threads)
4363 {
4364 debug_enter ();
4365 debug_printf ("linux_resume:\n");
4366 }
4367
4368 find_inferior (&all_threads, linux_set_resume_request, &array);
4369
4370 /* If there is a thread which would otherwise be resumed, which has
4371 a pending status, then don't resume any threads - we can just
4372 report the pending status. Make sure to queue any signals that
4373 would otherwise be sent. In non-stop mode, we'll apply this
4374 logic to each thread individually. We consume all pending events
4375 before considering starting a step-over (in all-stop). */
4376 any_pending = 0;
4377 if (!non_stop)
4378 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4379
4380 /* If there is a thread which would otherwise be resumed, which is
4381 stopped at a breakpoint that needs stepping over, then don't
4382 resume any threads - have it step over the breakpoint with all
4383 other threads stopped, then resume all threads again. Make sure
4384 to queue any signals that would otherwise be delivered or
4385 queued. */
4386 if (!any_pending && supports_breakpoints ())
4387 need_step_over
4388 = (struct thread_info *) find_inferior (&all_threads,
4389 need_step_over_p, NULL);
4390
4391 leave_all_stopped = (need_step_over != NULL || any_pending);
4392
4393 if (debug_threads)
4394 {
4395 if (need_step_over != NULL)
4396 debug_printf ("Not resuming all, need step over\n");
4397 else if (any_pending)
4398 debug_printf ("Not resuming, all-stop and found "
4399 "an LWP with pending status\n");
4400 else
4401 debug_printf ("Resuming, no pending status or step over needed\n");
4402 }
4403
4404 /* Even if we're leaving threads stopped, queue all signals we'd
4405 otherwise deliver. */
4406 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4407
4408 if (need_step_over)
4409 start_step_over (get_thread_lwp (need_step_over));
4410
4411 if (debug_threads)
4412 {
4413 debug_printf ("linux_resume done\n");
4414 debug_exit ();
4415 }
4416 }
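
/* Decision summary for linux_resume above (restated for clarity):

     all-stop and some LWP has a pending status -> leave all stopped,
       queue any requested signals, report the pending event;
     some LWP needs a step-over -> leave all stopped, start the
       step-over on that LWP only;
     otherwise -> resume/step each thread per its resume request. */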
4417
4418 /* This function is called once per thread. We check the thread's
4419 last resume request, which will tell us whether to resume, step, or
4420 leave the thread stopped. Any signal the client requested to be
4421 delivered has already been enqueued at this point.
4422
4423 If any thread that GDB wants running is stopped at an internal
4424 breakpoint that needs stepping over, we start a step-over operation
4425 on that particular thread, and leave all others stopped. */
4426
4427 static int
4428 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4429 {
4430 struct thread_info *thread = (struct thread_info *) entry;
4431 struct lwp_info *lwp = get_thread_lwp (thread);
4432 int step;
4433
4434 if (lwp == except)
4435 return 0;
4436
4437 if (debug_threads)
4438 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4439
4440 if (!lwp->stopped)
4441 {
4442 if (debug_threads)
4443 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4444 return 0;
4445 }
4446
4447 if (thread->last_resume_kind == resume_stop
4448 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4449 {
4450 if (debug_threads)
4451 debug_printf ("   client wants LWP %ld to remain stopped\n",
4452 lwpid_of (thread));
4453 return 0;
4454 }
4455
4456 if (lwp->status_pending_p)
4457 {
4458 if (debug_threads)
4459 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4460 lwpid_of (thread));
4461 return 0;
4462 }
4463
4464 gdb_assert (lwp->suspended >= 0);
4465
4466 if (lwp->suspended)
4467 {
4468 if (debug_threads)
4469 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4470 return 0;
4471 }
4472
4473 if (thread->last_resume_kind == resume_stop
4474 && lwp->pending_signals_to_report == NULL
4475 && lwp->collecting_fast_tracepoint == 0)
4476 {
4477 /* We haven't reported this LWP as stopped yet (otherwise, the
4478 last_status.kind check above would catch it, and we wouldn't
4479 reach here). This LWP may have been momentarily paused by a
4480 stop_all_lwps call while handling, for example, another LWP's
4481 step-over. In that case, the pending expected SIGSTOP signal
4482 that was queued at vCont;t handling time will have already
4483 been consumed by wait_for_sigstop, and so we need to requeue
4484 another one here. Note that if the LWP already has a SIGSTOP
4485 pending, this is a no-op. */
4486
4487 if (debug_threads)
4488 debug_printf ("Client wants LWP %ld to stop. "
4489 "Making sure it has a SIGSTOP pending\n",
4490 lwpid_of (thread));
4491
4492 send_sigstop (lwp);
4493 }
4494
4495 step = thread->last_resume_kind == resume_step;
4496 linux_resume_one_lwp (lwp, step, 0, NULL);
4497 return 0;
4498 }
4499
4500 static int
4501 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4502 {
4503 struct thread_info *thread = (struct thread_info *) entry;
4504 struct lwp_info *lwp = get_thread_lwp (thread);
4505
4506 if (lwp == except)
4507 return 0;
4508
4509 lwp->suspended--;
4510 gdb_assert (lwp->suspended >= 0);
4511
4512 return proceed_one_lwp (entry, except);
4513 }
4514
4515 /* When we finish a step-over, set threads running again. If there's
4516 another thread that may need a step-over, now's the time to start
4517 it. Eventually, we'll move all threads past their breakpoints. */
4518
4519 static void
4520 proceed_all_lwps (void)
4521 {
4522 struct thread_info *need_step_over;
4523
4524 /* If there is a thread which would otherwise be resumed, which is
4525 stopped at a breakpoint that needs stepping over, then don't
4526 resume any threads - have it step over the breakpoint with all
4527 other threads stopped, then resume all threads again. */
4528
4529 if (supports_breakpoints ())
4530 {
4531 need_step_over
4532 = (struct thread_info *) find_inferior (&all_threads,
4533 need_step_over_p, NULL);
4534
4535 if (need_step_over != NULL)
4536 {
4537 if (debug_threads)
4538 debug_printf ("proceed_all_lwps: found "
4539 "thread %ld needing a step-over\n",
4540 lwpid_of (need_step_over));
4541
4542 start_step_over (get_thread_lwp (need_step_over));
4543 return;
4544 }
4545 }
4546
4547 if (debug_threads)
4548 debug_printf ("Proceeding, no step-over needed\n");
4549
4550 find_inferior (&all_threads, proceed_one_lwp, NULL);
4551 }
4552
4553 /* Stopped LWPs that the client wanted to be running, that don't have
4554 pending statuses, are set to run again, except for EXCEPT, if not
4555 NULL. This undoes a stop_all_lwps call. */
4556
4557 static void
4558 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4559 {
4560 if (debug_threads)
4561 {
4562 debug_enter ();
4563 if (except)
4564 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4565 lwpid_of (get_lwp_thread (except)));
4566 else
4567 debug_printf ("unstopping all lwps\n");
4568 }
4569
4570 if (unsuspend)
4571 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4572 else
4573 find_inferior (&all_threads, proceed_one_lwp, except);
4574
4575 if (debug_threads)
4576 {
4577 debug_printf ("unstop_all_lwps done\n");
4578 debug_exit ();
4579 }
4580 }
4581
4582
4583 #ifdef HAVE_LINUX_REGSETS
4584
4585 #define use_linux_regsets 1
4586
4587 /* Returns true if REGSET has been disabled. */
4588
4589 static int
4590 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4591 {
4592 return (info->disabled_regsets != NULL
4593 && info->disabled_regsets[regset - info->regsets]);
4594 }
4595
4596 /* Disable REGSET. */
4597
4598 static void
4599 disable_regset (struct regsets_info *info, struct regset_info *regset)
4600 {
4601 int dr_offset;
4602
4603 dr_offset = regset - info->regsets;
4604 if (info->disabled_regsets == NULL)
4605 info->disabled_regsets = xcalloc (1, info->num_regsets);
4606 info->disabled_regsets[dr_offset] = 1;
4607 }
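
/* A hedged sketch (names prefixed "example_" are hypothetical, and
   the gregset types are the usual <elf.h>/<sys/procfs.h> ones) of one
   entry in an architecture's regset table as consumed by the loops
   below; designated initializers sidestep the exact field order of
   struct regset_info in linux-low.h:

     static struct regset_info example_regsets[] = {
       { .get_request = PTRACE_GETREGSET,
         .set_request = PTRACE_SETREGSET,
         .nt_type = NT_PRSTATUS,
         .size = sizeof (elf_gregset_t),
         .type = GENERAL_REGS,
         .fill_function = example_fill_gregset,
         .store_function = example_store_gregset },
       { .size = -1 }   // a negative size terminates the scans below
     };
*/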
4608
4609 static int
4610 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4611 struct regcache *regcache)
4612 {
4613 struct regset_info *regset;
4614 int saw_general_regs = 0;
4615 int pid;
4616 struct iovec iov;
4617
4618 pid = lwpid_of (current_thread);
4619 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4620 {
4621 void *buf, *data;
4622 int nt_type, res;
4623
4624 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4625 continue;
4626
4627 buf = xmalloc (regset->size);
4628
4629 nt_type = regset->nt_type;
4630 if (nt_type)
4631 {
4632 iov.iov_base = buf;
4633 iov.iov_len = regset->size;
4634 data = (void *) &iov;
4635 }
4636 else
4637 data = buf;
4638
4639 #ifndef __sparc__
4640 res = ptrace (regset->get_request, pid,
4641 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4642 #else
4643 res = ptrace (regset->get_request, pid, data, nt_type);
4644 #endif
4645 if (res < 0)
4646 {
4647 if (errno == EIO)
4648 {
4649 /* If we get EIO on a regset, do not try it again for
4650 this process mode. */
4651 disable_regset (regsets_info, regset);
4652 }
4653 else if (errno == ENODATA)
4654 {
4655 /* ENODATA may be returned if the regset is currently
4656 not "active". This can happen in normal operation,
4657 so suppress the warning in this case. */
4658 }
4659 else
4660 {
4661 char s[256];
4662 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4663 pid);
4664 perror (s);
4665 }
4666 }
4667 else
4668 {
4669 if (regset->type == GENERAL_REGS)
4670 saw_general_regs = 1;
4671 regset->store_function (regcache, buf);
4672 }
4673 free (buf);
4674 }
4675 if (saw_general_regs)
4676 return 0;
4677 else
4678 return 1;
4679 }
4680
4681 static int
4682 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4683 struct regcache *regcache)
4684 {
4685 struct regset_info *regset;
4686 int saw_general_regs = 0;
4687 int pid;
4688 struct iovec iov;
4689
4690 pid = lwpid_of (current_thread);
4691 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4692 {
4693 void *buf, *data;
4694 int nt_type, res;
4695
4696 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4697 || regset->fill_function == NULL)
4698 continue;
4699
4700 buf = xmalloc (regset->size);
4701
4702 /* First fill the buffer with the current register set contents,
4703 in case there are any items in the kernel's regset that are
4704 not in gdbserver's regcache. */
4705
4706 nt_type = regset->nt_type;
4707 if (nt_type)
4708 {
4709 iov.iov_base = buf;
4710 iov.iov_len = regset->size;
4711 data = (void *) &iov;
4712 }
4713 else
4714 data = buf;
4715
4716 #ifndef __sparc__
4717 res = ptrace (regset->get_request, pid,
4718 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4719 #else
4720 res = ptrace (regset->get_request, pid, data, nt_type);
4721 #endif
4722
4723 if (res == 0)
4724 {
4725 /* Then overlay our cached registers on that. */
4726 regset->fill_function (regcache, buf);
4727
4728 /* Only now do we write the register set. */
4729 #ifndef __sparc__
4730 res = ptrace (regset->set_request, pid,
4731 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4732 #else
4733 res = ptrace (regset->set_request, pid, data, nt_type);
4734 #endif
4735 }
4736
4737 if (res < 0)
4738 {
4739 if (errno == EIO)
4740 {
4741 /* If we get EIO on a regset, do not try it again for
4742 this process mode. */
4743 disable_regset (regsets_info, regset);
4744 }
4745 else if (errno == ESRCH)
4746 {
4747 /* At this point, ESRCH should mean the process is
4748 already gone, in which case we simply ignore attempts
4749 to change its registers. See also the related
4750 comment in linux_resume_one_lwp. */
4751 free (buf);
4752 return 0;
4753 }
4754 else
4755 {
4756 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4757 }
4758 }
4759 else if (regset->type == GENERAL_REGS)
4760 saw_general_regs = 1;
4761 free (buf);
4762 }
4763 if (saw_general_regs)
4764 return 0;
4765 else
4766 return 1;
4767 }
4768
4769 #else /* !HAVE_LINUX_REGSETS */
4770
4771 #define use_linux_regsets 0
4772 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4773 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4774
4775 #endif
4776
4777 /* Return 1 if register REGNO is supported by one of the regset ptrace
4778 calls or 0 if it has to be transferred individually. */
4779
4780 static int
4781 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4782 {
4783 unsigned char mask = 1 << (regno % 8);
4784 size_t index = regno / 8;
4785
4786 return (use_linux_regsets
4787 && (regs_info->regset_bitmap == NULL
4788 || (regs_info->regset_bitmap[index] & mask) != 0));
4789 }
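
/* Worked example (illustration only): for REGNO = 10, index = 10 / 8
   = 1 and mask = 1 << (10 % 8) = 0x04, so the test reads bit 2 of
   regset_bitmap[1]; a NULL bitmap means every register is covered by
   some regset. */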
4790
4791 #ifdef HAVE_LINUX_USRREGS
4792
4793 int
4794 register_addr (const struct usrregs_info *usrregs, int regnum)
4795 {
4796 int addr;
4797
4798 if (regnum < 0 || regnum >= usrregs->num_regs)
4799 error ("Invalid register number %d.", regnum);
4800
4801 addr = usrregs->regmap[regnum];
4802
4803 return addr;
4804 }
4805
4806 /* Fetch one register. */
4807 static void
4808 fetch_register (const struct usrregs_info *usrregs,
4809 struct regcache *regcache, int regno)
4810 {
4811 CORE_ADDR regaddr;
4812 int i, size;
4813 char *buf;
4814 int pid;
4815
4816 if (regno >= usrregs->num_regs)
4817 return;
4818 if ((*the_low_target.cannot_fetch_register) (regno))
4819 return;
4820
4821 regaddr = register_addr (usrregs, regno);
4822 if (regaddr == -1)
4823 return;
4824
4825 size = ((register_size (regcache->tdesc, regno)
4826 + sizeof (PTRACE_XFER_TYPE) - 1)
4827 & -sizeof (PTRACE_XFER_TYPE));
4828 buf = alloca (size);
4829
4830 pid = lwpid_of (current_thread);
4831 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4832 {
4833 errno = 0;
4834 *(PTRACE_XFER_TYPE *) (buf + i) =
4835 ptrace (PTRACE_PEEKUSER, pid,
4836 /* Coerce to a uintptr_t first to avoid potential gcc warning
4837 of coercing an 8 byte integer to a 4 byte pointer. */
4838 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4839 regaddr += sizeof (PTRACE_XFER_TYPE);
4840 if (errno != 0)
4841 error ("reading register %d: %s", regno, strerror (errno));
4842 }
4843
4844 if (the_low_target.supply_ptrace_register)
4845 the_low_target.supply_ptrace_register (regcache, regno, buf);
4846 else
4847 supply_register (regcache, regno, buf);
4848 }
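
/* Worked example of the size rounding above (illustration only):
   with an 8-byte PTRACE_XFER_TYPE and a 10-byte register,
   SIZE = (10 + 7) & -8 = 16, i.e. the loop issues two
   PTRACE_PEEKUSER transfers. */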
4849
4850 /* Store one register. */
4851 static void
4852 store_register (const struct usrregs_info *usrregs,
4853 struct regcache *regcache, int regno)
4854 {
4855 CORE_ADDR regaddr;
4856 int i, size;
4857 char *buf;
4858 int pid;
4859
4860 if (regno >= usrregs->num_regs)
4861 return;
4862 if ((*the_low_target.cannot_store_register) (regno))
4863 return;
4864
4865 regaddr = register_addr (usrregs, regno);
4866 if (regaddr == -1)
4867 return;
4868
4869 size = ((register_size (regcache->tdesc, regno)
4870 + sizeof (PTRACE_XFER_TYPE) - 1)
4871 & -sizeof (PTRACE_XFER_TYPE));
4872 buf = alloca (size);
4873 memset (buf, 0, size);
4874
4875 if (the_low_target.collect_ptrace_register)
4876 the_low_target.collect_ptrace_register (regcache, regno, buf);
4877 else
4878 collect_register (regcache, regno, buf);
4879
4880 pid = lwpid_of (current_thread);
4881 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4882 {
4883 errno = 0;
4884 ptrace (PTRACE_POKEUSER, pid,
4885 /* Coerce to a uintptr_t first to avoid potential gcc warning
4886 about coercing an 8 byte integer to a 4 byte pointer. */
4887 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4888 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4889 if (errno != 0)
4890 {
4891 /* At this point, ESRCH should mean the process is
4892 already gone, in which case we simply ignore attempts
4893 to change its registers. See also the related
4894 comment in linux_resume_one_lwp. */
4895 if (errno == ESRCH)
4896 return;
4897
4898 if ((*the_low_target.cannot_store_register) (regno) == 0)
4899 error ("writing register %d: %s", regno, strerror (errno));
4900 }
4901 regaddr += sizeof (PTRACE_XFER_TYPE);
4902 }
4903 }
4904
4905 /* Fetch all registers, or just one, from the child process.
4906 If REGNO is -1, do this for all registers, skipping any that are
4907 assumed to have been retrieved by regsets_fetch_inferior_registers,
4908 unless ALL is non-zero.
4909 Otherwise, REGNO specifies which register (so we can save time). */
4910 static void
4911 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4912 struct regcache *regcache, int regno, int all)
4913 {
4914 struct usrregs_info *usr = regs_info->usrregs;
4915
4916 if (regno == -1)
4917 {
4918 for (regno = 0; regno < usr->num_regs; regno++)
4919 if (all || !linux_register_in_regsets (regs_info, regno))
4920 fetch_register (usr, regcache, regno);
4921 }
4922 else
4923 fetch_register (usr, regcache, regno);
4924 }
4925
4926 /* Store our register values back into the inferior.
4927 If REGNO is -1, do this for all registers, skipping any that are
4928 assumed to have been saved by regsets_store_inferior_registers,
4929 unless ALL is non-zero.
4930 Otherwise, REGNO specifies which register (so we can save time). */
4931 static void
4932 usr_store_inferior_registers (const struct regs_info *regs_info,
4933 struct regcache *regcache, int regno, int all)
4934 {
4935 struct usrregs_info *usr = regs_info->usrregs;
4936
4937 if (regno == -1)
4938 {
4939 for (regno = 0; regno < usr->num_regs; regno++)
4940 if (all || !linux_register_in_regsets (regs_info, regno))
4941 store_register (usr, regcache, regno);
4942 }
4943 else
4944 store_register (usr, regcache, regno);
4945 }
4946
4947 #else /* !HAVE_LINUX_USRREGS */
4948
4949 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4950 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4951
4952 #endif
4953
4954
4955 void
4956 linux_fetch_registers (struct regcache *regcache, int regno)
4957 {
4958 int use_regsets;
4959 int all = 0;
4960 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4961
4962 if (regno == -1)
4963 {
4964 if (the_low_target.fetch_register != NULL
4965 && regs_info->usrregs != NULL)
4966 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4967 (*the_low_target.fetch_register) (regcache, regno);
4968
4969 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4970 if (regs_info->usrregs != NULL)
4971 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4972 }
4973 else
4974 {
4975 if (the_low_target.fetch_register != NULL
4976 && (*the_low_target.fetch_register) (regcache, regno))
4977 return;
4978
4979 use_regsets = linux_register_in_regsets (regs_info, regno);
4980 if (use_regsets)
4981 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4982 regcache);
4983 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4984 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4985 }
4986 }
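
/* Summary of the fetch strategy above (restated for clarity): when
   fetching all registers (REGNO == -1), the low target's
   fetch_register hook runs first, then whole regsets, then usrregs
   picks up whatever the regsets missed; a nonzero ALL from the
   regsets pass means they saw no general registers, so everything is
   fetched via PTRACE_PEEKUSER instead. */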
4987
4988 void
4989 linux_store_registers (struct regcache *regcache, int regno)
4990 {
4991 int use_regsets;
4992 int all = 0;
4993 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4994
4995 if (regno == -1)
4996 {
4997 all = regsets_store_inferior_registers (regs_info->regsets_info,
4998 regcache);
4999 if (regs_info->usrregs != NULL)
5000 usr_store_inferior_registers (regs_info, regcache, regno, all);
5001 }
5002 else
5003 {
5004 use_regsets = linux_register_in_regsets (regs_info, regno);
5005 if (use_regsets)
5006 all = regsets_store_inferior_registers (regs_info->regsets_info,
5007 regcache);
5008 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5009 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5010 }
5011 }
5012
5013
5014 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5015 to debugger memory starting at MYADDR. */
5016
5017 static int
5018 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5019 {
5020 int pid = lwpid_of (current_thread);
5021 register PTRACE_XFER_TYPE *buffer;
5022 register CORE_ADDR addr;
5023 register int count;
5024 char filename[64];
5025 register int i;
5026 int ret;
5027 int fd;
5028
5029 /* Try using /proc. Don't bother for one word. */
5030 if (len >= 3 * sizeof (long))
5031 {
5032 int bytes;
5033
5034 /* We could keep this file open and cache it - possibly one per
5035 thread. That requires some juggling, but is even faster. */
5036 sprintf (filename, "/proc/%d/mem", pid);
5037 fd = open (filename, O_RDONLY | O_LARGEFILE);
5038 if (fd == -1)
5039 goto no_proc;
5040
5041 /* If pread64 is available, use it. It's faster if the kernel
5042 supports it (only one syscall), and it's 64-bit safe even on
5043 32-bit platforms (for instance, SPARC debugging a SPARC64
5044 application). */
5045 #ifdef HAVE_PREAD64
5046 bytes = pread64 (fd, myaddr, len, memaddr);
5047 #else
5048 bytes = -1;
5049 if (lseek (fd, memaddr, SEEK_SET) != -1)
5050 bytes = read (fd, myaddr, len);
5051 #endif
5052
5053 close (fd);
5054 if (bytes == len)
5055 return 0;
5056
5057 /* Some data was read, we'll try to get the rest with ptrace. */
5058 if (bytes > 0)
5059 {
5060 memaddr += bytes;
5061 myaddr += bytes;
5062 len -= bytes;
5063 }
5064 }
5065
5066 no_proc:
5067 /* Round starting address down to longword boundary. */
5068 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5069 /* Round ending address up; get number of longwords that makes. */
5070 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5071 / sizeof (PTRACE_XFER_TYPE));
5072 /* Allocate buffer of that many longwords. */
5073 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5074
5075 /* Read all the longwords */
5076 errno = 0;
5077 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5078 {
5079 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5080 about coercing an 8 byte integer to a 4 byte pointer. */
5081 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5082 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5083 (PTRACE_TYPE_ARG4) 0);
5084 if (errno)
5085 break;
5086 }
5087 ret = errno;
5088
5089 /* Copy appropriate bytes out of the buffer. */
5090 if (i > 0)
5091 {
5092 i *= sizeof (PTRACE_XFER_TYPE);
5093 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5094 memcpy (myaddr,
5095 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5096 i < len ? i : len);
5097 }
5098
5099 return ret;
5100 }
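
/* Worked example of the rounding in the ptrace fallback above
   (illustration only): with an 8-byte PTRACE_XFER_TYPE,
   MEMADDR = 0x1003 and LEN = 6 give ADDR = 0x1000 and
   COUNT = ((0x1009 - 0x1000) + 7) / 8 = 2; the final memcpy then
   skips the three leading pad bytes (MEMADDR & 7). */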
5101
5102 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5103 memory at MEMADDR. On failure (cannot write to the inferior)
5104 returns the value of errno. Always succeeds if LEN is zero. */
5105
5106 static int
5107 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5108 {
5109 register int i;
5110 /* Round starting address down to longword boundary. */
5111 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5112 /* Round ending address up; get number of longwords that makes. */
5113 register int count
5114 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5115 / sizeof (PTRACE_XFER_TYPE);
5116
5117 /* Allocate buffer of that many longwords. */
5118 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5119 alloca (count * sizeof (PTRACE_XFER_TYPE));
5120
5121 int pid = lwpid_of (current_thread);
5122
5123 if (len == 0)
5124 {
5125 /* Zero length write always succeeds. */
5126 return 0;
5127 }
5128
5129 if (debug_threads)
5130 {
5131 /* Dump up to four bytes. Copy rather than dereference, so we
     never read past the end of MYADDR when LEN < 4. */
5132 unsigned int val = 0;
     memcpy (&val, myaddr, len < 4 ? len : 4);
5133 if (len == 1)
5134 val = val & 0xff;
5135 else if (len == 2)
5136 val = val & 0xffff;
5137 else if (len == 3)
5138 val = val & 0xffffff;
5139 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5140 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5141 }
5142
5143 /* Fill start and end extra bytes of buffer with existing memory data. */
5144
5145 errno = 0;
5146 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5147 about coercing an 8 byte integer to a 4 byte pointer. */
5148 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5149 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5150 (PTRACE_TYPE_ARG4) 0);
5151 if (errno)
5152 return errno;
5153
5154 if (count > 1)
5155 {
5156 errno = 0;
5157 buffer[count - 1]
5158 = ptrace (PTRACE_PEEKTEXT, pid,
5159 /* Coerce to a uintptr_t first to avoid potential gcc warning
5160 about coercing an 8 byte integer to a 4 byte pointer. */
5161 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5162 * sizeof (PTRACE_XFER_TYPE)),
5163 (PTRACE_TYPE_ARG4) 0);
5164 if (errno)
5165 return errno;
5166 }
5167
5168 /* Copy data to be written over corresponding part of buffer. */
5169
5170 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5171 myaddr, len);
5172
5173 /* Write the entire buffer. */
5174
5175 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5176 {
5177 errno = 0;
5178 ptrace (PTRACE_POKETEXT, pid,
5179 /* Coerce to a uintptr_t first to avoid potential gcc warning
5180 about coercing an 8 byte integer to a 4 byte pointer. */
5181 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5182 (PTRACE_TYPE_ARG4) buffer[i]);
5183 if (errno)
5184 return errno;
5185 }
5186
5187 return 0;
5188 }
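
/* Note on the write path above: it is a read-modify-write. Only the
   first and last words of BUFFER can contain bytes outside
   [MEMADDR, MEMADDR + LEN), so exactly those two are peeked before
   the payload is copied over them and the whole buffer is poked
   back. */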
5189
5190 static void
5191 linux_look_up_symbols (void)
5192 {
5193 #ifdef USE_THREAD_DB
5194 struct process_info *proc = current_process ();
5195
5196 if (proc->priv->thread_db != NULL)
5197 return;
5198
5199 /* If the kernel supports tracing clones, then we don't need to
5200 use the magic thread event breakpoint to learn about
5201 threads. */
5202 thread_db_init (!linux_supports_traceclone ());
5203 #endif
5204 }
5205
5206 static void
5207 linux_request_interrupt (void)
5208 {
5209 extern unsigned long signal_pid;
5210
5211 /* Send a SIGINT to the process group. This acts just like the user
5212 typed a ^C on the controlling terminal. */
5213 kill (-signal_pid, SIGINT);
5214 }
5215
5216 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5217 to debugger memory starting at MYADDR. */
5218
5219 static int
5220 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5221 {
5222 char filename[PATH_MAX];
5223 int fd, n;
5224 int pid = lwpid_of (current_thread);
5225
5226 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5227
5228 fd = open (filename, O_RDONLY);
5229 if (fd < 0)
5230 return -1;
5231
5232 if (offset != (CORE_ADDR) 0
5233 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5234 n = -1;
5235 else
5236 n = read (fd, myaddr, len);
5237
5238 close (fd);
5239
5240 return n;
5241 }
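
/* The bytes returned above are the raw auxv image: an array of
   (a_type, a_val) pairs in the inferior's word size, terminated by
   an AT_NULL entry -- see <elf.h> for the AT_* type values. */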
5242
5243 /* These breakpoint and watchpoint related wrapper functions simply
5244 pass on the function call if the target has registered a
5245 corresponding function. */
5246
5247 static int
5248 linux_supports_z_point_type (char z_type)
5249 {
5250 return (the_low_target.supports_z_point_type != NULL
5251 && the_low_target.supports_z_point_type (z_type));
5252 }
5253
5254 static int
5255 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5256 int size, struct raw_breakpoint *bp)
5257 {
5258 if (type == raw_bkpt_type_sw)
5259 return insert_memory_breakpoint (bp);
5260 else if (the_low_target.insert_point != NULL)
5261 return the_low_target.insert_point (type, addr, size, bp);
5262 else
5263 /* Unsupported (see target.h). */
5264 return 1;
5265 }
5266
5267 static int
5268 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5269 int size, struct raw_breakpoint *bp)
5270 {
5271 if (type == raw_bkpt_type_sw)
5272 return remove_memory_breakpoint (bp);
5273 else if (the_low_target.remove_point != NULL)
5274 return the_low_target.remove_point (type, addr, size, bp);
5275 else
5276 /* Unsupported (see target.h). */
5277 return 1;
5278 }
5279
5280 /* Implement the to_stopped_by_sw_breakpoint target_ops
5281 method. */
5282
5283 static int
5284 linux_stopped_by_sw_breakpoint (void)
5285 {
5286 struct lwp_info *lwp = get_thread_lwp (current_thread);
5287
5288 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5289 }
5290
5291 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5292 method. */
5293
5294 static int
5295 linux_supports_stopped_by_sw_breakpoint (void)
5296 {
5297 return USE_SIGTRAP_SIGINFO;
5298 }
5299
5300 /* Implement the to_stopped_by_hw_breakpoint target_ops
5301 method. */
5302
5303 static int
5304 linux_stopped_by_hw_breakpoint (void)
5305 {
5306 struct lwp_info *lwp = get_thread_lwp (current_thread);
5307
5308 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5309 }
5310
5311 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5312 method. */
5313
5314 static int
5315 linux_supports_stopped_by_hw_breakpoint (void)
5316 {
5317 return USE_SIGTRAP_SIGINFO;
5318 }
5319
5320 /* Implement the supports_conditional_breakpoints target_ops
5321 method. */
5322
5323 static int
5324 linux_supports_conditional_breakpoints (void)
5325 {
5326 /* GDBserver needs to step over the breakpoint if the condition is
5327 false. GDBserver software single step is too simple, so disable
5328 conditional breakpoints if the target doesn't have hardware single
5329 step. */
5330 return can_hardware_single_step ();
5331 }
5332
5333 static int
5334 linux_stopped_by_watchpoint (void)
5335 {
5336 struct lwp_info *lwp = get_thread_lwp (current_thread);
5337
5338 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5339 }
5340
5341 static CORE_ADDR
5342 linux_stopped_data_address (void)
5343 {
5344 struct lwp_info *lwp = get_thread_lwp (current_thread);
5345
5346 return lwp->stopped_data_address;
5347 }
5348
5349 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5350 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5351 && defined(PT_TEXT_END_ADDR)
5352
5353 /* This is only used for targets that define PT_TEXT_ADDR,
5354 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5355 target presumably has other ways of acquiring this information, like
5356 loadmaps. */
5357
5358 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5359 to tell gdb about. */
5360
5361 static int
5362 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5363 {
5364 unsigned long text, text_end, data;
5365 int pid = lwpid_of (current_thread);
5366
5367 errno = 0;
5368
5369 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5370 (PTRACE_TYPE_ARG4) 0);
5371 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5372 (PTRACE_TYPE_ARG4) 0);
5373 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5374 (PTRACE_TYPE_ARG4) 0);
5375
5376 if (errno == 0)
5377 {
5378 /* Both text and data offsets produced at compile-time (and so
5379 used by gdb) are relative to the beginning of the program,
5380 with the data segment immediately following the text segment.
5381 However, the actual runtime layout in memory may put the data
5382 somewhere else, so when we send gdb a data base-address, we
5383 use the real data base address and subtract the compile-time
5384 data base-address from it (which is just the length of the
5385 text segment). BSS immediately follows data in both
5386 cases. */
5387 *text_p = text;
5388 *data_p = data - (text_end - text);
5389
5390 return 1;
5391 }
5392 return 0;
5393 }
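
/* Worked example with hypothetical values: TEXT = 0x10000000,
   TEXT_END = 0x10008000 and DATA = 0x20000000 yield
   *TEXT_P = 0x10000000 and *DATA_P = 0x20000000 - 0x8000
   = 0x1fff8000, which relocates gdb's compile-time data offsets to
   the actual runtime data segment. */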
5394 #endif
5395
5396 static int
5397 linux_qxfer_osdata (const char *annex,
5398 unsigned char *readbuf, unsigned const char *writebuf,
5399 CORE_ADDR offset, int len)
5400 {
5401 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5402 }
5403
5404 /* Convert a native/host siginfo object, into/from the siginfo in the
5405 layout of the inferiors' architecture: DIRECTION 0 converts from
     native to inferior layout, DIRECTION 1 from inferior to native. */
5406
5407 static void
5408 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5409 {
5410 int done = 0;
5411
5412 if (the_low_target.siginfo_fixup != NULL)
5413 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5414
5415 /* If there was no callback, or the callback didn't do anything,
5416 then just do a straight memcpy. */
5417 if (!done)
5418 {
5419 if (direction == 1)
5420 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5421 else
5422 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5423 }
5424 }
5425
5426 static int
5427 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5428 unsigned const char *writebuf, CORE_ADDR offset, int len)
5429 {
5430 int pid;
5431 siginfo_t siginfo;
5432 char inf_siginfo[sizeof (siginfo_t)];
5433
5434 if (current_thread == NULL)
5435 return -1;
5436
5437 pid = lwpid_of (current_thread);
5438
5439 if (debug_threads)
5440 debug_printf ("%s siginfo for lwp %d.\n",
5441 readbuf != NULL ? "Reading" : "Writing",
5442 pid);
5443
5444 if (offset >= sizeof (siginfo))
5445 return -1;
5446
5447 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5448 return -1;
5449
5450 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5451 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5452 inferior with a 64-bit GDBSERVER should look the same as debugging it
5453 with a 32-bit GDBSERVER, we need to convert it. */
5454 siginfo_fixup (&siginfo, inf_siginfo, 0);
5455
5456 if (offset + len > sizeof (siginfo))
5457 len = sizeof (siginfo) - offset;
5458
5459 if (readbuf != NULL)
5460 memcpy (readbuf, inf_siginfo + offset, len);
5461 else
5462 {
5463 memcpy (inf_siginfo + offset, writebuf, len);
5464
5465 /* Convert back to ptrace layout before flushing it out. */
5466 siginfo_fixup (&siginfo, inf_siginfo, 1);
5467
5468 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5469 return -1;
5470 }
5471
5472 return len;
5473 }
5474
5475 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5476 it lets us notice when children change state; and it serves as the
5477 handler for the sigsuspend in my_waitpid. */
5478
5479 static void
5480 sigchld_handler (int signo)
5481 {
5482 int old_errno = errno;
5483
5484 if (debug_threads)
5485 {
5486 do
5487 {
5488 /* fprintf is not async-signal-safe, so call write
5489 directly. */
5490 if (write (2, "sigchld_handler\n",
5491 sizeof ("sigchld_handler\n") - 1) < 0)
5492 break; /* just ignore */
5493 } while (0);
5494 }
5495
5496 if (target_is_async_p ())
5497 async_file_mark (); /* trigger a linux_wait */
5498
5499 errno = old_errno;
5500 }
5501
5502 static int
5503 linux_supports_non_stop (void)
5504 {
5505 return 1;
5506 }
5507
5508 static int
5509 linux_async (int enable)
5510 {
5511 int previous = target_is_async_p ();
5512
5513 if (debug_threads)
5514 debug_printf ("linux_async (%d), previous=%d\n",
5515 enable, previous);
5516
5517 if (previous != enable)
5518 {
5519 sigset_t mask;
5520 sigemptyset (&mask);
5521 sigaddset (&mask, SIGCHLD);
5522
5523 sigprocmask (SIG_BLOCK, &mask, NULL);
5524
5525 if (enable)
5526 {
5527 if (pipe (linux_event_pipe) == -1)
5528 {
5529 linux_event_pipe[0] = -1;
5530 linux_event_pipe[1] = -1;
5531 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5532
5533 warning ("creating event pipe failed.");
5534 return previous;
5535 }
5536
5537 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5538 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5539
5540 /* Register the event loop handler. */
5541 add_file_handler (linux_event_pipe[0],
5542 handle_target_event, NULL);
5543
5544 /* Always trigger a linux_wait. */
5545 async_file_mark ();
5546 }
5547 else
5548 {
5549 delete_file_handler (linux_event_pipe[0]);
5550
5551 close (linux_event_pipe[0]);
5552 close (linux_event_pipe[1]);
5553 linux_event_pipe[0] = -1;
5554 linux_event_pipe[1] = -1;
5555 }
5556
5557 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5558 }
5559
5560 return previous;
5561 }
5562
5563 static int
5564 linux_start_non_stop (int nonstop)
5565 {
5566 /* Register or unregister from event-loop accordingly. */
5567 linux_async (nonstop);
5568
5569 if (target_is_async_p () != (nonstop != 0))
5570 return -1;
5571
5572 return 0;
5573 }
5574
5575 static int
5576 linux_supports_multi_process (void)
5577 {
5578 return 1;
5579 }
5580
5581 /* Check if fork events are supported. */
5582
5583 static int
5584 linux_supports_fork_events (void)
5585 {
5586 return linux_supports_tracefork ();
5587 }
5588
5589 /* Check if vfork events are supported. */
5590
5591 static int
5592 linux_supports_vfork_events (void)
5593 {
5594 return linux_supports_tracefork ();
5595 }
5596
5597 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5598 options for the specified lwp. */
5599
5600 static int
5601 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5602 void *args)
5603 {
5604 struct thread_info *thread = (struct thread_info *) entry;
5605 struct lwp_info *lwp = get_thread_lwp (thread);
5606
5607 if (!lwp->stopped)
5608 {
5609 /* Stop the lwp so we can modify its ptrace options. */
5610 lwp->must_set_ptrace_flags = 1;
5611 linux_stop_lwp (lwp);
5612 }
5613 else
5614 {
5615 /* Already stopped; go ahead and set the ptrace options. */
5616 struct process_info *proc = find_process_pid (pid_of (thread));
5617 int options = linux_low_ptrace_options (proc->attached);
5618
5619 linux_enable_event_reporting (lwpid_of (thread), options);
5620 lwp->must_set_ptrace_flags = 0;
5621 }
5622
5623 return 0;
5624 }
5625
5626 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5627 ptrace flags for all inferiors. This is in case the new GDB connection
5628 doesn't support the same set of events that the previous one did. */
5629
5630 static void
5631 linux_handle_new_gdb_connection (void)
5632 {
5633 pid_t pid;
5634
5635 /* Request that all the lwps reset their ptrace options. */
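/* Note: the callback ignores its ARGS parameter; PID is only there
   to give find_inferior a non-NULL argument.  */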
5636 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5637 }
5638
5639 static int
5640 linux_supports_disable_randomization (void)
5641 {
5642 #ifdef HAVE_PERSONALITY
5643 return 1;
5644 #else
5645 return 0;
5646 #endif
5647 }
5648
5649 static int
5650 linux_supports_agent (void)
5651 {
5652 return 1;
5653 }
5654
5655 static int
5656 linux_supports_range_stepping (void)
5657 {
5658 if (the_low_target.supports_range_stepping == NULL)
5659 return 0;
5660
5661 return (*the_low_target.supports_range_stepping) ();
5662 }
5663
5664 /* Enumerate spufs IDs for process PID: scan /proc/PID/fd for spufs context directories and pack each descriptor as a 32-bit value into BUF, honoring the OFFSET/LEN window.  Returns the number of bytes written.  */
5665 static int
5666 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5667 {
5668 int pos = 0;
5669 int written = 0;
5670 char path[128];
5671 DIR *dir;
5672 struct dirent *entry;
5673
5674 sprintf (path, "/proc/%ld/fd", pid);
5675 dir = opendir (path);
5676 if (!dir)
5677 return -1;
5678
5679 rewinddir (dir);
5680 while ((entry = readdir (dir)) != NULL)
5681 {
5682 struct stat st;
5683 struct statfs stfs;
5684 int fd;
5685
5686 fd = atoi (entry->d_name);
5687 if (!fd)
5688 continue;
5689
5690 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5691 if (stat (path, &st) != 0)
5692 continue;
5693 if (!S_ISDIR (st.st_mode))
5694 continue;
5695
5696 if (statfs (path, &stfs) != 0)
5697 continue;
5698 if (stfs.f_type != SPUFS_MAGIC)
5699 continue;
5700
5701 if (pos >= offset && pos + 4 <= offset + len)
5702 {
5703 *(unsigned int *)(buf + pos - offset) = fd;
5704 written += 4;
5705 }
5706 pos += 4;
5707 }
5708
5709 closedir (dir);
5710 return written;
5711 }
5712
5713 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5714 object type, using the /proc file system. */
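/* An empty ANNEX requests an enumeration of all SPU context IDs;
   otherwise ANNEX names a path below /proc/PID/fd, e.g. a
   (hypothetical) "7/regs" to access the "regs" file of the SPU
   context on descriptor 7.  */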
5715 static int
5716 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5717 unsigned const char *writebuf,
5718 CORE_ADDR offset, int len)
5719 {
5720 long pid = lwpid_of (current_thread);
5721 char buf[128];
5722 int fd = 0;
5723 int ret = 0;
5724
5725 if (!writebuf && !readbuf)
5726 return -1;
5727
5728 if (!*annex)
5729 {
5730 if (!readbuf)
5731 return -1;
5732 else
5733 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5734 }
5735
5736 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5737 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5738 if (fd < 0)
5739 return -1;
5740
5741 if (offset != 0
5742 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5743 {
5744 close (fd);
5745 return 0;
5746 }
5747
5748 if (writebuf)
5749 ret = write (fd, writebuf, (size_t) len);
5750 else
5751 ret = read (fd, readbuf, (size_t) len);
5752
5753 close (fd);
5754 return ret;
5755 }
5756
5757 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5758 struct target_loadseg
5759 {
5760 /* Core address to which the segment is mapped. */
5761 Elf32_Addr addr;
5762 /* VMA recorded in the program header. */
5763 Elf32_Addr p_vaddr;
5764 /* Size of this segment in memory. */
5765 Elf32_Word p_memsz;
5766 };
5767
5768 # if defined PT_GETDSBT
5769 struct target_loadmap
5770 {
5771 /* Protocol version number, must be zero. */
5772 Elf32_Word version;
5773 /* Pointer to the DSBT table, its size, and the DSBT index. */
5774 unsigned *dsbt_table;
5775 unsigned dsbt_size, dsbt_index;
5776 /* Number of segments in this map. */
5777 Elf32_Word nsegs;
5778 /* The actual memory map. */
5779 struct target_loadseg segs[/*nsegs*/];
5780 };
5781 # define LINUX_LOADMAP PT_GETDSBT
5782 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5783 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5784 # else
5785 struct target_loadmap
5786 {
5787 /* Protocol version number, must be zero. */
5788 Elf32_Half version;
5789 /* Number of segments in this map. */
5790 Elf32_Half nsegs;
5791 /* The actual memory map. */
5792 struct target_loadseg segs[/*nsegs*/];
5793 };
5794 # define LINUX_LOADMAP PTRACE_GETFDPIC
5795 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5796 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5797 # endif
5798
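/* Fetch the load map of either the executable ("exec") or the
   interpreter ("interp") via ptrace, and copy up to LEN bytes of it,
   starting at OFFSET, into MYADDR.  Returns the number of bytes
   copied, or -1 on error.  */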
5799 static int
5800 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5801 unsigned char *myaddr, unsigned int len)
5802 {
5803 int pid = lwpid_of (current_thread);
5804 int addr = -1;
5805 struct target_loadmap *data = NULL;
5806 unsigned int actual_length, copy_length;
5807
5808 if (strcmp (annex, "exec") == 0)
5809 addr = (int) LINUX_LOADMAP_EXEC;
5810 else if (strcmp (annex, "interp") == 0)
5811 addr = (int) LINUX_LOADMAP_INTERP;
5812 else
5813 return -1;
5814
5815 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5816 return -1;
5817
5818 if (data == NULL)
5819 return -1;
5820
5821 actual_length = sizeof (struct target_loadmap)
5822 + sizeof (struct target_loadseg) * data->nsegs;
5823
5824 if (offset > actual_length)
5825 return -1;
5826
5827 copy_length = actual_length - offset < len ? actual_length - offset : len;
5828 memcpy (myaddr, (char *) data + offset, copy_length);
5829 return copy_length;
5830 }
5831 #else
5832 # define linux_read_loadmap NULL
5833 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5834
5835 static void
5836 linux_process_qsupported (const char *query)
5837 {
5838 if (the_low_target.process_qsupported != NULL)
5839 the_low_target.process_qsupported (query);
5840 }
5841
5842 static int
5843 linux_supports_tracepoints (void)
5844 {
5845 if (the_low_target.supports_tracepoints == NULL)
5846 return 0;
5847
5848 return (*the_low_target.supports_tracepoints) ();
5849 }
5850
5851 static CORE_ADDR
5852 linux_read_pc (struct regcache *regcache)
5853 {
5854 if (the_low_target.get_pc == NULL)
5855 return 0;
5856
5857 return (*the_low_target.get_pc) (regcache);
5858 }
5859
5860 static void
5861 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5862 {
5863 gdb_assert (the_low_target.set_pc != NULL);
5864
5865 (*the_low_target.set_pc) (regcache, pc);
5866 }
5867
5868 static int
5869 linux_thread_stopped (struct thread_info *thread)
5870 {
5871 return get_thread_lwp (thread)->stopped;
5872 }
5873
5874 /* This exposes stop-all-threads functionality to other modules. */
5875
5876 static void
5877 linux_pause_all (int freeze)
5878 {
5879 stop_all_lwps (freeze, NULL);
5880 }
5881
5882 /* This exposes unstop-all-threads functionality to other gdbserver
5883 modules. */
5884
5885 static void
5886 linux_unpause_all (int unfreeze)
5887 {
5888 unstop_all_lwps (unfreeze, NULL);
5889 }
5890
5891 static int
5892 linux_prepare_to_access_memory (void)
5893 {
5894 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5895 running LWP.  */
5896 if (non_stop)
5897 linux_pause_all (1);
5898 return 0;
5899 }
5900
5901 static void
5902 linux_done_accessing_memory (void)
5903 {
5904 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5905 running LWP.  */
5906 if (non_stop)
5907 linux_unpause_all (1);
5908 }
5909
5910 static int
5911 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5912 CORE_ADDR collector,
5913 CORE_ADDR lockaddr,
5914 ULONGEST orig_size,
5915 CORE_ADDR *jump_entry,
5916 CORE_ADDR *trampoline,
5917 ULONGEST *trampoline_size,
5918 unsigned char *jjump_pad_insn,
5919 ULONGEST *jjump_pad_insn_size,
5920 CORE_ADDR *adjusted_insn_addr,
5921 CORE_ADDR *adjusted_insn_addr_end,
5922 char *err)
5923 {
5924 return (*the_low_target.install_fast_tracepoint_jump_pad)
5925 (tpoint, tpaddr, collector, lockaddr, orig_size,
5926 jump_entry, trampoline, trampoline_size,
5927 jjump_pad_insn, jjump_pad_insn_size,
5928 adjusted_insn_addr, adjusted_insn_addr_end,
5929 err);
5930 }
5931
5932 static struct emit_ops *
5933 linux_emit_ops (void)
5934 {
5935 if (the_low_target.emit_ops != NULL)
5936 return (*the_low_target.emit_ops) ();
5937 else
5938 return NULL;
5939 }
5940
5941 static int
5942 linux_get_min_fast_tracepoint_insn_len (void)
5943 {
5944 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5945 }
5946
5947 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5948
5949 static int
5950 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5951 CORE_ADDR *phdr_memaddr, int *num_phdr)
5952 {
5953 char filename[PATH_MAX];
5954 int fd;
5955 const int auxv_size = is_elf64
5956 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5957 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5958
5959 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5960
5961 fd = open (filename, O_RDONLY);
5962 if (fd < 0)
5963 return 1;
5964
5965 *phdr_memaddr = 0;
5966 *num_phdr = 0;
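/* Scan the auxiliary vector one record at a time; each record is an
   (a_type, value) pair.  Stop as soon as both AT_PHDR and AT_PHNUM
   have been seen.  */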
5967 while (read (fd, buf, auxv_size) == auxv_size
5968 && (*phdr_memaddr == 0 || *num_phdr == 0))
5969 {
5970 if (is_elf64)
5971 {
5972 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5973
5974 switch (aux->a_type)
5975 {
5976 case AT_PHDR:
5977 *phdr_memaddr = aux->a_un.a_val;
5978 break;
5979 case AT_PHNUM:
5980 *num_phdr = aux->a_un.a_val;
5981 break;
5982 }
5983 }
5984 else
5985 {
5986 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5987
5988 switch (aux->a_type)
5989 {
5990 case AT_PHDR:
5991 *phdr_memaddr = aux->a_un.a_val;
5992 break;
5993 case AT_PHNUM:
5994 *num_phdr = aux->a_un.a_val;
5995 break;
5996 }
5997 }
5998 }
5999
6000 close (fd);
6001
6002 if (*phdr_memaddr == 0 || *num_phdr == 0)
6003 {
6004 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6005 "phdr_memaddr = %ld, phdr_num = %d",
6006 (long) *phdr_memaddr, *num_phdr);
6007 return 2;
6008 }
6009
6010 return 0;
6011 }
6012
6013 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
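/* The program headers are located via AT_PHDR from the auxiliary
   vector; the load bias (non-zero for PIE executables) is recovered
   by comparing AT_PHDR against the PT_PHDR entry's p_vaddr, and is
   then applied to the PT_DYNAMIC entry's p_vaddr.  */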
6014
6015 static CORE_ADDR
6016 get_dynamic (const int pid, const int is_elf64)
6017 {
6018 CORE_ADDR phdr_memaddr, relocation;
6019 int num_phdr, i;
6020 unsigned char *phdr_buf;
6021 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6022
6023 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6024 return 0;
6025
6026 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6027 phdr_buf = alloca (num_phdr * phdr_size);
6028
6029 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6030 return 0;
6031
6032 /* Compute relocation: it is expected to be 0 for "regular" executables,
6033 non-zero for PIE ones. */
6034 relocation = -1;
6035 for (i = 0; relocation == -1 && i < num_phdr; i++)
6036 if (is_elf64)
6037 {
6038 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6039
6040 if (p->p_type == PT_PHDR)
6041 relocation = phdr_memaddr - p->p_vaddr;
6042 }
6043 else
6044 {
6045 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6046
6047 if (p->p_type == PT_PHDR)
6048 relocation = phdr_memaddr - p->p_vaddr;
6049 }
6050
6051 if (relocation == -1)
6052 {
6053 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6054 all real-world executables, including PIE executables, always have
6055 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6056 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6057 provides DT_DEBUG anyway (fpc binaries are statically linked).
6058
6059 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6060
6061 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6062
6063 return 0;
6064 }
6065
6066 for (i = 0; i < num_phdr; i++)
6067 {
6068 if (is_elf64)
6069 {
6070 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6071
6072 if (p->p_type == PT_DYNAMIC)
6073 return p->p_vaddr + relocation;
6074 }
6075 else
6076 {
6077 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6078
6079 if (p->p_type == PT_DYNAMIC)
6080 return p->p_vaddr + relocation;
6081 }
6082 }
6083
6084 return 0;
6085 }
6086
6087 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6088 can be 0 if the inferior does not yet have the library list initialized.
6089 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6090 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6091
6092 static CORE_ADDR
6093 get_r_debug (const int pid, const int is_elf64)
6094 {
6095 CORE_ADDR dynamic_memaddr;
6096 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6097 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6098 CORE_ADDR map = -1;
6099
6100 dynamic_memaddr = get_dynamic (pid, is_elf64);
6101 if (dynamic_memaddr == 0)
6102 return map;
6103
6104 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6105 {
6106 if (is_elf64)
6107 {
6108 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6109 #ifdef DT_MIPS_RLD_MAP
6110 union
6111 {
6112 Elf64_Xword map;
6113 unsigned char buf[sizeof (Elf64_Xword)];
6114 }
6115 rld_map;
6116
6117 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6118 {
6119 if (linux_read_memory (dyn->d_un.d_val,
6120 rld_map.buf, sizeof (rld_map.buf)) == 0)
6121 return rld_map.map;
6122 else
6123 break;
6124 }
6125 #endif /* DT_MIPS_RLD_MAP */
6126
6127 if (dyn->d_tag == DT_DEBUG && map == -1)
6128 map = dyn->d_un.d_val;
6129
6130 if (dyn->d_tag == DT_NULL)
6131 break;
6132 }
6133 else
6134 {
6135 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6136 #ifdef DT_MIPS_RLD_MAP
6137 union
6138 {
6139 Elf32_Word map;
6140 unsigned char buf[sizeof (Elf32_Word)];
6141 }
6142 rld_map;
6143
6144 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6145 {
6146 if (linux_read_memory (dyn->d_un.d_val,
6147 rld_map.buf, sizeof (rld_map.buf)) == 0)
6148 return rld_map.map;
6149 else
6150 break;
6151 }
6152 #endif /* DT_MIPS_RLD_MAP */
6153
6154 if (dyn->d_tag == DT_DEBUG && map == -1)
6155 map = dyn->d_un.d_val;
6156
6157 if (dyn->d_tag == DT_NULL)
6158 break;
6159 }
6160
6161 dynamic_memaddr += dyn_size;
6162 }
6163
6164 return map;
6165 }
6166
6167 /* Read one pointer from MEMADDR in the inferior. */
6168
6169 static int
6170 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6171 {
6172 int ret;
6173
6174 /* Go through a union so this works on either big or little endian
6175 hosts, when the inferior's pointer size is smaller than the size
6176 of CORE_ADDR.  It is assumed the inferior's endianness is the
6177 same as the superior's.  */
6178 union
6179 {
6180 CORE_ADDR core_addr;
6181 unsigned int ui;
6182 unsigned char uc;
6183 } addr;
6184
6185 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6186 if (ret == 0)
6187 {
6188 if (ptr_size == sizeof (CORE_ADDR))
6189 *ptr = addr.core_addr;
6190 else if (ptr_size == sizeof (unsigned int))
6191 *ptr = addr.ui;
6192 else
6193 gdb_assert_not_reached ("unhandled pointer size");
6194 }
6195 return ret;
6196 }
6197
6198 struct link_map_offsets
6199 {
6200 /* Offset and size of r_debug.r_version. */
6201 int r_version_offset;
6202
6203 /* Offset and size of r_debug.r_map. */
6204 int r_map_offset;
6205
6206 /* Offset to l_addr field in struct link_map. */
6207 int l_addr_offset;
6208
6209 /* Offset to l_name field in struct link_map. */
6210 int l_name_offset;
6211
6212 /* Offset to l_ld field in struct link_map. */
6213 int l_ld_offset;
6214
6215 /* Offset to l_next field in struct link_map. */
6216 int l_next_offset;
6217
6218 /* Offset to l_prev field in struct link_map. */
6219 int l_prev_offset;
6220 };
6221
6222 /* Construct qXfer:libraries-svr4:read reply. */
6223
6224 static int
6225 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6226 unsigned const char *writebuf,
6227 CORE_ADDR offset, int len)
6228 {
6229 char *document;
6230 unsigned document_len;
6231 struct process_info_private *const priv = current_process ()->priv;
6232 char filename[PATH_MAX];
6233 int pid, is_elf64;
6234
6235 static const struct link_map_offsets lmo_32bit_offsets =
6236 {
6237 0, /* r_version offset. */
6238 4, /* r_debug.r_map offset. */
6239 0, /* l_addr offset in link_map. */
6240 4, /* l_name offset in link_map. */
6241 8, /* l_ld offset in link_map. */
6242 12, /* l_next offset in link_map. */
6243 16 /* l_prev offset in link_map. */
6244 };
6245
6246 static const struct link_map_offsets lmo_64bit_offsets =
6247 {
6248 0, /* r_version offset. */
6249 8, /* r_debug.r_map offset. */
6250 0, /* l_addr offset in link_map. */
6251 8, /* l_name offset in link_map. */
6252 16, /* l_ld offset in link_map. */
6253 24, /* l_next offset in link_map. */
6254 32 /* l_prev offset in link_map. */
6255 };
6256 const struct link_map_offsets *lmo;
6257 unsigned int machine;
6258 int ptr_size;
6259 CORE_ADDR lm_addr = 0, lm_prev = 0;
6260 int allocated = 1024;
6261 char *p;
6262 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6263 int header_done = 0;
6264
6265 if (writebuf != NULL)
6266 return -2;
6267 if (readbuf == NULL)
6268 return -1;
6269
6270 pid = lwpid_of (current_thread);
6271 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6272 is_elf64 = elf_64_file_p (filename, &machine);
6273 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6274 ptr_size = is_elf64 ? 8 : 4;
6275
6276 while (annex[0] != '\0')
6277 {
6278 const char *sep;
6279 CORE_ADDR *addrp;
6280 int len;
6281
6282 sep = strchr (annex, '=');
6283 if (sep == NULL)
6284 break;
6285
6286 len = sep - annex;
6287 if (len == 5 && startswith (annex, "start"))
6288 addrp = &lm_addr;
6289 else if (len == 4 && startswith (annex, "prev"))
6290 addrp = &lm_prev;
6291 else
6292 {
6293 annex = strchr (sep, ';');
6294 if (annex == NULL)
6295 break;
6296 annex++;
6297 continue;
6298 }
6299
6300 annex = decode_address_to_semicolon (addrp, sep + 1);
6301 }
6302
6303 if (lm_addr == 0)
6304 {
6305 int r_version = 0;
6306
6307 if (priv->r_debug == 0)
6308 priv->r_debug = get_r_debug (pid, is_elf64);
6309
6310 /* We failed to find DT_DEBUG.  This situation will not change
6311 for this inferior, so do not retry.  Report it to GDB as E01;
6312 see GDB's solib-svr4.c for the reasons.  */
6313 if (priv->r_debug == (CORE_ADDR) -1)
6314 return -1;
6315
6316 if (priv->r_debug != 0)
6317 {
6318 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6319 (unsigned char *) &r_version,
6320 sizeof (r_version)) != 0
6321 || r_version != 1)
6322 {
6323 warning ("unexpected r_debug version %d", r_version);
6324 }
6325 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6326 &lm_addr, ptr_size) != 0)
6327 {
6328 warning ("unable to read r_map from 0x%lx",
6329 (long) priv->r_debug + lmo->r_map_offset);
6330 }
6331 }
6332 }
6333
6334 document = xmalloc (allocated);
6335 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6336 p = document + strlen (document);
6337
6338 while (lm_addr
6339 && read_one_ptr (lm_addr + lmo->l_name_offset,
6340 &l_name, ptr_size) == 0
6341 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6342 &l_addr, ptr_size) == 0
6343 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6344 &l_ld, ptr_size) == 0
6345 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6346 &l_prev, ptr_size) == 0
6347 && read_one_ptr (lm_addr + lmo->l_next_offset,
6348 &l_next, ptr_size) == 0)
6349 {
6350 unsigned char libname[PATH_MAX];
6351
6352 if (lm_prev != l_prev)
6353 {
6354 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6355 (long) lm_prev, (long) l_prev);
6356 break;
6357 }
6358
6359 /* Ignore the first entry even if it has a valid name, as it
6360 corresponds to the main executable.  The first entry should not
6361 be skipped if the dynamic loader was loaded late by a static
6362 executable (see the solib-svr4.c parameter ignore_first), but in
6363 that case the main executable has no PT_DYNAMIC present and this
6364 function has already returned above due to a failed get_r_debug.  */
6365 if (lm_prev == 0)
6366 {
6367 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6368 p = p + strlen (p);
6369 }
6370 else
6371 {
6372 /* Not checking for error because reading may stop before
6373 we've got PATH_MAX worth of characters. */
6374 libname[0] = '\0';
6375 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6376 libname[sizeof (libname) - 1] = '\0';
6377 if (libname[0] != '\0')
6378 {
6379 /* 6x the size for xml_escape_text below. */
6380 size_t len = 6 * strlen ((char *) libname);
6381 char *name;
6382
6383 if (!header_done)
6384 {
6385 /* Terminate `<library-list-svr4'. */
6386 *p++ = '>';
6387 header_done = 1;
6388 }
6389
6390 while (allocated < p - document + len + 200)
6391 {
6392 /* Expand to guarantee sufficient storage. */
6393 uintptr_t document_len = p - document;
6394
6395 document = xrealloc (document, 2 * allocated);
6396 allocated *= 2;
6397 p = document + document_len;
6398 }
6399
6400 name = xml_escape_text ((char *) libname);
6401 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6402 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6403 name, (unsigned long) lm_addr,
6404 (unsigned long) l_addr, (unsigned long) l_ld);
6405 free (name);
6406 }
6407 }
6408
6409 lm_prev = lm_addr;
6410 lm_addr = l_next;
6411 }
6412
6413 if (!header_done)
6414 {
6415 /* Empty list; terminate `<library-list-svr4'. */
6416 strcpy (p, "/>");
6417 }
6418 else
6419 strcpy (p, "</library-list-svr4>");
6420
6421 document_len = strlen (document);
6422 if (offset < document_len)
6423 document_len -= offset;
6424 else
6425 document_len = 0;
6426 if (len > document_len)
6427 len = document_len;
6428
6429 memcpy (readbuf, document + offset, len);
6430 xfree (document);
6431
6432 return len;
6433 }
6434
6435 #ifdef HAVE_LINUX_BTRACE
6436
6437 /* See to_enable_btrace target method. */
6438
6439 static struct btrace_target_info *
6440 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6441 {
6442 struct btrace_target_info *tinfo;
6443
6444 tinfo = linux_enable_btrace (ptid, conf);
6445
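/* If the pointer size was not filled in, derive it from the size
   of register 0 in the thread's target description.  */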
6446 if (tinfo != NULL && tinfo->ptr_bits == 0)
6447 {
6448 struct thread_info *thread = find_thread_ptid (ptid);
6449 struct regcache *regcache = get_thread_regcache (thread, 0);
6450
6451 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6452 }
6453
6454 return tinfo;
6455 }
6456
6457 /* See to_disable_btrace target method. */
6458
6459 static int
6460 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6461 {
6462 enum btrace_error err;
6463
6464 err = linux_disable_btrace (tinfo);
6465 return (err == BTRACE_ERR_NONE ? 0 : -1);
6466 }
6467
6468 /* Encode an Intel(R) Processor Trace configuration. */
6469
6470 static void
6471 linux_low_encode_pt_config (struct buffer *buffer,
6472 const struct btrace_data_pt_config *config)
6473 {
6474 buffer_grow_str (buffer, "<pt-config>\n");
6475
6476 switch (config->cpu.vendor)
6477 {
6478 case CV_INTEL:
6479 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6480 "model=\"%u\" stepping=\"%u\"/>\n",
6481 config->cpu.family, config->cpu.model,
6482 config->cpu.stepping);
6483 break;
6484
6485 default:
6486 break;
6487 }
6488
6489 buffer_grow_str (buffer, "</pt-config>\n");
6490 }
6491
6492 /* Encode a raw buffer. */
6493
6494 static void
6495 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6496 unsigned int size)
6497 {
6498 if (size == 0)
6499 return;
6500
6501 /* We use hex encoding - see common/rsp-low.h. */
6502 buffer_grow_str (buffer, "<raw>\n");
6503
6504 while (size-- > 0)
6505 {
6506 char elem[2];
6507
6508 elem[0] = tohex ((*data >> 4) & 0xf);
6509 elem[1] = tohex (*data++ & 0xf);
6510
6511 buffer_grow (buffer, elem, 2);
6512 }
6513
6514 buffer_grow_str (buffer, "</raw>\n");
6515 }
6516
6517 /* See to_read_btrace target method. */
6518
6519 static int
6520 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6521 int type)
6522 {
6523 struct btrace_data btrace;
6524 struct btrace_block *block;
6525 enum btrace_error err;
6526 int i;
6527
6528 btrace_data_init (&btrace);
6529
6530 err = linux_read_btrace (&btrace, tinfo, type);
6531 if (err != BTRACE_ERR_NONE)
6532 {
6533 if (err == BTRACE_ERR_OVERFLOW)
6534 buffer_grow_str0 (buffer, "E.Overflow.");
6535 else
6536 buffer_grow_str0 (buffer, "E.Generic Error.");
6537
6538 goto err;
6539 }
6540
6541 switch (btrace.format)
6542 {
6543 case BTRACE_FORMAT_NONE:
6544 buffer_grow_str0 (buffer, "E.No Trace.");
6545 goto err;
6546
6547 case BTRACE_FORMAT_BTS:
6548 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6549 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6550
6551 for (i = 0;
6552 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6553 i++)
6554 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6555 paddress (block->begin), paddress (block->end));
6556
6557 buffer_grow_str0 (buffer, "</btrace>\n");
6558 break;
6559
6560 case BTRACE_FORMAT_PT:
6561 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6562 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6563 buffer_grow_str (buffer, "<pt>\n");
6564
6565 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6566
6567 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6568 btrace.variant.pt.size);
6569
6570 buffer_grow_str (buffer, "</pt>\n");
6571 buffer_grow_str0 (buffer, "</btrace>\n");
6572 break;
6573
6574 default:
6575 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6576 goto err;
6577 }
6578
6579 btrace_data_fini (&btrace);
6580 return 0;
6581
6582 err:
6583 btrace_data_fini (&btrace);
6584 return -1;
6585 }
6586
6587 /* See to_btrace_conf target method. */
6588
6589 static int
6590 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6591 struct buffer *buffer)
6592 {
6593 const struct btrace_config *conf;
6594
6595 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6596 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6597
6598 conf = linux_btrace_conf (tinfo);
6599 if (conf != NULL)
6600 {
6601 switch (conf->format)
6602 {
6603 case BTRACE_FORMAT_NONE:
6604 break;
6605
6606 case BTRACE_FORMAT_BTS:
6607 buffer_xml_printf (buffer, "<bts");
6608 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6609 buffer_xml_printf (buffer, " />\n");
6610 break;
6611
6612 case BTRACE_FORMAT_PT:
6613 buffer_xml_printf (buffer, "<pt");
6614 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6615 buffer_xml_printf (buffer, "/>\n");
6616 break;
6617 }
6618 }
6619
6620 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6621 return 0;
6622 }
6623 #endif /* HAVE_LINUX_BTRACE */
6624
6625 /* See nat/linux-nat.h. */
6626
6627 ptid_t
6628 current_lwp_ptid (void)
6629 {
6630 return ptid_of (current_thread);
6631 }
6632
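/* The GNU/Linux target vector.  The entries below are positional
   initializers and must be kept in the same order as the function
   pointers declared in struct target_ops (see target.h).  */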
6633 static struct target_ops linux_target_ops = {
6634 linux_create_inferior,
6635 linux_attach,
6636 linux_kill,
6637 linux_detach,
6638 linux_mourn,
6639 linux_join,
6640 linux_thread_alive,
6641 linux_resume,
6642 linux_wait,
6643 linux_fetch_registers,
6644 linux_store_registers,
6645 linux_prepare_to_access_memory,
6646 linux_done_accessing_memory,
6647 linux_read_memory,
6648 linux_write_memory,
6649 linux_look_up_symbols,
6650 linux_request_interrupt,
6651 linux_read_auxv,
6652 linux_supports_z_point_type,
6653 linux_insert_point,
6654 linux_remove_point,
6655 linux_stopped_by_sw_breakpoint,
6656 linux_supports_stopped_by_sw_breakpoint,
6657 linux_stopped_by_hw_breakpoint,
6658 linux_supports_stopped_by_hw_breakpoint,
6659 linux_supports_conditional_breakpoints,
6660 linux_stopped_by_watchpoint,
6661 linux_stopped_data_address,
6662 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6663 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6664 && defined(PT_TEXT_END_ADDR)
6665 linux_read_offsets,
6666 #else
6667 NULL,
6668 #endif
6669 #ifdef USE_THREAD_DB
6670 thread_db_get_tls_address,
6671 #else
6672 NULL,
6673 #endif
6674 linux_qxfer_spu,
6675 hostio_last_error_from_errno,
6676 linux_qxfer_osdata,
6677 linux_xfer_siginfo,
6678 linux_supports_non_stop,
6679 linux_async,
6680 linux_start_non_stop,
6681 linux_supports_multi_process,
6682 linux_supports_fork_events,
6683 linux_supports_vfork_events,
6684 linux_handle_new_gdb_connection,
6685 #ifdef USE_THREAD_DB
6686 thread_db_handle_monitor_command,
6687 #else
6688 NULL,
6689 #endif
6690 linux_common_core_of_thread,
6691 linux_read_loadmap,
6692 linux_process_qsupported,
6693 linux_supports_tracepoints,
6694 linux_read_pc,
6695 linux_write_pc,
6696 linux_thread_stopped,
6697 NULL,
6698 linux_pause_all,
6699 linux_unpause_all,
6700 linux_stabilize_threads,
6701 linux_install_fast_tracepoint_jump_pad,
6702 linux_emit_ops,
6703 linux_supports_disable_randomization,
6704 linux_get_min_fast_tracepoint_insn_len,
6705 linux_qxfer_libraries_svr4,
6706 linux_supports_agent,
6707 #ifdef HAVE_LINUX_BTRACE
6708 linux_supports_btrace,
6709 linux_low_enable_btrace,
6710 linux_low_disable_btrace,
6711 linux_low_read_btrace,
6712 linux_low_btrace_conf,
6713 #else
6714 NULL,
6715 NULL,
6716 NULL,
6717 NULL,
6718 NULL,
6719 #endif
6720 linux_supports_range_stepping,
6721 linux_proc_pid_to_exec_file,
6722 linux_mntns_open_cloexec,
6723 linux_mntns_unlink,
6724 linux_mntns_readlink,
6725 };
6726
6727 static void
6728 linux_init_signals (void)
6729 {
6730 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6731 to find what the cancel signal actually is. */
6732 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6733 signal (__SIGRTMIN+1, SIG_IGN);
6734 #endif
6735 }
6736
6737 #ifdef HAVE_LINUX_REGSETS
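/* Count the regsets in INFO; the regsets array is terminated by an
   entry with a negative size.  */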
6738 void
6739 initialize_regsets_info (struct regsets_info *info)
6740 {
6741 for (info->num_regsets = 0;
6742 info->regsets[info->num_regsets].size >= 0;
6743 info->num_regsets++)
6744 ;
6745 }
6746 #endif
6747
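/* One-time initialization of the Linux low target: install the
   target vector and breakpoint data, set up signal handling
   (including the SIGCHLD handler used for async mode), and probe
   the kernel's ptrace features.  */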
6748 void
6749 initialize_low (void)
6750 {
6751 struct sigaction sigchld_action;
6752 memset (&sigchld_action, 0, sizeof (sigchld_action));
6753 set_target_ops (&linux_target_ops);
6754 set_breakpoint_data (the_low_target.breakpoint,
6755 the_low_target.breakpoint_len);
6756 linux_init_signals ();
6757 linux_ptrace_init_warnings ();
6758
6759 sigchld_action.sa_handler = sigchld_handler;
6760 sigemptyset (&sigchld_action.sa_mask);
6761 sigchld_action.sa_flags = SA_RESTART;
6762 sigaction (SIGCHLD, &sigchld_action, NULL);
6763
6764 initialize_low_arch ();
6765
6766 linux_check_ptrace_features ();
6767 }