1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 /* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76 #if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79 #if defined(__mcoldfire__)
80 /* These are still undefined in 3.10 kernels. */
81 #define PT_TEXT_ADDR 49*4
82 #define PT_DATA_ADDR 50*4
83 #define PT_TEXT_END_ADDR 51*4
84 /* BFIN has defined these since at least the 2.6.32 kernels. */
85 #elif defined(BFIN)
86 #define PT_TEXT_ADDR 220
87 #define PT_TEXT_END_ADDR 224
88 #define PT_DATA_ADDR 228
89 /* These are still undefined in 3.10 kernels. */
90 #elif defined(__TMS320C6X__)
91 #define PT_TEXT_ADDR (0x10000*4)
92 #define PT_DATA_ADDR (0x10004*4)
93 #define PT_TEXT_END_ADDR (0x10008*4)
94 #endif
95 #endif
96
97 #ifdef HAVE_LINUX_BTRACE
98 # include "nat/linux-btrace.h"
99 # include "btrace-common.h"
100 #endif
101
102 #ifndef HAVE_ELF32_AUXV_T
103 /* Copied from glibc's elf.h. */
104 typedef struct
105 {
106 uint32_t a_type; /* Entry type */
107 union
108 {
109 uint32_t a_val; /* Integer value */
110 /* We used to have pointer elements added here. We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114 } Elf32_auxv_t;
115 #endif
116
117 #ifndef HAVE_ELF64_AUXV_T
118 /* Copied from glibc's elf.h. */
119 typedef struct
120 {
121 uint64_t a_type; /* Entry type */
122 union
123 {
124 uint64_t a_val; /* Integer value */
125 /* We used to have pointer elements added here. We cannot do that,
126 though, since it does not work when using 32-bit definitions
127 on 64-bit platforms and vice versa. */
128 } a_un;
129 } Elf64_auxv_t;
130 #endif
131
132 /* Does the current host support PTRACE_GETREGSET? */
133 int have_ptrace_getregset = -1;
134
135 /* LWP accessors. */
136
137 /* See nat/linux-nat.h. */
138
139 ptid_t
140 ptid_of_lwp (struct lwp_info *lwp)
141 {
142 return ptid_of (get_lwp_thread (lwp));
143 }
144
145 /* See nat/linux-nat.h. */
146
147 void
148 lwp_set_arch_private_info (struct lwp_info *lwp,
149 struct arch_lwp_info *info)
150 {
151 lwp->arch_private = info;
152 }
153
154 /* See nat/linux-nat.h. */
155
156 struct arch_lwp_info *
157 lwp_arch_private_info (struct lwp_info *lwp)
158 {
159 return lwp->arch_private;
160 }
161
162 /* See nat/linux-nat.h. */
163
164 int
165 lwp_is_stopped (struct lwp_info *lwp)
166 {
167 return lwp->stopped;
168 }
169
170 /* See nat/linux-nat.h. */
171
172 enum target_stop_reason
173 lwp_stop_reason (struct lwp_info *lwp)
174 {
175 return lwp->stop_reason;
176 }
177
178 /* A list of all unknown processes which receive stop signals. Some
179 other process will presumably claim each of these as forked
180 children momentarily. */
181
182 struct simple_pid_list
183 {
184 /* The process ID. */
185 int pid;
186
187 /* The status as reported by waitpid. */
188 int status;
189
190 /* Next in chain. */
191 struct simple_pid_list *next;
192 };
193 struct simple_pid_list *stopped_pids;
194
195 /* Trivial list manipulation functions to keep track of a list of new
196 stopped processes. */
197
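/* Add PID, with wait status STATUS, to the front of *LISTP. */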
198 static void
199 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
200 {
201 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
202
203 new_pid->pid = pid;
204 new_pid->status = status;
205 new_pid->next = *listp;
206 *listp = new_pid;
207 }
208
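/* If PID is present on *LISTP, remove it, store its wait status in
*STATUSP, and return 1; otherwise return 0. */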
209 static int
210 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
211 {
212 struct simple_pid_list **p;
213
214 for (p = listp; *p != NULL; p = &(*p)->next)
215 if ((*p)->pid == pid)
216 {
217 struct simple_pid_list *next = (*p)->next;
218
219 *statusp = (*p)->status;
220 xfree (*p);
221 *p = next;
222 return 1;
223 }
224 return 0;
225 }
226
227 enum stopping_threads_kind
228 {
229 /* Not stopping threads presently. */
230 NOT_STOPPING_THREADS,
231
232 /* Stopping threads. */
233 STOPPING_THREADS,
234
235 /* Stopping and suspending threads. */
236 STOPPING_AND_SUSPENDING_THREADS
237 };
238
239 /* This is set while stop_all_lwps is in effect. */
240 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
241
242 /* FIXME make into a target method? */
243 int using_threads = 1;
244
245 /* True if we're presently stabilizing threads (moving them out of
246 jump pads). */
247 static int stabilizing_threads;
248
249 static void linux_resume_one_lwp (struct lwp_info *lwp,
250 int step, int signal, siginfo_t *info);
251 static void linux_resume (struct thread_resume *resume_info, size_t n);
252 static void stop_all_lwps (int suspend, struct lwp_info *except);
253 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
254 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
255 int *wstat, int options);
256 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
257 static struct lwp_info *add_lwp (ptid_t ptid);
258 static void linux_mourn (struct process_info *process);
259 static int linux_stopped_by_watchpoint (void);
260 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
261 static int lwp_is_marked_dead (struct lwp_info *lwp);
262 static void proceed_all_lwps (void);
263 static int finish_step_over (struct lwp_info *lwp);
264 static int kill_lwp (unsigned long lwpid, int signo);
265 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
266 static void complete_ongoing_step_over (void);
267
268 /* When the event-loop is doing a step-over, this points at the thread
269 being stepped. */
270 ptid_t step_over_bkpt;
271
272 /* True if the low target can hardware single-step. Such targets
273 don't need a BREAKPOINT_REINSERT_ADDR callback. */
274
275 static int
276 can_hardware_single_step (void)
277 {
278 return (the_low_target.breakpoint_reinsert_addr == NULL);
279 }
280
281 /* True if the low target supports memory breakpoints. If so, we'll
282 have a GET_PC implementation. */
283
284 static int
285 supports_breakpoints (void)
286 {
287 return (the_low_target.get_pc != NULL);
288 }
289
290 /* Returns true if this target can support fast tracepoints. This
291 does not mean that the in-process agent has been loaded in the
292 inferior. */
293
294 static int
295 supports_fast_tracepoints (void)
296 {
297 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
298 }
299
300 /* True if LWP is stopped in its stepping range. */
301
302 static int
303 lwp_in_step_range (struct lwp_info *lwp)
304 {
305 CORE_ADDR pc = lwp->stop_pc;
306
307 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
308 }
309
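/* A signal intercepted from an LWP while it was being stopped, kept
on a per-LWP queue so it can be re-injected when the LWP is
resumed. */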
310 struct pending_signals
311 {
312 int signal;
313 siginfo_t info;
314 struct pending_signals *prev;
315 };
316
317 /* The read/write ends of the pipe registered as a waitable file in the
318 event loop. */
319 static int linux_event_pipe[2] = { -1, -1 };
320
321 /* True if we're currently in async mode. */
322 #define target_is_async_p() (linux_event_pipe[0] != -1)
323
324 static void send_sigstop (struct lwp_info *lwp);
325 static void wait_for_sigstop (void);
326
327 /* Return 1 if HEADER is a 64-bit ELF header, 0 if it is 32-bit, and -1 if it is not ELF at all; store the machine in *MACHINE. */
328
329 static int
330 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
331 {
332 if (header->e_ident[EI_MAG0] == ELFMAG0
333 && header->e_ident[EI_MAG1] == ELFMAG1
334 && header->e_ident[EI_MAG2] == ELFMAG2
335 && header->e_ident[EI_MAG3] == ELFMAG3)
336 {
337 *machine = header->e_machine;
338 return header->e_ident[EI_CLASS] == ELFCLASS64;
339
340 }
341 *machine = EM_NONE;
342 return -1;
343 }
344
345 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
346 file (or its header couldn't be read), and -1 if the file doesn't
347 exist, isn't accessible, or isn't an ELF file at all. */
348
349 static int
350 elf_64_file_p (const char *file, unsigned int *machine)
351 {
352 Elf64_Ehdr header;
353 int fd;
354
355 fd = open (file, O_RDONLY);
356 if (fd < 0)
357 return -1;
358
359 if (read (fd, &header, sizeof (header)) != sizeof (header))
360 {
361 close (fd);
362 return 0;
363 }
364 close (fd);
365
366 return elf_64_header_p (&header, machine);
367 }
368
369 /* Accepts an integer PID; returns true if the executable that PID
370 is running is a 64-bit ELF file. */
371
372 int
373 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
374 {
375 char file[PATH_MAX];
376
377 sprintf (file, "/proc/%d/exe", pid);
378 return elf_64_file_p (file, machine);
379 }
380
381 static void
382 delete_lwp (struct lwp_info *lwp)
383 {
384 struct thread_info *thr = get_lwp_thread (lwp);
385
386 if (debug_threads)
387 debug_printf ("deleting %ld\n", lwpid_of (thr));
388
389 remove_thread (thr);
390 free (lwp->arch_private);
391 free (lwp);
392 }
393
394 /* Add a process to the common process list, and set its private
395 data. */
396
397 static struct process_info *
398 linux_add_process (int pid, int attached)
399 {
400 struct process_info *proc;
401
402 proc = add_process (pid, attached);
403 proc->priv = XCNEW (struct process_info_private);
404
405 if (the_low_target.new_process != NULL)
406 proc->priv->arch_private = the_low_target.new_process ();
407
408 return proc;
409 }
410
411 static CORE_ADDR get_pc (struct lwp_info *lwp);
412
413 /* Implement the arch_setup target_ops method. */
414
415 static void
416 linux_arch_setup (void)
417 {
418 the_low_target.arch_setup ();
419 }
420
421 /* Call the target arch_setup function on THREAD. */
422
423 static void
424 linux_arch_setup_thread (struct thread_info *thread)
425 {
426 struct thread_info *saved_thread;
427
428 saved_thread = current_thread;
429 current_thread = thread;
430
431 linux_arch_setup ();
432
433 current_thread = saved_thread;
434 }
435
436 /* Handle a GNU/Linux extended wait response. If we see a clone,
437 fork, or vfork event, we need to add the new LWP to our list.
438 Return 0 if the event should be reported to higher layers, 1 if
439 it should be suppressed. If we see an exec event, we modify
440 ORIG_EVENT_LWP to point to a new LWP representing the new program. */
441
442 static int
443 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
444 {
445 struct lwp_info *event_lwp = *orig_event_lwp;
446 int event = linux_ptrace_get_extended_event (wstat);
447 struct thread_info *event_thr = get_lwp_thread (event_lwp);
448 struct lwp_info *new_lwp;
449
450 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
451
452 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
453 || (event == PTRACE_EVENT_CLONE))
454 {
455 ptid_t ptid;
456 unsigned long new_pid;
457 int ret, status;
458
459 /* Get the pid of the new lwp. */
460 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
461 &new_pid);
462
463 /* If we haven't already seen the new PID stop, wait for it now. */
464 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
465 {
466 /* The new child has a pending SIGSTOP. We can't affect it until it
467 hits the SIGSTOP, but we're already attached. */
468
469 ret = my_waitpid (new_pid, &status, __WALL);
470
471 if (ret == -1)
472 perror_with_name ("waiting for new child");
473 else if (ret != new_pid)
474 warning ("wait returned unexpected PID %d", ret);
475 else if (!WIFSTOPPED (status))
476 warning ("wait returned unexpected status 0x%x", status);
477 }
478
479 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
480 {
481 struct process_info *parent_proc;
482 struct process_info *child_proc;
483 struct lwp_info *child_lwp;
484 struct thread_info *child_thr;
485 struct target_desc *tdesc;
486
487 ptid = ptid_build (new_pid, new_pid, 0);
488
489 if (debug_threads)
490 {
491 debug_printf ("HEW: Got fork event from LWP %ld, "
492 "new child is %d\n",
493 ptid_get_lwp (ptid_of (event_thr)),
494 ptid_get_pid (ptid));
495 }
496
497 /* Add the new process to the tables and clone the breakpoint
498 lists of the parent. We need to do this even if the new process
499 will be detached, since we will need the process object and the
500 breakpoints to remove any breakpoints from memory when we
501 detach, and the client side will access registers. */
502 child_proc = linux_add_process (new_pid, 0);
503 gdb_assert (child_proc != NULL);
504 child_lwp = add_lwp (ptid);
505 gdb_assert (child_lwp != NULL);
506 child_lwp->stopped = 1;
507 child_lwp->must_set_ptrace_flags = 1;
508 child_lwp->status_pending_p = 0;
509 child_thr = get_lwp_thread (child_lwp);
510 child_thr->last_resume_kind = resume_stop;
511 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
512
513 /* If we're suspending all threads, leave this one suspended
514 too. */
515 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
516 {
517 if (debug_threads)
518 debug_printf ("HEW: leaving child suspended\n");
519 child_lwp->suspended = 1;
520 }
521
522 parent_proc = get_thread_process (event_thr);
523 child_proc->attached = parent_proc->attached;
524 clone_all_breakpoints (&child_proc->breakpoints,
525 &child_proc->raw_breakpoints,
526 parent_proc->breakpoints);
527
528 tdesc = XNEW (struct target_desc);
529 copy_target_description (tdesc, parent_proc->tdesc);
530 child_proc->tdesc = tdesc;
531
532 /* Clone arch-specific process data. */
533 if (the_low_target.new_fork != NULL)
534 the_low_target.new_fork (parent_proc, child_proc);
535
536 /* Save fork info in the parent thread. */
537 if (event == PTRACE_EVENT_FORK)
538 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
539 else if (event == PTRACE_EVENT_VFORK)
540 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
541
542 event_lwp->waitstatus.value.related_pid = ptid;
543
544 /* The status_pending field contains bits denoting the
545 extended event, so when the pending event is handled,
546 the handler will look at lwp->waitstatus. */
547 event_lwp->status_pending_p = 1;
548 event_lwp->status_pending = wstat;
549
550 /* Report the event. */
551 return 0;
552 }
553
554 if (debug_threads)
555 debug_printf ("HEW: Got clone event "
556 "from LWP %ld, new child is LWP %ld\n",
557 lwpid_of (event_thr), new_pid);
558
559 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
560 new_lwp = add_lwp (ptid);
561
562 /* Either we're going to immediately resume the new thread
563 or leave it stopped. linux_resume_one_lwp is a nop if it
564 thinks the thread is currently running, so set this first
565 before calling linux_resume_one_lwp. */
566 new_lwp->stopped = 1;
567
568 /* If we're suspending all threads, leave this one suspended
569 too. */
570 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
571 new_lwp->suspended = 1;
572
573 /* Normally we will get the pending SIGSTOP. But in some cases
574 we might get another signal delivered to the group first.
575 If we do get another signal, be sure not to lose it. */
576 if (WSTOPSIG (status) != SIGSTOP)
577 {
578 new_lwp->stop_expected = 1;
579 new_lwp->status_pending_p = 1;
580 new_lwp->status_pending = status;
581 }
582 else if (report_thread_events)
583 {
584 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
585 new_lwp->status_pending_p = 1;
586 new_lwp->status_pending = status;
587 }
588
589 /* Don't report the event. */
590 return 1;
591 }
592 else if (event == PTRACE_EVENT_VFORK_DONE)
593 {
594 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
595
596 /* Report the event. */
597 return 0;
598 }
599 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
600 {
601 struct process_info *proc;
602 ptid_t event_ptid;
603 pid_t event_pid;
604
605 if (debug_threads)
606 {
607 debug_printf ("HEW: Got exec event from LWP %ld\n",
608 lwpid_of (event_thr));
609 }
610
611 /* Get the event ptid. */
612 event_ptid = ptid_of (event_thr);
613 event_pid = ptid_get_pid (event_ptid);
614
615 /* Delete the execing process and all its threads. */
616 proc = get_thread_process (event_thr);
617 linux_mourn (proc);
618 current_thread = NULL;
619
620 /* Create a new process/lwp/thread. */
621 proc = linux_add_process (event_pid, 0);
622 event_lwp = add_lwp (event_ptid);
623 event_thr = get_lwp_thread (event_lwp);
624 gdb_assert (current_thread == event_thr);
625 linux_arch_setup_thread (event_thr);
626
627 /* Set the event status. */
628 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
629 event_lwp->waitstatus.value.execd_pathname
630 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
631
632 /* Mark the exec status as pending. */
633 event_lwp->stopped = 1;
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
636 event_thr->last_resume_kind = resume_continue;
637 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
638
639 /* Report the event. */
640 *orig_event_lwp = event_lwp;
641 return 0;
642 }
643
644 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
645 }
646
647 /* Return the PC as read from the regcache of LWP, without any
648 adjustment. */
649
650 static CORE_ADDR
651 get_pc (struct lwp_info *lwp)
652 {
653 struct thread_info *saved_thread;
654 struct regcache *regcache;
655 CORE_ADDR pc;
656
657 if (the_low_target.get_pc == NULL)
658 return 0;
659
660 saved_thread = current_thread;
661 current_thread = get_lwp_thread (lwp);
662
663 regcache = get_thread_regcache (current_thread, 1);
664 pc = (*the_low_target.get_pc) (regcache);
665
666 if (debug_threads)
667 debug_printf ("pc is 0x%lx\n", (long) pc);
668
669 current_thread = saved_thread;
670 return pc;
671 }
672
673 /* This function should only be called if LWP got a SIGTRAP.
674 The SIGTRAP could mean several things.
675
676 On i386, where decr_pc_after_break is non-zero:
677
678 If we were single-stepping this process using PTRACE_SINGLESTEP, we
679 will get only the one SIGTRAP. The value of $eip will be the next
680 instruction. If the instruction we stepped over was a breakpoint,
681 we need to decrement the PC.
682
683 If we continue the process using PTRACE_CONT, we will get a
684 SIGTRAP when we hit a breakpoint. The value of $eip will be
685 the instruction after the breakpoint (i.e. needs to be
686 decremented). If we report the SIGTRAP to GDB, we must also
687 report the undecremented PC. If the breakpoint is removed, we
688 must resume at the decremented PC.
689
690 On a non-decr_pc_after_break machine with hardware or kernel
691 single-step:
692
693 If we either single-step a breakpoint instruction, or continue and
694 hit a breakpoint instruction, our PC will point at the breakpoint
695 instruction. */
696
697 static int
698 check_stopped_by_breakpoint (struct lwp_info *lwp)
699 {
700 CORE_ADDR pc;
701 CORE_ADDR sw_breakpoint_pc;
702 struct thread_info *saved_thread;
703 #if USE_SIGTRAP_SIGINFO
704 siginfo_t siginfo;
705 #endif
706
707 if (the_low_target.get_pc == NULL)
708 return 0;
709
710 pc = get_pc (lwp);
711 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
712
713 /* breakpoint_at reads from the current thread. */
714 saved_thread = current_thread;
715 current_thread = get_lwp_thread (lwp);
716
717 #if USE_SIGTRAP_SIGINFO
718 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
719 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
720 {
721 if (siginfo.si_signo == SIGTRAP)
722 {
723 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
724 {
725 if (debug_threads)
726 {
727 struct thread_info *thr = get_lwp_thread (lwp);
728
729 debug_printf ("CSBB: %s stopped by software breakpoint\n",
730 target_pid_to_str (ptid_of (thr)));
731 }
732
733 /* Back up the PC if necessary. */
734 if (pc != sw_breakpoint_pc)
735 {
736 struct regcache *regcache
737 = get_thread_regcache (current_thread, 1);
738 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
739 }
740
741 lwp->stop_pc = sw_breakpoint_pc;
742 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
743 current_thread = saved_thread;
744 return 1;
745 }
746 else if (siginfo.si_code == TRAP_HWBKPT)
747 {
748 if (debug_threads)
749 {
750 struct thread_info *thr = get_lwp_thread (lwp);
751
752 debug_printf ("CSBB: %s stopped by hardware "
753 "breakpoint/watchpoint\n",
754 target_pid_to_str (ptid_of (thr)));
755 }
756
757 lwp->stop_pc = pc;
758 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
759 current_thread = saved_thread;
760 return 1;
761 }
762 else if (siginfo.si_code == TRAP_TRACE)
763 {
764 if (debug_threads)
765 {
766 struct thread_info *thr = get_lwp_thread (lwp);
767
768 debug_printf ("CSBB: %s stopped by trace\n",
769 target_pid_to_str (ptid_of (thr)));
770 }
771
772 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
773 }
774 }
775 }
776 #else
777 /* We may have just stepped a breakpoint instruction. E.g., in
778 non-stop mode, GDB first tells the thread A to step a range, and
779 then the user inserts a breakpoint inside the range. In that
780 case we need to report the breakpoint PC. */
781 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
782 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
783 {
784 if (debug_threads)
785 {
786 struct thread_info *thr = get_lwp_thread (lwp);
787
788 debug_printf ("CSBB: %s stopped by software breakpoint\n",
789 target_pid_to_str (ptid_of (thr)));
790 }
791
792 /* Back up the PC if necessary. */
793 if (pc != sw_breakpoint_pc)
794 {
795 struct regcache *regcache
796 = get_thread_regcache (current_thread, 1);
797 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
798 }
799
800 lwp->stop_pc = sw_breakpoint_pc;
801 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
802 current_thread = saved_thread;
803 return 1;
804 }
805
806 if (hardware_breakpoint_inserted_here (pc))
807 {
808 if (debug_threads)
809 {
810 struct thread_info *thr = get_lwp_thread (lwp);
811
812 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
813 target_pid_to_str (ptid_of (thr)));
814 }
815
816 lwp->stop_pc = pc;
817 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
818 current_thread = saved_thread;
819 return 1;
820 }
821 #endif
822
823 current_thread = saved_thread;
824 return 0;
825 }
826
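/* Allocate and initialize a new lwp_info for PTID, give the low
target a chance to attach per-thread data, and register the
corresponding thread. */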
827 static struct lwp_info *
828 add_lwp (ptid_t ptid)
829 {
830 struct lwp_info *lwp;
831
832 lwp = XCNEW (struct lwp_info);
833
834 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
835
836 if (the_low_target.new_thread != NULL)
837 the_low_target.new_thread (lwp);
838
839 lwp->thread = add_thread (ptid, lwp);
840
841 return lwp;
842 }
843
844 /* Start an inferior process and return its pid.
845 ALLARGS is a vector of program-name and args. */
846
847 static int
848 linux_create_inferior (char *program, char **allargs)
849 {
850 struct lwp_info *new_lwp;
851 int pid;
852 ptid_t ptid;
853 struct cleanup *restore_personality
854 = maybe_disable_address_space_randomization (disable_randomization);
855
856 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
857 pid = vfork ();
858 #else
859 pid = fork ();
860 #endif
861 if (pid < 0)
862 perror_with_name ("fork");
863
864 if (pid == 0)
865 {
866 close_most_fds ();
867 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
868
869 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
870 signal (__SIGRTMIN + 1, SIG_DFL);
871 #endif
872
873 setpgid (0, 0);
874
875 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
876 stdout to stderr so that inferior i/o doesn't corrupt the connection.
877 Also, redirect stdin to /dev/null. */
878 if (remote_connection_is_stdio ())
879 {
880 close (0);
881 open ("/dev/null", O_RDONLY);
882 dup2 (2, 1);
883 if (write (2, "stdin/stdout redirected\n",
884 sizeof ("stdin/stdout redirected\n") - 1) < 0)
885 {
886 /* Errors ignored. */;
887 }
888 }
889
890 execv (program, allargs);
891 if (errno == ENOENT)
892 execvp (program, allargs);
893
894 fprintf (stderr, "Cannot exec %s: %s.\n", program,
895 strerror (errno));
896 fflush (stderr);
897 _exit (0177);
898 }
899
900 do_cleanups (restore_personality);
901
902 linux_add_process (pid, 0);
903
904 ptid = ptid_build (pid, pid, 0);
905 new_lwp = add_lwp (ptid);
906 new_lwp->must_set_ptrace_flags = 1;
907
908 return pid;
909 }
910
911 /* Attach to an inferior process. Returns 0 on success, ERRNO on
912 error. */
913
914 int
915 linux_attach_lwp (ptid_t ptid)
916 {
917 struct lwp_info *new_lwp;
918 int lwpid = ptid_get_lwp (ptid);
919
920 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
921 != 0)
922 return errno;
923
924 new_lwp = add_lwp (ptid);
925
926 /* We need to wait for SIGSTOP before being able to make the next
927 ptrace call on this LWP. */
928 new_lwp->must_set_ptrace_flags = 1;
929
930 if (linux_proc_pid_is_stopped (lwpid))
931 {
932 if (debug_threads)
933 debug_printf ("Attached to a stopped process\n");
934
935 /* The process is definitely stopped. It is in a job control
936 stop, unless the kernel predates the TASK_STOPPED /
937 TASK_TRACED distinction, in which case it might be in a
938 ptrace stop. Make sure it is in a ptrace stop; from there we
939 can kill it, signal it, et cetera.
940
941 First make sure there is a pending SIGSTOP. Since we are
942 already attached, the process cannot transition from stopped
943 to running without a PTRACE_CONT; so we know this signal will
944 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
945 probably already in the queue (unless this kernel is old
946 enough to use TASK_STOPPED for ptrace stops); but since
947 SIGSTOP is not an RT signal, it can only be queued once. */
948 kill_lwp (lwpid, SIGSTOP);
949
950 /* Finally, resume the stopped process. This will deliver the
951 SIGSTOP (or a higher priority signal, just like normal
952 PTRACE_ATTACH), which we'll catch later on. */
953 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
954 }
955
956 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
957 brings it to a halt.
958
959 There are several cases to consider here:
960
961 1) gdbserver has already attached to the process and is being notified
962 of a new thread that is being created.
963 In this case we should ignore that SIGSTOP and resume the
964 process. This is handled below by setting stop_expected = 1,
965 and the fact that add_thread sets last_resume_kind ==
966 resume_continue.
967
968 2) This is the first thread (the process thread), and we're attaching
969 to it via attach_inferior.
970 In this case we want the process thread to stop.
971 This is handled by having linux_attach set last_resume_kind ==
972 resume_stop after we return.
973
974 If the pid we are attaching to is also the tgid, we attach to and
975 stop all the existing threads. Otherwise, we attach to pid and
976 ignore any other threads in the same group as this pid.
977
978 3) GDB is connecting to gdbserver and is requesting an enumeration of all
979 existing threads.
980 In this case we want the thread to stop.
981 FIXME: This case is currently not properly handled.
982 We should wait for the SIGSTOP but don't. Things apparently work
983 because enough time passes between when we ptrace (ATTACH) and when
984 gdb makes the next ptrace call on the thread.
985
986 On the other hand, if we are currently trying to stop all threads, we
987 should treat the new thread as if we had sent it a SIGSTOP. This works
988 because we are guaranteed that the add_lwp call above added us to the
989 end of the list, and so the new thread has not yet reached
990 wait_for_sigstop (but will). */
991 new_lwp->stop_expected = 1;
992
993 return 0;
994 }
995
996 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
997 already attached. Returns true if a new LWP is found, false
998 otherwise. */
999
1000 static int
1001 attach_proc_task_lwp_callback (ptid_t ptid)
1002 {
1003 /* Is this a new thread? */
1004 if (find_thread_ptid (ptid) == NULL)
1005 {
1006 int lwpid = ptid_get_lwp (ptid);
1007 int err;
1008
1009 if (debug_threads)
1010 debug_printf ("Found new lwp %d\n", lwpid);
1011
1012 err = linux_attach_lwp (ptid);
1013
1014 /* Be quiet if we simply raced with the thread exiting. EPERM
1015 is returned if the thread's task still exists, and is marked
1016 as exited or zombie, as well as other conditions, so in that
1017 case, confirm the status in /proc/PID/status. */
1018 if (err == ESRCH
1019 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1020 {
1021 if (debug_threads)
1022 {
1023 debug_printf ("Cannot attach to lwp %d: "
1024 "thread is gone (%d: %s)\n",
1025 lwpid, err, strerror (err));
1026 }
1027 }
1028 else if (err != 0)
1029 {
1030 warning (_("Cannot attach to lwp %d: %s"),
1031 lwpid,
1032 linux_ptrace_attach_fail_reason_string (ptid, err));
1033 }
1034
1035 return 1;
1036 }
1037 return 0;
1038 }
1039
1040 static void async_file_mark (void);
1041
1042 /* Attach to PID. If PID is the tgid, attach to it and all
1043 of its threads. */
1044
1045 static int
1046 linux_attach (unsigned long pid)
1047 {
1048 struct process_info *proc;
1049 struct thread_info *initial_thread;
1050 ptid_t ptid = ptid_build (pid, pid, 0);
1051 int err;
1052
1053 /* Attach to PID. We will check for other threads
1054 soon. */
1055 err = linux_attach_lwp (ptid);
1056 if (err != 0)
1057 error ("Cannot attach to process %ld: %s",
1058 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1059
1060 proc = linux_add_process (pid, 1);
1061
1062 /* Don't ignore the initial SIGSTOP if we just attached to this
1063 process. It will be collected by wait shortly. */
1064 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1065 initial_thread->last_resume_kind = resume_stop;
1066
1067 /* We must attach to every LWP. If /proc is mounted, use that to
1068 find them now. On the one hand, the inferior may be using raw
1069 clone instead of using pthreads. On the other hand, even if it
1070 is using pthreads, GDB may not be connected yet (thread_db needs
1071 to do symbol lookups, through qSymbol). Also, thread_db walks
1072 structures in the inferior's address space to find the list of
1073 threads/LWPs, and those structures may well be corrupted. Note
1074 that once thread_db is loaded, we'll still use it to list threads
1075 and associate pthread info with each LWP. */
1076 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1077
1078 /* GDB will shortly read the xml target description for this
1079 process, to figure out the process' architecture. But the target
1080 description is only filled in when the first process/thread in
1081 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1082 that now, otherwise, if GDB is fast enough, it could read the
1083 target description _before_ that initial stop. */
1084 if (non_stop)
1085 {
1086 struct lwp_info *lwp;
1087 int wstat, lwpid;
1088 ptid_t pid_ptid = pid_to_ptid (pid);
1089
1090 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1091 &wstat, __WALL);
1092 gdb_assert (lwpid > 0);
1093
1094 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1095
1096 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1097 {
1098 lwp->status_pending_p = 1;
1099 lwp->status_pending = wstat;
1100 }
1101
1102 initial_thread->last_resume_kind = resume_continue;
1103
1104 async_file_mark ();
1105
1106 gdb_assert (proc->tdesc != NULL);
1107 }
1108
1109 return 0;
1110 }
1111
1112 struct counter
1113 {
1114 int pid;
1115 int count;
1116 };
1117
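/* Callback for find_inferior. Counts the threads belonging to
COUNTER->pid; returns nonzero, ending the walk, as soon as a
second one is seen. */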
1118 static int
1119 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1120 {
1121 struct counter *counter = (struct counter *) args;
1122
1123 if (ptid_get_pid (entry->id) == counter->pid)
1124 {
1125 if (++counter->count > 1)
1126 return 1;
1127 }
1128
1129 return 0;
1130 }
1131
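/* Return nonzero if no more than one thread of process PID is known. */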
1132 static int
1133 last_thread_of_process_p (int pid)
1134 {
1135 struct counter counter = { pid, 0 };
1136
1137 return (find_inferior (&all_threads,
1138 second_thread_of_pid_p, &counter) == NULL);
1139 }
1140
1141 /* Kill LWP. */
1142
1143 static void
1144 linux_kill_one_lwp (struct lwp_info *lwp)
1145 {
1146 struct thread_info *thr = get_lwp_thread (lwp);
1147 int pid = lwpid_of (thr);
1148
1149 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1150 there is no signal context, and ptrace(PTRACE_KILL) (or
1151 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1152 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1153 alternative is to kill with SIGKILL. We only need one SIGKILL
1154 per process, not one for each thread. But since we still support
1155 linuxthreads, and we also support debugging programs using raw
1156 clone without CLONE_THREAD, we send one for each thread. For
1157 years, we used PTRACE_KILL only, so we're being a bit paranoid
1158 about some old kernels where PTRACE_KILL might work better
1159 (dubious if there are any such, but that's why it's paranoia), so
1160 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1161 everywhere. */
1162
1163 errno = 0;
1164 kill_lwp (pid, SIGKILL);
1165 if (debug_threads)
1166 {
1167 int save_errno = errno;
1168
1169 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1170 target_pid_to_str (ptid_of (thr)),
1171 save_errno ? strerror (save_errno) : "OK");
1172 }
1173
1174 errno = 0;
1175 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1176 if (debug_threads)
1177 {
1178 int save_errno = errno;
1179
1180 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1181 target_pid_to_str (ptid_of (thr)),
1182 save_errno ? strerror (save_errno) : "OK");
1183 }
1184 }
1185
1186 /* Kill LWP and wait for it to die. */
1187
1188 static void
1189 kill_wait_lwp (struct lwp_info *lwp)
1190 {
1191 struct thread_info *thr = get_lwp_thread (lwp);
1192 int pid = ptid_get_pid (ptid_of (thr));
1193 int lwpid = ptid_get_lwp (ptid_of (thr));
1194 int wstat;
1195 int res;
1196
1197 if (debug_threads)
1198 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1199
1200 do
1201 {
1202 linux_kill_one_lwp (lwp);
1203
1204 /* Make sure it died. Notes:
1205
1206 - The loop is most likely unnecessary.
1207
1208 - We don't use linux_wait_for_event as that could delete lwps
1209 while we're iterating over them. We're not interested in
1210 any pending status at this point, only in making sure all
1211 wait status on the kernel side are collected until the
1212 process is reaped.
1213
1214 - We don't use __WALL here as the __WALL emulation relies on
1215 SIGCHLD, and killing a stopped process doesn't generate
1216 one, nor an exit status.
1217 */
1218 res = my_waitpid (lwpid, &wstat, 0);
1219 if (res == -1 && errno == ECHILD)
1220 res = my_waitpid (lwpid, &wstat, __WCLONE);
1221 } while (res > 0 && WIFSTOPPED (wstat));
1222
1223 /* Even if it was stopped, the child may have already disappeared.
1224 E.g., if it was killed by SIGKILL. */
1225 if (res < 0 && errno != ECHILD)
1226 perror_with_name ("kill_wait_lwp");
1227 }
1228
1229 /* Callback for `find_inferior'. Kills an lwp of a given process,
1230 except the leader. */
1231
1232 static int
1233 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1234 {
1235 struct thread_info *thread = (struct thread_info *) entry;
1236 struct lwp_info *lwp = get_thread_lwp (thread);
1237 int pid = * (int *) args;
1238
1239 if (ptid_get_pid (entry->id) != pid)
1240 return 0;
1241
1242 /* We avoid killing the first thread here, because of a Linux kernel (at
1243 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1244 the children get a chance to be reaped, it will remain a zombie
1245 forever. */
1246
1247 if (lwpid_of (thread) == pid)
1248 {
1249 if (debug_threads)
1250 debug_printf ("lkop: is last of process %s\n",
1251 target_pid_to_str (entry->id));
1252 return 0;
1253 }
1254
1255 kill_wait_lwp (lwp);
1256 return 0;
1257 }
1258
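/* Kill process PID and all of its LWPs, then mourn it. Returns 0 on
success, -1 if no such process is known. */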
1259 static int
1260 linux_kill (int pid)
1261 {
1262 struct process_info *process;
1263 struct lwp_info *lwp;
1264
1265 process = find_process_pid (pid);
1266 if (process == NULL)
1267 return -1;
1268
1269 /* If we're killing a running inferior, make sure it is stopped
1270 first, as PTRACE_KILL will not work otherwise. */
1271 stop_all_lwps (0, NULL);
1272
1273 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1274
1275 /* See the comment in linux_kill_one_lwp. We did not kill the first
1276 thread in the list, so do so now. */
1277 lwp = find_lwp_pid (pid_to_ptid (pid));
1278
1279 if (lwp == NULL)
1280 {
1281 if (debug_threads)
1282 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1283 pid);
1284 }
1285 else
1286 kill_wait_lwp (lwp);
1287
1288 the_target->mourn (process);
1289
1290 /* Since we presently can only stop all lwps of all processes, we
1291 need to unstop lwps of other processes. */
1292 unstop_all_lwps (0, NULL);
1293 return 0;
1294 }
1295
1296 /* Get pending signal of THREAD, for detaching purposes. This is the
1297 signal the thread last stopped for, which we need to deliver to the
1298 thread when detaching, otherwise, it'd be suppressed/lost. */
1299
1300 static int
1301 get_detach_signal (struct thread_info *thread)
1302 {
1303 enum gdb_signal signo = GDB_SIGNAL_0;
1304 int status;
1305 struct lwp_info *lp = get_thread_lwp (thread);
1306
1307 if (lp->status_pending_p)
1308 status = lp->status_pending;
1309 else
1310 {
1311 /* If the thread had been suspended by gdbserver, and it stopped
1312 cleanly, then it'll have stopped with SIGSTOP. But we don't
1313 want to deliver that SIGSTOP. */
1314 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1315 || thread->last_status.value.sig == GDB_SIGNAL_0)
1316 return 0;
1317
1318 /* Otherwise, we may need to deliver the signal we
1319 intercepted. */
1320 status = lp->last_status;
1321 }
1322
1323 if (!WIFSTOPPED (status))
1324 {
1325 if (debug_threads)
1326 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1327 target_pid_to_str (ptid_of (thread)));
1328 return 0;
1329 }
1330
1331 /* Extended wait statuses aren't real SIGTRAPs. */
1332 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1333 {
1334 if (debug_threads)
1335 debug_printf ("GPS: lwp %s had stopped with extended "
1336 "status: no pending signal\n",
1337 target_pid_to_str (ptid_of (thread)));
1338 return 0;
1339 }
1340
1341 signo = gdb_signal_from_host (WSTOPSIG (status));
1342
1343 if (program_signals_p && !program_signals[signo])
1344 {
1345 if (debug_threads)
1346 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1347 target_pid_to_str (ptid_of (thread)),
1348 gdb_signal_to_string (signo));
1349 return 0;
1350 }
1351 else if (!program_signals_p
1352 /* If we have no way to know which signals GDB does not
1353 want to have passed to the program, assume
1354 SIGTRAP/SIGINT, which is GDB's default. */
1355 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1356 {
1357 if (debug_threads)
1358 debug_printf ("GPS: lwp %s had signal %s, "
1359 "but we don't know if we should pass it. "
1360 "Default to not.\n",
1361 target_pid_to_str (ptid_of (thread)),
1362 gdb_signal_to_string (signo));
1363 return 0;
1364 }
1365 else
1366 {
1367 if (debug_threads)
1368 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1369 target_pid_to_str (ptid_of (thread)),
1370 gdb_signal_to_string (signo));
1371
1372 return WSTOPSIG (status);
1373 }
1374 }
1375
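/* Callback for find_inferior. Detaches from a single LWP of process
PID, delivering any pending signal, then deletes the LWP. */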
1376 static int
1377 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1378 {
1379 struct thread_info *thread = (struct thread_info *) entry;
1380 struct lwp_info *lwp = get_thread_lwp (thread);
1381 int pid = * (int *) args;
1382 int sig;
1383
1384 if (ptid_get_pid (entry->id) != pid)
1385 return 0;
1386
1387 /* If there is a pending SIGSTOP, get rid of it. */
1388 if (lwp->stop_expected)
1389 {
1390 if (debug_threads)
1391 debug_printf ("Sending SIGCONT to %s\n",
1392 target_pid_to_str (ptid_of (thread)));
1393
1394 kill_lwp (lwpid_of (thread), SIGCONT);
1395 lwp->stop_expected = 0;
1396 }
1397
1398 /* Flush any pending changes to the process's registers. */
1399 regcache_invalidate_thread (thread);
1400
1401 /* Pass on any pending signal for this thread. */
1402 sig = get_detach_signal (thread);
1403
1404 /* Finally, let it resume. */
1405 if (the_low_target.prepare_to_resume != NULL)
1406 the_low_target.prepare_to_resume (lwp);
1407 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1408 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1409 error (_("Can't detach %s: %s"),
1410 target_pid_to_str (ptid_of (thread)),
1411 strerror (errno));
1412
1413 delete_lwp (lwp);
1414 return 0;
1415 }
1416
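/* Detach from process PID, stopping all of its LWPs first. Returns
0 on success, -1 if no such process is known. */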
1417 static int
1418 linux_detach (int pid)
1419 {
1420 struct process_info *process;
1421
1422 process = find_process_pid (pid);
1423 if (process == NULL)
1424 return -1;
1425
1426 /* As there's a step over already in progress, let it finish first,
1427 otherwise nesting a stabilize_threads operation on top gets real
1428 messy. */
1429 complete_ongoing_step_over ();
1430
1431 /* Stop all threads before detaching. First, ptrace requires that
1432 the thread is stopped to successfully detach. Second, thread_db
1433 may need to uninstall thread event breakpoints from memory, which
1434 only works with a stopped process anyway. */
1435 stop_all_lwps (0, NULL);
1436
1437 #ifdef USE_THREAD_DB
1438 thread_db_detach (process);
1439 #endif
1440
1441 /* Stabilize threads (move out of jump pads). */
1442 stabilize_threads ();
1443
1444 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1445
1446 the_target->mourn (process);
1447
1448 /* Since we presently can only stop all lwps of all processes, we
1449 need to unstop lwps of other processes. */
1450 unstop_all_lwps (0, NULL);
1451 return 0;
1452 }
1453
1454 /* Remove all LWPs that belong to process PROC from the lwp list. */
1455
1456 static int
1457 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1458 {
1459 struct thread_info *thread = (struct thread_info *) entry;
1460 struct lwp_info *lwp = get_thread_lwp (thread);
1461 struct process_info *process = (struct process_info *) proc;
1462
1463 if (pid_of (thread) == pid_of (process))
1464 delete_lwp (lwp);
1465
1466 return 0;
1467 }
1468
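/* Clean up after PROCESS exits or is killed: delete its remaining
LWPs and free all per-process private data. */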
1469 static void
1470 linux_mourn (struct process_info *process)
1471 {
1472 struct process_info_private *priv;
1473
1474 #ifdef USE_THREAD_DB
1475 thread_db_mourn (process);
1476 #endif
1477
1478 find_inferior (&all_threads, delete_lwp_callback, process);
1479
1480 /* Freeing all private data. */
1481 priv = process->priv;
1482 free (priv->arch_private);
1483 free (priv);
1484 process->priv = NULL;
1485
1486 remove_process (process);
1487 }
1488
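/* Wait for process PID to terminate, discarding intermediate stop
statuses, until waitpid reports an exit or fails with ECHILD. */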
1489 static void
1490 linux_join (int pid)
1491 {
1492 int status, ret;
1493
1494 do {
1495 ret = my_waitpid (pid, &status, 0);
1496 if (WIFEXITED (status) || WIFSIGNALED (status))
1497 break;
1498 } while (ret != -1 || errno != ECHILD);
1499 }
1500
1501 /* Return nonzero if the given thread is still alive. */
1502 static int
1503 linux_thread_alive (ptid_t ptid)
1504 {
1505 struct lwp_info *lwp = find_lwp_pid (ptid);
1506
1507 /* We assume we always know if a thread exits. If a whole process
1508 exited but we still haven't been able to report it to GDB, we'll
1509 hold on to the last lwp of the dead process. */
1510 if (lwp != NULL)
1511 return !lwp_is_marked_dead (lwp);
1512 else
1513 return 0;
1514 }
1515
1516 /* Return 1 if this lwp still has an interesting status pending. If
1517 not (e.g., it had stopped for a breakpoint that is gone), return
1518 0. */
1519
1520 static int
1521 thread_still_has_status_pending_p (struct thread_info *thread)
1522 {
1523 struct lwp_info *lp = get_thread_lwp (thread);
1524
1525 if (!lp->status_pending_p)
1526 return 0;
1527
1528 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1529 report any status pending the LWP may have. */
1530 if (thread->last_resume_kind == resume_stop
1531 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1532 return 0;
1533
1534 if (thread->last_resume_kind != resume_stop
1535 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1536 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1537 {
1538 struct thread_info *saved_thread;
1539 CORE_ADDR pc;
1540 int discard = 0;
1541
1542 gdb_assert (lp->last_status != 0);
1543
1544 pc = get_pc (lp);
1545
1546 saved_thread = current_thread;
1547 current_thread = thread;
1548
1549 if (pc != lp->stop_pc)
1550 {
1551 if (debug_threads)
1552 debug_printf ("PC of %ld changed\n",
1553 lwpid_of (thread));
1554 discard = 1;
1555 }
1556
1557 #if !USE_SIGTRAP_SIGINFO
1558 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1559 && !(*the_low_target.breakpoint_at) (pc))
1560 {
1561 if (debug_threads)
1562 debug_printf ("previous SW breakpoint of %ld gone\n",
1563 lwpid_of (thread));
1564 discard = 1;
1565 }
1566 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1567 && !hardware_breakpoint_inserted_here (pc))
1568 {
1569 if (debug_threads)
1570 debug_printf ("previous HW breakpoint of %ld gone\n",
1571 lwpid_of (thread));
1572 discard = 1;
1573 }
1574 #endif
1575
1576 current_thread = saved_thread;
1577
1578 if (discard)
1579 {
1580 if (debug_threads)
1581 debug_printf ("discarding pending breakpoint status\n");
1582 lp->status_pending_p = 0;
1583 return 0;
1584 }
1585 }
1586
1587 return 1;
1588 }
1589
1590 /* Return 1 if this lwp has an interesting status pending. */
1591 static int
1592 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1593 {
1594 struct thread_info *thread = (struct thread_info *) entry;
1595 struct lwp_info *lp = get_thread_lwp (thread);
1596 ptid_t ptid = * (ptid_t *) arg;
1597
1598 /* Check if we're only interested in events from a specific process
1599 or a specific LWP. */
1600 if (!ptid_match (ptid_of (thread), ptid))
1601 return 0;
1602
1603 if (lp->status_pending_p
1604 && !thread_still_has_status_pending_p (thread))
1605 {
1606 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1607 return 0;
1608 }
1609
1610 return lp->status_pending_p;
1611 }
1612
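/* Callback for find_inferior. Returns nonzero if ENTRY's lwp id
matches the ptid passed in DATA (the ptid's pid is used when its
lwp field is zero). */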
1613 static int
1614 same_lwp (struct inferior_list_entry *entry, void *data)
1615 {
1616 ptid_t ptid = *(ptid_t *) data;
1617 int lwp;
1618
1619 if (ptid_get_lwp (ptid) != 0)
1620 lwp = ptid_get_lwp (ptid);
1621 else
1622 lwp = ptid_get_pid (ptid);
1623
1624 if (ptid_get_lwp (entry->id) == lwp)
1625 return 1;
1626
1627 return 0;
1628 }
1629
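/* Return the lwp_info whose id matches PTID, or NULL if none is
found. */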
1630 struct lwp_info *
1631 find_lwp_pid (ptid_t ptid)
1632 {
1633 struct inferior_list_entry *thread
1634 = find_inferior (&all_threads, same_lwp, &ptid);
1635
1636 if (thread == NULL)
1637 return NULL;
1638
1639 return get_thread_lwp ((struct thread_info *) thread);
1640 }
1641
1642 /* Return the number of known LWPs in the tgid given by PID. */
1643
1644 static int
1645 num_lwps (int pid)
1646 {
1647 struct inferior_list_entry *inf, *tmp;
1648 int count = 0;
1649
1650 ALL_INFERIORS (&all_threads, inf, tmp)
1651 {
1652 if (ptid_get_pid (inf->id) == pid)
1653 count++;
1654 }
1655
1656 return count;
1657 }
1658
1659 /* The arguments passed to iterate_over_lwps. */
1660
1661 struct iterate_over_lwps_args
1662 {
1663 /* The FILTER argument passed to iterate_over_lwps. */
1664 ptid_t filter;
1665
1666 /* The CALLBACK argument passed to iterate_over_lwps. */
1667 iterate_over_lwps_ftype *callback;
1668
1669 /* The DATA argument passed to iterate_over_lwps. */
1670 void *data;
1671 };
1672
1673 /* Callback for find_inferior used by iterate_over_lwps to filter
1674 calls to the callback supplied to that function. Returning a
1675 nonzero value causes find_inferior to stop iterating and return
1676 the current inferior_list_entry. Returning zero indicates that
1677 find_inferior should continue iterating. */
1678
1679 static int
1680 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1681 {
1682 struct iterate_over_lwps_args *args
1683 = (struct iterate_over_lwps_args *) args_p;
1684
1685 if (ptid_match (entry->id, args->filter))
1686 {
1687 struct thread_info *thr = (struct thread_info *) entry;
1688 struct lwp_info *lwp = get_thread_lwp (thr);
1689
1690 return (*args->callback) (lwp, args->data);
1691 }
1692
1693 return 0;
1694 }
1695
1696 /* See nat/linux-nat.h. */
1697
1698 struct lwp_info *
1699 iterate_over_lwps (ptid_t filter,
1700 iterate_over_lwps_ftype callback,
1701 void *data)
1702 {
1703 struct iterate_over_lwps_args args = {filter, callback, data};
1704 struct inferior_list_entry *entry;
1705
1706 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1707 if (entry == NULL)
1708 return NULL;
1709
1710 return get_thread_lwp ((struct thread_info *) entry);
1711 }
1712
1713 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1714 their exits until all other threads in the group have exited. */
1715
1716 static void
1717 check_zombie_leaders (void)
1718 {
1719 struct process_info *proc, *tmp;
1720
1721 ALL_PROCESSES (proc, tmp)
1722 {
1723 pid_t leader_pid = pid_of (proc);
1724 struct lwp_info *leader_lp;
1725
1726 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1727
1728 if (debug_threads)
1729 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1730 "num_lwps=%d, zombie=%d\n",
1731 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1732 linux_proc_pid_is_zombie (leader_pid));
1733
1734 if (leader_lp != NULL && !leader_lp->stopped
1735 /* Check if there are other threads in the group, as we may
1736 have raced with the inferior simply exiting. */
1737 && !last_thread_of_process_p (leader_pid)
1738 && linux_proc_pid_is_zombie (leader_pid))
1739 {
1740 /* A leader zombie can mean one of two things:
1741
1742 - It exited, and there's an exit status pending
1743 available, or only the leader exited (not the whole
1744 program). In the latter case, we can't waitpid the
1745 leader's exit status until all other threads are gone.
1746
1747 - There are 3 or more threads in the group, and a thread
1748 other than the leader exec'd. On an exec, the Linux
1749 kernel destroys all other threads (except the execing
1750 one) in the thread group, and resets the execing thread's
1751 tid to the tgid. No exit notification is sent for the
1752 execing thread -- from the ptracer's perspective, it
1753 appears as though the execing thread just vanishes.
1754 Until we reap all other threads except the leader and the
1755 execing thread, the leader will be zombie, and the
1756 execing thread will be in `D (disc sleep)'. As soon as
1757 all other threads are reaped, the execing thread changes
1758 its tid to the tgid, and the previous (zombie) leader
1759 vanishes, giving place to the "new" leader. We could try
1760 distinguishing the exit and exec cases, by waiting once
1761 more, and seeing if something comes out, but it doesn't
1762 sound useful. The previous leader _does_ go away, and
1763 we'll re-add the new one once we see the exec event
1764 (which is just the same as what would happen if the
1765 previous leader did exit voluntarily before some other
1766 thread execs). */
1767
1768 if (debug_threads)
1769 fprintf (stderr,
1770 "CZL: Thread group leader %d zombie "
1771 "(it exited, or another thread execd).\n",
1772 leader_pid);
1773
1774 delete_lwp (leader_lp);
1775 }
1776 }
1777 }
1778
1779 /* Callback for `find_inferior'. Returns the first LWP that is not
1780 stopped. ARG is a PTID filter. */
1781
1782 static int
1783 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1784 {
1785 struct thread_info *thr = (struct thread_info *) entry;
1786 struct lwp_info *lwp;
1787 ptid_t filter = *(ptid_t *) arg;
1788
1789 if (!ptid_match (ptid_of (thr), filter))
1790 return 0;
1791
1792 lwp = get_thread_lwp (thr);
1793 if (!lwp->stopped)
1794 return 1;
1795
1796 return 0;
1797 }
1798
1799 /* Increment LWP's suspend count. */
1800
1801 static void
1802 lwp_suspended_inc (struct lwp_info *lwp)
1803 {
1804 lwp->suspended++;
1805
1806 if (debug_threads && lwp->suspended > 4)
1807 {
1808 struct thread_info *thread = get_lwp_thread (lwp);
1809
1810 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1811 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1812 }
1813 }
1814
1815 /* Decrement LWP's suspend count. */
1816
1817 static void
1818 lwp_suspended_decr (struct lwp_info *lwp)
1819 {
1820 lwp->suspended--;
1821
1822 if (lwp->suspended < 0)
1823 {
1824 struct thread_info *thread = get_lwp_thread (lwp);
1825
1826 internal_error (__FILE__, __LINE__,
1827 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1828 lwp->suspended);
1829 }
1830 }
1831
1832 /* This function should only be called if the LWP got a SIGTRAP.
1833
1834 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1835 event was handled, 0 otherwise. */
1836
1837 static int
1838 handle_tracepoints (struct lwp_info *lwp)
1839 {
1840 struct thread_info *tinfo = get_lwp_thread (lwp);
1841 int tpoint_related_event = 0;
1842
1843 gdb_assert (lwp->suspended == 0);
1844
1845 /* If this tracepoint hit causes a tracing stop, we'll immediately
1846 uninsert tracepoints. To do this, we temporarily pause all
1847 threads, unpatch away, and then unpause threads. We need to make
1848 sure the unpausing doesn't resume LWP too. */
1849 lwp_suspended_inc (lwp);
1850
1851 /* And we need to be sure that any all-threads-stopping doesn't try
1852 to move threads out of the jump pads, as it could deadlock the
1853 inferior (LWP could be in the jump pad, maybe even holding the
1854 lock.) */
1855
1856 /* Do any necessary step collect actions. */
1857 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1858
1859 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1860
1861 /* See if we just hit a tracepoint and do its main collect
1862 actions. */
1863 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1864
1865 lwp_suspended_decr (lwp);
1866
1867 gdb_assert (lwp->suspended == 0);
1868 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1869
1870 if (tpoint_related_event)
1871 {
1872 if (debug_threads)
1873 debug_printf ("got a tracepoint event\n");
1874 return 1;
1875 }
1876
1877 return 0;
1878 }
1879
1880 /* Convenience wrapper. Returns true if LWP is presently collecting a
1881 fast tracepoint. */
1882
1883 static int
1884 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1885 struct fast_tpoint_collect_status *status)
1886 {
1887 CORE_ADDR thread_area;
1888 struct thread_info *thread = get_lwp_thread (lwp);
1889
1890 if (the_low_target.get_thread_area == NULL)
1891 return 0;
1892
1893 /* Get the thread area address. This is used to recognize which
1894 thread is which when tracing with the in-process agent library.
1895 We don't read anything from the address, and treat it as opaque;
1896 it's the address itself that we assume is unique per-thread. */
1897 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1898 return 0;
1899
1900 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1901 }
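
/* Judging from the callers below, the return convention of
   fast_tracepoint_collecting (and hence of this wrapper) appears to
   be: 0 when the LWP is not collecting at all, 1 while it is in the
   jump pad but has not yet executed the relocated original
   instruction, and another nonzero value once it is past that point
   (compare the "r == 1" test in maybe_move_out_of_jump_pad with the
   "!= 1" test in linux_wait_1).  */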
1902
1903 /* The reason we resume in the caller is that we want to be able
1904 to pass lwp->status_pending as WSTAT, and we need to clear
1905 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1906 refuses to resume. */
1907
1908 static int
1909 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1910 {
1911 struct thread_info *saved_thread;
1912
1913 saved_thread = current_thread;
1914 current_thread = get_lwp_thread (lwp);
1915
1916 if ((wstat == NULL
1917 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1918 && supports_fast_tracepoints ()
1919 && agent_loaded_p ())
1920 {
1921 struct fast_tpoint_collect_status status;
1922 int r;
1923
1924 if (debug_threads)
1925 debug_printf ("Checking whether LWP %ld needs to move out of the "
1926 "jump pad.\n",
1927 lwpid_of (current_thread));
1928
1929 r = linux_fast_tracepoint_collecting (lwp, &status);
1930
1931 if (wstat == NULL
1932 || (WSTOPSIG (*wstat) != SIGILL
1933 && WSTOPSIG (*wstat) != SIGFPE
1934 && WSTOPSIG (*wstat) != SIGSEGV
1935 && WSTOPSIG (*wstat) != SIGBUS))
1936 {
1937 lwp->collecting_fast_tracepoint = r;
1938
1939 if (r != 0)
1940 {
1941 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1942 {
1943 /* Haven't executed the original instruction yet.
1944 Set breakpoint there, and wait till it's hit,
1945 then single-step until exiting the jump pad. */
1946 lwp->exit_jump_pad_bkpt
1947 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1948 }
1949
1950 if (debug_threads)
1951 debug_printf ("Checking whether LWP %ld needs to move out of "
1952 "the jump pad...it does\n",
1953 lwpid_of (current_thread));
1954 current_thread = saved_thread;
1955
1956 return 1;
1957 }
1958 }
1959 else
1960 {
1961 /* If we get a synchronous signal while collecting, *and*
1962 while executing the (relocated) original instruction,
1963 reset the PC to point at the tpoint address, before
1964 reporting to GDB. Otherwise, it's an IPA lib bug: just
1965 report the signal to GDB, and pray for the best. */
1966
1967 lwp->collecting_fast_tracepoint = 0;
1968
1969 if (r != 0
1970 && (status.adjusted_insn_addr <= lwp->stop_pc
1971 && lwp->stop_pc < status.adjusted_insn_addr_end))
1972 {
1973 siginfo_t info;
1974 struct regcache *regcache;
1975
1976 /* The si_addr on a few signals references the address
1977 of the faulting instruction. Adjust that as
1978 well. */
1979 if ((WSTOPSIG (*wstat) == SIGILL
1980 || WSTOPSIG (*wstat) == SIGFPE
1981 || WSTOPSIG (*wstat) == SIGBUS
1982 || WSTOPSIG (*wstat) == SIGSEGV)
1983 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1984 (PTRACE_TYPE_ARG3) 0, &info) == 0
1985 /* Final check just to make sure we don't clobber
1986 the siginfo of non-kernel-sent signals. */
1987 && (uintptr_t) info.si_addr == lwp->stop_pc)
1988 {
1989 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1990 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1991 (PTRACE_TYPE_ARG3) 0, &info);
1992 }
1993
1994 regcache = get_thread_regcache (current_thread, 1);
1995 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1996 lwp->stop_pc = status.tpoint_addr;
1997
1998 /* Cancel any fast tracepoint lock this thread was
1999 holding. */
2000 force_unlock_trace_buffer ();
2001 }
2002
2003 if (lwp->exit_jump_pad_bkpt != NULL)
2004 {
2005 if (debug_threads)
2006 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2007 "stopping all threads momentarily.\n");
2008
2009 stop_all_lwps (1, lwp);
2010
2011 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2012 lwp->exit_jump_pad_bkpt = NULL;
2013
2014 unstop_all_lwps (1, lwp);
2015
2016 gdb_assert (lwp->suspended >= 0);
2017 }
2018 }
2019 }
2020
2021 if (debug_threads)
2022 debug_printf ("Checking whether LWP %ld needs to move out of the "
2023 "jump pad...no\n",
2024 lwpid_of (current_thread));
2025
2026 current_thread = saved_thread;
2027 return 0;
2028 }
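
/* In short: returning 1 above means "defer this signal and let the
   LWP run out of the jump pad first" (linux_wait_1 then enqueues the
   signal with enqueue_one_deferred_signal and resumes the LWP), while
   returning 0 means the event can be reported to GDB as-is.  */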
2029
2030 /* Enqueue one signal in the "signals to report later when out of the
2031 jump pad" list. */
2032
2033 static void
2034 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2035 {
2036 struct pending_signals *p_sig;
2037 struct thread_info *thread = get_lwp_thread (lwp);
2038
2039 if (debug_threads)
2040 debug_printf ("Deferring signal %d for LWP %ld.\n",
2041 WSTOPSIG (*wstat), lwpid_of (thread));
2042
2043 if (debug_threads)
2044 {
2045 struct pending_signals *sig;
2046
2047 for (sig = lwp->pending_signals_to_report;
2048 sig != NULL;
2049 sig = sig->prev)
2050 debug_printf (" Already queued %d\n",
2051 sig->signal);
2052
2053 debug_printf (" (no more currently queued signals)\n");
2054 }
2055
2056 /* Don't enqueue non-RT signals if they are already in the deferred
2057 queue. (SIGSTOP being the easiest signal to see ending up here
2058 twice.) */
2059 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2060 {
2061 struct pending_signals *sig;
2062
2063 for (sig = lwp->pending_signals_to_report;
2064 sig != NULL;
2065 sig = sig->prev)
2066 {
2067 if (sig->signal == WSTOPSIG (*wstat))
2068 {
2069 if (debug_threads)
2070 debug_printf ("Not requeuing already queued non-RT signal %d"
2071 " for LWP %ld\n",
2072 sig->signal,
2073 lwpid_of (thread));
2074 return;
2075 }
2076 }
2077 }
2078
2079 p_sig = XCNEW (struct pending_signals);
2080 p_sig->prev = lwp->pending_signals_to_report;
2081 p_sig->signal = WSTOPSIG (*wstat);
2082
2083 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2084 &p_sig->info);
2085
2086 lwp->pending_signals_to_report = p_sig;
2087 }
2088
2089 /* Dequeue one signal from the "signals to report later when out of
2090 the jump pad" list. */
2091
2092 static int
2093 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2094 {
2095 struct thread_info *thread = get_lwp_thread (lwp);
2096
2097 if (lwp->pending_signals_to_report != NULL)
2098 {
2099 struct pending_signals **p_sig;
2100
2101 p_sig = &lwp->pending_signals_to_report;
2102 while ((*p_sig)->prev != NULL)
2103 p_sig = &(*p_sig)->prev;
2104
2105 *wstat = W_STOPCODE ((*p_sig)->signal);
2106 if ((*p_sig)->info.si_signo != 0)
2107 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2108 &(*p_sig)->info);
2109 free (*p_sig);
2110 *p_sig = NULL;
2111
2112 if (debug_threads)
2113 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2114 WSTOPSIG (*wstat), lwpid_of (thread));
2115
2116 if (debug_threads)
2117 {
2118 struct pending_signals *sig;
2119
2120 for (sig = lwp->pending_signals_to_report;
2121 sig != NULL;
2122 sig = sig->prev)
2123 debug_printf (" Still queued %d\n",
2124 sig->signal);
2125
2126 debug_printf (" (no more queued signals)\n");
2127 }
2128
2129 return 1;
2130 }
2131
2132 return 0;
2133 }
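
/* Implementation note: W_STOPCODE above manufactures a raw wait
   status from a signal number, so that the caller can feed the
   dequeued signal through the usual WIFSTOPPED/WSTOPSIG macros.
   With glibc it expands to something like

     #define W_STOPCODE(sig) (((sig) << 8) | 0x7f)

   though the exact definition is libc-specific.  */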
2134
2135 /* Fetch the possibly triggered data watchpoint info and store it in
2136 CHILD.
2137
2138 On some archs, like x86, that use debug registers to set
2139 watchpoints, it's possible that the way to know which watched
2140 address trapped, is to check the register that is used to select
2141 which address to watch. Problem is, between setting the watchpoint
2142 and reading back which data address trapped, the user may change
2143 the set of watchpoints, and, as a consequence, GDB changes the
2144 debug registers in the inferior. To avoid reading back a stale
2145 stopped-data-address when that happens, we cache in CHILD the fact
2146 that a watchpoint trapped, and the corresponding data address, as
2147 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2148 registers meanwhile, we have the cached data we can rely on. */
2149
2150 static int
2151 check_stopped_by_watchpoint (struct lwp_info *child)
2152 {
2153 if (the_low_target.stopped_by_watchpoint != NULL)
2154 {
2155 struct thread_info *saved_thread;
2156
2157 saved_thread = current_thread;
2158 current_thread = get_lwp_thread (child);
2159
2160 if (the_low_target.stopped_by_watchpoint ())
2161 {
2162 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2163
2164 if (the_low_target.stopped_data_address != NULL)
2165 child->stopped_data_address
2166 = the_low_target.stopped_data_address ();
2167 else
2168 child->stopped_data_address = 0;
2169 }
2170
2171 current_thread = saved_thread;
2172 }
2173
2174 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2175 }
2176
2177 /* Return the ptrace options that we want to try to enable. */
2178
2179 static int
2180 linux_low_ptrace_options (int attached)
2181 {
2182 int options = 0;
2183
2184 if (!attached)
2185 options |= PTRACE_O_EXITKILL;
2186
2187 if (report_fork_events)
2188 options |= PTRACE_O_TRACEFORK;
2189
2190 if (report_vfork_events)
2191 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2192
2193 if (report_exec_events)
2194 options |= PTRACE_O_TRACEEXEC;
2195
2196 return options;
2197 }
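
/* These flags are handed to linux_enable_event_reporting (see
   linux_low_filter_event below), which presumably amounts to a
   PTRACE_SETOPTIONS call along the lines of

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);

   Note that PTRACE_O_EXITKILL is requested only for inferiors we
   spawned ourselves: it makes the kernel kill the tracee if gdbserver
   dies, which would be wrong for a process we merely attached to.  */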
2198
2199 /* Do low-level handling of the event, and check if we should go on
2200 and pass it to caller code. Return the affected LWP if we should, or
2201 NULL otherwise. */
2202
2203 static struct lwp_info *
2204 linux_low_filter_event (int lwpid, int wstat)
2205 {
2206 struct lwp_info *child;
2207 struct thread_info *thread;
2208 int have_stop_pc = 0;
2209
2210 child = find_lwp_pid (pid_to_ptid (lwpid));
2211
2212 /* Check for stop events reported by a process we didn't already
2213 know about - anything not already in our LWP list.
2214
2215 If we're expecting to receive stopped processes after
2216 fork, vfork, and clone events, then we'll just add the
2217 new one to our list and go back to waiting for the event
2218 to be reported - the stopped process might be returned
2219 from waitpid before or after the event is.
2220
2221 But note the case of a non-leader thread exec'ing after the
2222 leader has exited and gone from our lists (because
2223 check_zombie_leaders deleted it). The non-leader thread
2224 changes its tid to the tgid. */
2225
2226 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2227 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2228 {
2229 ptid_t child_ptid;
2230
2231 /* A multi-thread exec after we had seen the leader exiting. */
2232 if (debug_threads)
2233 {
2234 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2235 "after exec.\n", lwpid);
2236 }
2237
2238 child_ptid = ptid_build (lwpid, lwpid, 0);
2239 child = add_lwp (child_ptid);
2240 child->stopped = 1;
2241 current_thread = child->thread;
2242 }
2243
2244 /* If we didn't find a process, one of two things presumably happened:
2245 - A process we started and then detached from has exited. Ignore it.
2246 - A process we are controlling has forked and the new child's stop
2247 was reported to us by the kernel. Save its PID. */
2248 if (child == NULL && WIFSTOPPED (wstat))
2249 {
2250 add_to_pid_list (&stopped_pids, lwpid, wstat);
2251 return NULL;
2252 }
2253 else if (child == NULL)
2254 return NULL;
2255
2256 thread = get_lwp_thread (child);
2257
2258 child->stopped = 1;
2259
2260 child->last_status = wstat;
2261
2262 /* Check if the thread has exited. */
2263 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2264 {
2265 if (debug_threads)
2266 debug_printf ("LLFE: %d exited.\n", lwpid);
2267 /* If there is at least one more LWP, then the exit signal was
2268 not the end of the debugged application and should be
2269 ignored, unless GDB wants to hear about thread exits. */
2270 if (report_thread_events
2271 || last_thread_of_process_p (pid_of (thread)))
2272 {
2273 /* Since events are serialized to the GDB core, we can't
2274 report this one right now. Leave the status pending for
2275 the next time we're able to report it. */
2276 mark_lwp_dead (child, wstat);
2277 return child;
2278 }
2279 else
2280 {
2281 delete_lwp (child);
2282 return NULL;
2283 }
2284 }
2285
2286 gdb_assert (WIFSTOPPED (wstat));
2287
2288 if (WIFSTOPPED (wstat))
2289 {
2290 struct process_info *proc;
2291
2292 /* Architecture-specific setup after inferior is running. */
2293 proc = find_process_pid (pid_of (thread));
2294 if (proc->tdesc == NULL)
2295 {
2296 if (proc->attached)
2297 {
2298 /* This needs to happen after we have attached to the
2299 inferior and it is stopped for the first time, but
2300 before we access any inferior registers. */
2301 linux_arch_setup_thread (thread);
2302 }
2303 else
2304 {
2305 /* The process is started, but GDBserver will do
2306 architecture-specific setup after the program stops at
2307 the first instruction. */
2308 child->status_pending_p = 1;
2309 child->status_pending = wstat;
2310 return child;
2311 }
2312 }
2313 }
2314
2315 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2316 {
2317 struct process_info *proc = find_process_pid (pid_of (thread));
2318 int options = linux_low_ptrace_options (proc->attached);
2319
2320 linux_enable_event_reporting (lwpid, options);
2321 child->must_set_ptrace_flags = 0;
2322 }
2323
2324 /* Be careful not to overwrite stop_pc until
2325 check_stopped_by_breakpoint is called. */
2326 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2327 && linux_is_extended_waitstatus (wstat))
2328 {
2329 child->stop_pc = get_pc (child);
2330 if (handle_extended_wait (&child, wstat))
2331 {
2332 /* The event has been handled, so just return without
2333 reporting it. */
2334 return NULL;
2335 }
2336 }
2337
2338 /* Check first whether this was a SW/HW breakpoint before checking
2339 watchpoints, because at least s390 can't tell the data address of
2340 hardware watchpoint hits, and returns stopped-by-watchpoint as
2341 long as there's a watchpoint set. */
2342 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2343 {
2344 if (check_stopped_by_breakpoint (child))
2345 have_stop_pc = 1;
2346 }
2347
2348 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2349 or hardware watchpoint. Check which is which if we got
2350 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2351 stepped an instruction that triggered a watchpoint. In that
2352 case, on some architectures (such as x86), instead of
2353 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2354 the debug registers separately. */
2355 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2356 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2357 check_stopped_by_watchpoint (child);
2358
2359 if (!have_stop_pc)
2360 child->stop_pc = get_pc (child);
2361
2362 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2363 && child->stop_expected)
2364 {
2365 if (debug_threads)
2366 debug_printf ("Expected stop.\n");
2367 child->stop_expected = 0;
2368
2369 if (thread->last_resume_kind == resume_stop)
2370 {
2371 /* We want to report the stop to the core. Treat the
2372 SIGSTOP as a normal event. */
2373 if (debug_threads)
2374 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2375 target_pid_to_str (ptid_of (thread)));
2376 }
2377 else if (stopping_threads != NOT_STOPPING_THREADS)
2378 {
2379 /* Stopping threads. We don't want this SIGSTOP to end up
2380 pending. */
2381 if (debug_threads)
2382 debug_printf ("LLW: SIGSTOP caught for %s "
2383 "while stopping threads.\n",
2384 target_pid_to_str (ptid_of (thread)));
2385 return NULL;
2386 }
2387 else
2388 {
2389 /* This is a delayed SIGSTOP. Filter out the event. */
2390 if (debug_threads)
2391 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2392 child->stepping ? "step" : "continue",
2393 target_pid_to_str (ptid_of (thread)));
2394
2395 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2396 return NULL;
2397 }
2398 }
2399
2400 child->status_pending_p = 1;
2401 child->status_pending = wstat;
2402 return child;
2403 }
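
/* A summary of linux_low_filter_event's contract, for reference: it
   returns NULL when the event was fully consumed here (an unknown
   PID stashed in stopped_pids, an uninteresting thread exit deleted,
   an extended event handled by handle_extended_wait, or a SIGSTOP
   filtered out and the LWP re-resumed), and returns CHILD with
   status_pending set when the caller should consider reporting the
   event.  */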
2404
2405 /* Resume LWPs that are currently stopped without any pending status
2406 to report, but are resumed from the core's perspective. */
2407
2408 static void
2409 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2410 {
2411 struct thread_info *thread = (struct thread_info *) entry;
2412 struct lwp_info *lp = get_thread_lwp (thread);
2413
2414 if (lp->stopped
2415 && !lp->suspended
2416 && !lp->status_pending_p
2417 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2418 {
2419 int step = thread->last_resume_kind == resume_step;
2420
2421 if (debug_threads)
2422 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2423 target_pid_to_str (ptid_of (thread)),
2424 paddress (lp->stop_pc),
2425 step);
2426
2427 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2428 }
2429 }
2430
2431 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2432 match FILTER_PTID (leaving others pending). The PTIDs can be:
2433 minus_one_ptid, to specify any child; a pid PTID, specifying all
2434 lwps of a thread group; or a PTID representing a single lwp. Store
2435 the stop status through the status pointer WSTAT. OPTIONS is
2436 passed to the waitpid call. Return 0 if no event was found and
2437 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2438 were found. Return the PID of the stopped child otherwise. */
2439
2440 static int
2441 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2442 int *wstatp, int options)
2443 {
2444 struct thread_info *event_thread;
2445 struct lwp_info *event_child, *requested_child;
2446 sigset_t block_mask, prev_mask;
2447
2448 retry:
2449 /* N.B. event_thread points to the thread_info struct that contains
2450 event_child. Keep them in sync. */
2451 event_thread = NULL;
2452 event_child = NULL;
2453 requested_child = NULL;
2454
2455 /* Check for a lwp with a pending status. */
2456
2457 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2458 {
2459 event_thread = (struct thread_info *)
2460 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2461 if (event_thread != NULL)
2462 event_child = get_thread_lwp (event_thread);
2463 if (debug_threads && event_thread)
2464 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2465 }
2466 else if (!ptid_equal (filter_ptid, null_ptid))
2467 {
2468 requested_child = find_lwp_pid (filter_ptid);
2469
2470 if (stopping_threads == NOT_STOPPING_THREADS
2471 && requested_child->status_pending_p
2472 && requested_child->collecting_fast_tracepoint)
2473 {
2474 enqueue_one_deferred_signal (requested_child,
2475 &requested_child->status_pending);
2476 requested_child->status_pending_p = 0;
2477 requested_child->status_pending = 0;
2478 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2479 }
2480
2481 if (requested_child->suspended
2482 && requested_child->status_pending_p)
2483 {
2484 internal_error (__FILE__, __LINE__,
2485 "requesting an event out of a"
2486 " suspended child?");
2487 }
2488
2489 if (requested_child->status_pending_p)
2490 {
2491 event_child = requested_child;
2492 event_thread = get_lwp_thread (event_child);
2493 }
2494 }
2495
2496 if (event_child != NULL)
2497 {
2498 if (debug_threads)
2499 debug_printf ("Got an event from pending child %ld (%04x)\n",
2500 lwpid_of (event_thread), event_child->status_pending);
2501 *wstatp = event_child->status_pending;
2502 event_child->status_pending_p = 0;
2503 event_child->status_pending = 0;
2504 current_thread = event_thread;
2505 return lwpid_of (event_thread);
2506 }
2507
2508 /* But if we don't find a pending event, we'll have to wait.
2509
2510 We only enter this loop if no process has a pending wait status.
2511 Thus any action taken in response to a wait status inside this
2512 loop is responding as soon as we detect the status, not after any
2513 pending events. */
2514
2515 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2516 all signals while here. */
2517 sigfillset (&block_mask);
2518 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
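
/* This is the classic race-free wait pattern: with SIGCHLD blocked,
   a child status that arrives between the waitpid (WNOHANG) poll
   below and the sigsuspend call leaves a signal pending, so the
   sigsuspend (which atomically swaps PREV_MASK back in) returns
   immediately instead of sleeping through the wakeup.  */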
2519
2520 /* Always pull all events out of the kernel. We'll randomly select
2521 an event LWP out of all that have events, to prevent
2522 starvation. */
2523 while (event_child == NULL)
2524 {
2525 pid_t ret = 0;
2526
2527 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2528 quirks:
2529
2530 - If the thread group leader exits while other threads in the
2531 thread group still exist, waitpid(TGID, ...) hangs. That
2532 waitpid won't return an exit status until the other threads
2533 in the group are reaped.
2534
2535 - When a non-leader thread execs, that thread just vanishes
2536 without reporting an exit (so we'd hang if we waited for it
2537 explicitly in that case). The exec event is reported to
2538 the TGID pid. */
2539 errno = 0;
2540 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2541
2542 if (debug_threads)
2543 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2544 ret, errno ? strerror (errno) : "ERRNO-OK");
2545
2546 if (ret > 0)
2547 {
2548 if (debug_threads)
2549 {
2550 debug_printf ("LLW: waitpid %ld received %s\n",
2551 (long) ret, status_to_str (*wstatp));
2552 }
2553
2554 /* Filter all events. IOW, leave all events pending. We'll
2555 randomly select an event LWP out of all that have events
2556 below. */
2557 linux_low_filter_event (ret, *wstatp);
2558 /* Retry until nothing comes out of waitpid. A single
2559 SIGCHLD can indicate more than one child stopped. */
2560 continue;
2561 }
2562
2563 /* Now that we've pulled all events out of the kernel, resume
2564 LWPs that don't have an interesting event to report. */
2565 if (stopping_threads == NOT_STOPPING_THREADS)
2566 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2567
2568 /* ... and find an LWP with a status to report to the core, if
2569 any. */
2570 event_thread = (struct thread_info *)
2571 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2572 if (event_thread != NULL)
2573 {
2574 event_child = get_thread_lwp (event_thread);
2575 *wstatp = event_child->status_pending;
2576 event_child->status_pending_p = 0;
2577 event_child->status_pending = 0;
2578 break;
2579 }
2580
2581 /* Check for zombie thread group leaders. Those can't be reaped
2582 until all other threads in the thread group are. */
2583 check_zombie_leaders ();
2584
2585 /* If there are no resumed children left in the set of LWPs we
2586 want to wait for, bail. We can't just block in
2587 waitpid/sigsuspend, because lwps might have been left stopped
2588 in trace-stop state, and we'd be stuck forever waiting for
2589 their status to change (which would only happen if we resumed
2590 them). Even if WNOHANG is set, this return code is preferred
2591 over 0 (below), as it is more detailed. */
2592 if ((find_inferior (&all_threads,
2593 not_stopped_callback,
2594 &wait_ptid) == NULL))
2595 {
2596 if (debug_threads)
2597 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2598 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2599 return -1;
2600 }
2601
2602 /* No interesting event to report to the caller. */
2603 if ((options & WNOHANG))
2604 {
2605 if (debug_threads)
2606 debug_printf ("WNOHANG set, no event found\n");
2607
2608 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2609 return 0;
2610 }
2611
2612 /* Block until we get an event reported with SIGCHLD. */
2613 if (debug_threads)
2614 debug_printf ("sigsuspend'ing\n");
2615
2616 sigsuspend (&prev_mask);
2617 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2618 goto retry;
2619 }
2620
2621 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2622
2623 current_thread = event_thread;
2624
2625 return lwpid_of (event_thread);
2626 }
2627
2628 /* Wait for an event from child(ren) PTID. PTIDs can be:
2629 minus_one_ptid, to specify any child; a pid PTID, specifying all
2630 lwps of a thread group; or a PTID representing a single lwp. Store
2631 the stop status through the status pointer WSTAT. OPTIONS is
2632 passed to the waitpid call. Return 0 if no event was found and
2633 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2634 were found. Return the PID of the stopped child otherwise. */
2635
2636 static int
2637 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2638 {
2639 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2640 }
2641
2642 /* Count the LWPs that have had events. */
2643
2644 static int
2645 count_events_callback (struct inferior_list_entry *entry, void *data)
2646 {
2647 struct thread_info *thread = (struct thread_info *) entry;
2648 struct lwp_info *lp = get_thread_lwp (thread);
2649 int *count = (int *) data;
2650
2651 gdb_assert (count != NULL);
2652
2653 /* Count only resumed LWPs that have an event pending. */
2654 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2655 && lp->status_pending_p)
2656 (*count)++;
2657
2658 return 0;
2659 }
2660
2661 /* Select the LWP (if any) that is currently being single-stepped. */
2662
2663 static int
2664 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2665 {
2666 struct thread_info *thread = (struct thread_info *) entry;
2667 struct lwp_info *lp = get_thread_lwp (thread);
2668
2669 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2670 && thread->last_resume_kind == resume_step
2671 && lp->status_pending_p)
2672 return 1;
2673 else
2674 return 0;
2675 }
2676
2677 /* Select the Nth LWP that has had an event. */
2678
2679 static int
2680 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2681 {
2682 struct thread_info *thread = (struct thread_info *) entry;
2683 struct lwp_info *lp = get_thread_lwp (thread);
2684 int *selector = (int *) data;
2685
2686 gdb_assert (selector != NULL);
2687
2688 /* Select only resumed LWPs that have an event pending. */
2689 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2690 && lp->status_pending_p)
2691 if ((*selector)-- == 0)
2692 return 1;
2693
2694 return 0;
2695 }
2696
2697 /* Select one LWP out of those that have events pending. */
2698
2699 static void
2700 select_event_lwp (struct lwp_info **orig_lp)
2701 {
2702 int num_events = 0;
2703 int random_selector;
2704 struct thread_info *event_thread = NULL;
2705
2706 /* In all-stop, give preference to the LWP that is being
2707 single-stepped. There will be at most one, and it's the LWP that
2708 the core is most interested in. If we didn't do this, then we'd
2709 have to handle pending step SIGTRAPs somehow in case the core
2710 later continues the previously-stepped thread, otherwise we'd
2711 report the pending SIGTRAP, and the core, not having stepped the
2712 thread, wouldn't understand what the trap was for, and therefore
2713 would report it to the user as a random signal. */
2714 if (!non_stop)
2715 {
2716 event_thread
2717 = (struct thread_info *) find_inferior (&all_threads,
2718 select_singlestep_lwp_callback,
2719 NULL);
2720 if (event_thread != NULL)
2721 {
2722 if (debug_threads)
2723 debug_printf ("SEL: Select single-step %s\n",
2724 target_pid_to_str (ptid_of (event_thread)));
2725 }
2726 }
2727 if (event_thread == NULL)
2728 {
2729 /* No single-stepping LWP. Select one at random, out of those
2730 which have had events. */
2731
2732 /* First see how many events we have. */
2733 find_inferior (&all_threads, count_events_callback, &num_events);
2734 gdb_assert (num_events > 0);
2735
2736 /* Now randomly pick a LWP out of those that have had
2737 events. */
2738 random_selector = (int)
2739 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
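/* The expression above is the standard idiom for mapping rand ()
   uniformly onto 0 .. num_events - 1: scaling by
   num_events / (RAND_MAX + 1.0) avoids the modulo bias that
   rand () % num_events would introduce.  */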
2740
2741 if (debug_threads && num_events > 1)
2742 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2743 num_events, random_selector);
2744
2745 event_thread
2746 = (struct thread_info *) find_inferior (&all_threads,
2747 select_event_lwp_callback,
2748 &random_selector);
2749 }
2750
2751 if (event_thread != NULL)
2752 {
2753 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2754
2755 /* Switch the event LWP. */
2756 *orig_lp = event_lp;
2757 }
2758 }
2759
2760 /* Decrement the suspend count of an LWP. */
2761
2762 static int
2763 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2764 {
2765 struct thread_info *thread = (struct thread_info *) entry;
2766 struct lwp_info *lwp = get_thread_lwp (thread);
2767
2768 /* Ignore EXCEPT. */
2769 if (lwp == except)
2770 return 0;
2771
2772 lwp_suspended_decr (lwp);
2773 return 0;
2774 }
2775
2776 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2777 non-NULL. */
2778
2779 static void
2780 unsuspend_all_lwps (struct lwp_info *except)
2781 {
2782 find_inferior (&all_threads, unsuspend_one_lwp, except);
2783 }
2784
2785 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2786 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2787 void *data);
2788 static int lwp_running (struct inferior_list_entry *entry, void *data);
2789 static ptid_t linux_wait_1 (ptid_t ptid,
2790 struct target_waitstatus *ourstatus,
2791 int target_options);
2792
2793 /* Stabilize threads (move out of jump pads).
2794
2795 If a thread is midway through collecting a fast tracepoint, we need to
2796 finish the collection and move it out of the jump pad before
2797 reporting the signal.
2798
2799 This avoids recursion while collecting (when a signal arrives
2800 midway, and the signal handler itself collects), which would trash
2801 the trace buffer. In case the user set a breakpoint in a signal
2802 handler, this avoids the backtrace showing the jump pad, etc.
2803 Most importantly, there are certain things we can't do safely if
2804 threads are stopped in a jump pad (or in its callees). For
2805 example:
2806
2807 - starting a new trace run. A thread still collecting the
2808 previous run could trash the trace buffer when resumed. The trace
2809 buffer control structures would have been reset but the thread would
2810 have no way to tell. The thread could even be midway through
2811 memcpy'ing to the buffer, which would mean that when resumed, it
2812 would clobber the trace buffer that had been set up for a new run.
2813
2814 - we can't rewrite/reuse the jump pads for new tracepoints
2815 safely. Say you do tstart while a thread is stopped midway through
2816 a collection. When the thread is later resumed, it finishes the
2817 collection, and returns to the jump pad, to execute the original
2818 instruction that was under the tracepoint jump at the time the
2819 older run had been started. If the jump pad had since been
2820 rewritten for something else in the new run, the thread would now
2821 execute the wrong / random instructions. */
2822
2823 static void
2824 linux_stabilize_threads (void)
2825 {
2826 struct thread_info *saved_thread;
2827 struct thread_info *thread_stuck;
2828
2829 thread_stuck
2830 = (struct thread_info *) find_inferior (&all_threads,
2831 stuck_in_jump_pad_callback,
2832 NULL);
2833 if (thread_stuck != NULL)
2834 {
2835 if (debug_threads)
2836 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2837 lwpid_of (thread_stuck));
2838 return;
2839 }
2840
2841 saved_thread = current_thread;
2842
2843 stabilizing_threads = 1;
2844
2845 /* Kick 'em all. */
2846 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2847
2848 /* Loop until all are stopped out of the jump pads. */
2849 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2850 {
2851 struct target_waitstatus ourstatus;
2852 struct lwp_info *lwp;
2853 int wstat;
2854
2855 /* Note that we go through the full wait event loop. While
2856 moving threads out of the jump pad, we need to be able to step
2857 over internal breakpoints and such. */
2858 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2859
2860 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2861 {
2862 lwp = get_thread_lwp (current_thread);
2863
2864 /* Lock it. */
2865 lwp_suspended_inc (lwp);
2866
2867 if (ourstatus.value.sig != GDB_SIGNAL_0
2868 || current_thread->last_resume_kind == resume_stop)
2869 {
2870 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2871 enqueue_one_deferred_signal (lwp, &wstat);
2872 }
2873 }
2874 }
2875
2876 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2877
2878 stabilizing_threads = 0;
2879
2880 current_thread = saved_thread;
2881
2882 if (debug_threads)
2883 {
2884 thread_stuck
2885 = (struct thread_info *) find_inferior (&all_threads,
2886 stuck_in_jump_pad_callback,
2887 NULL);
2888 if (thread_stuck != NULL)
2889 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2890 lwpid_of (thread_stuck));
2891 }
2892 }
2893
2894 /* Convenience function that is called when the kernel reports an
2895 event that is not passed out to GDB. */
2896
2897 static ptid_t
2898 ignore_event (struct target_waitstatus *ourstatus)
2899 {
2900 /* If we got an event, there may still be others, as a single
2901 SIGCHLD can indicate more than one child stopped. This forces
2902 another target_wait call. */
2903 async_file_mark ();
2904
2905 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2906 return null_ptid;
2907 }
2908
2909 /* Convenience function that is called when the kernel reports an exit
2910 event. This decides whether to report the event to GDB as a
2911 process exit event or a thread exit event, or to suppress the
2912 event altogether. */
2913
2914 static ptid_t
2915 filter_exit_event (struct lwp_info *event_child,
2916 struct target_waitstatus *ourstatus)
2917 {
2918 struct thread_info *thread = get_lwp_thread (event_child);
2919 ptid_t ptid = ptid_of (thread);
2920
2921 if (!last_thread_of_process_p (pid_of (thread)))
2922 {
2923 if (report_thread_events)
2924 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2925 else
2926 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2927
2928 delete_lwp (event_child);
2929 }
2930 return ptid;
2931 }
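
/* Note the asymmetry: the exit of the process's last thread is
   always passed through (as a process exit event), while a non-final
   thread exit is reported only if GDB asked for thread exit events,
   and is otherwise swallowed after deleting the LWP.  */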
2932
2933 /* Wait for an event from a debugged process; return its status. */
2934
2935 static ptid_t
2936 linux_wait_1 (ptid_t ptid,
2937 struct target_waitstatus *ourstatus, int target_options)
2938 {
2939 int w;
2940 struct lwp_info *event_child;
2941 int options;
2942 int pid;
2943 int step_over_finished;
2944 int bp_explains_trap;
2945 int maybe_internal_trap;
2946 int report_to_gdb;
2947 int trace_event;
2948 int in_step_range;
2949
2950 if (debug_threads)
2951 {
2952 debug_enter ();
2953 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2954 }
2955
2956 /* Translate generic target options into linux options. */
2957 options = __WALL;
2958 if (target_options & TARGET_WNOHANG)
2959 options |= WNOHANG;
2960
2961 bp_explains_trap = 0;
2962 trace_event = 0;
2963 in_step_range = 0;
2964 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2965
2966 if (ptid_equal (step_over_bkpt, null_ptid))
2967 pid = linux_wait_for_event (ptid, &w, options);
2968 else
2969 {
2970 if (debug_threads)
2971 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2972 target_pid_to_str (step_over_bkpt));
2973 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2974 }
2975
2976 if (pid == 0)
2977 {
2978 gdb_assert (target_options & TARGET_WNOHANG);
2979
2980 if (debug_threads)
2981 {
2982 debug_printf ("linux_wait_1 ret = null_ptid, "
2983 "TARGET_WAITKIND_IGNORE\n");
2984 debug_exit ();
2985 }
2986
2987 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2988 return null_ptid;
2989 }
2990 else if (pid == -1)
2991 {
2992 if (debug_threads)
2993 {
2994 debug_printf ("linux_wait_1 ret = null_ptid, "
2995 "TARGET_WAITKIND_NO_RESUMED\n");
2996 debug_exit ();
2997 }
2998
2999 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3000 return null_ptid;
3001 }
3002
3003 event_child = get_thread_lwp (current_thread);
3004
3005 /* linux_wait_for_event only returns an exit status for the last
3006 child of a process. Report it. */
3007 if (WIFEXITED (w) || WIFSIGNALED (w))
3008 {
3009 if (WIFEXITED (w))
3010 {
3011 ourstatus->kind = TARGET_WAITKIND_EXITED;
3012 ourstatus->value.integer = WEXITSTATUS (w);
3013
3014 if (debug_threads)
3015 {
3016 debug_printf ("linux_wait_1 ret = %s, exited with "
3017 "retcode %d\n",
3018 target_pid_to_str (ptid_of (current_thread)),
3019 WEXITSTATUS (w));
3020 debug_exit ();
3021 }
3022 }
3023 else
3024 {
3025 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3026 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3027
3028 if (debug_threads)
3029 {
3030 debug_printf ("linux_wait_1 ret = %s, terminated with "
3031 "signal %d\n",
3032 target_pid_to_str (ptid_of (current_thread)),
3033 WTERMSIG (w));
3034 debug_exit ();
3035 }
3036 }
3037
3038 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3039 return filter_exit_event (event_child, ourstatus);
3040
3041 return ptid_of (current_thread);
3042 }
3043
3044 /* If step-over executes a breakpoint instruction, it means a
3045 gdb/gdbserver breakpoint had been planted on top of a permanent
3046 breakpoint. The PC has been adjusted by
3047 check_stopped_by_breakpoint to point at the breakpoint address.
3048 Advance the PC manually past the breakpoint; otherwise the
3049 program would keep trapping the permanent breakpoint forever. */
3050 if (!ptid_equal (step_over_bkpt, null_ptid)
3051 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
3052 {
3053 int increment_pc = 0;
3054 int breakpoint_kind = 0;
3055 CORE_ADDR stop_pc = event_child->stop_pc;
3056
3057 breakpoint_kind = the_target->breakpoint_kind_from_pc (&stop_pc);
3058 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3059
3060 if (debug_threads)
3061 {
3062 debug_printf ("step-over for %s executed software breakpoint\n",
3063 target_pid_to_str (ptid_of (current_thread)));
3064 }
3065
3066 if (increment_pc != 0)
3067 {
3068 struct regcache *regcache
3069 = get_thread_regcache (current_thread, 1);
3070
3071 event_child->stop_pc += increment_pc;
3072 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3073
3074 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3075 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3076 }
3077 }
3078
3079 /* If this event was not handled before, and is not a SIGTRAP, we
3080 report it. SIGILL and SIGSEGV are also treated as traps in case
3081 a breakpoint is inserted at the current PC. If this target does
3082 not support internal breakpoints at all, we also report the
3083 SIGTRAP without further processing; it's of no concern to us. */
3084 maybe_internal_trap
3085 = (supports_breakpoints ()
3086 && (WSTOPSIG (w) == SIGTRAP
3087 || ((WSTOPSIG (w) == SIGILL
3088 || WSTOPSIG (w) == SIGSEGV)
3089 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3090
3091 if (maybe_internal_trap)
3092 {
3093 /* Handle anything that requires bookkeeping before deciding to
3094 report the event or continue waiting. */
3095
3096 /* First check if we can explain the SIGTRAP with an internal
3097 breakpoint, or if we should possibly report the event to GDB.
3098 Do this before anything that may remove or insert a
3099 breakpoint. */
3100 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3101
3102 /* We have a SIGTRAP, possibly a step-over dance has just
3103 finished. If so, tweak the state machine accordingly,
3104 reinsert breakpoints and delete any reinsert (software
3105 single-step) breakpoints. */
3106 step_over_finished = finish_step_over (event_child);
3107
3108 /* Now invoke the callbacks of any internal breakpoints there. */
3109 check_breakpoints (event_child->stop_pc);
3110
3111 /* Handle tracepoint data collecting. This may overflow the
3112 trace buffer, and cause a tracing stop, removing
3113 breakpoints. */
3114 trace_event = handle_tracepoints (event_child);
3115
3116 if (bp_explains_trap)
3117 {
3118 /* If we stepped or ran into an internal breakpoint, we've
3119 already handled it. So next time we resume (from this
3120 PC), we should step over it. */
3121 if (debug_threads)
3122 debug_printf ("Hit a gdbserver breakpoint.\n");
3123
3124 if (breakpoint_here (event_child->stop_pc))
3125 event_child->need_step_over = 1;
3126 }
3127 }
3128 else
3129 {
3130 /* We have some other signal, possibly a step-over dance was in
3131 progress, and it should be cancelled too. */
3132 step_over_finished = finish_step_over (event_child);
3133 }
3134
3135 /* We have all the data we need. Either report the event to GDB, or
3136 resume threads and keep waiting for more. */
3137
3138 /* If we're collecting a fast tracepoint, finish the collection and
3139 move out of the jump pad before delivering a signal. See
3140 linux_stabilize_threads. */
3141
3142 if (WIFSTOPPED (w)
3143 && WSTOPSIG (w) != SIGTRAP
3144 && supports_fast_tracepoints ()
3145 && agent_loaded_p ())
3146 {
3147 if (debug_threads)
3148 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3149 "to defer or adjust it.\n",
3150 WSTOPSIG (w), lwpid_of (current_thread));
3151
3152 /* Allow debugging the jump pad itself. */
3153 if (current_thread->last_resume_kind != resume_step
3154 && maybe_move_out_of_jump_pad (event_child, &w))
3155 {
3156 enqueue_one_deferred_signal (event_child, &w);
3157
3158 if (debug_threads)
3159 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3160 WSTOPSIG (w), lwpid_of (current_thread));
3161
3162 linux_resume_one_lwp (event_child, 0, 0, NULL);
3163
3164 return ignore_event (ourstatus);
3165 }
3166 }
3167
3168 if (event_child->collecting_fast_tracepoint)
3169 {
3170 if (debug_threads)
3171 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3172 "Check if we're already there.\n",
3173 lwpid_of (current_thread),
3174 event_child->collecting_fast_tracepoint);
3175
3176 trace_event = 1;
3177
3178 event_child->collecting_fast_tracepoint
3179 = linux_fast_tracepoint_collecting (event_child, NULL);
3180
3181 if (event_child->collecting_fast_tracepoint != 1)
3182 {
3183 /* No longer need this breakpoint. */
3184 if (event_child->exit_jump_pad_bkpt != NULL)
3185 {
3186 if (debug_threads)
3187 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3188 "stopping all threads momentarily.\n");
3189
3190 /* Other running threads could hit this breakpoint.
3191 We don't handle moribund locations like GDB does,
3192 instead we always pause all threads when removing
3193 breakpoints, so that any step-over or
3194 decr_pc_after_break adjustment is always taken
3195 care of while the breakpoint is still
3196 inserted. */
3197 stop_all_lwps (1, event_child);
3198
3199 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3200 event_child->exit_jump_pad_bkpt = NULL;
3201
3202 unstop_all_lwps (1, event_child);
3203
3204 gdb_assert (event_child->suspended >= 0);
3205 }
3206 }
3207
3208 if (event_child->collecting_fast_tracepoint == 0)
3209 {
3210 if (debug_threads)
3211 debug_printf ("fast tracepoint finished "
3212 "collecting successfully.\n");
3213
3214 /* We may have a deferred signal to report. */
3215 if (dequeue_one_deferred_signal (event_child, &w))
3216 {
3217 if (debug_threads)
3218 debug_printf ("dequeued one signal.\n");
3219 }
3220 else
3221 {
3222 if (debug_threads)
3223 debug_printf ("no deferred signals.\n");
3224
3225 if (stabilizing_threads)
3226 {
3227 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3228 ourstatus->value.sig = GDB_SIGNAL_0;
3229
3230 if (debug_threads)
3231 {
3232 debug_printf ("linux_wait_1 ret = %s, stopped "
3233 "while stabilizing threads\n",
3234 target_pid_to_str (ptid_of (current_thread)));
3235 debug_exit ();
3236 }
3237
3238 return ptid_of (current_thread);
3239 }
3240 }
3241 }
3242 }
3243
3244 /* Check whether GDB would be interested in this event. */
3245
3246 /* If GDB is not interested in this signal, don't stop other
3247 threads, and don't report it to GDB. Just resume the inferior
3248 right away. We do this for threading-related signals as well as
3249 any that GDB specifically requested we ignore. But never ignore
3250 SIGSTOP if we sent it ourselves, and do not ignore signals when
3251 stepping - they may require special handling to skip the signal
3252 handler. Also never ignore signals that could be caused by a
3253 breakpoint. */
3254 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3255 thread library? */
3256 if (WIFSTOPPED (w)
3257 && current_thread->last_resume_kind != resume_step
3258 && (
3259 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3260 (current_process ()->priv->thread_db != NULL
3261 && (WSTOPSIG (w) == __SIGRTMIN
3262 || WSTOPSIG (w) == __SIGRTMIN + 1))
3263 ||
3264 #endif
3265 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3266 && !(WSTOPSIG (w) == SIGSTOP
3267 && current_thread->last_resume_kind == resume_stop)
3268 && !linux_wstatus_maybe_breakpoint (w))))
3269 {
3270 siginfo_t info, *info_p;
3271
3272 if (debug_threads)
3273 debug_printf ("Ignored signal %d for LWP %ld.\n",
3274 WSTOPSIG (w), lwpid_of (current_thread));
3275
3276 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3277 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3278 info_p = &info;
3279 else
3280 info_p = NULL;
3281
3282 if (step_over_finished)
3283 {
3284 /* We cancelled this thread's step-over above. We still
3285 need to unsuspend all other LWPs, and set them back
3286 running again while the signal handler runs. */
3287 unsuspend_all_lwps (event_child);
3288
3289 /* Enqueue the pending signal info so that proceed_all_lwps
3290 doesn't lose it. */
3291 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3292
3293 proceed_all_lwps ();
3294 }
3295 else
3296 {
3297 linux_resume_one_lwp (event_child, event_child->stepping,
3298 WSTOPSIG (w), info_p);
3299 }
3300 return ignore_event (ourstatus);
3301 }
3302
3303 /* Note that all addresses are always "out of the step range" when
3304 there's no range to begin with. */
3305 in_step_range = lwp_in_step_range (event_child);
3306
3307 /* If GDB wanted this thread to single step, and the thread is out
3308 of the step range, we always want to report the SIGTRAP, and let
3309 GDB handle it. Watchpoints should always be reported. So should
3310 signals we can't explain. A SIGTRAP we can't explain could be a
3311 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3312 do, we'll be able to handle GDB breakpoints on top of internal
3313 breakpoints, by handling the internal breakpoint and still
3314 reporting the event to GDB. If we don't, we're out of luck, GDB
3315 won't see the breakpoint hit. If we see a single-step event but
3316 the thread should be continuing, don't pass the trap to GDB.
3317 That indicates that we had previously finished a single-step but
3318 left the single-step pending -- see
3319 complete_ongoing_step_over. */
3320 report_to_gdb = (!maybe_internal_trap
3321 || (current_thread->last_resume_kind == resume_step
3322 && !in_step_range)
3323 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3324 || (!in_step_range
3325 && !bp_explains_trap
3326 && !trace_event
3327 && !step_over_finished
3328 && !(current_thread->last_resume_kind == resume_continue
3329 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3330 || (gdb_breakpoint_here (event_child->stop_pc)
3331 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3332 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3333 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3334
3335 run_breakpoint_commands (event_child->stop_pc);
3336
3337 /* We found no reason GDB would want us to stop. We either hit one
3338 of our own breakpoints, or finished an internal step GDB
3339 shouldn't know about. */
3340 if (!report_to_gdb)
3341 {
3342 if (debug_threads)
3343 {
3344 if (bp_explains_trap)
3345 debug_printf ("Hit a gdbserver breakpoint.\n");
3346 if (step_over_finished)
3347 debug_printf ("Step-over finished.\n");
3348 if (trace_event)
3349 debug_printf ("Tracepoint event.\n");
3350 if (lwp_in_step_range (event_child))
3351 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3352 paddress (event_child->stop_pc),
3353 paddress (event_child->step_range_start),
3354 paddress (event_child->step_range_end));
3355 }
3356
3357 /* We're not reporting this breakpoint to GDB, so apply the
3358 decr_pc_after_break adjustment to the inferior's regcache
3359 ourselves. */
3360
3361 if (the_low_target.set_pc != NULL)
3362 {
3363 struct regcache *regcache
3364 = get_thread_regcache (current_thread, 1);
3365 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3366 }
3367
3368 /* We may have finished stepping over a breakpoint. If so,
3369 we've stopped and suspended all LWPs momentarily except the
3370 stepping one. This is where we resume them all again. We're
3371 going to keep waiting, so use proceed, which handles stepping
3372 over the next breakpoint. */
3373 if (debug_threads)
3374 debug_printf ("proceeding all threads.\n");
3375
3376 if (step_over_finished)
3377 unsuspend_all_lwps (event_child);
3378
3379 proceed_all_lwps ();
3380 return ignore_event (ourstatus);
3381 }
3382
3383 if (debug_threads)
3384 {
3385 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3386 {
3387 char *str;
3388
3389 str = target_waitstatus_to_string (&event_child->waitstatus);
3390 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3391 lwpid_of (get_lwp_thread (event_child)), str);
3392 xfree (str);
3393 }
3394 if (current_thread->last_resume_kind == resume_step)
3395 {
3396 if (event_child->step_range_start == event_child->step_range_end)
3397 debug_printf ("GDB wanted to single-step, reporting event.\n");
3398 else if (!lwp_in_step_range (event_child))
3399 debug_printf ("Out of step range, reporting event.\n");
3400 }
3401 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3402 debug_printf ("Stopped by watchpoint.\n");
3403 else if (gdb_breakpoint_here (event_child->stop_pc))
3404 debug_printf ("Stopped by GDB breakpoint.\n");
3405
3406 debug_printf ("Hit a non-gdbserver trap event.\n");
3407 }
3408
3409 /* Alright, we're going to report a stop. */
3410
3411 if (!stabilizing_threads)
3412 {
3413 /* In all-stop, stop all threads. */
3414 if (!non_stop)
3415 stop_all_lwps (0, NULL);
3416
3417 /* If we're not waiting for a specific LWP, choose an event LWP
3418 from among those that have had events. Giving equal priority
3419 to all LWPs that have had events helps prevent
3420 starvation. */
3421 if (ptid_equal (ptid, minus_one_ptid))
3422 {
3423 event_child->status_pending_p = 1;
3424 event_child->status_pending = w;
3425
3426 select_event_lwp (&event_child);
3427
3428 /* current_thread and event_child must stay in sync. */
3429 current_thread = get_lwp_thread (event_child);
3430
3431 event_child->status_pending_p = 0;
3432 w = event_child->status_pending;
3433 }
3434
3435 if (step_over_finished)
3436 {
3437 if (!non_stop)
3438 {
3439 /* If we were doing a step-over, all other threads but
3440 the stepping one had been paused in start_step_over,
3441 with their suspend counts incremented. We don't want
3442 to do a full unstop/unpause, because we're in
3443 all-stop mode (so we want threads stopped), but we
3444 still need to unsuspend the other threads, to
3445 decrement their `suspended' count back. */
3446 unsuspend_all_lwps (event_child);
3447 }
3448 else
3449 {
3450 /* If we just finished a step-over, then all threads had
3451 been momentarily paused. In all-stop, that's fine,
3452 we want threads stopped by now anyway. In non-stop,
3453 we need to re-resume threads that GDB wanted to be
3454 running. */
3455 unstop_all_lwps (1, event_child);
3456 }
3457 }
3458
3459 /* Stabilize threads (move out of jump pads). */
3460 if (!non_stop)
3461 stabilize_threads ();
3462 }
3463 else
3464 {
3465 /* If we just finished a step-over, then all threads had been
3466 momentarily paused. In all-stop, that's fine, we want
3467 threads stopped by now anyway. In non-stop, we need to
3468 re-resume threads that GDB wanted to be running. */
3469 if (step_over_finished)
3470 unstop_all_lwps (1, event_child);
3471 }
3472
3473 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3474 {
3475 /* If the reported event is an exit, fork, vfork or exec, let
3476 GDB know. */
3477 *ourstatus = event_child->waitstatus;
3478 /* Clear the event lwp's waitstatus since we handled it already. */
3479 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3480 }
3481 else
3482 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3483
3484 /* Now that we've selected our final event LWP, un-adjust its PC if
3485 it was a software breakpoint, and the client doesn't know we can
3486 adjust the breakpoint ourselves. */
3487 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3488 && !swbreak_feature)
3489 {
3490 int decr_pc = the_low_target.decr_pc_after_break;
3491
3492 if (decr_pc != 0)
3493 {
3494 struct regcache *regcache
3495 = get_thread_regcache (current_thread, 1);
3496 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3497 }
3498 }
3499
3500 if (current_thread->last_resume_kind == resume_stop
3501 && WSTOPSIG (w) == SIGSTOP)
3502 {
3503 /* A thread that has been requested to stop by GDB with vCont;t,
3504 and it stopped cleanly, so report it as SIG0. The use of
3505 SIGSTOP is an implementation detail. */
3506 ourstatus->value.sig = GDB_SIGNAL_0;
3507 }
3508 else if (current_thread->last_resume_kind == resume_stop
3509 && WSTOPSIG (w) != SIGSTOP)
3510 {
3511 /* A thread that has been requested to stop by GDB with vCont;t,
3512 but it stopped for other reasons. */
3513 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3514 }
3515 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3516 {
3517 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3518 }
3519
3520 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3521
3522 if (debug_threads)
3523 {
3524 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3525 target_pid_to_str (ptid_of (current_thread)),
3526 ourstatus->kind, ourstatus->value.sig);
3527 debug_exit ();
3528 }
3529
3530 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3531 return filter_exit_event (event_child, ourstatus);
3532
3533 return ptid_of (current_thread);
3534 }
3535
3536 /* Get rid of any pending event in the pipe. */
3537 static void
3538 async_file_flush (void)
3539 {
3540 int ret;
3541 char buf;
3542
3543 do
3544 ret = read (linux_event_pipe[0], &buf, 1);
3545 while (ret >= 0 || (ret == -1 && errno == EINTR));
3546 }
3547
3548 /* Put something in the pipe, so the event loop wakes up. */
3549 static void
3550 async_file_mark (void)
3551 {
3552 int ret;
3553
3554 async_file_flush ();
3555
3556 do
3557 ret = write (linux_event_pipe[1], "+", 1);
3558 while (ret == 0 || (ret == -1 && errno == EINTR));
3559
3560 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3561 be awakened anyway. */
3562 }
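
/* async_file_flush and async_file_mark implement the usual self-pipe
   trick: the event loop watches linux_event_pipe[0], and writing a
   byte here wakes it up.  Draining "until read fails" assumes both
   pipe ends were made non-blocking when the pipe was created
   (presumably via fcntl with O_NONBLOCK elsewhere in this file);
   with a blocking descriptor, async_file_flush would hang once the
   pipe is empty.  */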
3563
3564 static ptid_t
3565 linux_wait (ptid_t ptid,
3566 struct target_waitstatus *ourstatus, int target_options)
3567 {
3568 ptid_t event_ptid;
3569
3570 /* Flush the async file first. */
3571 if (target_is_async_p ())
3572 async_file_flush ();
3573
3574 do
3575 {
3576 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3577 }
3578 while ((target_options & TARGET_WNOHANG) == 0
3579 && ptid_equal (event_ptid, null_ptid)
3580 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3581
3582 /* If at least one stop was reported, there may be more. A single
3583 SIGCHLD can signal more than one child stop. */
3584 if (target_is_async_p ()
3585 && (target_options & TARGET_WNOHANG) != 0
3586 && !ptid_equal (event_ptid, null_ptid))
3587 async_file_mark ();
3588
3589 return event_ptid;
3590 }
3591
3592 /* Send a signal to an LWP. */
3593
3594 static int
3595 kill_lwp (unsigned long lwpid, int signo)
3596 {
3597 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3598 fails, then we are not using NPTL threads and we should be using kill. */
3599
3600 #ifdef __NR_tkill
3601 {
3602 static int tkill_failed;
3603
3604 if (!tkill_failed)
3605 {
3606 int ret;
3607
3608 errno = 0;
3609 ret = syscall (__NR_tkill, lwpid, signo);
3610 if (errno != ENOSYS)
3611 return ret;
3612 tkill_failed = 1;
3613 }
3614 }
3615 #endif
3616
3617 return kill (lwpid, signo);
3618 }
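
/* Why tkill rather than plain kill: kill addresses the whole thread
   group and the kernel may deliver the signal to any thread in it,
   whereas tkill (and its newer replacement tgkill) targets exactly
   the LWP we mean to stop.  The tkill_failed latch assumes the only
   failure worth caching is ENOSYS, i.e. a kernel that lacks the
   syscall altogether.  */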
3619
3620 void
3621 linux_stop_lwp (struct lwp_info *lwp)
3622 {
3623 send_sigstop (lwp);
3624 }
3625
3626 static void
3627 send_sigstop (struct lwp_info *lwp)
3628 {
3629 int pid;
3630
3631 pid = lwpid_of (get_lwp_thread (lwp));
3632
3633 /* If we already have a pending stop signal for this LWP, don't
3634 send another. */
3635 if (lwp->stop_expected)
3636 {
3637 if (debug_threads)
3638 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3639
3640 return;
3641 }
3642
3643 if (debug_threads)
3644 debug_printf ("Sending sigstop to lwp %d\n", pid);
3645
3646 lwp->stop_expected = 1;
3647 kill_lwp (pid, SIGSTOP);
3648 }
3649
3650 static int
3651 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3652 {
3653 struct thread_info *thread = (struct thread_info *) entry;
3654 struct lwp_info *lwp = get_thread_lwp (thread);
3655
3656 /* Ignore EXCEPT. */
3657 if (lwp == except)
3658 return 0;
3659
3660 if (lwp->stopped)
3661 return 0;
3662
3663 send_sigstop (lwp);
3664 return 0;
3665 }
3666
3667 /* Increment the suspend count of an LWP, and stop it if not already
3668 stopped. */
3669 static int
3670 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3671 void *except)
3672 {
3673 struct thread_info *thread = (struct thread_info *) entry;
3674 struct lwp_info *lwp = get_thread_lwp (thread);
3675
3676 /* Ignore EXCEPT. */
3677 if (lwp == except)
3678 return 0;
3679
3680 lwp_suspended_inc (lwp);
3681
3682 return send_sigstop_callback (entry, except);
3683 }
3684
3685 static void
3686 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3687 {
3688 /* Store the exit status for later. */
3689 lwp->status_pending_p = 1;
3690 lwp->status_pending = wstat;
3691
3692 /* Store in waitstatus as well, as there's nothing else to process
3693 for this event. */
3694 if (WIFEXITED (wstat))
3695 {
3696 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3697 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3698 }
3699 else if (WIFSIGNALED (wstat))
3700 {
3701 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3702 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3703 }
3704
3705 /* Prevent trying to stop it. */
3706 lwp->stopped = 1;
3707
3708 /* No further stops are expected from a dead lwp. */
3709 lwp->stop_expected = 0;
3710 }
3711
3712 /* Return true if LWP has exited already, and has a pending exit event
3713 to report to GDB. */
3714
3715 static int
3716 lwp_is_marked_dead (struct lwp_info *lwp)
3717 {
3718 return (lwp->status_pending_p
3719 && (WIFEXITED (lwp->status_pending)
3720 || WIFSIGNALED (lwp->status_pending)));
3721 }
3722
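/* A sketch of the wait-status decoding that mark_lwp_dead and
   lwp_is_marked_dead rely on above, assuming only POSIX <sys/wait.h>:
   a status from waitpid is an exit (WIFEXITED), a termination by
   signal (WIFSIGNALED), or a stop (WIFSTOPPED), and the matching
   detail macro extracts the exit code or signal number.  Not
   gdbserver code.  */
#include <sys/wait.h>
#include <stdio.h>

static void
demo_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
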
3723 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3724
3725 static void
3726 wait_for_sigstop (void)
3727 {
3728 struct thread_info *saved_thread;
3729 ptid_t saved_tid;
3730 int wstat;
3731 int ret;
3732
3733 saved_thread = current_thread;
3734 if (saved_thread != NULL)
3735 saved_tid = saved_thread->entry.id;
3736 else
3737 saved_tid = null_ptid; /* avoid bogus unused warning */
3738
3739 if (debug_threads)
3740 debug_printf ("wait_for_sigstop: pulling events\n");
3741
3742 /* Passing NULL_PTID as filter indicates we want all events to be
3743 left pending. Eventually this returns when there are no
3744 unwaited-for children left. */
3745 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3746 &wstat, __WALL);
3747 gdb_assert (ret == -1);
3748
3749 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3750 current_thread = saved_thread;
3751 else
3752 {
3753 if (debug_threads)
3754 debug_printf ("Previously current thread died.\n");
3755
3756 /* We can't change the current inferior behind GDB's back,
3757 otherwise, a subsequent command may apply to the wrong
3758 process. */
3759 current_thread = NULL;
3760 }
3761 }
3762
3763 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3764 move it out, because we need to report the stop event to GDB. For
3765 example, if the user puts a breakpoint in the jump pad, it's
3766 because she wants to debug it. */
3767
3768 static int
3769 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3770 {
3771 struct thread_info *thread = (struct thread_info *) entry;
3772 struct lwp_info *lwp = get_thread_lwp (thread);
3773
3774 if (lwp->suspended != 0)
3775 {
3776 internal_error (__FILE__, __LINE__,
3777 "LWP %ld is suspended, suspended=%d\n",
3778 lwpid_of (thread), lwp->suspended);
3779 }
3780 gdb_assert (lwp->stopped);
3781
3782 /* Allow debugging the jump pad, gdb_collect, etc. */
3783 return (supports_fast_tracepoints ()
3784 && agent_loaded_p ()
3785 && (gdb_breakpoint_here (lwp->stop_pc)
3786 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3787 || thread->last_resume_kind == resume_step)
3788 && linux_fast_tracepoint_collecting (lwp, NULL));
3789 }
3790
3791 static void
3792 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3793 {
3794 struct thread_info *thread = (struct thread_info *) entry;
3795 struct thread_info *saved_thread;
3796 struct lwp_info *lwp = get_thread_lwp (thread);
3797 int *wstat;
3798
3799 if (lwp->suspended != 0)
3800 {
3801 internal_error (__FILE__, __LINE__,
3802 "LWP %ld is suspended, suspended=%d\n",
3803 lwpid_of (thread), lwp->suspended);
3804 }
3805 gdb_assert (lwp->stopped);
3806
3807 /* For gdb_breakpoint_here. */
3808 saved_thread = current_thread;
3809 current_thread = thread;
3810
3811 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3812
3813 /* Allow debugging the jump pad, gdb_collect, etc. */
3814 if (!gdb_breakpoint_here (lwp->stop_pc)
3815 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3816 && thread->last_resume_kind != resume_step
3817 && maybe_move_out_of_jump_pad (lwp, wstat))
3818 {
3819 if (debug_threads)
3820 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3821 lwpid_of (thread));
3822
3823 if (wstat)
3824 {
3825 lwp->status_pending_p = 0;
3826 enqueue_one_deferred_signal (lwp, wstat);
3827
3828 if (debug_threads)
3829 debug_printf ("Signal %d for LWP %ld deferred "
3830 "(in jump pad)\n",
3831 WSTOPSIG (*wstat), lwpid_of (thread));
3832 }
3833
3834 linux_resume_one_lwp (lwp, 0, 0, NULL);
3835 }
3836 else
3837 lwp_suspended_inc (lwp);
3838
3839 current_thread = saved_thread;
3840 }
3841
3842 static int
3843 lwp_running (struct inferior_list_entry *entry, void *data)
3844 {
3845 struct thread_info *thread = (struct thread_info *) entry;
3846 struct lwp_info *lwp = get_thread_lwp (thread);
3847
3848 if (lwp_is_marked_dead (lwp))
3849 return 0;
3850 if (lwp->stopped)
3851 return 0;
3852 return 1;
3853 }
3854
3855 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3856 If SUSPEND, then also increase the suspend count of every LWP,
3857 except EXCEPT. */
3858
3859 static void
3860 stop_all_lwps (int suspend, struct lwp_info *except)
3861 {
3862 /* Should not be called recursively. */
3863 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3864
3865 if (debug_threads)
3866 {
3867 debug_enter ();
3868 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3869 suspend ? "stop-and-suspend" : "stop",
3870 except != NULL
3871 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3872 : "none");
3873 }
3874
3875 stopping_threads = (suspend
3876 ? STOPPING_AND_SUSPENDING_THREADS
3877 : STOPPING_THREADS);
3878
3879 if (suspend)
3880 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3881 else
3882 find_inferior (&all_threads, send_sigstop_callback, except);
3883 wait_for_sigstop ();
3884 stopping_threads = NOT_STOPPING_THREADS;
3885
3886 if (debug_threads)
3887 {
3888 debug_printf ("stop_all_lwps done, setting stopping_threads "
3889 "back to !stopping\n");
3890 debug_exit ();
3891 }
3892 }
3893
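/* A standalone sketch of the stop-and-collect pattern that
   stop_all_lwps and wait_for_sigstop implement across many LWPs,
   reduced to a single non-traced child and plain POSIX job control
   (WUNTRACED instead of ptrace stops).  demo_stop_and_wait is
   hypothetical, not gdbserver code.  */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>

static int
demo_stop_and_wait (pid_t pid)
{
  int wstat;

  if (kill (pid, SIGSTOP) != 0)
    return -1;

  for (;;)
    {
      if (waitpid (pid, &wstat, WUNTRACED) == -1)
	{
	  if (errno == EINTR)
	    continue;
	  return -1;
	}
      /* The child may report other events first; keep collecting
	 until the SIGSTOP we queued is seen, much as wait_for_sigstop
	 pulls events until none remain.  Give up if it exited.  */
      if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
	return -1;
      if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP)
	return 0;
    }
}
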
3894 /* Enqueue one signal in the chain of signals which need to be
3895 delivered to this process on next resume. */
3896
3897 static void
3898 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3899 {
3900 struct pending_signals *p_sig = XNEW (struct pending_signals);
3901
3902 p_sig->prev = lwp->pending_signals;
3903 p_sig->signal = signal;
3904 if (info == NULL)
3905 memset (&p_sig->info, 0, sizeof (siginfo_t));
3906 else
3907 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3908 lwp->pending_signals = p_sig;
3909 }
3910
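/* A sketch of the list discipline used for pending_signals: new
   entries are pushed at the head through the PREV pointer, and the
   consumer (see linux_resume_one_lwp_throw below) walks to the tail
   before taking one, so signals are delivered first-in first-out.
   The demo_sig type is a simplified stand-in, not gdbserver code.  */
#include <stdlib.h>

struct demo_sig
{
  struct demo_sig *prev;
  int signal;
};

/* Pop the oldest entry, i.e. the end of the PREV chain.  Assumes
   *HEAD is non-NULL.  */
static int
demo_dequeue_oldest (struct demo_sig **head)
{
  struct demo_sig **p = head;
  int signo;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  signo = (*p)->signal;
  free (*p);
  *p = NULL;
  return signo;
}
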
3911 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3912 SIGNAL is nonzero, give it that signal. */
3913
3914 static void
3915 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3916 int step, int signal, siginfo_t *info)
3917 {
3918 struct thread_info *thread = get_lwp_thread (lwp);
3919 struct thread_info *saved_thread;
3920 int fast_tp_collecting;
3921 struct process_info *proc = get_thread_process (thread);
3922
3923 /* Note that the target description may not be initialised yet
3924 (proc->tdesc == NULL) at this point, because the program hasn't
3925 stopped at its first instruction; GDBserver is still skipping
3926 the extra traps from the wrapper program (see option --wrapper).
3927 Code in this function that requires register access must be
3928 guarded with a check that proc->tdesc is not NULL, or similar. */
3929
3930 if (lwp->stopped == 0)
3931 return;
3932
3933 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
3934
3935 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3936
3937 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3938
3939 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3940 user used the "jump" command, or "set $pc = foo"). */
3941 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3942 {
3943 /* Collecting 'while-stepping' actions doesn't make sense
3944 anymore. */
3945 release_while_stepping_state_list (thread);
3946 }
3947
3948 /* If we have pending signals or status, and a new signal, enqueue the
3949 signal. Also enqueue the signal if we are waiting to reinsert a
3950 breakpoint; it will be picked up again below. */
3951 if (signal != 0
3952 && (lwp->status_pending_p
3953 || lwp->pending_signals != NULL
3954 || lwp->bp_reinsert != 0
3955 || fast_tp_collecting))
3956 {
3957 struct pending_signals *p_sig = XNEW (struct pending_signals);
3958
3959 p_sig->prev = lwp->pending_signals;
3960 p_sig->signal = signal;
3961 if (info == NULL)
3962 memset (&p_sig->info, 0, sizeof (siginfo_t));
3963 else
3964 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3965 lwp->pending_signals = p_sig;
3966 }
3967
3968 if (lwp->status_pending_p)
3969 {
3970 if (debug_threads)
3971 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3972 " has pending status\n",
3973 lwpid_of (thread), step ? "step" : "continue", signal,
3974 lwp->stop_expected ? "expected" : "not expected");
3975 return;
3976 }
3977
3978 saved_thread = current_thread;
3979 current_thread = thread;
3980
3981 if (debug_threads)
3982 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3983 lwpid_of (thread), step ? "step" : "continue", signal,
3984 lwp->stop_expected ? "expected" : "not expected");
3985
3986 /* This bit needs some thinking about. If we get a signal that
3987 we must report while a single-step reinsert is still pending,
3988 we often end up resuming the thread. It might be better to
3989 (ew) allow a stack of pending events; then we could be sure that
3990 the reinsert happened right away and not lose any signals.
3991
3992 Making this stack would also shrink the window in which breakpoints are
3993 uninserted (see comment in linux_wait_for_lwp) but not enough for
3994 complete correctness, so it won't solve that problem. It may be
3995 worthwhile just to solve this one, however. */
3996 if (lwp->bp_reinsert != 0)
3997 {
3998 if (debug_threads)
3999 debug_printf (" pending reinsert at 0x%s\n",
4000 paddress (lwp->bp_reinsert));
4001
4002 if (can_hardware_single_step ())
4003 {
4004 if (fast_tp_collecting == 0)
4005 {
4006 if (step == 0)
4007 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4008 if (lwp->suspended)
4009 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4010 lwp->suspended);
4011 }
4012
4013 step = 1;
4014 }
4015
4016 /* Postpone any pending signal. It was enqueued above. */
4017 signal = 0;
4018 }
4019
4020 if (fast_tp_collecting == 1)
4021 {
4022 if (debug_threads)
4023 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4024 " (exit-jump-pad-bkpt)\n",
4025 lwpid_of (thread));
4026
4027 /* Postpone any pending signal. It was enqueued above. */
4028 signal = 0;
4029 }
4030 else if (fast_tp_collecting == 2)
4031 {
4032 if (debug_threads)
4033 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4034 " single-stepping\n",
4035 lwpid_of (thread));
4036
4037 if (can_hardware_single_step ())
4038 step = 1;
4039 else
4040 {
4041 internal_error (__FILE__, __LINE__,
4042 "moving out of jump pad single-stepping"
4043 " not implemented on this target");
4044 }
4045
4046 /* Postpone any pending signal. It was enqueued above. */
4047 signal = 0;
4048 }
4049
4050 /* If we have while-stepping actions in this thread, set it stepping.
4051 If we have a signal to deliver, it may or may not be set to
4052 SIG_IGN; we don't know. Assume so, and allow collecting
4053 while-stepping into a signal handler. A possible smart thing to
4054 do would be to set an internal breakpoint at the signal return
4055 address, continue, and carry on catching this while-stepping
4056 action only when that breakpoint is hit. A future
4057 enhancement. */
4058 if (thread->while_stepping != NULL
4059 && can_hardware_single_step ())
4060 {
4061 if (debug_threads)
4062 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4063 lwpid_of (thread));
4064 step = 1;
4065 }
4066
4067 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4068 {
4069 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4070
4071 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4072
4073 if (debug_threads)
4074 {
4075 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4076 (long) lwp->stop_pc);
4077 }
4078 }
4079
4080 /* If we have pending signals, consume one unless we are trying to
4081 reinsert a breakpoint or we're trying to finish a fast tracepoint
4082 collect. */
4083 if (lwp->pending_signals != NULL
4084 && lwp->bp_reinsert == 0
4085 && fast_tp_collecting == 0)
4086 {
4087 struct pending_signals **p_sig;
4088
4089 p_sig = &lwp->pending_signals;
4090 while ((*p_sig)->prev != NULL)
4091 p_sig = &(*p_sig)->prev;
4092
4093 signal = (*p_sig)->signal;
4094 if ((*p_sig)->info.si_signo != 0)
4095 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4096 &(*p_sig)->info);
4097
4098 free (*p_sig);
4099 *p_sig = NULL;
4100 }
4101
4102 if (the_low_target.prepare_to_resume != NULL)
4103 the_low_target.prepare_to_resume (lwp);
4104
4105 regcache_invalidate_thread (thread);
4106 errno = 0;
4107 lwp->stepping = step;
4108 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
4109 (PTRACE_TYPE_ARG3) 0,
4110 /* Coerce to a uintptr_t first to avoid potential gcc warning
4111 of coercing an 8 byte integer to a 4 byte pointer. */
4112 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4113
4114 current_thread = saved_thread;
4115 if (errno)
4116 perror_with_name ("resuming thread");
4117
4118 /* Successfully resumed. Clear state that no longer makes sense,
4119 and mark the LWP as running. Must not do this before resuming
4120 otherwise if that fails other code will be confused. E.g., we'd
4121 later try to stop the LWP and hang forever waiting for a stop
4122 status. Note that we must not throw after this is cleared,
4123 otherwise handle_zombie_lwp_error would get confused. */
4124 lwp->stopped = 0;
4125 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4126 }
4127
4128 /* Called when we try to resume a stopped LWP and that errors out. If
4129 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4130 or about to become), discard the error, clear any pending status
4131 the LWP may have, and return true (we'll collect the exit status
4132 soon enough). Otherwise, return false. */
4133
4134 static int
4135 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4136 {
4137 struct thread_info *thread = get_lwp_thread (lp);
4138
4139 /* If we get an error after resuming the LWP successfully, we'd
4140 confuse !T state for the LWP being gone. */
4141 gdb_assert (lp->stopped);
4142
4143 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4144 because even if ptrace failed with ESRCH, the tracee may be "not
4145 yet fully dead", but already refusing ptrace requests. In that
4146 case the tracee has 'R (Running)' state for a little bit
4147 (observed in Linux 3.18). See also the note on ESRCH in the
4148 ptrace(2) man page. Instead, check whether the LWP has any state
4149 other than ptrace-stopped. */
4150
4151 /* Don't assume anything if /proc/PID/status can't be read. */
4152 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4153 {
4154 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4155 lp->status_pending_p = 0;
4156 return 1;
4157 }
4158 return 0;
4159 }
4160
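/* A sketch of the /proc check relied on above, assuming the Linux
   /proc/PID/status format in which the "State:" line reads
   "t (tracing stop)" ("T (tracing stop)" on older kernels) for a
   ptrace-stopped task.  The helper name is illustrative; the real
   check lives in nat/linux-procfs.  */
#include <stdio.h>
#include <string.h>

static int
demo_is_trace_stopped (int pid)
{
  char path[64], line[256];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;			/* Can't tell; don't assume.  */
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	stopped = (strstr (line, "tracing stop") != NULL);
	break;
      }
  fclose (f);
  return stopped;
}
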
4161 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4162 disappears while we try to resume it. */
4163
4164 static void
4165 linux_resume_one_lwp (struct lwp_info *lwp,
4166 int step, int signal, siginfo_t *info)
4167 {
4168 TRY
4169 {
4170 linux_resume_one_lwp_throw (lwp, step, signal, info);
4171 }
4172 CATCH (ex, RETURN_MASK_ERROR)
4173 {
4174 if (!check_ptrace_stopped_lwp_gone (lwp))
4175 throw_exception (ex);
4176 }
4177 END_CATCH
4178 }
4179
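/* The TRY/CATCH/END_CATCH macros above come from GDB's common
   exception machinery; in the C build they expand to a
   setjmp/longjmp protocol roughly like this standalone sketch
   (greatly simplified, with no exception object, and not the real
   expansion).  */
#include <setjmp.h>

static jmp_buf demo_env;

static void
demo_throw (void)
{
  longjmp (demo_env, 1);
}

static void
demo_try_catch (void (*body) (void), void (*handler) (void))
{
  if (setjmp (demo_env) == 0)
    body ();			/* TRY block.  */
  else
    handler ();			/* CATCH block.  */
}
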
4180 struct thread_resume_array
4181 {
4182 struct thread_resume *resume;
4183 size_t n;
4184 };
4185
4186 /* This function is called once per thread via find_inferior.
4187 ARG is a pointer to a thread_resume_array struct.
4188 We look up the thread specified by ENTRY in ARG, and mark the thread
4189 with a pointer to the appropriate resume request.
4190
4191 This algorithm is O(threads * resume elements), but the number of
4192 resume elements is small (and will remain small at least until GDB
4193 supports thread suspension). */
4194
4195 static int
4196 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4197 {
4198 struct thread_info *thread = (struct thread_info *) entry;
4199 struct lwp_info *lwp = get_thread_lwp (thread);
4200 int ndx;
4201 struct thread_resume_array *r;
4202
4203 r = (struct thread_resume_array *) arg;
4204
4205 for (ndx = 0; ndx < r->n; ndx++)
4206 {
4207 ptid_t ptid = r->resume[ndx].thread;
4208 if (ptid_equal (ptid, minus_one_ptid)
4209 || ptid_equal (ptid, entry->id)
4210 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4211 of PID'. */
4212 || (ptid_get_pid (ptid) == pid_of (thread)
4213 && (ptid_is_pid (ptid)
4214 || ptid_get_lwp (ptid) == -1)))
4215 {
4216 if (r->resume[ndx].kind == resume_stop
4217 && thread->last_resume_kind == resume_stop)
4218 {
4219 if (debug_threads)
4220 debug_printf ("already %s LWP %ld at GDB's request\n",
4221 (thread->last_status.kind
4222 == TARGET_WAITKIND_STOPPED)
4223 ? "stopped"
4224 : "stopping",
4225 lwpid_of (thread));
4226
4227 continue;
4228 }
4229
4230 lwp->resume = &r->resume[ndx];
4231 thread->last_resume_kind = lwp->resume->kind;
4232
4233 lwp->step_range_start = lwp->resume->step_range_start;
4234 lwp->step_range_end = lwp->resume->step_range_end;
4235
4236 /* If we had a deferred signal to report, dequeue one now.
4237 This can happen if LWP gets more than one signal while
4238 trying to get out of a jump pad. */
4239 if (lwp->stopped
4240 && !lwp->status_pending_p
4241 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4242 {
4243 lwp->status_pending_p = 1;
4244
4245 if (debug_threads)
4246 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4247 "leaving status pending.\n",
4248 WSTOPSIG (lwp->status_pending),
4249 lwpid_of (thread));
4250 }
4251
4252 return 0;
4253 }
4254 }
4255
4256 /* No resume action for this thread. */
4257 lwp->resume = NULL;
4258
4259 return 0;
4260 }
4261
4262 /* find_inferior callback for linux_resume.
4263 Set *FLAG_P if this lwp has an interesting status pending. */
4264
4265 static int
4266 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4267 {
4268 struct thread_info *thread = (struct thread_info *) entry;
4269 struct lwp_info *lwp = get_thread_lwp (thread);
4270
4271 /* LWPs which will not be resumed are not interesting, because
4272 we might not wait for them next time through linux_wait. */
4273 if (lwp->resume == NULL)
4274 return 0;
4275
4276 if (thread_still_has_status_pending_p (thread))
4277 * (int *) flag_p = 1;
4278
4279 return 0;
4280 }
4281
4282 /* Return 1 if this lwp that GDB wants running is stopped at an
4283 internal breakpoint that we need to step over. It assumes that any
4284 required STOP_PC adjustment has already been propagated to the
4285 inferior's regcache. */
4286
4287 static int
4288 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4289 {
4290 struct thread_info *thread = (struct thread_info *) entry;
4291 struct lwp_info *lwp = get_thread_lwp (thread);
4292 struct thread_info *saved_thread;
4293 CORE_ADDR pc;
4294 struct process_info *proc = get_thread_process (thread);
4295
4296 /* GDBserver is skipping the extra traps from the wrapper program;
4297 no step-over is needed. */
4298 if (proc->tdesc == NULL)
4299 return 0;
4300
4301 /* LWPs which will not be resumed are not interesting, because we
4302 might not wait for them next time through linux_wait. */
4303
4304 if (!lwp->stopped)
4305 {
4306 if (debug_threads)
4307 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4308 lwpid_of (thread));
4309 return 0;
4310 }
4311
4312 if (thread->last_resume_kind == resume_stop)
4313 {
4314 if (debug_threads)
4315 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4316 " stopped\n",
4317 lwpid_of (thread));
4318 return 0;
4319 }
4320
4321 gdb_assert (lwp->suspended >= 0);
4322
4323 if (lwp->suspended)
4324 {
4325 if (debug_threads)
4326 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4327 lwpid_of (thread));
4328 return 0;
4329 }
4330
4331 if (!lwp->need_step_over)
4332 {
4333 if (debug_threads)
4334 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4335 }
4336
4337 if (lwp->status_pending_p)
4338 {
4339 if (debug_threads)
4340 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4341 " status.\n",
4342 lwpid_of (thread));
4343 return 0;
4344 }
4345
4346 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4347 or we have. */
4348 pc = get_pc (lwp);
4349
4350 /* If the PC has changed since we stopped, then don't do anything,
4351 and let the breakpoint/tracepoint be hit. This happens if, for
4352 instance, GDB handled the decr_pc_after_break subtraction itself,
4353 GDB is OOL stepping this thread, or the user has issued a "jump"
4354 command, or poked thread's registers herself. */
4355 if (pc != lwp->stop_pc)
4356 {
4357 if (debug_threads)
4358 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4359 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4360 lwpid_of (thread),
4361 paddress (lwp->stop_pc), paddress (pc));
4362
4363 lwp->need_step_over = 0;
4364 return 0;
4365 }
4366
4367 saved_thread = current_thread;
4368 current_thread = thread;
4369
4370 /* We can only step over breakpoints we know about. */
4371 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4372 {
4373 /* Don't step over a breakpoint that GDB expects to hit
4374 though. If the condition is being evaluated on the target's side
4375 and it evaluates to false, step over this breakpoint as well. */
4376 if (gdb_breakpoint_here (pc)
4377 && gdb_condition_true_at_breakpoint (pc)
4378 && gdb_no_commands_at_breakpoint (pc))
4379 {
4380 if (debug_threads)
4381 debug_printf ("Need step over [LWP %ld]? yes, but found"
4382 " GDB breakpoint at 0x%s; skipping step over\n",
4383 lwpid_of (thread), paddress (pc));
4384
4385 current_thread = saved_thread;
4386 return 0;
4387 }
4388 else
4389 {
4390 if (debug_threads)
4391 debug_printf ("Need step over [LWP %ld]? yes, "
4392 "found breakpoint at 0x%s\n",
4393 lwpid_of (thread), paddress (pc));
4394
4395 /* We've found an lwp that needs stepping over --- return 1 so
4396 that find_inferior stops looking. */
4397 current_thread = saved_thread;
4398
4399 /* If the step over is cancelled, this is set again. */
4400 lwp->need_step_over = 0;
4401 return 1;
4402 }
4403 }
4404
4405 current_thread = saved_thread;
4406
4407 if (debug_threads)
4408 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4409 " at 0x%s\n",
4410 lwpid_of (thread), paddress (pc));
4411
4412 return 0;
4413 }
4414
4415 /* Start a step-over operation on LWP. When LWP stops at a
4416 breakpoint, to make progress we need to move the breakpoint out
4417 of the way. If we let other threads run while we do that, they may
4418 pass by the breakpoint location and miss hitting it. To avoid
4419 that, a step-over momentarily stops all threads while LWP is
4420 single-stepped while the breakpoint is temporarily uninserted from
4421 the inferior. When the single-step finishes, we reinsert the
4422 breakpoint, and let all threads that are supposed to be running,
4423 run again.
4424
4425 On targets that don't support hardware single-step, we don't
4426 currently support full software single-stepping. Instead, we only
4427 support stepping over the thread event breakpoint, by asking the
4428 low target where to place a reinsert breakpoint. Since this
4429 routine assumes the breakpoint being stepped over is a thread event
4430 breakpoint, it usually assumes the return address of the current
4431 function is a good enough place to set the reinsert breakpoint. */
4432
4433 static int
4434 start_step_over (struct lwp_info *lwp)
4435 {
4436 struct thread_info *thread = get_lwp_thread (lwp);
4437 struct thread_info *saved_thread;
4438 CORE_ADDR pc;
4439 int step;
4440
4441 if (debug_threads)
4442 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4443 lwpid_of (thread));
4444
4445 stop_all_lwps (1, lwp);
4446
4447 if (lwp->suspended != 0)
4448 {
4449 internal_error (__FILE__, __LINE__,
4450 "LWP %ld suspended=%d\n", lwpid_of (thread),
4451 lwp->suspended);
4452 }
4453
4454 if (debug_threads)
4455 debug_printf ("Done stopping all threads for step-over.\n");
4456
4457 /* Note, we should always reach here with an already adjusted PC,
4458 either by GDB (if we're resuming due to GDB's request), or by our
4459 caller, if we just finished handling an internal breakpoint GDB
4460 shouldn't care about. */
4461 pc = get_pc (lwp);
4462
4463 saved_thread = current_thread;
4464 current_thread = thread;
4465
4466 lwp->bp_reinsert = pc;
4467 uninsert_breakpoints_at (pc);
4468 uninsert_fast_tracepoint_jumps_at (pc);
4469
4470 if (can_hardware_single_step ())
4471 {
4472 step = 1;
4473 }
4474 else
4475 {
4476 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4477 set_reinsert_breakpoint (raddr);
4478 step = 0;
4479 }
4480
4481 current_thread = saved_thread;
4482
4483 linux_resume_one_lwp (lwp, step, 0, NULL);
4484
4485 /* Require next event from this LWP. */
4486 step_over_bkpt = thread->entry.id;
4487 return 1;
4488 }
4489
4490 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4491 start_step_over, if still there, and delete any reinsert
4492 breakpoints we've set, on targets without hardware single-step. */
4493
4494 static int
4495 finish_step_over (struct lwp_info *lwp)
4496 {
4497 if (lwp->bp_reinsert != 0)
4498 {
4499 if (debug_threads)
4500 debug_printf ("Finished step over.\n");
4501
4502 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4503 may be no breakpoint to reinsert there by now. */
4504 reinsert_breakpoints_at (lwp->bp_reinsert);
4505 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4506
4507 lwp->bp_reinsert = 0;
4508
4509 /* Delete any software-single-step reinsert breakpoints. No
4510 longer needed. We don't have to worry about other threads
4511 hitting this trap, and later not being able to explain it,
4512 because we were stepping over a breakpoint, and we hold all
4513 threads but LWP stopped while doing that. */
4514 if (!can_hardware_single_step ())
4515 delete_reinsert_breakpoints ();
4516
4517 step_over_bkpt = null_ptid;
4518 return 1;
4519 }
4520 else
4521 return 0;
4522 }
4523
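/* A condensed, single-threaded sketch of the uninsert/step/reinsert
   dance performed by start_step_over and finish_step_over, written
   for an x86 software breakpoint (0xcc = int3) and assuming the
   tracee is already ptrace-stopped with its PC at ADDR, where
   ORIG_WORD holds the text word saved when the breakpoint was
   inserted.  Error handling and all-thread stopping are omitted;
   not gdbserver code.  */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
demo_step_over_sw_bkpt (pid_t pid, unsigned long addr, long orig_word)
{
  int wstat;
  long trapped = (orig_word & ~0xffL) | 0xcc;

  /* Uninsert: restore the original instruction bytes.  */
  ptrace (PTRACE_POKETEXT, pid, (void *) addr, (void *) orig_word);

  /* Single-step the thread past the breakpoint address.  */
  ptrace (PTRACE_SINGLESTEP, pid, (void *) 0, (void *) 0);
  waitpid (pid, &wstat, 0);

  /* Reinsert: put the int3 byte back in the low byte of the word.  */
  ptrace (PTRACE_POKETEXT, pid, (void *) addr, (void *) trapped);
}
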
4524 /* If there's a step over in progress, wait until all threads stop
4525 (that is, until the stepping thread finishes its step), and
4526 unsuspend all lwps. The stepping thread ends with its status
4527 pending, which is processed later when we get back to processing
4528 events. */
4529
4530 static void
4531 complete_ongoing_step_over (void)
4532 {
4533 if (!ptid_equal (step_over_bkpt, null_ptid))
4534 {
4535 struct lwp_info *lwp;
4536 int wstat;
4537 int ret;
4538
4539 if (debug_threads)
4540 debug_printf ("detach: step over in progress, finish it first\n");
4541
4542 /* Passing NULL_PTID as filter indicates we want all events to
4543 be left pending. Eventually this returns when there are no
4544 unwaited-for children left. */
4545 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4546 &wstat, __WALL);
4547 gdb_assert (ret == -1);
4548
4549 lwp = find_lwp_pid (step_over_bkpt);
4550 if (lwp != NULL)
4551 finish_step_over (lwp);
4552 step_over_bkpt = null_ptid;
4553 unsuspend_all_lwps (lwp);
4554 }
4555 }
4556
4557 /* This function is called once per thread. We check the thread's resume
4558 request, which will tell us whether to resume, step, or leave the thread
4559 stopped; and what signal, if any, it should be sent.
4560
4561 For threads which we aren't explicitly told otherwise, we preserve
4562 the stepping flag; this is used for stepping over gdbserver-placed
4563 breakpoints.
4564
4565 If pending_flags was set in any thread, we queue any needed
4566 signals, since we won't actually resume. We already have a pending
4567 event to report, so we don't need to preserve any step requests;
4568 they should be re-issued if necessary. */
4569
4570 static int
4571 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4572 {
4573 struct thread_info *thread = (struct thread_info *) entry;
4574 struct lwp_info *lwp = get_thread_lwp (thread);
4575 int step;
4576 int leave_all_stopped = * (int *) arg;
4577 int leave_pending;
4578
4579 if (lwp->resume == NULL)
4580 return 0;
4581
4582 if (lwp->resume->kind == resume_stop)
4583 {
4584 if (debug_threads)
4585 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4586
4587 if (!lwp->stopped)
4588 {
4589 if (debug_threads)
4590 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4591
4592 /* Stop the thread, and wait for the event asynchronously,
4593 through the event loop. */
4594 send_sigstop (lwp);
4595 }
4596 else
4597 {
4598 if (debug_threads)
4599 debug_printf ("already stopped LWP %ld\n",
4600 lwpid_of (thread));
4601
4602 /* The LWP may have been stopped in an internal event that
4603 was not meant to be notified back to GDB (e.g., gdbserver
4604 breakpoint), so we should be reporting a stop event in
4605 this case too. */
4606
4607 /* If the thread already has a pending SIGSTOP, this is a
4608 no-op. Otherwise, something later will presumably resume
4609 the thread and this will cause it to cancel any pending
4610 operation, due to last_resume_kind == resume_stop. If
4611 the thread already has a pending status to report, we
4612 will still report it the next time we wait - see
4613 status_pending_p_callback. */
4614
4615 /* If we already have a pending signal to report, then
4616 there's no need to queue a SIGSTOP, as this means we're
4617 midway through moving the LWP out of the jumppad, and we
4618 will report the pending signal as soon as that is
4619 finished. */
4620 if (lwp->pending_signals_to_report == NULL)
4621 send_sigstop (lwp);
4622 }
4623
4624 /* For stop requests, we're done. */
4625 lwp->resume = NULL;
4626 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4627 return 0;
4628 }
4629
4630 /* If this thread, which is about to be resumed, has a pending status,
4631 then don't resume it - we can just report the pending status.
4632 Likewise if it is suspended, because e.g., another thread is
4633 stepping past a breakpoint. Make sure to queue any signals that
4634 would otherwise be sent. In all-stop mode, we base this decision
4635 on whether *any* thread has a pending status. If there's a
4636 thread that needs the step-over-breakpoint dance, then don't
4637 resume any other thread but that particular one. */
4638 leave_pending = (lwp->suspended
4639 || lwp->status_pending_p
4640 || leave_all_stopped);
4641
4642 if (!leave_pending)
4643 {
4644 if (debug_threads)
4645 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4646
4647 step = (lwp->resume->kind == resume_step);
4648 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4649 }
4650 else
4651 {
4652 if (debug_threads)
4653 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4654
4655 /* If we have a new signal, enqueue the signal. */
4656 if (lwp->resume->sig != 0)
4657 {
4658 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4659
4660 p_sig->prev = lwp->pending_signals;
4661 p_sig->signal = lwp->resume->sig;
4662
4663 /* If this is the same signal we were previously stopped by,
4664 make sure to queue its siginfo. We can ignore the return
4665 value of ptrace; if it fails, we'll skip
4666 PTRACE_SETSIGINFO. */
4667 if (WIFSTOPPED (lwp->last_status)
4668 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4669 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4670 &p_sig->info);
4671
4672 lwp->pending_signals = p_sig;
4673 }
4674 }
4675
4676 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4677 lwp->resume = NULL;
4678 return 0;
4679 }
4680
4681 static void
4682 linux_resume (struct thread_resume *resume_info, size_t n)
4683 {
4684 struct thread_resume_array array = { resume_info, n };
4685 struct thread_info *need_step_over = NULL;
4686 int any_pending;
4687 int leave_all_stopped;
4688
4689 if (debug_threads)
4690 {
4691 debug_enter ();
4692 debug_printf ("linux_resume:\n");
4693 }
4694
4695 find_inferior (&all_threads, linux_set_resume_request, &array);
4696
4697 /* If there is a thread which would otherwise be resumed, which has
4698 a pending status, then don't resume any threads - we can just
4699 report the pending status. Make sure to queue any signals that
4700 would otherwise be sent. In non-stop mode, we'll apply this
4701 logic to each thread individually. We consume all pending events
4702 before considering to start a step-over (in all-stop). */
4703 any_pending = 0;
4704 if (!non_stop)
4705 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4706
4707 /* If there is a thread which would otherwise be resumed, which is
4708 stopped at a breakpoint that needs stepping over, then don't
4709 resume any threads - have it step over the breakpoint with all
4710 other threads stopped, then resume all threads again. Make sure
4711 to queue any signals that would otherwise be delivered or
4712 queued. */
4713 if (!any_pending && supports_breakpoints ())
4714 need_step_over
4715 = (struct thread_info *) find_inferior (&all_threads,
4716 need_step_over_p, NULL);
4717
4718 leave_all_stopped = (need_step_over != NULL || any_pending);
4719
4720 if (debug_threads)
4721 {
4722 if (need_step_over != NULL)
4723 debug_printf ("Not resuming all, need step over\n");
4724 else if (any_pending)
4725 debug_printf ("Not resuming, all-stop and found "
4726 "an LWP with pending status\n");
4727 else
4728 debug_printf ("Resuming, no pending status or step over needed\n");
4729 }
4730
4731 /* Even if we're leaving threads stopped, queue all signals we'd
4732 otherwise deliver. */
4733 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4734
4735 if (need_step_over)
4736 start_step_over (get_thread_lwp (need_step_over));
4737
4738 if (debug_threads)
4739 {
4740 debug_printf ("linux_resume done\n");
4741 debug_exit ();
4742 }
4743 }
4744
4745 /* This function is called once per thread. We check the thread's
4746 last resume request, which will tell us whether to resume, step, or
4747 leave the thread stopped. Any signal the client requested to be
4748 delivered has already been enqueued at this point.
4749
4750 If any thread that GDB wants running is stopped at an internal
4751 breakpoint that needs stepping over, we start a step-over operation
4752 on that particular thread, and leave all others stopped. */
4753
4754 static int
4755 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4756 {
4757 struct thread_info *thread = (struct thread_info *) entry;
4758 struct lwp_info *lwp = get_thread_lwp (thread);
4759 int step;
4760
4761 if (lwp == except)
4762 return 0;
4763
4764 if (debug_threads)
4765 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4766
4767 if (!lwp->stopped)
4768 {
4769 if (debug_threads)
4770 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4771 return 0;
4772 }
4773
4774 if (thread->last_resume_kind == resume_stop
4775 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4776 {
4777 if (debug_threads)
4778 debug_printf (" client wants LWP %ld to remain stopped\n",
4779 lwpid_of (thread));
4780 return 0;
4781 }
4782
4783 if (lwp->status_pending_p)
4784 {
4785 if (debug_threads)
4786 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4787 lwpid_of (thread));
4788 return 0;
4789 }
4790
4791 gdb_assert (lwp->suspended >= 0);
4792
4793 if (lwp->suspended)
4794 {
4795 if (debug_threads)
4796 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4797 return 0;
4798 }
4799
4800 if (thread->last_resume_kind == resume_stop
4801 && lwp->pending_signals_to_report == NULL
4802 && lwp->collecting_fast_tracepoint == 0)
4803 {
4804 /* We haven't reported this LWP as stopped yet (otherwise, the
4805 last_status.kind check above would catch it, and we wouldn't
4806 reach here). This LWP may have been momentarily paused by a
4807 stop_all_lwps call while handling, for example, another LWP's
4808 step-over. In that case, the pending expected SIGSTOP signal
4809 that was queued at vCont;t handling time will have already
4810 been consumed by wait_for_sigstop, and so we need to requeue
4811 another one here. Note that if the LWP already has a SIGSTOP
4812 pending, this is a no-op. */
4813
4814 if (debug_threads)
4815 debug_printf ("Client wants LWP %ld to stop. "
4816 "Making sure it has a SIGSTOP pending\n",
4817 lwpid_of (thread));
4818
4819 send_sigstop (lwp);
4820 }
4821
4822 if (thread->last_resume_kind == resume_step)
4823 {
4824 if (debug_threads)
4825 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4826 lwpid_of (thread));
4827 step = 1;
4828 }
4829 else if (lwp->bp_reinsert != 0)
4830 {
4831 if (debug_threads)
4832 debug_printf (" stepping LWP %ld, reinsert set\n",
4833 lwpid_of (thread));
4834 step = 1;
4835 }
4836 else
4837 step = 0;
4838
4839 linux_resume_one_lwp (lwp, step, 0, NULL);
4840 return 0;
4841 }
4842
4843 static int
4844 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4845 {
4846 struct thread_info *thread = (struct thread_info *) entry;
4847 struct lwp_info *lwp = get_thread_lwp (thread);
4848
4849 if (lwp == except)
4850 return 0;
4851
4852 lwp_suspended_decr (lwp);
4853
4854 return proceed_one_lwp (entry, except);
4855 }
4856
4857 /* When we finish a step-over, set threads running again. If there's
4858 another thread that may need a step-over, now's the time to start
4859 it. Eventually, we'll move all threads past their breakpoints. */
4860
4861 static void
4862 proceed_all_lwps (void)
4863 {
4864 struct thread_info *need_step_over;
4865
4866 /* If there is a thread which would otherwise be resumed, which is
4867 stopped at a breakpoint that needs stepping over, then don't
4868 resume any threads - have it step over the breakpoint with all
4869 other threads stopped, then resume all threads again. */
4870
4871 if (supports_breakpoints ())
4872 {
4873 need_step_over
4874 = (struct thread_info *) find_inferior (&all_threads,
4875 need_step_over_p, NULL);
4876
4877 if (need_step_over != NULL)
4878 {
4879 if (debug_threads)
4880 debug_printf ("proceed_all_lwps: found "
4881 "thread %ld needing a step-over\n",
4882 lwpid_of (need_step_over));
4883
4884 start_step_over (get_thread_lwp (need_step_over));
4885 return;
4886 }
4887 }
4888
4889 if (debug_threads)
4890 debug_printf ("Proceeding, no step-over needed\n");
4891
4892 find_inferior (&all_threads, proceed_one_lwp, NULL);
4893 }
4894
4895 /* Stopped LWPs that the client wanted to be running, that don't have
4896 pending statuses, are set to run again, except for EXCEPT, if not
4897 NULL. This undoes a stop_all_lwps call. */
4898
4899 static void
4900 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4901 {
4902 if (debug_threads)
4903 {
4904 debug_enter ();
4905 if (except)
4906 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4907 lwpid_of (get_lwp_thread (except)));
4908 else
4909 debug_printf ("unstopping all lwps\n");
4910 }
4911
4912 if (unsuspend)
4913 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4914 else
4915 find_inferior (&all_threads, proceed_one_lwp, except);
4916
4917 if (debug_threads)
4918 {
4919 debug_printf ("unstop_all_lwps done\n");
4920 debug_exit ();
4921 }
4922 }
4923
4924
4925 #ifdef HAVE_LINUX_REGSETS
4926
4927 #define use_linux_regsets 1
4928
4929 /* Returns true if REGSET has been disabled. */
4930
4931 static int
4932 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4933 {
4934 return (info->disabled_regsets != NULL
4935 && info->disabled_regsets[regset - info->regsets]);
4936 }
4937
4938 /* Disable REGSET. */
4939
4940 static void
4941 disable_regset (struct regsets_info *info, struct regset_info *regset)
4942 {
4943 int dr_offset;
4944
4945 dr_offset = regset - info->regsets;
4946 if (info->disabled_regsets == NULL)
4947 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4948 info->disabled_regsets[dr_offset] = 1;
4949 }
4950
4951 static int
4952 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4953 struct regcache *regcache)
4954 {
4955 struct regset_info *regset;
4956 int saw_general_regs = 0;
4957 int pid;
4958 struct iovec iov;
4959
4960 pid = lwpid_of (current_thread);
4961 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4962 {
4963 void *buf, *data;
4964 int nt_type, res;
4965
4966 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4967 continue;
4968
4969 buf = xmalloc (regset->size);
4970
4971 nt_type = regset->nt_type;
4972 if (nt_type)
4973 {
4974 iov.iov_base = buf;
4975 iov.iov_len = regset->size;
4976 data = (void *) &iov;
4977 }
4978 else
4979 data = buf;
4980
4981 #ifndef __sparc__
4982 res = ptrace (regset->get_request, pid,
4983 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4984 #else
4985 res = ptrace (regset->get_request, pid, data, nt_type);
4986 #endif
4987 if (res < 0)
4988 {
4989 if (errno == EIO)
4990 {
4991 /* If we get EIO on a regset, do not try it again for
4992 this process mode. */
4993 disable_regset (regsets_info, regset);
4994 }
4995 else if (errno == ENODATA)
4996 {
4997 /* ENODATA may be returned if the regset is currently
4998 not "active". This can happen in normal operation,
4999 so suppress the warning in this case. */
5000 }
5001 else
5002 {
5003 char s[256];
5004 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5005 pid);
5006 perror (s);
5007 }
5008 }
5009 else
5010 {
5011 if (regset->type == GENERAL_REGS)
5012 saw_general_regs = 1;
5013 regset->store_function (regcache, buf);
5014 }
5015 free (buf);
5016 }
5017 if (saw_general_regs)
5018 return 0;
5019 else
5020 return 1;
5021 }
5022
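/* A sketch of the iovec-based transfer issued above when a regset has
   a note type, assuming PTRACE_GETREGSET (Linux 2.6.34+) and the
   generic NT_PRSTATUS note from <elf.h>.  demo_fetch_gregs is
   hypothetical, not gdbserver code.  */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
demo_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel shrinks iov.iov_len to what it actually
     filled in, which is how a smaller (e.g. 32-bit) regset under a
     64-bit kernel can be detected.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
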
5023 static int
5024 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5025 struct regcache *regcache)
5026 {
5027 struct regset_info *regset;
5028 int saw_general_regs = 0;
5029 int pid;
5030 struct iovec iov;
5031
5032 pid = lwpid_of (current_thread);
5033 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5034 {
5035 void *buf, *data;
5036 int nt_type, res;
5037
5038 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5039 || regset->fill_function == NULL)
5040 continue;
5041
5042 buf = xmalloc (regset->size);
5043
5044 /* First fill the buffer with the current register set contents,
5045 in case there are any items in the kernel's regset that are
5046 not in gdbserver's regcache. */
5047
5048 nt_type = regset->nt_type;
5049 if (nt_type)
5050 {
5051 iov.iov_base = buf;
5052 iov.iov_len = regset->size;
5053 data = (void *) &iov;
5054 }
5055 else
5056 data = buf;
5057
5058 #ifndef __sparc__
5059 res = ptrace (regset->get_request, pid,
5060 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5061 #else
5062 res = ptrace (regset->get_request, pid, data, nt_type);
5063 #endif
5064
5065 if (res == 0)
5066 {
5067 /* Then overlay our cached registers on that. */
5068 regset->fill_function (regcache, buf);
5069
5070 /* Only now do we write the register set. */
5071 #ifndef __sparc__
5072 res = ptrace (regset->set_request, pid,
5073 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5074 #else
5075 res = ptrace (regset->set_request, pid, data, nt_type);
5076 #endif
5077 }
5078
5079 if (res < 0)
5080 {
5081 if (errno == EIO)
5082 {
5083 /* If we get EIO on a regset, do not try it again for
5084 this process mode. */
5085 disable_regset (regsets_info, regset);
5086 }
5087 else if (errno == ESRCH)
5088 {
5089 /* At this point, ESRCH should mean the process is
5090 already gone, in which case we simply ignore attempts
5091 to change its registers. See also the related
5092 comment in linux_resume_one_lwp. */
5093 free (buf);
5094 return 0;
5095 }
5096 else
5097 {
5098 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5099 }
5100 }
5101 else if (regset->type == GENERAL_REGS)
5102 saw_general_regs = 1;
5103 free (buf);
5104 }
5105 if (saw_general_regs)
5106 return 0;
5107 else
5108 return 1;
5109 }
5110
5111 #else /* !HAVE_LINUX_REGSETS */
5112
5113 #define use_linux_regsets 0
5114 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5115 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5116
5117 #endif
5118
5119 /* Return 1 if register REGNO is supported by one of the regset ptrace
5120 calls or 0 if it has to be transferred individually. */
5121
5122 static int
5123 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5124 {
5125 unsigned char mask = 1 << (regno % 8);
5126 size_t index = regno / 8;
5127
5128 return (use_linux_regsets
5129 && (regs_info->regset_bitmap == NULL
5130 || (regs_info->regset_bitmap[index] & mask) != 0));
5131 }
5132
5133 #ifdef HAVE_LINUX_USRREGS
5134
5135 int
5136 register_addr (const struct usrregs_info *usrregs, int regnum)
5137 {
5138 int addr;
5139
5140 if (regnum < 0 || regnum >= usrregs->num_regs)
5141 error ("Invalid register number %d.", regnum);
5142
5143 addr = usrregs->regmap[regnum];
5144
5145 return addr;
5146 }
5147
5148 /* Fetch one register. */
5149 static void
5150 fetch_register (const struct usrregs_info *usrregs,
5151 struct regcache *regcache, int regno)
5152 {
5153 CORE_ADDR regaddr;
5154 int i, size;
5155 char *buf;
5156 int pid;
5157
5158 if (regno >= usrregs->num_regs)
5159 return;
5160 if ((*the_low_target.cannot_fetch_register) (regno))
5161 return;
5162
5163 regaddr = register_addr (usrregs, regno);
5164 if (regaddr == -1)
5165 return;
5166
5167 size = ((register_size (regcache->tdesc, regno)
5168 + sizeof (PTRACE_XFER_TYPE) - 1)
5169 & -sizeof (PTRACE_XFER_TYPE));
5170 buf = (char *) alloca (size);
5171
5172 pid = lwpid_of (current_thread);
5173 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5174 {
5175 errno = 0;
5176 *(PTRACE_XFER_TYPE *) (buf + i) =
5177 ptrace (PTRACE_PEEKUSER, pid,
5178 /* Coerce to a uintptr_t first to avoid potential gcc warning
5179 of coercing an 8 byte integer to a 4 byte pointer. */
5180 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5181 regaddr += sizeof (PTRACE_XFER_TYPE);
5182 if (errno != 0)
5183 error ("reading register %d: %s", regno, strerror (errno));
5184 }
5185
5186 if (the_low_target.supply_ptrace_register)
5187 the_low_target.supply_ptrace_register (regcache, regno, buf);
5188 else
5189 supply_register (regcache, regno, buf);
5190 }
5191
5192 /* Store one register. */
5193 static void
5194 store_register (const struct usrregs_info *usrregs,
5195 struct regcache *regcache, int regno)
5196 {
5197 CORE_ADDR regaddr;
5198 int i, size;
5199 char *buf;
5200 int pid;
5201
5202 if (regno >= usrregs->num_regs)
5203 return;
5204 if ((*the_low_target.cannot_store_register) (regno))
5205 return;
5206
5207 regaddr = register_addr (usrregs, regno);
5208 if (regaddr == -1)
5209 return;
5210
5211 size = ((register_size (regcache->tdesc, regno)
5212 + sizeof (PTRACE_XFER_TYPE) - 1)
5213 & -sizeof (PTRACE_XFER_TYPE));
5214 buf = (char *) alloca (size);
5215 memset (buf, 0, size);
5216
5217 if (the_low_target.collect_ptrace_register)
5218 the_low_target.collect_ptrace_register (regcache, regno, buf);
5219 else
5220 collect_register (regcache, regno, buf);
5221
5222 pid = lwpid_of (current_thread);
5223 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5224 {
5225 errno = 0;
5226 ptrace (PTRACE_POKEUSER, pid,
5227 /* Coerce to a uintptr_t first to avoid potential gcc warning
5228 about coercing an 8 byte integer to a 4 byte pointer. */
5229 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5230 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5231 if (errno != 0)
5232 {
5233 /* At this point, ESRCH should mean the process is
5234 already gone, in which case we simply ignore attempts
5235 to change its registers. See also the related
5236 comment in linux_resume_one_lwp. */
5237 if (errno == ESRCH)
5238 return;
5239
5240 if ((*the_low_target.cannot_store_register) (regno) == 0)
5241 error ("writing register %d: %s", regno, strerror (errno));
5242 }
5243 regaddr += sizeof (PTRACE_XFER_TYPE);
5244 }
5245 }
5246
5247 /* Fetch all registers, or just one, from the child process.
5248 If REGNO is -1, do this for all registers, skipping any that are
5249 assumed to have been retrieved by regsets_fetch_inferior_registers,
5250 unless ALL is non-zero.
5251 Otherwise, REGNO specifies which register (so we can save time). */
5252 static void
5253 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5254 struct regcache *regcache, int regno, int all)
5255 {
5256 struct usrregs_info *usr = regs_info->usrregs;
5257
5258 if (regno == -1)
5259 {
5260 for (regno = 0; regno < usr->num_regs; regno++)
5261 if (all || !linux_register_in_regsets (regs_info, regno))
5262 fetch_register (usr, regcache, regno);
5263 }
5264 else
5265 fetch_register (usr, regcache, regno);
5266 }
5267
5268 /* Store our register values back into the inferior.
5269 If REGNO is -1, do this for all registers, skipping any that are
5270 assumed to have been saved by regsets_store_inferior_registers,
5271 unless ALL is non-zero.
5272 Otherwise, REGNO specifies which register (so we can save time). */
5273 static void
5274 usr_store_inferior_registers (const struct regs_info *regs_info,
5275 struct regcache *regcache, int regno, int all)
5276 {
5277 struct usrregs_info *usr = regs_info->usrregs;
5278
5279 if (regno == -1)
5280 {
5281 for (regno = 0; regno < usr->num_regs; regno++)
5282 if (all || !linux_register_in_regsets (regs_info, regno))
5283 store_register (usr, regcache, regno);
5284 }
5285 else
5286 store_register (usr, regcache, regno);
5287 }
5288
5289 #else /* !HAVE_LINUX_USRREGS */
5290
5291 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5292 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5293
5294 #endif
5295
5296
5297 void
5298 linux_fetch_registers (struct regcache *regcache, int regno)
5299 {
5300 int use_regsets;
5301 int all = 0;
5302 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5303
5304 if (regno == -1)
5305 {
5306 if (the_low_target.fetch_register != NULL
5307 && regs_info->usrregs != NULL)
5308 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5309 (*the_low_target.fetch_register) (regcache, regno);
5310
5311 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5312 if (regs_info->usrregs != NULL)
5313 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5314 }
5315 else
5316 {
5317 if (the_low_target.fetch_register != NULL
5318 && (*the_low_target.fetch_register) (regcache, regno))
5319 return;
5320
5321 use_regsets = linux_register_in_regsets (regs_info, regno);
5322 if (use_regsets)
5323 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5324 regcache);
5325 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5326 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5327 }
5328 }
5329
5330 void
5331 linux_store_registers (struct regcache *regcache, int regno)
5332 {
5333 int use_regsets;
5334 int all = 0;
5335 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5336
5337 if (regno == -1)
5338 {
5339 all = regsets_store_inferior_registers (regs_info->regsets_info,
5340 regcache);
5341 if (regs_info->usrregs != NULL)
5342 usr_store_inferior_registers (regs_info, regcache, regno, all);
5343 }
5344 else
5345 {
5346 use_regsets = linux_register_in_regsets (regs_info, regno);
5347 if (use_regsets)
5348 all = regsets_store_inferior_registers (regs_info->regsets_info,
5349 regcache);
5350 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5351 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5352 }
5353 }
5354
5355
5356 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5357 to debugger memory starting at MYADDR. */
5358
5359 static int
5360 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5361 {
5362 int pid = lwpid_of (current_thread);
5363 register PTRACE_XFER_TYPE *buffer;
5364 register CORE_ADDR addr;
5365 register int count;
5366 char filename[64];
5367 register int i;
5368 int ret;
5369 int fd;
5370
5371 /* Try using /proc. Don't bother for one word. */
5372 if (len >= 3 * sizeof (long))
5373 {
5374 int bytes;
5375
5376 /* We could keep this file open and cache it - possibly one per
5377 thread. That requires some juggling, but is even faster. */
5378 sprintf (filename, "/proc/%d/mem", pid);
5379 fd = open (filename, O_RDONLY | O_LARGEFILE);
5380 if (fd == -1)
5381 goto no_proc;
5382
5383 /* If pread64 is available, use it. It's faster if the kernel
5384 supports it (only one syscall), and it's 64-bit safe even on
5385 32-bit platforms (for instance, SPARC debugging a SPARC64
5386 application). */
5387 #ifdef HAVE_PREAD64
5388 bytes = pread64 (fd, myaddr, len, memaddr);
5389 #else
5390 bytes = -1;
5391 if (lseek (fd, memaddr, SEEK_SET) != -1)
5392 bytes = read (fd, myaddr, len);
5393 #endif
5394
5395 close (fd);
5396 if (bytes == len)
5397 return 0;
5398
5399 /* Some data was read, we'll try to get the rest with ptrace. */
5400 if (bytes > 0)
5401 {
5402 memaddr += bytes;
5403 myaddr += bytes;
5404 len -= bytes;
5405 }
5406 }
5407
5408 no_proc:
5409 /* Round starting address down to longword boundary. */
5410 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5411 /* Round ending address up; get number of longwords that makes. */
5412 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5413 / sizeof (PTRACE_XFER_TYPE));
5414 /* Allocate buffer of that many longwords. */
5415 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5416
5417 /* Read all the longwords.  */
5418 errno = 0;
5419 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5420 {
5421 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5422 about coercing an 8 byte integer to a 4 byte pointer. */
5423 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5424 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5425 (PTRACE_TYPE_ARG4) 0);
5426 if (errno)
5427 break;
5428 }
5429 ret = errno;
5430
5431 /* Copy appropriate bytes out of the buffer. */
5432 if (i > 0)
5433 {
5434 i *= sizeof (PTRACE_XFER_TYPE);
5435 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5436 memcpy (myaddr,
5437 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5438 i < len ? i : len);
5439 }
5440
5441 return ret;
5442 }
5443
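/* For comparison, a sketch of the same transfer using
   process_vm_readv, a newer interface (Linux 3.2+, glibc 2.15+) that
   needs neither /proc nor word-at-a-time ptrace traffic; shown here
   only as an alternative, not something this file uses.  */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <sys/types.h>

static ssize_t
demo_read_mem (pid_t pid, void *dst, unsigned long addr, size_t len)
{
  struct iovec local = { dst, len };
  struct iovec remote = { (void *) addr, len };

  /* Needs the same permissions as PTRACE_ATTACH, but the target does
     not have to be stopped.  */
  return process_vm_readv (pid, &local, 1, &remote, 1, 0);
}
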
5444 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5445 memory at MEMADDR. On failure (cannot write to the inferior)
5446 returns the value of errno. Always succeeds if LEN is zero. */
5447
5448 static int
5449 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5450 {
5451 register int i;
5452 /* Round starting address down to longword boundary. */
5453 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5454 /* Round ending address up; get number of longwords that makes. */
5455 register int count
5456 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5457 / sizeof (PTRACE_XFER_TYPE);
5458
5459 /* Allocate buffer of that many longwords. */
5460 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5461
5462 int pid = lwpid_of (current_thread);
5463
5464 if (len == 0)
5465 {
5466 /* Zero length write always succeeds. */
5467 return 0;
5468 }
5469
5470 if (debug_threads)
5471 {
5472 /* Dump up to four bytes. */
5473 char str[4 * 2 + 1];
5474 char *p = str;
5475 int dump = len < 4 ? len : 4;
5476
5477 for (i = 0; i < dump; i++)
5478 {
5479 sprintf (p, "%02x", myaddr[i]);
5480 p += 2;
5481 }
5482 *p = '\0';
5483
5484 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5485 str, (long) memaddr, pid);
5486 }
5487
5488 /* Fill start and end extra bytes of buffer with existing memory data. */
5489
5490 errno = 0;
5491 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5492 about coercing an 8 byte integer to a 4 byte pointer. */
5493 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5494 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5495 (PTRACE_TYPE_ARG4) 0);
5496 if (errno)
5497 return errno;
5498
5499 if (count > 1)
5500 {
5501 errno = 0;
5502 buffer[count - 1]
5503 = ptrace (PTRACE_PEEKTEXT, pid,
5504 /* Coerce to a uintptr_t first to avoid potential gcc warning
5505 about coercing an 8 byte integer to a 4 byte pointer. */
5506 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5507 * sizeof (PTRACE_XFER_TYPE)),
5508 (PTRACE_TYPE_ARG4) 0);
5509 if (errno)
5510 return errno;
5511 }
5512
5513 /* Copy data to be written over corresponding part of buffer. */
5514
5515 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5516 myaddr, len);
5517
5518 /* Write the entire buffer. */
5519
5520 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5521 {
5522 errno = 0;
5523 ptrace (PTRACE_POKETEXT, pid,
5524 /* Coerce to a uintptr_t first to avoid potential gcc warning
5525 about coercing an 8 byte integer to a 4 byte pointer. */
5526 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5527 (PTRACE_TYPE_ARG4) buffer[i]);
5528 if (errno)
5529 return errno;
5530 }
5531
5532 return 0;
5533 }
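
/* A minimal standalone sketch (not part of gdbserver) of the
   read-modify-write pattern above: to change bytes that do not span
   whole words, peek the containing word, splice the new bytes in, and
   poke the word back.  poke_byte is a hypothetical helper writing one
   byte at ADDR in the stopped process PID; it returns 0 or errno.  */

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
poke_byte (pid_t pid, uintptr_t addr, unsigned char byte)
{
  uintptr_t word_addr = addr & -(uintptr_t) sizeof (long);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (void *) word_addr, NULL);
  if (errno != 0)
    return errno;

  /* Splice the byte into its slot within the word.  */
  memcpy ((char *) &word + (addr - word_addr), &byte, 1);

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (void *) word_addr, (void *) word);
  return errno;
}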
5534
5535 static void
5536 linux_look_up_symbols (void)
5537 {
5538 #ifdef USE_THREAD_DB
5539 struct process_info *proc = current_process ();
5540
5541 if (proc->priv->thread_db != NULL)
5542 return;
5543
5544 /* If the kernel supports tracing clones, then we don't need to
5545 use the magic thread event breakpoint to learn about
5546 threads. */
5547 thread_db_init (!linux_supports_traceclone ());
5548 #endif
5549 }
5550
5551 static void
5552 linux_request_interrupt (void)
5553 {
5554 extern unsigned long signal_pid;
5555
5556 /* Send a SIGINT to the process group. This acts just as if the user
5557 had typed a ^C on the controlling terminal. */
5558 kill (-signal_pid, SIGINT);
5559 }
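
/* A minimal sketch of the mechanism above: passing a negative pid to
   kill signals the whole process group, so every process in the
   inferior's group receives SIGINT, just as if ^C had been typed on
   the controlling terminal.  The function name is hypothetical.  */

#include <signal.h>
#include <sys/types.h>

static void
interrupt_process_group (pid_t group_leader_pid)
{
  /* kill (-pgid, sig) delivers SIG to every member of group PGID.  */
  kill (-group_leader_pid, SIGINT);
}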
5560
5561 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5562 to debugger memory starting at MYADDR. */
5563
5564 static int
5565 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5566 {
5567 char filename[PATH_MAX];
5568 int fd, n;
5569 int pid = lwpid_of (current_thread);
5570
5571 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5572
5573 fd = open (filename, O_RDONLY);
5574 if (fd < 0)
5575 return -1;
5576
5577 if (offset != (CORE_ADDR) 0
5578 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5579 n = -1;
5580 else
5581 n = read (fd, myaddr, len);
5582
5583 close (fd);
5584
5585 return n;
5586 }
5587
5588 /* These breakpoint and watchpoint related wrapper functions simply
5589 pass on the function call if the target has registered a
5590 corresponding function. */
5591
5592 static int
5593 linux_supports_z_point_type (char z_type)
5594 {
5595 return (the_low_target.supports_z_point_type != NULL
5596 && the_low_target.supports_z_point_type (z_type));
5597 }
5598
5599 static int
5600 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5601 int size, struct raw_breakpoint *bp)
5602 {
5603 if (type == raw_bkpt_type_sw)
5604 return insert_memory_breakpoint (bp);
5605 else if (the_low_target.insert_point != NULL)
5606 return the_low_target.insert_point (type, addr, size, bp);
5607 else
5608 /* Unsupported (see target.h). */
5609 return 1;
5610 }
5611
5612 static int
5613 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5614 int size, struct raw_breakpoint *bp)
5615 {
5616 if (type == raw_bkpt_type_sw)
5617 return remove_memory_breakpoint (bp);
5618 else if (the_low_target.remove_point != NULL)
5619 return the_low_target.remove_point (type, addr, size, bp);
5620 else
5621 /* Unsupported (see target.h). */
5622 return 1;
5623 }
5624
5625 /* Implement the to_stopped_by_sw_breakpoint target_ops
5626 method. */
5627
5628 static int
5629 linux_stopped_by_sw_breakpoint (void)
5630 {
5631 struct lwp_info *lwp = get_thread_lwp (current_thread);
5632
5633 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5634 }
5635
5636 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5637 method. */
5638
5639 static int
5640 linux_supports_stopped_by_sw_breakpoint (void)
5641 {
5642 return USE_SIGTRAP_SIGINFO;
5643 }
5644
5645 /* Implement the to_stopped_by_hw_breakpoint target_ops
5646 method. */
5647
5648 static int
5649 linux_stopped_by_hw_breakpoint (void)
5650 {
5651 struct lwp_info *lwp = get_thread_lwp (current_thread);
5652
5653 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5654 }
5655
5656 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5657 method. */
5658
5659 static int
5660 linux_supports_stopped_by_hw_breakpoint (void)
5661 {
5662 return USE_SIGTRAP_SIGINFO;
5663 }
5664
5665 /* Implement the supports_hardware_single_step target_ops method. */
5666
5667 static int
5668 linux_supports_hardware_single_step (void)
5669 {
5670 return can_hardware_single_step ();
5671 }
5672
5673 static int
5674 linux_stopped_by_watchpoint (void)
5675 {
5676 struct lwp_info *lwp = get_thread_lwp (current_thread);
5677
5678 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5679 }
5680
5681 static CORE_ADDR
5682 linux_stopped_data_address (void)
5683 {
5684 struct lwp_info *lwp = get_thread_lwp (current_thread);
5685
5686 return lwp->stopped_data_address;
5687 }
5688
5689 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5690 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5691 && defined(PT_TEXT_END_ADDR)
5692
5693 /* This is only used for targets that define PT_TEXT_ADDR,
5694 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5695 target presumably has other ways of acquiring this information, such
5696 as loadmaps. */
5697
5698 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5699 to tell gdb about. */
5700
5701 static int
5702 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5703 {
5704 unsigned long text, text_end, data;
5705 int pid = lwpid_of (current_thread);
5706
5707 errno = 0;
5708
5709 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5710 (PTRACE_TYPE_ARG4) 0);
5711 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5712 (PTRACE_TYPE_ARG4) 0);
5713 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5714 (PTRACE_TYPE_ARG4) 0);
5715
5716 if (errno == 0)
5717 {
5718 /* Both text and data offsets produced at compile-time (and so
5719 used by gdb) are relative to the beginning of the program,
5720 with the data segment immediately following the text segment.
5721 However, the actual runtime layout in memory may put the data
5722 somewhere else, so when we send gdb a data base-address, we
5723 use the real data base address and subtract the compile-time
5724 data base-address from it (which is just the length of the
5725 text segment). BSS immediately follows data in both
5726 cases. */
5727 *text_p = text;
5728 *data_p = data - (text_end - text);
5729
5730 return 1;
5731 }
5732 return 0;
5733 }
5734 #endif
5735
5736 static int
5737 linux_qxfer_osdata (const char *annex,
5738 unsigned char *readbuf, unsigned const char *writebuf,
5739 CORE_ADDR offset, int len)
5740 {
5741 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5742 }
5743
5744 /* Convert a native/host siginfo object into/from the siginfo in the
5745 layout of the inferior's architecture. */
5746
5747 static void
5748 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5749 {
5750 int done = 0;
5751
5752 if (the_low_target.siginfo_fixup != NULL)
5753 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5754
5755 /* If there was no callback, or the callback didn't do anything,
5756 then just do a straight memcpy. */
5757 if (!done)
5758 {
5759 if (direction == 1)
5760 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5761 else
5762 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5763 }
5764 }
5765
5766 static int
5767 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5768 unsigned const char *writebuf, CORE_ADDR offset, int len)
5769 {
5770 int pid;
5771 siginfo_t siginfo;
5772 char inf_siginfo[sizeof (siginfo_t)];
5773
5774 if (current_thread == NULL)
5775 return -1;
5776
5777 pid = lwpid_of (current_thread);
5778
5779 if (debug_threads)
5780 debug_printf ("%s siginfo for lwp %d.\n",
5781 readbuf != NULL ? "Reading" : "Writing",
5782 pid);
5783
5784 if (offset >= sizeof (siginfo))
5785 return -1;
5786
5787 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5788 return -1;
5789
5790 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5791 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5792 inferior with a 64-bit GDBSERVER should look the same as debugging it
5793 with a 32-bit GDBSERVER, we need to convert it. */
5794 siginfo_fixup (&siginfo, inf_siginfo, 0);
5795
5796 if (offset + len > sizeof (siginfo))
5797 len = sizeof (siginfo) - offset;
5798
5799 if (readbuf != NULL)
5800 memcpy (readbuf, inf_siginfo + offset, len);
5801 else
5802 {
5803 memcpy (inf_siginfo + offset, writebuf, len);
5804
5805 /* Convert back to ptrace layout before flushing it out. */
5806 siginfo_fixup (&siginfo, inf_siginfo, 1);
5807
5808 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5809 return -1;
5810 }
5811
5812 return len;
5813 }
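
/* A minimal standalone sketch (not part of gdbserver) of the ptrace
   call linux_xfer_siginfo starts from: PTRACE_GETSIGINFO copies the
   siginfo of the signal that stopped the tracee into a local buffer.
   The helper name show_pending_siginfo is hypothetical.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
show_pending_siginfo (pid_t pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &si) != 0)
    return -1;

  printf ("lwp %d stopped by signal %d (si_code %d)\n",
	  (int) pid, si.si_signo, si.si_code);
  return 0;
}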
5814
5815 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5816 it lets us notice when children change state; and it acts as the
5817 handler for the sigsuspend in my_waitpid. */
5818
5819 static void
5820 sigchld_handler (int signo)
5821 {
5822 int old_errno = errno;
5823
5824 if (debug_threads)
5825 {
5826 do
5827 {
5828 /* fprintf is not async-signal-safe, so call write
5829 directly. */
5830 if (write (2, "sigchld_handler\n",
5831 sizeof ("sigchld_handler\n") - 1) < 0)
5832 break; /* just ignore */
5833 } while (0);
5834 }
5835
5836 if (target_is_async_p ())
5837 async_file_mark (); /* trigger a linux_wait */
5838
5839 errno = old_errno;
5840 }
5841
5842 static int
5843 linux_supports_non_stop (void)
5844 {
5845 return 1;
5846 }
5847
5848 static int
5849 linux_async (int enable)
5850 {
5851 int previous = target_is_async_p ();
5852
5853 if (debug_threads)
5854 debug_printf ("linux_async (%d), previous=%d\n",
5855 enable, previous);
5856
5857 if (previous != enable)
5858 {
5859 sigset_t mask;
5860 sigemptyset (&mask);
5861 sigaddset (&mask, SIGCHLD);
5862
5863 sigprocmask (SIG_BLOCK, &mask, NULL);
5864
5865 if (enable)
5866 {
5867 if (pipe (linux_event_pipe) == -1)
5868 {
5869 linux_event_pipe[0] = -1;
5870 linux_event_pipe[1] = -1;
5871 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5872
5873 warning ("creating event pipe failed.");
5874 return previous;
5875 }
5876
5877 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5878 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5879
5880 /* Register the event loop handler. */
5881 add_file_handler (linux_event_pipe[0],
5882 handle_target_event, NULL);
5883
5884 /* Always trigger a linux_wait. */
5885 async_file_mark ();
5886 }
5887 else
5888 {
5889 delete_file_handler (linux_event_pipe[0]);
5890
5891 close (linux_event_pipe[0]);
5892 close (linux_event_pipe[1]);
5893 linux_event_pipe[0] = -1;
5894 linux_event_pipe[1] = -1;
5895 }
5896
5897 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5898 }
5899
5900 return previous;
5901 }
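
/* A minimal standalone sketch (not part of gdbserver) of the
   self-pipe trick linux_async sets up above: the signal handler
   writes one byte to a non-blocking pipe, turning an asynchronous
   SIGCHLD into an event that a select/poll based loop can wake up
   on.  All names here are hypothetical.  */

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

static void
mark_event (int signo)
{
  /* write is async-signal-safe; if the pipe is full, an event is
     already pending, so the result can be ignored.  */
  (void) write (event_pipe[1], "+", 1);
}

static int
setup_event_pipe (void)
{
  if (pipe (event_pipe) == -1)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, mark_event);
  /* The event loop then watches event_pipe[0] and drains it before
     calling waitpid.  */
  return 0;
}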
5902
5903 static int
5904 linux_start_non_stop (int nonstop)
5905 {
5906 /* Register or unregister from event-loop accordingly. */
5907 linux_async (nonstop);
5908
5909 if (target_is_async_p () != (nonstop != 0))
5910 return -1;
5911
5912 return 0;
5913 }
5914
5915 static int
5916 linux_supports_multi_process (void)
5917 {
5918 return 1;
5919 }
5920
5921 /* Check if fork events are supported. */
5922
5923 static int
5924 linux_supports_fork_events (void)
5925 {
5926 return linux_supports_tracefork ();
5927 }
5928
5929 /* Check if vfork events are supported. */
5930
5931 static int
5932 linux_supports_vfork_events (void)
5933 {
5934 return linux_supports_tracefork ();
5935 }
5936
5937 /* Check if exec events are supported. */
5938
5939 static int
5940 linux_supports_exec_events (void)
5941 {
5942 return linux_supports_traceexec ();
5943 }
5944
5945 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5946 options for the specified lwp. */
5947
5948 static int
5949 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5950 void *args)
5951 {
5952 struct thread_info *thread = (struct thread_info *) entry;
5953 struct lwp_info *lwp = get_thread_lwp (thread);
5954
5955 if (!lwp->stopped)
5956 {
5957 /* Stop the lwp so we can modify its ptrace options. */
5958 lwp->must_set_ptrace_flags = 1;
5959 linux_stop_lwp (lwp);
5960 }
5961 else
5962 {
5963 /* Already stopped; go ahead and set the ptrace options. */
5964 struct process_info *proc = find_process_pid (pid_of (thread));
5965 int options = linux_low_ptrace_options (proc->attached);
5966
5967 linux_enable_event_reporting (lwpid_of (thread), options);
5968 lwp->must_set_ptrace_flags = 0;
5969 }
5970
5971 return 0;
5972 }
5973
5974 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5975 ptrace flags for all inferiors. This is in case the new GDB connection
5976 doesn't support the same set of events that the previous one did. */
5977
5978 static void
5979 linux_handle_new_gdb_connection (void)
5980 {
5981 pid_t pid;
5982
5983 /* Request that all the lwps reset their ptrace options. */
5984 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5985 }
5986
5987 static int
5988 linux_supports_disable_randomization (void)
5989 {
5990 #ifdef HAVE_PERSONALITY
5991 return 1;
5992 #else
5993 return 0;
5994 #endif
5995 }
5996
5997 static int
5998 linux_supports_agent (void)
5999 {
6000 return 1;
6001 }
6002
6003 static int
6004 linux_supports_range_stepping (void)
6005 {
6006 if (*the_low_target.supports_range_stepping == NULL)
6007 return 0;
6008
6009 return (*the_low_target.supports_range_stepping) ();
6010 }
6011
6012 /* Enumerate spufs IDs for process PID. */
6013 static int
6014 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6015 {
6016 int pos = 0;
6017 int written = 0;
6018 char path[128];
6019 DIR *dir;
6020 struct dirent *entry;
6021
6022 sprintf (path, "/proc/%ld/fd", pid);
6023 dir = opendir (path);
6024 if (!dir)
6025 return -1;
6026
6027 rewinddir (dir);
6028 while ((entry = readdir (dir)) != NULL)
6029 {
6030 struct stat st;
6031 struct statfs stfs;
6032 int fd;
6033
6034 fd = atoi (entry->d_name);
6035 if (!fd)
6036 continue;
6037
6038 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6039 if (stat (path, &st) != 0)
6040 continue;
6041 if (!S_ISDIR (st.st_mode))
6042 continue;
6043
6044 if (statfs (path, &stfs) != 0)
6045 continue;
6046 if (stfs.f_type != SPUFS_MAGIC)
6047 continue;
6048
6049 if (pos >= offset && pos + 4 <= offset + len)
6050 {
6051 *(unsigned int *)(buf + pos - offset) = fd;
6052 written += 4;
6053 }
6054 pos += 4;
6055 }
6056
6057 closedir (dir);
6058 return written;
6059 }
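
/* A minimal sketch of the filesystem test above: statfs fills in the
   filesystem's magic number in f_type, so a /proc/PID/fd entry can be
   checked for residing on spufs by comparing against SPUFS_MAGIC
   (defined near the top of this file).  */

#include <sys/vfs.h>

static int
is_on_spufs (const char *path)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == SPUFS_MAGIC;
}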
6060
6061 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6062 object type, using the /proc file system. */
6063 static int
6064 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6065 unsigned const char *writebuf,
6066 CORE_ADDR offset, int len)
6067 {
6068 long pid = lwpid_of (current_thread);
6069 char buf[128];
6070 int fd = 0;
6071 int ret = 0;
6072
6073 if (!writebuf && !readbuf)
6074 return -1;
6075
6076 if (!*annex)
6077 {
6078 if (!readbuf)
6079 return -1;
6080 else
6081 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6082 }
6083
6084 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6085 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6086 if (fd <= 0)
6087 return -1;
6088
6089 if (offset != 0
6090 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6091 {
6092 close (fd);
6093 return 0;
6094 }
6095
6096 if (writebuf)
6097 ret = write (fd, writebuf, (size_t) len);
6098 else
6099 ret = read (fd, readbuf, (size_t) len);
6100
6101 close (fd);
6102 return ret;
6103 }
6104
6105 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6106 struct target_loadseg
6107 {
6108 /* Core address to which the segment is mapped. */
6109 Elf32_Addr addr;
6110 /* VMA recorded in the program header. */
6111 Elf32_Addr p_vaddr;
6112 /* Size of this segment in memory. */
6113 Elf32_Word p_memsz;
6114 };
6115
6116 # if defined PT_GETDSBT
6117 struct target_loadmap
6118 {
6119 /* Protocol version number, must be zero. */
6120 Elf32_Word version;
6121 /* Pointer to the DSBT table, its size, and the DSBT index. */
6122 unsigned *dsbt_table;
6123 unsigned dsbt_size, dsbt_index;
6124 /* Number of segments in this map. */
6125 Elf32_Word nsegs;
6126 /* The actual memory map. */
6127 struct target_loadseg segs[/*nsegs*/];
6128 };
6129 # define LINUX_LOADMAP PT_GETDSBT
6130 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6131 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6132 # else
6133 struct target_loadmap
6134 {
6135 /* Protocol version number, must be zero. */
6136 Elf32_Half version;
6137 /* Number of segments in this map. */
6138 Elf32_Half nsegs;
6139 /* The actual memory map. */
6140 struct target_loadseg segs[/*nsegs*/];
6141 };
6142 # define LINUX_LOADMAP PTRACE_GETFDPIC
6143 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6144 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6145 # endif
6146
6147 static int
6148 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6149 unsigned char *myaddr, unsigned int len)
6150 {
6151 int pid = lwpid_of (current_thread);
6152 int addr = -1;
6153 struct target_loadmap *data = NULL;
6154 unsigned int actual_length, copy_length;
6155
6156 if (strcmp (annex, "exec") == 0)
6157 addr = (int) LINUX_LOADMAP_EXEC;
6158 else if (strcmp (annex, "interp") == 0)
6159 addr = (int) LINUX_LOADMAP_INTERP;
6160 else
6161 return -1;
6162
6163 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6164 return -1;
6165
6166 if (data == NULL)
6167 return -1;
6168
6169 actual_length = sizeof (struct target_loadmap)
6170 + sizeof (struct target_loadseg) * data->nsegs;
6171
6172 if (offset > actual_length)
6173 return -1;
6174
6175 copy_length = actual_length - offset < len ? actual_length - offset : len;
6176 memcpy (myaddr, (char *) data + offset, copy_length);
6177 return copy_length;
6178 }
6179 #else
6180 # define linux_read_loadmap NULL
6181 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6182
6183 static void
6184 linux_process_qsupported (char **features, int count)
6185 {
6186 if (the_low_target.process_qsupported != NULL)
6187 the_low_target.process_qsupported (features, count);
6188 }
6189
6190 static int
6191 linux_supports_tracepoints (void)
6192 {
6193 if (*the_low_target.supports_tracepoints == NULL)
6194 return 0;
6195
6196 return (*the_low_target.supports_tracepoints) ();
6197 }
6198
6199 static CORE_ADDR
6200 linux_read_pc (struct regcache *regcache)
6201 {
6202 if (the_low_target.get_pc == NULL)
6203 return 0;
6204
6205 return (*the_low_target.get_pc) (regcache);
6206 }
6207
6208 static void
6209 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6210 {
6211 gdb_assert (the_low_target.set_pc != NULL);
6212
6213 (*the_low_target.set_pc) (regcache, pc);
6214 }
6215
6216 static int
6217 linux_thread_stopped (struct thread_info *thread)
6218 {
6219 return get_thread_lwp (thread)->stopped;
6220 }
6221
6222 /* This exposes stop-all-threads functionality to other modules. */
6223
6224 static void
6225 linux_pause_all (int freeze)
6226 {
6227 stop_all_lwps (freeze, NULL);
6228 }
6229
6230 /* This exposes unstop-all-threads functionality to other gdbserver
6231 modules. */
6232
6233 static void
6234 linux_unpause_all (int unfreeze)
6235 {
6236 unstop_all_lwps (unfreeze, NULL);
6237 }
6238
6239 static int
6240 linux_prepare_to_access_memory (void)
6241 {
6242 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6243 running LWP. */
6244 if (non_stop)
6245 linux_pause_all (1);
6246 return 0;
6247 }
6248
6249 static void
6250 linux_done_accessing_memory (void)
6251 {
6252 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6253 running LWP. */
6254 if (non_stop)
6255 linux_unpause_all (1);
6256 }
6257
6258 static int
6259 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6260 CORE_ADDR collector,
6261 CORE_ADDR lockaddr,
6262 ULONGEST orig_size,
6263 CORE_ADDR *jump_entry,
6264 CORE_ADDR *trampoline,
6265 ULONGEST *trampoline_size,
6266 unsigned char *jjump_pad_insn,
6267 ULONGEST *jjump_pad_insn_size,
6268 CORE_ADDR *adjusted_insn_addr,
6269 CORE_ADDR *adjusted_insn_addr_end,
6270 char *err)
6271 {
6272 return (*the_low_target.install_fast_tracepoint_jump_pad)
6273 (tpoint, tpaddr, collector, lockaddr, orig_size,
6274 jump_entry, trampoline, trampoline_size,
6275 jjump_pad_insn, jjump_pad_insn_size,
6276 adjusted_insn_addr, adjusted_insn_addr_end,
6277 err);
6278 }
6279
6280 static struct emit_ops *
6281 linux_emit_ops (void)
6282 {
6283 if (the_low_target.emit_ops != NULL)
6284 return (*the_low_target.emit_ops) ();
6285 else
6286 return NULL;
6287 }
6288
6289 static int
6290 linux_get_min_fast_tracepoint_insn_len (void)
6291 {
6292 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6293 }
6294
6295 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6296
6297 static int
6298 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6299 CORE_ADDR *phdr_memaddr, int *num_phdr)
6300 {
6301 char filename[PATH_MAX];
6302 int fd;
6303 const int auxv_size = is_elf64
6304 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6305 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6306
6307 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6308
6309 fd = open (filename, O_RDONLY);
6310 if (fd < 0)
6311 return 1;
6312
6313 *phdr_memaddr = 0;
6314 *num_phdr = 0;
6315 while (read (fd, buf, auxv_size) == auxv_size
6316 && (*phdr_memaddr == 0 || *num_phdr == 0))
6317 {
6318 if (is_elf64)
6319 {
6320 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6321
6322 switch (aux->a_type)
6323 {
6324 case AT_PHDR:
6325 *phdr_memaddr = aux->a_un.a_val;
6326 break;
6327 case AT_PHNUM:
6328 *num_phdr = aux->a_un.a_val;
6329 break;
6330 }
6331 }
6332 else
6333 {
6334 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6335
6336 switch (aux->a_type)
6337 {
6338 case AT_PHDR:
6339 *phdr_memaddr = aux->a_un.a_val;
6340 break;
6341 case AT_PHNUM:
6342 *num_phdr = aux->a_un.a_val;
6343 break;
6344 }
6345 }
6346 }
6347
6348 close (fd);
6349
6350 if (*phdr_memaddr == 0 || *num_phdr == 0)
6351 {
6352 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6353 "phdr_memaddr = %ld, phdr_num = %d",
6354 (long) *phdr_memaddr, *num_phdr);
6355 return 2;
6356 }
6357
6358 return 0;
6359 }
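
/* A minimal standalone sketch (not part of gdbserver): the auxiliary
   vector is a flat array of (a_type, a_val) pairs terminated by
   AT_NULL, so AT_PHDR and AT_PHNUM can be pulled from
   /proc/self/auxv just as the function above does for an inferior.
   Assumes a 64-bit process for brevity.  */

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
show_own_phdr_info (void)
{
  Elf64_auxv_t aux;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return;
  while (read (fd, &aux, sizeof aux) == sizeof aux
	 && aux.a_type != AT_NULL)
    if (aux.a_type == AT_PHDR || aux.a_type == AT_PHNUM)
      printf ("a_type %lu: %#lx\n",
	      (unsigned long) aux.a_type,
	      (unsigned long) aux.a_un.a_val);
  close (fd);
}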
6360
6361 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6362
6363 static CORE_ADDR
6364 get_dynamic (const int pid, const int is_elf64)
6365 {
6366 CORE_ADDR phdr_memaddr, relocation;
6367 int num_phdr, i;
6368 unsigned char *phdr_buf;
6369 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6370
6371 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6372 return 0;
6373
6374 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6375 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6376
6377 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6378 return 0;
6379
6380 /* Compute relocation: it is expected to be 0 for "regular" executables,
6381 non-zero for PIE ones. */
6382 relocation = -1;
6383 for (i = 0; relocation == -1 && i < num_phdr; i++)
6384 if (is_elf64)
6385 {
6386 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6387
6388 if (p->p_type == PT_PHDR)
6389 relocation = phdr_memaddr - p->p_vaddr;
6390 }
6391 else
6392 {
6393 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6394
6395 if (p->p_type == PT_PHDR)
6396 relocation = phdr_memaddr - p->p_vaddr;
6397 }
6398
6399 if (relocation == -1)
6400 {
6401 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6402 all real-world executables, including PIE executables, always have
6403 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6404 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6405 provides DT_DEBUG anyway (fpc binaries are statically linked).
6406
6407 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6408
6409 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6410
6411 return 0;
6412 }
6413
6414 for (i = 0; i < num_phdr; i++)
6415 {
6416 if (is_elf64)
6417 {
6418 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6419
6420 if (p->p_type == PT_DYNAMIC)
6421 return p->p_vaddr + relocation;
6422 }
6423 else
6424 {
6425 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6426
6427 if (p->p_type == PT_DYNAMIC)
6428 return p->p_vaddr + relocation;
6429 }
6430 }
6431
6432 return 0;
6433 }
6434
6435 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6436 can be 0 if the inferior does not yet have the library list initialized.
6437 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6438 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6439
6440 static CORE_ADDR
6441 get_r_debug (const int pid, const int is_elf64)
6442 {
6443 CORE_ADDR dynamic_memaddr;
6444 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6445 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6446 CORE_ADDR map = -1;
6447
6448 dynamic_memaddr = get_dynamic (pid, is_elf64);
6449 if (dynamic_memaddr == 0)
6450 return map;
6451
6452 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6453 {
6454 if (is_elf64)
6455 {
6456 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6457 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6458 union
6459 {
6460 Elf64_Xword map;
6461 unsigned char buf[sizeof (Elf64_Xword)];
6462 }
6463 rld_map;
6464 #endif
6465 #ifdef DT_MIPS_RLD_MAP
6466 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6467 {
6468 if (linux_read_memory (dyn->d_un.d_val,
6469 rld_map.buf, sizeof (rld_map.buf)) == 0)
6470 return rld_map.map;
6471 else
6472 break;
6473 }
6474 #endif /* DT_MIPS_RLD_MAP */
6475 #ifdef DT_MIPS_RLD_MAP_REL
6476 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6477 {
6478 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6479 rld_map.buf, sizeof (rld_map.buf)) == 0)
6480 return rld_map.map;
6481 else
6482 break;
6483 }
6484 #endif /* DT_MIPS_RLD_MAP_REL */
6485
6486 if (dyn->d_tag == DT_DEBUG && map == -1)
6487 map = dyn->d_un.d_val;
6488
6489 if (dyn->d_tag == DT_NULL)
6490 break;
6491 }
6492 else
6493 {
6494 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6495 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6496 union
6497 {
6498 Elf32_Word map;
6499 unsigned char buf[sizeof (Elf32_Word)];
6500 }
6501 rld_map;
6502 #endif
6503 #ifdef DT_MIPS_RLD_MAP
6504 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6505 {
6506 if (linux_read_memory (dyn->d_un.d_val,
6507 rld_map.buf, sizeof (rld_map.buf)) == 0)
6508 return rld_map.map;
6509 else
6510 break;
6511 }
6512 #endif /* DT_MIPS_RLD_MAP */
6513 #ifdef DT_MIPS_RLD_MAP_REL
6514 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6515 {
6516 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6517 rld_map.buf, sizeof (rld_map.buf)) == 0)
6518 return rld_map.map;
6519 else
6520 break;
6521 }
6522 #endif /* DT_MIPS_RLD_MAP_REL */
6523
6524 if (dyn->d_tag == DT_DEBUG && map == -1)
6525 map = dyn->d_un.d_val;
6526
6527 if (dyn->d_tag == DT_NULL)
6528 break;
6529 }
6530
6531 dynamic_memaddr += dyn_size;
6532 }
6533
6534 return map;
6535 }
6536
6537 /* Read one pointer from MEMADDR in the inferior. */
6538
6539 static int
6540 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6541 {
6542 int ret;
6543
6544 /* Go through a union so this works on either big or little endian
6545 hosts, when the inferior's pointer size is smaller than the size
6546 of CORE_ADDR. It is assumed the inferior's endianness is the
6547 same as the superior's. */
6548 union
6549 {
6550 CORE_ADDR core_addr;
6551 unsigned int ui;
6552 unsigned char uc;
6553 } addr;
6554
6555 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6556 if (ret == 0)
6557 {
6558 if (ptr_size == sizeof (CORE_ADDR))
6559 *ptr = addr.core_addr;
6560 else if (ptr_size == sizeof (unsigned int))
6561 *ptr = addr.ui;
6562 else
6563 gdb_assert_not_reached ("unhandled pointer size");
6564 }
6565 return ret;
6566 }
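
/* A minimal sketch of the union trick above: because every union
   member starts at the same address, raw bytes copied to the front of
   the union can be read back through the member that matches the
   inferior's pointer width, on both little and big endian hosts.
   The helper name widen_ptr is hypothetical.  */

#include <stdint.h>

static uint64_t
widen_ptr (const unsigned char *bytes, int ptr_size)
{
  union
  {
    uint64_t u64;
    uint32_t u32;
    unsigned char uc[8];
  } addr;
  int i;

  addr.u64 = 0;
  /* The raw inferior pointer occupies the first PTR_SIZE bytes,
     exactly where the matching union member begins.  */
  for (i = 0; i < ptr_size; i++)
    addr.uc[i] = bytes[i];

  return ptr_size == 8 ? addr.u64 : addr.u32;
}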
6567
6568 struct link_map_offsets
6569 {
6570 /* Offset and size of r_debug.r_version. */
6571 int r_version_offset;
6572
6573 /* Offset and size of r_debug.r_map. */
6574 int r_map_offset;
6575
6576 /* Offset to l_addr field in struct link_map. */
6577 int l_addr_offset;
6578
6579 /* Offset to l_name field in struct link_map. */
6580 int l_name_offset;
6581
6582 /* Offset to l_ld field in struct link_map. */
6583 int l_ld_offset;
6584
6585 /* Offset to l_next field in struct link_map. */
6586 int l_next_offset;
6587
6588 /* Offset to l_prev field in struct link_map. */
6589 int l_prev_offset;
6590 };
6591
6592 /* Construct qXfer:libraries-svr4:read reply. */
6593
6594 static int
6595 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6596 unsigned const char *writebuf,
6597 CORE_ADDR offset, int len)
6598 {
6599 char *document;
6600 unsigned document_len;
6601 struct process_info_private *const priv = current_process ()->priv;
6602 char filename[PATH_MAX];
6603 int pid, is_elf64;
6604
6605 static const struct link_map_offsets lmo_32bit_offsets =
6606 {
6607 0, /* r_version offset. */
6608 4, /* r_debug.r_map offset. */
6609 0, /* l_addr offset in link_map. */
6610 4, /* l_name offset in link_map. */
6611 8, /* l_ld offset in link_map. */
6612 12, /* l_next offset in link_map. */
6613 16 /* l_prev offset in link_map. */
6614 };
6615
6616 static const struct link_map_offsets lmo_64bit_offsets =
6617 {
6618 0, /* r_version offset. */
6619 8, /* r_debug.r_map offset. */
6620 0, /* l_addr offset in link_map. */
6621 8, /* l_name offset in link_map. */
6622 16, /* l_ld offset in link_map. */
6623 24, /* l_next offset in link_map. */
6624 32 /* l_prev offset in link_map. */
6625 };
6626 const struct link_map_offsets *lmo;
6627 unsigned int machine;
6628 int ptr_size;
6629 CORE_ADDR lm_addr = 0, lm_prev = 0;
6630 int allocated = 1024;
6631 char *p;
6632 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6633 int header_done = 0;
6634
6635 if (writebuf != NULL)
6636 return -2;
6637 if (readbuf == NULL)
6638 return -1;
6639
6640 pid = lwpid_of (current_thread);
6641 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6642 is_elf64 = elf_64_file_p (filename, &machine);
6643 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6644 ptr_size = is_elf64 ? 8 : 4;
6645
6646 while (annex[0] != '\0')
6647 {
6648 const char *sep;
6649 CORE_ADDR *addrp;
6650 int len;
6651
6652 sep = strchr (annex, '=');
6653 if (sep == NULL)
6654 break;
6655
6656 len = sep - annex;
6657 if (len == 5 && startswith (annex, "start"))
6658 addrp = &lm_addr;
6659 else if (len == 4 && startswith (annex, "prev"))
6660 addrp = &lm_prev;
6661 else
6662 {
6663 annex = strchr (sep, ';');
6664 if (annex == NULL)
6665 break;
6666 annex++;
6667 continue;
6668 }
6669
6670 annex = decode_address_to_semicolon (addrp, sep + 1);
6671 }
6672
6673 if (lm_addr == 0)
6674 {
6675 int r_version = 0;
6676
6677 if (priv->r_debug == 0)
6678 priv->r_debug = get_r_debug (pid, is_elf64);
6679
6680 /* We failed to find DT_DEBUG. This situation will not change
6681 for this inferior, so do not retry it. Report it to GDB as
6682 E01; see GDB's solib-svr4.c for the reasons. */
6683 if (priv->r_debug == (CORE_ADDR) -1)
6684 return -1;
6685
6686 if (priv->r_debug != 0)
6687 {
6688 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6689 (unsigned char *) &r_version,
6690 sizeof (r_version)) != 0
6691 || r_version != 1)
6692 {
6693 warning ("unexpected r_debug version %d", r_version);
6694 }
6695 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6696 &lm_addr, ptr_size) != 0)
6697 {
6698 warning ("unable to read r_map from 0x%lx",
6699 (long) priv->r_debug + lmo->r_map_offset);
6700 }
6701 }
6702 }
6703
6704 document = (char *) xmalloc (allocated);
6705 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6706 p = document + strlen (document);
6707
6708 while (lm_addr
6709 && read_one_ptr (lm_addr + lmo->l_name_offset,
6710 &l_name, ptr_size) == 0
6711 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6712 &l_addr, ptr_size) == 0
6713 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6714 &l_ld, ptr_size) == 0
6715 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6716 &l_prev, ptr_size) == 0
6717 && read_one_ptr (lm_addr + lmo->l_next_offset,
6718 &l_next, ptr_size) == 0)
6719 {
6720 unsigned char libname[PATH_MAX];
6721
6722 if (lm_prev != l_prev)
6723 {
6724 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6725 (long) lm_prev, (long) l_prev);
6726 break;
6727 }
6728
6729 /* Ignore the first entry even if it has a valid name, as the first
6730 entry corresponds to the main executable. The first entry should
6731 not be skipped if the dynamic loader was loaded late by a static
6732 executable (see the solib-svr4.c parameter ignore_first). But in
6733 that case the main executable does not have PT_DYNAMIC present, and
6734 this function has already exited above due to a failed get_r_debug. */
6735 if (lm_prev == 0)
6736 {
6737 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6738 p = p + strlen (p);
6739 }
6740 else
6741 {
6742 /* Not checking for error because reading may stop before
6743 we've got PATH_MAX worth of characters. */
6744 libname[0] = '\0';
6745 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6746 libname[sizeof (libname) - 1] = '\0';
6747 if (libname[0] != '\0')
6748 {
6749 /* 6x the size for xml_escape_text below. */
6750 size_t len = 6 * strlen ((char *) libname);
6751 char *name;
6752
6753 if (!header_done)
6754 {
6755 /* Terminate `<library-list-svr4'. */
6756 *p++ = '>';
6757 header_done = 1;
6758 }
6759
6760 while (allocated < p - document + len + 200)
6761 {
6762 /* Expand to guarantee sufficient storage. */
6763 uintptr_t document_len = p - document;
6764
6765 document = (char *) xrealloc (document, 2 * allocated);
6766 allocated *= 2;
6767 p = document + document_len;
6768 }
6769
6770 name = xml_escape_text ((char *) libname);
6771 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6772 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6773 name, (unsigned long) lm_addr,
6774 (unsigned long) l_addr, (unsigned long) l_ld);
6775 free (name);
6776 }
6777 }
6778
6779 lm_prev = lm_addr;
6780 lm_addr = l_next;
6781 }
6782
6783 if (!header_done)
6784 {
6785 /* Empty list; terminate `<library-list-svr4'. */
6786 strcpy (p, "/>");
6787 }
6788 else
6789 strcpy (p, "</library-list-svr4>");
6790
6791 document_len = strlen (document);
6792 if (offset < document_len)
6793 document_len -= offset;
6794 else
6795 document_len = 0;
6796 if (len > document_len)
6797 len = document_len;
6798
6799 memcpy (readbuf, document + offset, len);
6800 xfree (document);
6801
6802 return len;
6803 }
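
/* A minimal standalone sketch (not part of gdbserver) of the same
   list walk done in-process: on glibc, <link.h> exposes _r_debug,
   whose r_map field points at the chain of struct link_map nodes
   that linux_qxfer_libraries_svr4 reads out of the inferior pointer
   by pointer above.  */

#include <link.h>
#include <stdio.h>

static void
list_own_libraries (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    printf ("l_addr=%#lx l_name=%s\n",
	    (unsigned long) lm->l_addr,
	    (lm->l_name != NULL && lm->l_name[0] != '\0')
	    ? lm->l_name : "(main)");
}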
6804
6805 #ifdef HAVE_LINUX_BTRACE
6806
6807 /* See to_disable_btrace target method. */
6808
6809 static int
6810 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6811 {
6812 enum btrace_error err;
6813
6814 err = linux_disable_btrace (tinfo);
6815 return (err == BTRACE_ERR_NONE ? 0 : -1);
6816 }
6817
6818 /* Encode an Intel(R) Processor Trace configuration. */
6819
6820 static void
6821 linux_low_encode_pt_config (struct buffer *buffer,
6822 const struct btrace_data_pt_config *config)
6823 {
6824 buffer_grow_str (buffer, "<pt-config>\n");
6825
6826 switch (config->cpu.vendor)
6827 {
6828 case CV_INTEL:
6829 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6830 "model=\"%u\" stepping=\"%u\"/>\n",
6831 config->cpu.family, config->cpu.model,
6832 config->cpu.stepping);
6833 break;
6834
6835 default:
6836 break;
6837 }
6838
6839 buffer_grow_str (buffer, "</pt-config>\n");
6840 }
6841
6842 /* Encode a raw buffer. */
6843
6844 static void
6845 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6846 unsigned int size)
6847 {
6848 if (size == 0)
6849 return;
6850
6851 /* We use hex encoding - see common/rsp-low.h. */
6852 buffer_grow_str (buffer, "<raw>\n");
6853
6854 while (size-- > 0)
6855 {
6856 char elem[2];
6857
6858 elem[0] = tohex ((*data >> 4) & 0xf);
6859 elem[1] = tohex (*data++ & 0xf);
6860
6861 buffer_grow (buffer, elem, 2);
6862 }
6863
6864 buffer_grow_str (buffer, "</raw>\n");
6865 }
6866
6867 /* See to_read_btrace target method. */
6868
6869 static int
6870 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6871 enum btrace_read_type type)
6872 {
6873 struct btrace_data btrace;
6874 struct btrace_block *block;
6875 enum btrace_error err;
6876 int i;
6877
6878 btrace_data_init (&btrace);
6879
6880 err = linux_read_btrace (&btrace, tinfo, type);
6881 if (err != BTRACE_ERR_NONE)
6882 {
6883 if (err == BTRACE_ERR_OVERFLOW)
6884 buffer_grow_str0 (buffer, "E.Overflow.");
6885 else
6886 buffer_grow_str0 (buffer, "E.Generic Error.");
6887
6888 goto err;
6889 }
6890
6891 switch (btrace.format)
6892 {
6893 case BTRACE_FORMAT_NONE:
6894 buffer_grow_str0 (buffer, "E.No Trace.");
6895 goto err;
6896
6897 case BTRACE_FORMAT_BTS:
6898 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6899 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6900
6901 for (i = 0;
6902 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6903 i++)
6904 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6905 paddress (block->begin), paddress (block->end));
6906
6907 buffer_grow_str0 (buffer, "</btrace>\n");
6908 break;
6909
6910 case BTRACE_FORMAT_PT:
6911 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6912 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6913 buffer_grow_str (buffer, "<pt>\n");
6914
6915 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6916
6917 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6918 btrace.variant.pt.size);
6919
6920 buffer_grow_str (buffer, "</pt>\n");
6921 buffer_grow_str0 (buffer, "</btrace>\n");
6922 break;
6923
6924 default:
6925 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6926 goto err;
6927 }
6928
6929 btrace_data_fini (&btrace);
6930 return 0;
6931
6932 err:
6933 btrace_data_fini (&btrace);
6934 return -1;
6935 }
6936
6937 /* See to_btrace_conf target method. */
6938
6939 static int
6940 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6941 struct buffer *buffer)
6942 {
6943 const struct btrace_config *conf;
6944
6945 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6946 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6947
6948 conf = linux_btrace_conf (tinfo);
6949 if (conf != NULL)
6950 {
6951 switch (conf->format)
6952 {
6953 case BTRACE_FORMAT_NONE:
6954 break;
6955
6956 case BTRACE_FORMAT_BTS:
6957 buffer_xml_printf (buffer, "<bts");
6958 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6959 buffer_xml_printf (buffer, " />\n");
6960 break;
6961
6962 case BTRACE_FORMAT_PT:
6963 buffer_xml_printf (buffer, "<pt");
6964 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6965 buffer_xml_printf (buffer, "/>\n");
6966 break;
6967 }
6968 }
6969
6970 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6971 return 0;
6972 }
6973 #endif /* HAVE_LINUX_BTRACE */
6974
6975 /* See nat/linux-nat.h. */
6976
6977 ptid_t
6978 current_lwp_ptid (void)
6979 {
6980 return ptid_of (current_thread);
6981 }
6982
6983 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
6984
6985 static int
6986 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
6987 {
6988 if (the_low_target.breakpoint_kind_from_pc != NULL)
6989 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
6990 else
6991 return default_breakpoint_kind_from_pc (pcptr);
6992 }
6993
6994 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
6995
6996 static const gdb_byte *
6997 linux_sw_breakpoint_from_kind (int kind, int *size)
6998 {
6999 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7000
7001 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7002 }
7003
7004 static struct target_ops linux_target_ops = {
7005 linux_create_inferior,
7006 linux_arch_setup,
7007 linux_attach,
7008 linux_kill,
7009 linux_detach,
7010 linux_mourn,
7011 linux_join,
7012 linux_thread_alive,
7013 linux_resume,
7014 linux_wait,
7015 linux_fetch_registers,
7016 linux_store_registers,
7017 linux_prepare_to_access_memory,
7018 linux_done_accessing_memory,
7019 linux_read_memory,
7020 linux_write_memory,
7021 linux_look_up_symbols,
7022 linux_request_interrupt,
7023 linux_read_auxv,
7024 linux_supports_z_point_type,
7025 linux_insert_point,
7026 linux_remove_point,
7027 linux_stopped_by_sw_breakpoint,
7028 linux_supports_stopped_by_sw_breakpoint,
7029 linux_stopped_by_hw_breakpoint,
7030 linux_supports_stopped_by_hw_breakpoint,
7031 linux_supports_hardware_single_step,
7032 linux_stopped_by_watchpoint,
7033 linux_stopped_data_address,
7034 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7035 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7036 && defined(PT_TEXT_END_ADDR)
7037 linux_read_offsets,
7038 #else
7039 NULL,
7040 #endif
7041 #ifdef USE_THREAD_DB
7042 thread_db_get_tls_address,
7043 #else
7044 NULL,
7045 #endif
7046 linux_qxfer_spu,
7047 hostio_last_error_from_errno,
7048 linux_qxfer_osdata,
7049 linux_xfer_siginfo,
7050 linux_supports_non_stop,
7051 linux_async,
7052 linux_start_non_stop,
7053 linux_supports_multi_process,
7054 linux_supports_fork_events,
7055 linux_supports_vfork_events,
7056 linux_supports_exec_events,
7057 linux_handle_new_gdb_connection,
7058 #ifdef USE_THREAD_DB
7059 thread_db_handle_monitor_command,
7060 #else
7061 NULL,
7062 #endif
7063 linux_common_core_of_thread,
7064 linux_read_loadmap,
7065 linux_process_qsupported,
7066 linux_supports_tracepoints,
7067 linux_read_pc,
7068 linux_write_pc,
7069 linux_thread_stopped,
7070 NULL,
7071 linux_pause_all,
7072 linux_unpause_all,
7073 linux_stabilize_threads,
7074 linux_install_fast_tracepoint_jump_pad,
7075 linux_emit_ops,
7076 linux_supports_disable_randomization,
7077 linux_get_min_fast_tracepoint_insn_len,
7078 linux_qxfer_libraries_svr4,
7079 linux_supports_agent,
7080 #ifdef HAVE_LINUX_BTRACE
7081 linux_supports_btrace,
7082 linux_enable_btrace,
7083 linux_low_disable_btrace,
7084 linux_low_read_btrace,
7085 linux_low_btrace_conf,
7086 #else
7087 NULL,
7088 NULL,
7089 NULL,
7090 NULL,
7091 NULL,
7092 #endif
7093 linux_supports_range_stepping,
7094 linux_proc_pid_to_exec_file,
7095 linux_mntns_open_cloexec,
7096 linux_mntns_unlink,
7097 linux_mntns_readlink,
7098 linux_breakpoint_kind_from_pc,
7099 linux_sw_breakpoint_from_kind,
7100 linux_proc_tid_get_name,
7101 };
7102
7103 static void
7104 linux_init_signals ()
7105 {
7106 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
7107 to find what the cancel signal actually is. */
7108 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
7109 signal (__SIGRTMIN+1, SIG_IGN);
7110 #endif
7111 }
7112
7113 #ifdef HAVE_LINUX_REGSETS
7114 void
7115 initialize_regsets_info (struct regsets_info *info)
7116 {
7117 for (info->num_regsets = 0;
7118 info->regsets[info->num_regsets].size >= 0;
7119 info->num_regsets++)
7120 ;
7121 }
7122 #endif
7123
7124 void
7125 initialize_low (void)
7126 {
7127 struct sigaction sigchld_action;
7128
7129 memset (&sigchld_action, 0, sizeof (sigchld_action));
7130 set_target_ops (&linux_target_ops);
7131
7132 linux_init_signals ();
7133 linux_ptrace_init_warnings ();
7134
7135 sigchld_action.sa_handler = sigchld_handler;
7136 sigemptyset (&sigchld_action.sa_mask);
7137 sigchld_action.sa_flags = SA_RESTART;
7138 sigaction (SIGCHLD, &sigchld_action, NULL);
7139
7140 initialize_low_arch ();
7141
7142 linux_check_ptrace_features ();
7143 }