/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp,
                                    int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

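/* A signal to be delivered to a thread, with the associated siginfo;
   kept on a singly linked list through the PREV field.  */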
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  MACHINE is set to
   the ELF machine number in the former cases, and to EM_NONE in the
   latter.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

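/* Delete LWP: remove its thread from the thread list and free the
   lwp_info itself, including its arch-specific data.  */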
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 1 so as
   not to report the trap to higher layers); fork and vfork events
   are reported to the layers above (return 0).  If we see an exec
   event, we will modify ORIG_EVENT_LWP to point to a new LWP
   representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = XNEW (struct target_desc);
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Delete the execing process and all its threads.  */
      proc = get_thread_process (event_thr);
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

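/* Allocate a new lwp_info for the LWP with id PTID, and add a thread
   for it to the thread list.  */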
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

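/* Helper state and callback for last_thread_of_process_p below:
   count the known threads of process PID, stopping the iteration as
   soon as a second one is seen.  */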
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

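/* Kill process PID: stop all its LWPs, kill each of them (the leader
   last, see kill_one_lwp_callback), and mourn the process.  */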
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

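/* Callback for `find_inferior'.  Detach from the LWP of ENTRY if it
   belongs to the process given by ARGS, delivering any signal the
   LWP last stopped for.  */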
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

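/* Detach from process PID: stop its threads, detach from each of its
   LWPs, and then mourn the process.  */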
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

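/* Implementation of the mourn target_ops method.  Free gdbserver's
   state for PROCESS once it is gone (killed, detached, or exited).  */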
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

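/* Wait for process PID to exit, reaping its wait statuses until it
   does (or until it is no longer our child).  */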
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

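/* Callback for find_inferior.  Return true if ENTRY's LWP id matches
   DATA, a ptid: its lwp field if set, else its pid field.  */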
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

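/* Return the lwp_info for the LWP matching PTID (see same_lwp above),
   or NULL if no such LWP is known.  */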
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
                    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
                      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
                      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1922
1923 /* Convenience wrapper. Returns nonzero if LWP is presently collecting
1924 a fast tracepoint. */
1925
1926 static int
1927 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1928 struct fast_tpoint_collect_status *status)
1929 {
1930 CORE_ADDR thread_area;
1931 struct thread_info *thread = get_lwp_thread (lwp);
1932
1933 if (the_low_target.get_thread_area == NULL)
1934 return 0;
1935
1936 /* Get the thread area address. This is used to recognize which
1937 thread is which when tracing with the in-process agent library.
1938 We don't read anything from the address, and treat it as opaque;
1939 it's the address itself that we assume is unique per-thread. */
1940 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1941 return 0;
1942
1943 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1944 }
1945
1946 /* We resume in the caller because we want to be able to pass
1947 lwp->status_pending as WSTAT, and we need to clear
1948 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1949 refuses to resume. */
1950
1951 static int
1952 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1953 {
1954 struct thread_info *saved_thread;
1955
1956 saved_thread = current_thread;
1957 current_thread = get_lwp_thread (lwp);
1958
1959 if ((wstat == NULL
1960 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1961 && supports_fast_tracepoints ()
1962 && agent_loaded_p ())
1963 {
1964 struct fast_tpoint_collect_status status;
1965 int r;
1966
1967 if (debug_threads)
1968 debug_printf ("Checking whether LWP %ld needs to move out of the "
1969 "jump pad.\n",
1970 lwpid_of (current_thread));
1971
1972 r = linux_fast_tracepoint_collecting (lwp, &status);
1973
1974 if (wstat == NULL
1975 || (WSTOPSIG (*wstat) != SIGILL
1976 && WSTOPSIG (*wstat) != SIGFPE
1977 && WSTOPSIG (*wstat) != SIGSEGV
1978 && WSTOPSIG (*wstat) != SIGBUS))
1979 {
1980 lwp->collecting_fast_tracepoint = r;
1981
1982 if (r != 0)
1983 {
1984 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1985 {
1986 /* Haven't executed the original instruction yet.
1987 Set breakpoint there, and wait till it's hit,
1988 then single-step until exiting the jump pad. */
1989 lwp->exit_jump_pad_bkpt
1990 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1991 }
1992
1993 if (debug_threads)
1994 debug_printf ("Checking whether LWP %ld needs to move out of "
1995 "the jump pad...it does\n",
1996 lwpid_of (current_thread));
1997 current_thread = saved_thread;
1998
1999 return 1;
2000 }
2001 }
2002 else
2003 {
2004 /* If we get a synchronous signal while collecting, *and*
2005 while executing the (relocated) original instruction,
2006 reset the PC to point at the tpoint address, before
2007 reporting to GDB. Otherwise, it's an IPA lib bug: just
2008 report the signal to GDB, and pray for the best. */
2009
2010 lwp->collecting_fast_tracepoint = 0;
2011
2012 if (r != 0
2013 && (status.adjusted_insn_addr <= lwp->stop_pc
2014 && lwp->stop_pc < status.adjusted_insn_addr_end))
2015 {
2016 siginfo_t info;
2017 struct regcache *regcache;
2018
2019 /* The si_addr on a few signals references the address
2020 of the faulting instruction. Adjust that as
2021 well. */
2022 if ((WSTOPSIG (*wstat) == SIGILL
2023 || WSTOPSIG (*wstat) == SIGFPE
2024 || WSTOPSIG (*wstat) == SIGBUS
2025 || WSTOPSIG (*wstat) == SIGSEGV)
2026 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2027 (PTRACE_TYPE_ARG3) 0, &info) == 0
2028 /* Final check just to make sure we don't clobber
2029 the siginfo of non-kernel-sent signals. */
2030 && (uintptr_t) info.si_addr == lwp->stop_pc)
2031 {
2032 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2033 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2034 (PTRACE_TYPE_ARG3) 0, &info);
2035 }
2036
2037 regcache = get_thread_regcache (current_thread, 1);
2038 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2039 lwp->stop_pc = status.tpoint_addr;
2040
2041 /* Cancel any fast tracepoint lock this thread was
2042 holding. */
2043 force_unlock_trace_buffer ();
2044 }
2045
2046 if (lwp->exit_jump_pad_bkpt != NULL)
2047 {
2048 if (debug_threads)
2049 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2050 "stopping all threads momentarily.\n");
2051
2052 stop_all_lwps (1, lwp);
2053
2054 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2055 lwp->exit_jump_pad_bkpt = NULL;
2056
2057 unstop_all_lwps (1, lwp);
2058
2059 gdb_assert (lwp->suspended >= 0);
2060 }
2061 }
2062 }
2063
2064 if (debug_threads)
2065 debug_printf ("Checking whether LWP %ld needs to move out of the "
2066 "jump pad...no\n",
2067 lwpid_of (current_thread));
2068
2069 current_thread = saved_thread;
2070 return 0;
2071 }
2072
2073 /* Enqueue one signal in the "signals to report later when out of the
2074 jump pad" list. */
2075
2076 static void
2077 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2078 {
2079 struct pending_signals *p_sig;
2080 struct thread_info *thread = get_lwp_thread (lwp);
2081
2082 if (debug_threads)
2083 debug_printf ("Deferring signal %d for LWP %ld.\n",
2084 WSTOPSIG (*wstat), lwpid_of (thread));
2085
2086 if (debug_threads)
2087 {
2088 struct pending_signals *sig;
2089
2090 for (sig = lwp->pending_signals_to_report;
2091 sig != NULL;
2092 sig = sig->prev)
2093 debug_printf (" Already queued %d\n",
2094 sig->signal);
2095
2096 debug_printf (" (no more currently queued signals)\n");
2097 }
2098
2099 /* Don't enqueue non-RT signals if they are already in the deferred
2100 queue. (SIGSTOP being the easiest signal to see ending up here
2101 twice) */
2102 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2103 {
2104 struct pending_signals *sig;
2105
2106 for (sig = lwp->pending_signals_to_report;
2107 sig != NULL;
2108 sig = sig->prev)
2109 {
2110 if (sig->signal == WSTOPSIG (*wstat))
2111 {
2112 if (debug_threads)
2113 debug_printf ("Not requeuing already queued non-RT signal %d"
2114 " for LWP %ld\n",
2115 sig->signal,
2116 lwpid_of (thread));
2117 return;
2118 }
2119 }
2120 }
2121
2122 p_sig = XCNEW (struct pending_signals);
2123 p_sig->prev = lwp->pending_signals_to_report;
2124 p_sig->signal = WSTOPSIG (*wstat);
2125
2126 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2127 &p_sig->info);
2128
2129 lwp->pending_signals_to_report = p_sig;
2130 }
2131
2132 /* Dequeue one signal from the "signals to report later when out of
2133 the jump pad" list. */
2134
2135 static int
2136 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2137 {
2138 struct thread_info *thread = get_lwp_thread (lwp);
2139
2140 if (lwp->pending_signals_to_report != NULL)
2141 {
2142 struct pending_signals **p_sig;
2143
2144 p_sig = &lwp->pending_signals_to_report;
2145 while ((*p_sig)->prev != NULL)
2146 p_sig = &(*p_sig)->prev;
2147
2148 *wstat = W_STOPCODE ((*p_sig)->signal);
2149 if ((*p_sig)->info.si_signo != 0)
2150 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2151 &(*p_sig)->info);
2152 free (*p_sig);
2153 *p_sig = NULL;
2154
2155 if (debug_threads)
2156 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2157 WSTOPSIG (*wstat), lwpid_of (thread));
2158
2159 if (debug_threads)
2160 {
2161 struct pending_signals *sig;
2162
2163 for (sig = lwp->pending_signals_to_report;
2164 sig != NULL;
2165 sig = sig->prev)
2166 debug_printf (" Still queued %d\n",
2167 sig->signal);
2168
2169 debug_printf (" (no more queued signals)\n");
2170 }
2171
2172 return 1;
2173 }
2174
2175 return 0;
2176 }
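/* Illustrative, self-contained check of the W_STOPCODE round-trip
   that dequeue_one_deferred_signal relies on: W_STOPCODE (SIG)
   synthesizes a wait status for which WIFSTOPPED is true and WSTOPSIG
   recovers SIG. W_STOPCODE is a glibc extension (roughly
   ((sig) << 8 | 0x7f)), so a fallback definition is provided.  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

static void
check_w_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif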
2177
2178 /* Fetch the possibly triggered data watchpoint info and store it in
2179 CHILD.
2180
2181 On some archs, like x86, that use debug registers to set
2182 watchpoints, the way to know which watched address trapped is
2183 to check the register that is used to select which address to
2184 watch. The problem is that between setting the watchpoint
2185 and reading back which data address trapped, the user may change
2186 the set of watchpoints, and, as a consequence, GDB changes the
2187 debug registers in the inferior. To avoid reading back a stale
2188 stopped-data-address when that happens, we cache in CHILD the fact
2189 that a watchpoint trapped, and the corresponding data address, as
2190 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2191 registers meanwhile, we still have the cached data to rely on. */
2192
2193 static int
2194 check_stopped_by_watchpoint (struct lwp_info *child)
2195 {
2196 if (the_low_target.stopped_by_watchpoint != NULL)
2197 {
2198 struct thread_info *saved_thread;
2199
2200 saved_thread = current_thread;
2201 current_thread = get_lwp_thread (child);
2202
2203 if (the_low_target.stopped_by_watchpoint ())
2204 {
2205 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2206
2207 if (the_low_target.stopped_data_address != NULL)
2208 child->stopped_data_address
2209 = the_low_target.stopped_data_address ();
2210 else
2211 child->stopped_data_address = 0;
2212 }
2213
2214 current_thread = saved_thread;
2215 }
2216
2217 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2218 }
2219
2220 /* Return the ptrace options that we want to try to enable. */
2221
2222 static int
2223 linux_low_ptrace_options (int attached)
2224 {
2225 int options = 0;
2226
2227 if (!attached)
2228 options |= PTRACE_O_EXITKILL;
2229
2230 if (report_fork_events)
2231 options |= PTRACE_O_TRACEFORK;
2232
2233 if (report_vfork_events)
2234 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2235
2236 if (report_exec_events)
2237 options |= PTRACE_O_TRACEEXEC;
2238
2239 return options;
2240 }
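/* Illustrative sketch of how an option mask like the one built above
   is applied to a stopped tracee. In this file that happens inside
   linux_enable_event_reporting (called below); the direct
   PTRACE_SETOPTIONS call here is for exposition only.  */
#if 0
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void
apply_ptrace_options (pid_t lwpid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  /* PTRACE_SETOPTIONS takes the LWP id and the option bit mask in the
     data argument; the addr argument is unused. The tracee must be
     ptrace-stopped for the call to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, lwpid, (void *) 0,
	      (void *) (long) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}
#endif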
2241
2242 /* Do low-level handling of the event, and check if we should go on
2243 and pass it to the caller code. Return the affected LWP if so, or
2244 NULL otherwise. */
2245
2246 static struct lwp_info *
2247 linux_low_filter_event (int lwpid, int wstat)
2248 {
2249 struct lwp_info *child;
2250 struct thread_info *thread;
2251 int have_stop_pc = 0;
2252
2253 child = find_lwp_pid (pid_to_ptid (lwpid));
2254
2255 /* Check for stop events reported by a process we didn't already
2256 know about - anything not already in our LWP list.
2257
2258 If we're expecting to receive stopped processes after
2259 fork, vfork, and clone events, then we'll just add the
2260 new one to our list and go back to waiting for the event
2261 to be reported - the stopped process might be returned
2262 from waitpid before or after the event is.
2263
2264 But note the case of a non-leader thread exec'ing after the
2265 leader having exited, and gone from our lists (because
2266 check_zombie_leaders deleted it). The non-leader thread
2267 changes its tid to the tgid. */
2268
2269 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2270 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2271 {
2272 ptid_t child_ptid;
2273
2274 /* A multi-thread exec after we had seen the leader exiting. */
2275 if (debug_threads)
2276 {
2277 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2278 "after exec.\n", lwpid);
2279 }
2280
2281 child_ptid = ptid_build (lwpid, lwpid, 0);
2282 child = add_lwp (child_ptid);
2283 child->stopped = 1;
2284 current_thread = child->thread;
2285 }
2286
2287 /* If we didn't find a process, one of two things presumably happened:
2288 - A process we started and then detached from has exited. Ignore it.
2289 - A process we are controlling has forked and the new child's stop
2290 was reported to us by the kernel. Save its PID. */
2291 if (child == NULL && WIFSTOPPED (wstat))
2292 {
2293 add_to_pid_list (&stopped_pids, lwpid, wstat);
2294 return NULL;
2295 }
2296 else if (child == NULL)
2297 return NULL;
2298
2299 thread = get_lwp_thread (child);
2300
2301 child->stopped = 1;
2302
2303 child->last_status = wstat;
2304
2305 /* Check if the thread has exited. */
2306 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2307 {
2308 if (debug_threads)
2309 debug_printf ("LLFE: %d exited.\n", lwpid);
2310 /* If there is at least one more LWP, then the exit signal was
2311 not the end of the debugged application and should be
2312 ignored, unless GDB wants to hear about thread exits. */
2313 if (report_thread_events
2314 || last_thread_of_process_p (pid_of (thread)))
2315 {
2316 /* Since events are serialized to the GDB core, we can't
2317 report this one right now. Leave the status pending for
2318 the next time we're able to report it. */
2319 mark_lwp_dead (child, wstat);
2320 return child;
2321 }
2322 else
2323 {
2324 delete_lwp (child);
2325 return NULL;
2326 }
2327 }
2328
2329 gdb_assert (WIFSTOPPED (wstat));
2330
2331 if (WIFSTOPPED (wstat))
2332 {
2333 struct process_info *proc;
2334
2335 /* Architecture-specific setup after inferior is running. */
2336 proc = find_process_pid (pid_of (thread));
2337 if (proc->tdesc == NULL)
2338 {
2339 if (proc->attached)
2340 {
2341 /* This needs to happen after we have attached to the
2342 inferior and it is stopped for the first time, but
2343 before we access any inferior registers. */
2344 linux_arch_setup_thread (thread);
2345 }
2346 else
2347 {
2348 /* The process is started, but GDBserver will do
2349 architecture-specific setup after the program stops at
2350 the first instruction. */
2351 child->status_pending_p = 1;
2352 child->status_pending = wstat;
2353 return child;
2354 }
2355 }
2356 }
2357
2358 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2359 {
2360 struct process_info *proc = find_process_pid (pid_of (thread));
2361 int options = linux_low_ptrace_options (proc->attached);
2362
2363 linux_enable_event_reporting (lwpid, options);
2364 child->must_set_ptrace_flags = 0;
2365 }
2366
2367 /* Be careful to not overwrite stop_pc until
2368 check_stopped_by_breakpoint is called. */
2369 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2370 && linux_is_extended_waitstatus (wstat))
2371 {
2372 child->stop_pc = get_pc (child);
2373 if (handle_extended_wait (&child, wstat))
2374 {
2375 /* The event has been handled, so just return without
2376 reporting it. */
2377 return NULL;
2378 }
2379 }
2380
2381 /* Check first whether this was a SW/HW breakpoint before checking
2382 watchpoints, because at least s390 can't tell the data address of
2383 hardware watchpoint hits, and returns stopped-by-watchpoint as
2384 long as there's a watchpoint set. */
2385 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2386 {
2387 if (check_stopped_by_breakpoint (child))
2388 have_stop_pc = 1;
2389 }
2390
2391 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2392 or hardware watchpoint. Check which is which if we got
2393 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2394 stepped an instruction that triggered a watchpoint. In that
2395 case, on some architectures (such as x86), instead of
2396 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2397 the debug registers separately. */
2398 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2399 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2400 check_stopped_by_watchpoint (child);
2401
2402 if (!have_stop_pc)
2403 child->stop_pc = get_pc (child);
2404
2405 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2406 && child->stop_expected)
2407 {
2408 if (debug_threads)
2409 debug_printf ("Expected stop.\n");
2410 child->stop_expected = 0;
2411
2412 if (thread->last_resume_kind == resume_stop)
2413 {
2414 /* We want to report the stop to the core. Treat the
2415 SIGSTOP as a normal event. */
2416 if (debug_threads)
2417 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2418 target_pid_to_str (ptid_of (thread)));
2419 }
2420 else if (stopping_threads != NOT_STOPPING_THREADS)
2421 {
2422 /* Stopping threads. We don't want this SIGSTOP to end up
2423 pending. */
2424 if (debug_threads)
2425 debug_printf ("LLW: SIGSTOP caught for %s "
2426 "while stopping threads.\n",
2427 target_pid_to_str (ptid_of (thread)));
2428 return NULL;
2429 }
2430 else
2431 {
2432 /* This is a delayed SIGSTOP. Filter out the event. */
2433 if (debug_threads)
2434 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2435 child->stepping ? "step" : "continue",
2436 target_pid_to_str (ptid_of (thread)));
2437
2438 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2439 return NULL;
2440 }
2441 }
2442
2443 child->status_pending_p = 1;
2444 child->status_pending = wstat;
2445 return child;
2446 }
2447
2448 /* Resume LWPs that are currently stopped without any pending status
2449 to report, but are resumed from the core's perspective. */
2450
2451 static void
2452 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2453 {
2454 struct thread_info *thread = (struct thread_info *) entry;
2455 struct lwp_info *lp = get_thread_lwp (thread);
2456
2457 if (lp->stopped
2458 && !lp->suspended
2459 && !lp->status_pending_p
2460 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2461 {
2462 int step = thread->last_resume_kind == resume_step;
2463
2464 if (debug_threads)
2465 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2466 target_pid_to_str (ptid_of (thread)),
2467 paddress (lp->stop_pc),
2468 step);
2469
2470 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2471 }
2472 }
2473
2474 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2475 match FILTER_PTID (leaving others pending). The PTIDs can be:
2476 minus_one_ptid, to specify any child; a pid PTID, specifying all
2477 lwps of a thread group; or a PTID representing a single lwp. Store
2478 the stop status through the status pointer WSTAT. OPTIONS is
2479 passed to the waitpid call. Return 0 if no event was found and
2480 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2481 were found. Return the PID of the stopped child otherwise. */
2482
2483 static int
2484 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2485 int *wstatp, int options)
2486 {
2487 struct thread_info *event_thread;
2488 struct lwp_info *event_child, *requested_child;
2489 sigset_t block_mask, prev_mask;
2490
2491 retry:
2492 /* N.B. event_thread points to the thread_info struct that contains
2493 event_child. Keep them in sync. */
2494 event_thread = NULL;
2495 event_child = NULL;
2496 requested_child = NULL;
2497
2498 /* Check for a lwp with a pending status. */
2499
2500 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2501 {
2502 event_thread = (struct thread_info *)
2503 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2504 if (event_thread != NULL)
2505 event_child = get_thread_lwp (event_thread);
2506 if (debug_threads && event_thread)
2507 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2508 }
2509 else if (!ptid_equal (filter_ptid, null_ptid))
2510 {
2511 requested_child = find_lwp_pid (filter_ptid);
2512
2513 if (stopping_threads == NOT_STOPPING_THREADS
2514 && requested_child->status_pending_p
2515 && requested_child->collecting_fast_tracepoint)
2516 {
2517 enqueue_one_deferred_signal (requested_child,
2518 &requested_child->status_pending);
2519 requested_child->status_pending_p = 0;
2520 requested_child->status_pending = 0;
2521 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2522 }
2523
2524 if (requested_child->suspended
2525 && requested_child->status_pending_p)
2526 {
2527 internal_error (__FILE__, __LINE__,
2528 "requesting an event out of a"
2529 " suspended child?");
2530 }
2531
2532 if (requested_child->status_pending_p)
2533 {
2534 event_child = requested_child;
2535 event_thread = get_lwp_thread (event_child);
2536 }
2537 }
2538
2539 if (event_child != NULL)
2540 {
2541 if (debug_threads)
2542 debug_printf ("Got an event from pending child %ld (%04x)\n",
2543 lwpid_of (event_thread), event_child->status_pending);
2544 *wstatp = event_child->status_pending;
2545 event_child->status_pending_p = 0;
2546 event_child->status_pending = 0;
2547 current_thread = event_thread;
2548 return lwpid_of (event_thread);
2549 }
2550
2551 /* But if we don't find a pending event, we'll have to wait.
2552
2553 We only enter this loop if no process has a pending wait status.
2554 Thus any action taken in response to a wait status inside this
2555 loop is responding as soon as we detect the status, not after any
2556 pending events. */
2557
2558 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2559 all signals while here. */
2560 sigfillset (&block_mask);
2561 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2562
2563 /* Always pull all events out of the kernel. We'll randomly select
2564 an event LWP out of all that have events, to prevent
2565 starvation. */
2566 while (event_child == NULL)
2567 {
2568 pid_t ret = 0;
2569
2570 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2571 quirks:
2572
2573 - If the thread group leader exits while other threads in the
2574 thread group still exist, waitpid(TGID, ...) hangs. That
2575 waitpid won't return an exit status until the other threads
2576 in the group are reaped.
2577
2578 - When a non-leader thread execs, that thread just vanishes
2579 without reporting an exit (so we'd hang if we waited for it
2580 explicitly in that case). The exec event is reported to
2581 the TGID pid. */
2582 errno = 0;
2583 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2584
2585 if (debug_threads)
2586 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2587 ret, errno ? strerror (errno) : "ERRNO-OK");
2588
2589 if (ret > 0)
2590 {
2591 if (debug_threads)
2592 {
2593 debug_printf ("LLW: waitpid %ld received %s\n",
2594 (long) ret, status_to_str (*wstatp));
2595 }
2596
2597 /* Filter all events. IOW, leave all events pending. We'll
2598 randomly select an event LWP out of all that have events
2599 below. */
2600 linux_low_filter_event (ret, *wstatp);
2601 /* Retry until nothing comes out of waitpid. A single
2602 SIGCHLD can indicate more than one child stopped. */
2603 continue;
2604 }
2605
2606 /* Now that we've pulled all events out of the kernel, resume
2607 LWPs that don't have an interesting event to report. */
2608 if (stopping_threads == NOT_STOPPING_THREADS)
2609 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2610
2611 /* ... and find an LWP with a status to report to the core, if
2612 any. */
2613 event_thread = (struct thread_info *)
2614 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2615 if (event_thread != NULL)
2616 {
2617 event_child = get_thread_lwp (event_thread);
2618 *wstatp = event_child->status_pending;
2619 event_child->status_pending_p = 0;
2620 event_child->status_pending = 0;
2621 break;
2622 }
2623
2624 /* Check for zombie thread group leaders. Those can't be reaped
2625 until all other threads in the thread group are. */
2626 check_zombie_leaders ();
2627
2628 /* If there are no resumed children left in the set of LWPs we
2629 want to wait for, bail. We can't just block in
2630 waitpid/sigsuspend, because lwps might have been left stopped
2631 in trace-stop state, and we'd be stuck forever waiting for
2632 their status to change (which would only happen if we resumed
2633 them). Even if WNOHANG is set, this return code is preferred
2634 over 0 (below), as it is more detailed. */
2635 if ((find_inferior (&all_threads,
2636 not_stopped_callback,
2637 &wait_ptid) == NULL))
2638 {
2639 if (debug_threads)
2640 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2641 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2642 return -1;
2643 }
2644
2645 /* No interesting event to report to the caller. */
2646 if ((options & WNOHANG))
2647 {
2648 if (debug_threads)
2649 debug_printf ("WNOHANG set, no event found\n");
2650
2651 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2652 return 0;
2653 }
2654
2655 /* Block until we get an event reported with SIGCHLD. */
2656 if (debug_threads)
2657 debug_printf ("sigsuspend'ing\n");
2658
2659 sigsuspend (&prev_mask);
2660 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2661 goto retry;
2662 }
2663
2664 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2665
2666 current_thread = event_thread;
2667
2668 return lwpid_of (event_thread);
2669 }
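/* Illustrative sketch of the race-free wait skeleton used above:
   block signals, poll waitpid with WNOHANG until the kernel runs dry,
   then sigsuspend with the previous mask so that a SIGCHLD that
   arrived before the sleep still wakes us immediately. gdbserver's
   real loop additionally filters, defers, and randomizes events.  */
#if 0
#include <signal.h>
#include <stddef.h>
#include <sys/wait.h>

static pid_t
wait_one_event (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  /* With everything blocked, SIGCHLD can only be delivered inside
     sigsuspend, never between the poll and going to sleep.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while ((ret = waitpid (-1, wstatp, WNOHANG | __WALL)) == 0)
    sigsuspend (&prev_mask);	/* Atomically unblock and sleep.  */

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
#endif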
2670
2671 /* Wait for an event from child(ren) PTID. PTIDs can be:
2672 minus_one_ptid, to specify any child; a pid PTID, specifying all
2673 lwps of a thread group; or a PTID representing a single lwp. Store
2674 the stop status through the status pointer WSTAT. OPTIONS is
2675 passed to the waitpid call. Return 0 if no event was found and
2676 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2677 were found. Return the PID of the stopped child otherwise. */
2678
2679 static int
2680 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2681 {
2682 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2683 }
2684
2685 /* Count the LWPs that have had events. */
2686
2687 static int
2688 count_events_callback (struct inferior_list_entry *entry, void *data)
2689 {
2690 struct thread_info *thread = (struct thread_info *) entry;
2691 struct lwp_info *lp = get_thread_lwp (thread);
2692 int *count = (int *) data;
2693
2694 gdb_assert (count != NULL);
2695
2696 /* Count only resumed LWPs that have an event pending. */
2697 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2698 && lp->status_pending_p)
2699 (*count)++;
2700
2701 return 0;
2702 }
2703
2704 /* Select the LWP (if any) that is currently being single-stepped. */
2705
2706 static int
2707 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2708 {
2709 struct thread_info *thread = (struct thread_info *) entry;
2710 struct lwp_info *lp = get_thread_lwp (thread);
2711
2712 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2713 && thread->last_resume_kind == resume_step
2714 && lp->status_pending_p)
2715 return 1;
2716 else
2717 return 0;
2718 }
2719
2720 /* Select the Nth LWP that has had an event. */
2721
2722 static int
2723 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2724 {
2725 struct thread_info *thread = (struct thread_info *) entry;
2726 struct lwp_info *lp = get_thread_lwp (thread);
2727 int *selector = (int *) data;
2728
2729 gdb_assert (selector != NULL);
2730
2731 /* Select only resumed LWPs that have an event pending. */
2732 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2733 && lp->status_pending_p)
2734 if ((*selector)-- == 0)
2735 return 1;
2736
2737 return 0;
2738 }
2739
2740 /* Select one LWP out of those that have events pending. */
2741
2742 static void
2743 select_event_lwp (struct lwp_info **orig_lp)
2744 {
2745 int num_events = 0;
2746 int random_selector;
2747 struct thread_info *event_thread = NULL;
2748
2749 /* In all-stop, give preference to the LWP that is being
2750 single-stepped. There will be at most one, and it's the LWP that
2751 the core is most interested in. If we didn't do this, then we'd
2752 have to handle pending step SIGTRAPs somehow in case the core
2753 later continues the previously-stepped thread; otherwise we'd
2754 report the pending SIGTRAP, and the core, not having stepped the
2755 thread, wouldn't understand what the trap was for, and therefore
2756 would report it to the user as a random signal. */
2757 if (!non_stop)
2758 {
2759 event_thread
2760 = (struct thread_info *) find_inferior (&all_threads,
2761 select_singlestep_lwp_callback,
2762 NULL);
2763 if (event_thread != NULL)
2764 {
2765 if (debug_threads)
2766 debug_printf ("SEL: Select single-step %s\n",
2767 target_pid_to_str (ptid_of (event_thread)));
2768 }
2769 }
2770 if (event_thread == NULL)
2771 {
2772 /* No single-stepping LWP. Select one at random, out of those
2773 which have had events. */
2774
2775 /* First see how many events we have. */
2776 find_inferior (&all_threads, count_events_callback, &num_events);
2777 gdb_assert (num_events > 0);
2778
2779 /* Now randomly pick a LWP out of those that have had
2780 events. */
2781 random_selector = (int)
2782 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2783
2784 if (debug_threads && num_events > 1)
2785 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2786 num_events, random_selector);
2787
2788 event_thread
2789 = (struct thread_info *) find_inferior (&all_threads,
2790 select_event_lwp_callback,
2791 &random_selector);
2792 }
2793
2794 if (event_thread != NULL)
2795 {
2796 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2797
2798 /* Switch the event LWP. */
2799 *orig_lp = event_lp;
2800 }
2801 }
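/* A note on the selection formula above: rand () / (RAND_MAX + 1.0)
   is uniform in [0, 1), so multiplying by NUM_EVENTS and truncating
   yields an index in 0 .. NUM_EVENTS - 1 with (near) equal
   probability, avoiding the modulo bias of rand () % NUM_EVENTS.
   Illustrative helper:  */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
pick_uniform_index (int num_events)
{
  int idx = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  assert (idx >= 0 && idx < num_events);
  return idx;
}
#endif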
2802
2803 /* Decrement the suspend count of an LWP. */
2804
2805 static int
2806 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2807 {
2808 struct thread_info *thread = (struct thread_info *) entry;
2809 struct lwp_info *lwp = get_thread_lwp (thread);
2810
2811 /* Ignore EXCEPT. */
2812 if (lwp == except)
2813 return 0;
2814
2815 lwp_suspended_decr (lwp);
2816 return 0;
2817 }
2818
2819 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2820 non-NULL. */
2821
2822 static void
2823 unsuspend_all_lwps (struct lwp_info *except)
2824 {
2825 find_inferior (&all_threads, unsuspend_one_lwp, except);
2826 }
2827
2828 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2829 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2830 void *data);
2831 static int lwp_running (struct inferior_list_entry *entry, void *data);
2832 static ptid_t linux_wait_1 (ptid_t ptid,
2833 struct target_waitstatus *ourstatus,
2834 int target_options);
2835
2836 /* Stabilize threads (move out of jump pads).
2837
2838 If a thread is midway collecting a fast tracepoint, we need to
2839 finish the collection and move it out of the jump pad before
2840 reporting the signal.
2841
2842 This avoids recursion while collecting (when a signal arrives
2843 midway, and the signal handler itself collects), which would trash
2844 the trace buffer. In case the user set a breakpoint in a signal
2845 handler, this avoids the backtrace showing the jump pad, etc.
2846 Most importantly, there are certain things we can't do safely if
2847 threads are stopped in a jump pad (or in its callees). For
2848 example:
2849
2850 - starting a new trace run. A thread still collecting from the
2851 previous run could trash the trace buffer when resumed. The trace
2852 buffer control structures would have been reset but the thread had
2853 no way to tell. The thread could even be midway through memcpy'ing
2854 to the buffer, which would mean that when resumed, it would clobber
2855 the trace buffer that had been set up for the new run.
2856
2857 - we can't rewrite/reuse the jump pads for new tracepoints
2858 safely. Say you do tstart while a thread is stopped midway
2859 through collecting. When the thread is later resumed, it finishes
2860 the collection and returns to the jump pad, to execute the original
2861 instruction that was under the tracepoint jump at the time the
2862 older run had been started. If the jump pad had since been rewritten
2863 for something else in the new run, the thread would now
2864 execute wrong or random instructions. */
2865
2866 static void
2867 linux_stabilize_threads (void)
2868 {
2869 struct thread_info *saved_thread;
2870 struct thread_info *thread_stuck;
2871
2872 thread_stuck
2873 = (struct thread_info *) find_inferior (&all_threads,
2874 stuck_in_jump_pad_callback,
2875 NULL);
2876 if (thread_stuck != NULL)
2877 {
2878 if (debug_threads)
2879 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2880 lwpid_of (thread_stuck));
2881 return;
2882 }
2883
2884 saved_thread = current_thread;
2885
2886 stabilizing_threads = 1;
2887
2888 /* Kick 'em all. */
2889 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2890
2891 /* Loop until all are stopped out of the jump pads. */
2892 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2893 {
2894 struct target_waitstatus ourstatus;
2895 struct lwp_info *lwp;
2896 int wstat;
2897
2898 /* Note that we go through the full wait event loop. While
2899 moving threads out of the jump pad, we need to be able to step
2900 over internal breakpoints and such. */
2901 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2902
2903 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2904 {
2905 lwp = get_thread_lwp (current_thread);
2906
2907 /* Lock it. */
2908 lwp_suspended_inc (lwp);
2909
2910 if (ourstatus.value.sig != GDB_SIGNAL_0
2911 || current_thread->last_resume_kind == resume_stop)
2912 {
2913 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2914 enqueue_one_deferred_signal (lwp, &wstat);
2915 }
2916 }
2917 }
2918
2919 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2920
2921 stabilizing_threads = 0;
2922
2923 current_thread = saved_thread;
2924
2925 if (debug_threads)
2926 {
2927 thread_stuck
2928 = (struct thread_info *) find_inferior (&all_threads,
2929 stuck_in_jump_pad_callback,
2930 NULL);
2931 if (thread_stuck != NULL)
2932 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2933 lwpid_of (thread_stuck));
2934 }
2935 }
2936
2937 /* Convenience function that is called when the kernel reports an
2938 event that is not passed out to GDB. */
2939
2940 static ptid_t
2941 ignore_event (struct target_waitstatus *ourstatus)
2942 {
2943 /* If we got an event, there may still be others, as a single
2944 SIGCHLD can indicate more than one child stopped. This forces
2945 another target_wait call. */
2946 async_file_mark ();
2947
2948 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2949 return null_ptid;
2950 }
2951
2952 /* Convenience function that is called when the kernel reports an exit
2953 event. This decides whether to report the event to GDB as a
2954 process exit event, a thread exit event, or to suppress the
2955 event. */
2956
2957 static ptid_t
2958 filter_exit_event (struct lwp_info *event_child,
2959 struct target_waitstatus *ourstatus)
2960 {
2961 struct thread_info *thread = get_lwp_thread (event_child);
2962 ptid_t ptid = ptid_of (thread);
2963
2964 if (!last_thread_of_process_p (pid_of (thread)))
2965 {
2966 if (report_thread_events)
2967 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2968 else
2969 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2970
2971 delete_lwp (event_child);
2972 }
2973 return ptid;
2974 }
2975
2976 /* Wait for process, returns status. */
2977
2978 static ptid_t
2979 linux_wait_1 (ptid_t ptid,
2980 struct target_waitstatus *ourstatus, int target_options)
2981 {
2982 int w;
2983 struct lwp_info *event_child;
2984 int options;
2985 int pid;
2986 int step_over_finished;
2987 int bp_explains_trap;
2988 int maybe_internal_trap;
2989 int report_to_gdb;
2990 int trace_event;
2991 int in_step_range;
2992 int any_resumed;
2993
2994 if (debug_threads)
2995 {
2996 debug_enter ();
2997 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2998 }
2999
3000 /* Translate generic target options into linux options. */
3001 options = __WALL;
3002 if (target_options & TARGET_WNOHANG)
3003 options |= WNOHANG;
3004
3005 bp_explains_trap = 0;
3006 trace_event = 0;
3007 in_step_range = 0;
3008 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3009
3010 /* Find a resumed LWP, if any. */
3011 if (find_inferior (&all_threads,
3012 status_pending_p_callback,
3013 &minus_one_ptid) != NULL)
3014 any_resumed = 1;
3015 else if ((find_inferior (&all_threads,
3016 not_stopped_callback,
3017 &minus_one_ptid) != NULL))
3018 any_resumed = 1;
3019 else
3020 any_resumed = 0;
3021
3022 if (ptid_equal (step_over_bkpt, null_ptid))
3023 pid = linux_wait_for_event (ptid, &w, options);
3024 else
3025 {
3026 if (debug_threads)
3027 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3028 target_pid_to_str (step_over_bkpt));
3029 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3030 }
3031
3032 if (pid == 0 || (pid == -1 && !any_resumed))
3033 {
3034 gdb_assert (target_options & TARGET_WNOHANG);
3035
3036 if (debug_threads)
3037 {
3038 debug_printf ("linux_wait_1 ret = null_ptid, "
3039 "TARGET_WAITKIND_IGNORE\n");
3040 debug_exit ();
3041 }
3042
3043 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3044 return null_ptid;
3045 }
3046 else if (pid == -1)
3047 {
3048 if (debug_threads)
3049 {
3050 debug_printf ("linux_wait_1 ret = null_ptid, "
3051 "TARGET_WAITKIND_NO_RESUMED\n");
3052 debug_exit ();
3053 }
3054
3055 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3056 return null_ptid;
3057 }
3058
3059 event_child = get_thread_lwp (current_thread);
3060
3061 /* linux_wait_for_event only returns an exit status for the last
3062 child of a process. Report it. */
3063 if (WIFEXITED (w) || WIFSIGNALED (w))
3064 {
3065 if (WIFEXITED (w))
3066 {
3067 ourstatus->kind = TARGET_WAITKIND_EXITED;
3068 ourstatus->value.integer = WEXITSTATUS (w);
3069
3070 if (debug_threads)
3071 {
3072 debug_printf ("linux_wait_1 ret = %s, exited with "
3073 "retcode %d\n",
3074 target_pid_to_str (ptid_of (current_thread)),
3075 WEXITSTATUS (w));
3076 debug_exit ();
3077 }
3078 }
3079 else
3080 {
3081 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3082 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3083
3084 if (debug_threads)
3085 {
3086 debug_printf ("linux_wait_1 ret = %s, terminated with "
3087 "signal %d\n",
3088 target_pid_to_str (ptid_of (current_thread)),
3089 WTERMSIG (w));
3090 debug_exit ();
3091 }
3092 }
3093
3094 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3095 return filter_exit_event (event_child, ourstatus);
3096
3097 return ptid_of (current_thread);
3098 }
3099
3100 /* If a step-over executes a breakpoint instruction, then in the case
3101 of a hardware single step it means a gdb/gdbserver breakpoint had
3102 been planted on top of a permanent breakpoint, while in the case of
3103 a software single step it may just mean that gdbserver hit the
3104 reinsert breakpoint. The PC has been adjusted by
3105 check_stopped_by_breakpoint to point at the breakpoint address.
3106 So, in the case of a hardware single step, advance the PC manually
3107 past the breakpoint; in the case of a software single step, advance
3108 it only if the breakpoint hit is not the reinsert_breakpoint.
3109 This prevents the program from trapping on a permanent breakpoint
3110 forever. */
3111 if (!ptid_equal (step_over_bkpt, null_ptid)
3112 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3113 && (event_child->stepping
3114 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3115 {
3116 int increment_pc = 0;
3117 int breakpoint_kind = 0;
3118 CORE_ADDR stop_pc = event_child->stop_pc;
3119
3120 breakpoint_kind
3121 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3122 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3123
3124 if (debug_threads)
3125 {
3126 debug_printf ("step-over for %s executed software breakpoint\n",
3127 target_pid_to_str (ptid_of (current_thread)));
3128 }
3129
3130 if (increment_pc != 0)
3131 {
3132 struct regcache *regcache
3133 = get_thread_regcache (current_thread, 1);
3134
3135 event_child->stop_pc += increment_pc;
3136 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3137
3138 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3139 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3140 }
3141 }
3142
3143 /* If this event was not handled before, and is not a SIGTRAP, we
3144 report it. SIGILL and SIGSEGV are also treated as traps in case
3145 a breakpoint is inserted at the current PC. If this target does
3146 not support internal breakpoints at all, we also report the
3147 SIGTRAP without further processing; it's of no concern to us. */
3148 maybe_internal_trap
3149 = (supports_breakpoints ()
3150 && (WSTOPSIG (w) == SIGTRAP
3151 || ((WSTOPSIG (w) == SIGILL
3152 || WSTOPSIG (w) == SIGSEGV)
3153 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3154
3155 if (maybe_internal_trap)
3156 {
3157 /* Handle anything that requires bookkeeping before deciding to
3158 report the event or continue waiting. */
3159
3160 /* First check if we can explain the SIGTRAP with an internal
3161 breakpoint, or if we should possibly report the event to GDB.
3162 Do this before anything that may remove or insert a
3163 breakpoint. */
3164 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3165
3166 /* We have a SIGTRAP, possibly a step-over dance has just
3167 finished. If so, tweak the state machine accordingly,
3168 reinsert breakpoints and delete any reinsert (software
3169 single-step) breakpoints. */
3170 step_over_finished = finish_step_over (event_child);
3171
3172 /* Now invoke the callbacks of any internal breakpoints there. */
3173 check_breakpoints (event_child->stop_pc);
3174
3175 /* Handle tracepoint data collecting. This may overflow the
3176 trace buffer, and cause a tracing stop, removing
3177 breakpoints. */
3178 trace_event = handle_tracepoints (event_child);
3179
3180 if (bp_explains_trap)
3181 {
3182 /* If we stepped or ran into an internal breakpoint, we've
3183 already handled it. So next time we resume (from this
3184 PC), we should step over it. */
3185 if (debug_threads)
3186 debug_printf ("Hit a gdbserver breakpoint.\n");
3187
3188 if (breakpoint_here (event_child->stop_pc))
3189 event_child->need_step_over = 1;
3190 }
3191 }
3192 else
3193 {
3194 /* We have some other signal, possibly a step-over dance was in
3195 progress, and it should be cancelled too. */
3196 step_over_finished = finish_step_over (event_child);
3197 }
3198
3199 /* We have all the data we need. Either report the event to GDB, or
3200 resume threads and keep waiting for more. */
3201
3202 /* If we're collecting a fast tracepoint, finish the collection and
3203 move out of the jump pad before delivering a signal. See
3204 linux_stabilize_threads. */
3205
3206 if (WIFSTOPPED (w)
3207 && WSTOPSIG (w) != SIGTRAP
3208 && supports_fast_tracepoints ()
3209 && agent_loaded_p ())
3210 {
3211 if (debug_threads)
3212 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3213 "to defer or adjust it.\n",
3214 WSTOPSIG (w), lwpid_of (current_thread));
3215
3216 /* Allow debugging the jump pad itself. */
3217 if (current_thread->last_resume_kind != resume_step
3218 && maybe_move_out_of_jump_pad (event_child, &w))
3219 {
3220 enqueue_one_deferred_signal (event_child, &w);
3221
3222 if (debug_threads)
3223 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3224 WSTOPSIG (w), lwpid_of (current_thread));
3225
3226 linux_resume_one_lwp (event_child, 0, 0, NULL);
3227
3228 return ignore_event (ourstatus);
3229 }
3230 }
3231
3232 if (event_child->collecting_fast_tracepoint)
3233 {
3234 if (debug_threads)
3235 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3236 "Check if we're already there.\n",
3237 lwpid_of (current_thread),
3238 event_child->collecting_fast_tracepoint);
3239
3240 trace_event = 1;
3241
3242 event_child->collecting_fast_tracepoint
3243 = linux_fast_tracepoint_collecting (event_child, NULL);
3244
3245 if (event_child->collecting_fast_tracepoint != 1)
3246 {
3247 /* No longer need this breakpoint. */
3248 if (event_child->exit_jump_pad_bkpt != NULL)
3249 {
3250 if (debug_threads)
3251 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3252 "stopping all threads momentarily.\n");
3253
3254 /* Other running threads could hit this breakpoint.
3255 We don't handle moribund locations like GDB does,
3256 instead we always pause all threads when removing
3257 breakpoints, so that any step-over or
3258 decr_pc_after_break adjustment is always taken
3259 care of while the breakpoint is still
3260 inserted. */
3261 stop_all_lwps (1, event_child);
3262
3263 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3264 event_child->exit_jump_pad_bkpt = NULL;
3265
3266 unstop_all_lwps (1, event_child);
3267
3268 gdb_assert (event_child->suspended >= 0);
3269 }
3270 }
3271
3272 if (event_child->collecting_fast_tracepoint == 0)
3273 {
3274 if (debug_threads)
3275 debug_printf ("fast tracepoint finished "
3276 "collecting successfully.\n");
3277
3278 /* We may have a deferred signal to report. */
3279 if (dequeue_one_deferred_signal (event_child, &w))
3280 {
3281 if (debug_threads)
3282 debug_printf ("dequeued one signal.\n");
3283 }
3284 else
3285 {
3286 if (debug_threads)
3287 debug_printf ("no deferred signals.\n");
3288
3289 if (stabilizing_threads)
3290 {
3291 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3292 ourstatus->value.sig = GDB_SIGNAL_0;
3293
3294 if (debug_threads)
3295 {
3296 debug_printf ("linux_wait_1 ret = %s, stopped "
3297 "while stabilizing threads\n",
3298 target_pid_to_str (ptid_of (current_thread)));
3299 debug_exit ();
3300 }
3301
3302 return ptid_of (current_thread);
3303 }
3304 }
3305 }
3306 }
3307
3308 /* Check whether GDB would be interested in this event. */
3309
3310 /* If GDB is not interested in this signal, don't stop other
3311 threads, and don't report it to GDB. Just resume the inferior
3312 right away. We do this for threading-related signals as well as
3313 any that GDB specifically requested we ignore. But never ignore
3314 SIGSTOP if we sent it ourselves, and do not ignore signals when
3315 stepping - they may require special handling to skip the signal
3316 handler. Also never ignore signals that could be caused by a
3317 breakpoint. */
3318 if (WIFSTOPPED (w)
3319 && current_thread->last_resume_kind != resume_step
3320 && (
3321 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3322 (current_process ()->priv->thread_db != NULL
3323 && (WSTOPSIG (w) == __SIGRTMIN
3324 || WSTOPSIG (w) == __SIGRTMIN + 1))
3325 ||
3326 #endif
3327 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3328 && !(WSTOPSIG (w) == SIGSTOP
3329 && current_thread->last_resume_kind == resume_stop)
3330 && !linux_wstatus_maybe_breakpoint (w))))
3331 {
3332 siginfo_t info, *info_p;
3333
3334 if (debug_threads)
3335 debug_printf ("Ignored signal %d for LWP %ld.\n",
3336 WSTOPSIG (w), lwpid_of (current_thread));
3337
3338 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3339 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3340 info_p = &info;
3341 else
3342 info_p = NULL;
3343
3344 if (step_over_finished)
3345 {
3346 /* We cancelled this thread's step-over above. We still
3347 need to unsuspend all other LWPs, and set them back
3348 running again while the signal handler runs. */
3349 unsuspend_all_lwps (event_child);
3350
3351 /* Enqueue the pending signal info so that proceed_all_lwps
3352 doesn't lose it. */
3353 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3354
3355 proceed_all_lwps ();
3356 }
3357 else
3358 {
3359 linux_resume_one_lwp (event_child, event_child->stepping,
3360 WSTOPSIG (w), info_p);
3361 }
3362 return ignore_event (ourstatus);
3363 }
3364
3365 /* Note that all addresses are always "out of the step range" when
3366 there's no range to begin with. */
3367 in_step_range = lwp_in_step_range (event_child);
3368
3369 /* If GDB wanted this thread to single step, and the thread is out
3370 of the step range, we always want to report the SIGTRAP, and let
3371 GDB handle it. Watchpoints should always be reported. So should
3372 signals we can't explain. A SIGTRAP we can't explain could be a
3373 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3374 do, we'll be able to handle GDB breakpoints on top of internal
3375 breakpoints, by handling the internal breakpoint and still
3376 reporting the event to GDB. If we don't, we're out of luck; GDB
3377 won't see the breakpoint hit. If we see a single-step event but
3378 the thread should be continuing, don't pass the trap to gdb.
3379 That indicates that we had previously finished a single-step but
3380 left the single-step pending -- see
3381 complete_ongoing_step_over. */
3382 report_to_gdb = (!maybe_internal_trap
3383 || (current_thread->last_resume_kind == resume_step
3384 && !in_step_range)
3385 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3386 || (!in_step_range
3387 && !bp_explains_trap
3388 && !trace_event
3389 && !step_over_finished
3390 && !(current_thread->last_resume_kind == resume_continue
3391 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3392 || (gdb_breakpoint_here (event_child->stop_pc)
3393 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3394 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3395 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3396
3397 run_breakpoint_commands (event_child->stop_pc);
3398
3399 /* We found no reason GDB would want us to stop. We either hit one
3400 of our own breakpoints, or finished an internal step GDB
3401 shouldn't know about. */
3402 if (!report_to_gdb)
3403 {
3404 if (debug_threads)
3405 {
3406 if (bp_explains_trap)
3407 debug_printf ("Hit a gdbserver breakpoint.\n");
3408 if (step_over_finished)
3409 debug_printf ("Step-over finished.\n");
3410 if (trace_event)
3411 debug_printf ("Tracepoint event.\n");
3412 if (lwp_in_step_range (event_child))
3413 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3414 paddress (event_child->stop_pc),
3415 paddress (event_child->step_range_start),
3416 paddress (event_child->step_range_end));
3417 }
3418
3419 /* We're not reporting this breakpoint to GDB, so apply the
3420 decr_pc_after_break adjustment to the inferior's regcache
3421 ourselves. */
3422
3423 if (the_low_target.set_pc != NULL)
3424 {
3425 struct regcache *regcache
3426 = get_thread_regcache (current_thread, 1);
3427 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3428 }
3429
3430 /* We may have finished stepping over a breakpoint. If so,
3431 we've stopped and suspended all LWPs momentarily except the
3432 stepping one. This is where we resume them all again. We're
3433 going to keep waiting, so use proceed, which handles stepping
3434 over the next breakpoint. */
3435 if (debug_threads)
3436 debug_printf ("proceeding all threads.\n");
3437
3438 if (step_over_finished)
3439 unsuspend_all_lwps (event_child);
3440
3441 proceed_all_lwps ();
3442 return ignore_event (ourstatus);
3443 }
3444
3445 if (debug_threads)
3446 {
3447 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3448 {
3449 char *str;
3450
3451 str = target_waitstatus_to_string (&event_child->waitstatus);
3452 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3453 lwpid_of (get_lwp_thread (event_child)), str);
3454 xfree (str);
3455 }
3456 if (current_thread->last_resume_kind == resume_step)
3457 {
3458 if (event_child->step_range_start == event_child->step_range_end)
3459 debug_printf ("GDB wanted to single-step, reporting event.\n");
3460 else if (!lwp_in_step_range (event_child))
3461 debug_printf ("Out of step range, reporting event.\n");
3462 }
3463 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3464 debug_printf ("Stopped by watchpoint.\n");
3465 else if (gdb_breakpoint_here (event_child->stop_pc))
3466 debug_printf ("Stopped by GDB breakpoint.\n");
3467 debug_printf ("Hit a non-gdbserver trap event.\n");
3469 }
3470
3471 /* Alright, we're going to report a stop. */
3472
3473 if (!stabilizing_threads)
3474 {
3475 /* In all-stop, stop all threads. */
3476 if (!non_stop)
3477 stop_all_lwps (0, NULL);
3478
3479 /* If we're not waiting for a specific LWP, choose an event LWP
3480 from among those that have had events. Giving equal priority
3481 to all LWPs that have had events helps prevent
3482 starvation. */
3483 if (ptid_equal (ptid, minus_one_ptid))
3484 {
3485 event_child->status_pending_p = 1;
3486 event_child->status_pending = w;
3487
3488 select_event_lwp (&event_child);
3489
3490 /* current_thread and event_child must stay in sync. */
3491 current_thread = get_lwp_thread (event_child);
3492
3493 event_child->status_pending_p = 0;
3494 w = event_child->status_pending;
3495 }
3496
3497 if (step_over_finished)
3498 {
3499 if (!non_stop)
3500 {
3501 /* If we were doing a step-over, all other threads but
3502 the stepping one had been paused in start_step_over,
3503 with their suspend counts incremented. We don't want
3504 to do a full unstop/unpause, because we're in
3505 all-stop mode (so we want threads stopped), but we
3506 still need to unsuspend the other threads, to
3507 decrement their `suspended' count back. */
3508 unsuspend_all_lwps (event_child);
3509 }
3510 else
3511 {
3512 /* If we just finished a step-over, then all threads had
3513 been momentarily paused. In all-stop, that's fine,
3514 we want threads stopped by now anyway. In non-stop,
3515 we need to re-resume threads that GDB wanted to be
3516 running. */
3517 unstop_all_lwps (1, event_child);
3518 }
3519 }
3520
3521 /* Stabilize threads (move out of jump pads). */
3522 if (!non_stop)
3523 stabilize_threads ();
3524 }
3525 else
3526 {
3527 /* If we just finished a step-over, then all threads had been
3528 momentarily paused. In all-stop, that's fine, we want
3529 threads stopped by now anyway. In non-stop, we need to
3530 re-resume threads that GDB wanted to be running. */
3531 if (step_over_finished)
3532 unstop_all_lwps (1, event_child);
3533 }
3534
3535 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3536 {
3537 /* If the reported event is an exit, fork, vfork or exec, let
3538 GDB know. */
3539 *ourstatus = event_child->waitstatus;
3540 /* Clear the event lwp's waitstatus since we handled it already. */
3541 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3542 }
3543 else
3544 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3545
3546 /* Now that we've selected our final event LWP, un-adjust its PC if
3547 it was a software breakpoint, and the client doesn't know we can
3548 adjust the breakpoint ourselves. */
3549 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3550 && !swbreak_feature)
3551 {
3552 int decr_pc = the_low_target.decr_pc_after_break;
3553
3554 if (decr_pc != 0)
3555 {
3556 struct regcache *regcache
3557 = get_thread_regcache (current_thread, 1);
3558 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3559 }
3560 }
3561
3562 if (current_thread->last_resume_kind == resume_stop
3563 && WSTOPSIG (w) == SIGSTOP)
3564 {
3565 /* A thread that has been requested to stop by GDB with vCont;t,
3566 and it stopped cleanly, so report it as SIG0. The use of
3567 SIGSTOP is an implementation detail. */
3568 ourstatus->value.sig = GDB_SIGNAL_0;
3569 }
3570 else if (current_thread->last_resume_kind == resume_stop
3571 && WSTOPSIG (w) != SIGSTOP)
3572 {
3573 /* A thread that has been requested to stop by GDB with vCont;t,
3574 but it stopped for other reasons. */
3575 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3576 }
3577 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3578 {
3579 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3580 }
3581
3582 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3583
3584 if (debug_threads)
3585 {
3586 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3587 target_pid_to_str (ptid_of (current_thread)),
3588 ourstatus->kind, ourstatus->value.sig);
3589 debug_exit ();
3590 }
3591
3592 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3593 return filter_exit_event (event_child, ourstatus);
3594
3595 return ptid_of (current_thread);
3596 }
3597
3598 /* Get rid of any pending event in the pipe. */
3599 static void
3600 async_file_flush (void)
3601 {
3602 int ret;
3603 char buf;
3604
3605 do
3606 ret = read (linux_event_pipe[0], &buf, 1);
3607 while (ret >= 0 || (ret == -1 && errno == EINTR));
3608 }
3609
3610 /* Put something in the pipe, so the event loop wakes up. */
3611 static void
3612 async_file_mark (void)
3613 {
3614 int ret;
3615
3616 async_file_flush ();
3617
3618 do
3619 ret = write (linux_event_pipe[1], "+", 1);
3620 while (ret == 0 || (ret == -1 && errno == EINTR));
3621
3622 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3623 be awakened anyway. */
3624 }
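/* The two functions above implement the "self-pipe trick": marking
   writes a byte into a non-blocking pipe so that the event loop,
   which polls the read end, wakes up and calls the target's wait
   method. A hypothetical setup sketch (gdbserver creates
   linux_event_pipe elsewhere, when async mode is enabled):  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int event_pipe[2];

/* Create the wake-up pipe; both ends are made non-blocking so that
   neither marking nor flushing can ever stall the event loop.  */
static int
event_pipe_init (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  if (fcntl (event_pipe[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (event_pipe[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif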
3625
3626 static ptid_t
3627 linux_wait (ptid_t ptid,
3628 struct target_waitstatus *ourstatus, int target_options)
3629 {
3630 ptid_t event_ptid;
3631
3632 /* Flush the async file first. */
3633 if (target_is_async_p ())
3634 async_file_flush ();
3635
3636 do
3637 {
3638 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3639 }
3640 while ((target_options & TARGET_WNOHANG) == 0
3641 && ptid_equal (event_ptid, null_ptid)
3642 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3643
3644 /* If at least one stop was reported, there may be more. A single
3645 SIGCHLD can signal more than one child stop. */
3646 if (target_is_async_p ()
3647 && (target_options & TARGET_WNOHANG) != 0
3648 && !ptid_equal (event_ptid, null_ptid))
3649 async_file_mark ();
3650
3651 return event_ptid;
3652 }
3653
3654 /* Send a signal to an LWP. */
3655
3656 static int
3657 kill_lwp (unsigned long lwpid, int signo)
3658 {
3659 int ret;
3660
3661 errno = 0;
3662 ret = syscall (__NR_tkill, lwpid, signo);
3663 if (errno == ENOSYS)
3664 {
3665 /* If tkill fails, then we are not using nptl threads, a
3666 configuration we no longer support. */
3667 perror_with_name (("tkill"));
3668 }
3669 return ret;
3670 }
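/* Illustration only, not compiled: kill(2) addresses a whole thread
group, so stopping a single LWP requires the thread-directed tkill
syscall used above (or its race-safer successor, tgkill). A hedged
standalone sketch; demo_stop_one_thread is hypothetical. */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
demo_stop_one_thread (pid_t tid)
{
/* glibc provides no wrapper for tkill, hence syscall (). */
return syscall (SYS_tkill, tid, SIGSTOP);
}
#endif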
3671
3672 void
3673 linux_stop_lwp (struct lwp_info *lwp)
3674 {
3675 send_sigstop (lwp);
3676 }
3677
3678 static void
3679 send_sigstop (struct lwp_info *lwp)
3680 {
3681 int pid;
3682
3683 pid = lwpid_of (get_lwp_thread (lwp));
3684
3685 /* If we already have a pending stop signal for this process, don't
3686 send another. */
3687 if (lwp->stop_expected)
3688 {
3689 if (debug_threads)
3690 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3691
3692 return;
3693 }
3694
3695 if (debug_threads)
3696 debug_printf ("Sending sigstop to lwp %d\n", pid);
3697
3698 lwp->stop_expected = 1;
3699 kill_lwp (pid, SIGSTOP);
3700 }
3701
3702 static int
3703 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3704 {
3705 struct thread_info *thread = (struct thread_info *) entry;
3706 struct lwp_info *lwp = get_thread_lwp (thread);
3707
3708 /* Ignore EXCEPT. */
3709 if (lwp == except)
3710 return 0;
3711
3712 if (lwp->stopped)
3713 return 0;
3714
3715 send_sigstop (lwp);
3716 return 0;
3717 }
3718
3719 /* Increment the suspend count of an LWP, and stop it, if not stopped
3720 yet. */
3721 static int
3722 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3723 void *except)
3724 {
3725 struct thread_info *thread = (struct thread_info *) entry;
3726 struct lwp_info *lwp = get_thread_lwp (thread);
3727
3728 /* Ignore EXCEPT. */
3729 if (lwp == except)
3730 return 0;
3731
3732 lwp_suspended_inc (lwp);
3733
3734 return send_sigstop_callback (entry, except);
3735 }
3736
3737 static void
3738 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3739 {
3740 /* Store the exit status for later. */
3741 lwp->status_pending_p = 1;
3742 lwp->status_pending = wstat;
3743
3744 /* Store in waitstatus as well, as there's nothing else to process
3745 for this event. */
3746 if (WIFEXITED (wstat))
3747 {
3748 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3749 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3750 }
3751 else if (WIFSIGNALED (wstat))
3752 {
3753 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3754 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3755 }
3756
3757 /* Prevent trying to stop it. */
3758 lwp->stopped = 1;
3759
3760 /* No further stops are expected from a dead lwp. */
3761 lwp->stop_expected = 0;
3762 }
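/* Illustration only, not compiled: how a raw wait status from waitpid
decodes into the two exit flavors stored above, plus the stop case
handled elsewhere in this file. demo_decode_wstat is hypothetical. */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
demo_decode_wstat (int wstat)
{
if (WIFEXITED (wstat))
printf ("exited normally, status %d\n", WEXITSTATUS (wstat));
else if (WIFSIGNALED (wstat))
printf ("killed by signal %d\n", WTERMSIG (wstat));
else if (WIFSTOPPED (wstat))
printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif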
3763
3764 /* Return true if LWP has exited already, and has a pending exit event
3765 to report to GDB. */
3766
3767 static int
3768 lwp_is_marked_dead (struct lwp_info *lwp)
3769 {
3770 return (lwp->status_pending_p
3771 && (WIFEXITED (lwp->status_pending)
3772 || WIFSIGNALED (lwp->status_pending)));
3773 }
3774
3775 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3776
3777 static void
3778 wait_for_sigstop (void)
3779 {
3780 struct thread_info *saved_thread;
3781 ptid_t saved_tid;
3782 int wstat;
3783 int ret;
3784
3785 saved_thread = current_thread;
3786 if (saved_thread != NULL)
3787 saved_tid = saved_thread->entry.id;
3788 else
3789 saved_tid = null_ptid; /* avoid bogus unused warning */
3790
3791 if (debug_threads)
3792 debug_printf ("wait_for_sigstop: pulling events\n");
3793
3794 /* Passing NULL_PTID as filter indicates we want all events to be
3795 left pending. Eventually this returns when there are no
3796 unwaited-for children left. */
3797 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3798 &wstat, __WALL);
3799 gdb_assert (ret == -1);
3800
3801 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3802 current_thread = saved_thread;
3803 else
3804 {
3805 if (debug_threads)
3806 debug_printf ("Previously current thread died.\n");
3807
3808 /* We can't change the current inferior behind GDB's back,
3809 otherwise, a subsequent command may apply to the wrong
3810 process. */
3811 current_thread = NULL;
3812 }
3813 }
3814
3815 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3816 move it out, because we need to report the stop event to GDB. For
3817 example, if the user puts a breakpoint in the jump pad, it's
3818 because she wants to debug it. */
3819
3820 static int
3821 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3822 {
3823 struct thread_info *thread = (struct thread_info *) entry;
3824 struct lwp_info *lwp = get_thread_lwp (thread);
3825
3826 if (lwp->suspended != 0)
3827 {
3828 internal_error (__FILE__, __LINE__,
3829 "LWP %ld is suspended, suspended=%d\n",
3830 lwpid_of (thread), lwp->suspended);
3831 }
3832 gdb_assert (lwp->stopped);
3833
3834 /* Allow debugging the jump pad, gdb_collect, etc. */
3835 return (supports_fast_tracepoints ()
3836 && agent_loaded_p ()
3837 && (gdb_breakpoint_here (lwp->stop_pc)
3838 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3839 || thread->last_resume_kind == resume_step)
3840 && linux_fast_tracepoint_collecting (lwp, NULL));
3841 }
3842
3843 static void
3844 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3845 {
3846 struct thread_info *thread = (struct thread_info *) entry;
3847 struct thread_info *saved_thread;
3848 struct lwp_info *lwp = get_thread_lwp (thread);
3849 int *wstat;
3850
3851 if (lwp->suspended != 0)
3852 {
3853 internal_error (__FILE__, __LINE__,
3854 "LWP %ld is suspended, suspended=%d\n",
3855 lwpid_of (thread), lwp->suspended);
3856 }
3857 gdb_assert (lwp->stopped);
3858
3859 /* For gdb_breakpoint_here. */
3860 saved_thread = current_thread;
3861 current_thread = thread;
3862
3863 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3864
3865 /* Allow debugging the jump pad, gdb_collect, etc. */
3866 if (!gdb_breakpoint_here (lwp->stop_pc)
3867 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3868 && thread->last_resume_kind != resume_step
3869 && maybe_move_out_of_jump_pad (lwp, wstat))
3870 {
3871 if (debug_threads)
3872 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3873 lwpid_of (thread));
3874
3875 if (wstat)
3876 {
3877 lwp->status_pending_p = 0;
3878 enqueue_one_deferred_signal (lwp, wstat);
3879
3880 if (debug_threads)
3881 debug_printf ("Signal %d for LWP %ld deferred "
3882 "(in jump pad)\n",
3883 WSTOPSIG (*wstat), lwpid_of (thread));
3884 }
3885
3886 linux_resume_one_lwp (lwp, 0, 0, NULL);
3887 }
3888 else
3889 lwp_suspended_inc (lwp);
3890
3891 current_thread = saved_thread;
3892 }
3893
3894 static int
3895 lwp_running (struct inferior_list_entry *entry, void *data)
3896 {
3897 struct thread_info *thread = (struct thread_info *) entry;
3898 struct lwp_info *lwp = get_thread_lwp (thread);
3899
3900 if (lwp_is_marked_dead (lwp))
3901 return 0;
3902 if (lwp->stopped)
3903 return 0;
3904 return 1;
3905 }
3906
3907 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3908 If SUSPEND, then also increase the suspend count of every LWP,
3909 except EXCEPT. */
3910
3911 static void
3912 stop_all_lwps (int suspend, struct lwp_info *except)
3913 {
3914 /* Should not be called recursively. */
3915 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3916
3917 if (debug_threads)
3918 {
3919 debug_enter ();
3920 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3921 suspend ? "stop-and-suspend" : "stop",
3922 except != NULL
3923 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3924 : "none");
3925 }
3926
3927 stopping_threads = (suspend
3928 ? STOPPING_AND_SUSPENDING_THREADS
3929 : STOPPING_THREADS);
3930
3931 if (suspend)
3932 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3933 else
3934 find_inferior (&all_threads, send_sigstop_callback, except);
3935 wait_for_sigstop ();
3936 stopping_threads = NOT_STOPPING_THREADS;
3937
3938 if (debug_threads)
3939 {
3940 debug_printf ("stop_all_lwps done, setting stopping_threads "
3941 "back to !stopping\n");
3942 debug_exit ();
3943 }
3944 }
3945
3946 /* Enqueue one signal in the chain of signals which need to be
3947 delivered to this process on next resume. */
3948
3949 static void
3950 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3951 {
3952 struct pending_signals *p_sig = XNEW (struct pending_signals);
3953
3954 p_sig->prev = lwp->pending_signals;
3955 p_sig->signal = signal;
3956 if (info == NULL)
3957 memset (&p_sig->info, 0, sizeof (siginfo_t));
3958 else
3959 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3960 lwp->pending_signals = p_sig;
3961 }
3962
3963 /* Install breakpoints for software single stepping. */
3964
3965 static void
3966 install_software_single_step_breakpoints (struct lwp_info *lwp)
3967 {
3968 int i;
3969 CORE_ADDR pc;
3970 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3971 VEC (CORE_ADDR) *next_pcs = NULL;
3972 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
3973
3974 pc = get_pc (lwp);
3975 next_pcs = (*the_low_target.get_next_pcs) (pc, regcache);
3976
3977 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
3978 set_reinsert_breakpoint (pc);
3979
3980 do_cleanups (old_chain);
3981 }
3982
3983 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3984 SIGNAL is nonzero, give it that signal. */
3985
3986 static void
3987 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3988 int step, int signal, siginfo_t *info)
3989 {
3990 struct thread_info *thread = get_lwp_thread (lwp);
3991 struct thread_info *saved_thread;
3992 int fast_tp_collecting;
3993 struct process_info *proc = get_thread_process (thread);
3994
3995 /* Note that the target description may not be initialized
3996 (proc->tdesc == NULL) at this point, because the program hasn't
3997 stopped at its first instruction yet. It means GDBserver skips
3998 the extra traps from the wrapper program (see option --wrapper).
3999 Code in this function that requires register access should be
4000 guarded by a check that proc->tdesc is not NULL. */
4001
4002 if (lwp->stopped == 0)
4003 return;
4004
4005 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4006
4007 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4008
4009 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4010
4011 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4012 user used the "jump" command, or "set $pc = foo"). */
4013 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4014 {
4015 /* Collecting 'while-stepping' actions doesn't make sense
4016 anymore. */
4017 release_while_stepping_state_list (thread);
4018 }
4019
4020 /* If we have pending signals or status, and a new signal, enqueue the
4021 signal. Also enqueue the signal if we are waiting to reinsert a
4022 breakpoint; it will be picked up again below. */
4023 if (signal != 0
4024 && (lwp->status_pending_p
4025 || lwp->pending_signals != NULL
4026 || lwp->bp_reinsert != 0
4027 || fast_tp_collecting))
4028 enqueue_pending_signal (lwp, signal, info);
4039
4040 if (lwp->status_pending_p)
4041 {
4042 if (debug_threads)
4043 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4044 " has pending status\n",
4045 lwpid_of (thread), step ? "step" : "continue", signal,
4046 lwp->stop_expected ? "expected" : "not expected");
4047 return;
4048 }
4049
4050 saved_thread = current_thread;
4051 current_thread = thread;
4052
4053 if (debug_threads)
4054 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4055 lwpid_of (thread), step ? "step" : "continue", signal,
4056 lwp->stop_expected ? "expected" : "not expected");
4057
4058 /* This bit needs some thinking about. If we get a signal that
4059 we must report while a single-step reinsert is still pending,
4060 we often end up resuming the thread. It might be better to
4061 (ew) allow a stack of pending events; then we could be sure that
4062 the reinsert happened right away and not lose any signals.
4063
4064 Making this stack would also shrink the window in which breakpoints are
4065 uninserted (see comment in linux_wait_for_lwp) but not enough for
4066 complete correctness, so it won't solve that problem. It may be
4067 worthwhile just to solve this one, however. */
4068 if (lwp->bp_reinsert != 0)
4069 {
4070 if (debug_threads)
4071 debug_printf (" pending reinsert at 0x%s\n",
4072 paddress (lwp->bp_reinsert));
4073
4074 if (can_hardware_single_step ())
4075 {
4076 if (fast_tp_collecting == 0)
4077 {
4078 if (step == 0)
4079 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4080 if (lwp->suspended)
4081 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4082 lwp->suspended);
4083 }
4084
4085 step = 1;
4086 }
4087
4088 /* Postpone any pending signal. It was enqueued above. */
4089 signal = 0;
4090 }
4091
4092 if (fast_tp_collecting == 1)
4093 {
4094 if (debug_threads)
4095 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4096 " (exit-jump-pad-bkpt)\n",
4097 lwpid_of (thread));
4098
4099 /* Postpone any pending signal. It was enqueued above. */
4100 signal = 0;
4101 }
4102 else if (fast_tp_collecting == 2)
4103 {
4104 if (debug_threads)
4105 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4106 " single-stepping\n",
4107 lwpid_of (thread));
4108
4109 if (can_hardware_single_step ())
4110 step = 1;
4111 else
4112 {
4113 internal_error (__FILE__, __LINE__,
4114 "moving out of jump pad single-stepping"
4115 " not implemented on this target");
4116 }
4117
4118 /* Postpone any pending signal. It was enqueued above. */
4119 signal = 0;
4120 }
4121
4122 /* If we have while-stepping actions in this thread, set it stepping.
4123 If we have a signal to deliver, it may or may not be set to
4124 SIG_IGN; we don't know. Assume so, and allow collecting
4125 while-stepping into a signal handler. A possible smart thing to
4126 do would be to set an internal breakpoint at the signal return
4127 address, continue, and carry on catching this while-stepping
4128 action only when that breakpoint is hit. A future
4129 enhancement. */
4130 if (thread->while_stepping != NULL
4131 && can_hardware_single_step ())
4132 {
4133 if (debug_threads)
4134 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4135 lwpid_of (thread));
4136 step = 1;
4137 }
4138
4139 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4140 {
4141 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4142
4143 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4144
4145 if (debug_threads)
4146 {
4147 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4148 (long) lwp->stop_pc);
4149 }
4150 }
4151
4152 /* If we have pending signals, consume one unless we are trying to
4153 reinsert a breakpoint or we're trying to finish a fast tracepoint
4154 collect. */
4155 if (lwp->pending_signals != NULL
4156 && lwp->bp_reinsert == 0
4157 && fast_tp_collecting == 0)
4158 {
4159 struct pending_signals **p_sig;
4160
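/* PENDING_SIGNALS is linked newest-first through PREV, so walk to
the tail and deliver the oldest pending signal first (FIFO). */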
4161 p_sig = &lwp->pending_signals;
4162 while ((*p_sig)->prev != NULL)
4163 p_sig = &(*p_sig)->prev;
4164
4165 signal = (*p_sig)->signal;
4166 if ((*p_sig)->info.si_signo != 0)
4167 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4168 &(*p_sig)->info);
4169
4170 free (*p_sig);
4171 *p_sig = NULL;
4172 }
4173
4174 if (the_low_target.prepare_to_resume != NULL)
4175 the_low_target.prepare_to_resume (lwp);
4176
4177 regcache_invalidate_thread (thread);
4178 errno = 0;
4179 lwp->stepping = step;
4180 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
4181 (PTRACE_TYPE_ARG3) 0,
4182 /* Coerce to a uintptr_t first to avoid potential gcc warning
4183 of coercing an 8 byte integer to a 4 byte pointer. */
4184 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4185
4186 current_thread = saved_thread;
4187 if (errno)
4188 perror_with_name ("resuming thread");
4189
4190 /* Successfully resumed. Clear state that no longer makes sense,
4191 and mark the LWP as running. Must not do this before resuming
4192 otherwise if that fails other code will be confused. E.g., we'd
4193 later try to stop the LWP and hang forever waiting for a stop
4194 status. Note that we must not throw after this is cleared,
4195 otherwise handle_zombie_lwp_error would get confused. */
4196 lwp->stopped = 0;
4197 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4198 }
4199
4200 /* Called when we try to resume a stopped LWP and that errors out. If
4201 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4202 or about to become), discard the error, clear any pending status
4203 the LWP may have, and return true (we'll collect the exit status
4204 soon enough). Otherwise, return false. */
4205
4206 static int
4207 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4208 {
4209 struct thread_info *thread = get_lwp_thread (lp);
4210
4211 /* If we get an error after resuming the LWP successfully, we'd
4212 confuse !T state for the LWP being gone. */
4213 gdb_assert (lp->stopped);
4214
4215 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4216 because even if ptrace failed with ESRCH, the tracee may be "not
4217 yet fully dead", but already refusing ptrace requests. In that
4218 case the tracee has 'R (Running)' state for a little bit
4219 (observed in Linux 3.18). See also the note on ESRCH in the
4220 ptrace(2) man page. Instead, check whether the LWP has any state
4221 other than ptrace-stopped. */
4222
4223 /* Don't assume anything if /proc/PID/status can't be read. */
4224 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4225 {
4226 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4227 lp->status_pending_p = 0;
4228 return 1;
4229 }
4230 return 0;
4231 }
4232
4233 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4234 disappears while we try to resume it. */
4235
4236 static void
4237 linux_resume_one_lwp (struct lwp_info *lwp,
4238 int step, int signal, siginfo_t *info)
4239 {
4240 TRY
4241 {
4242 linux_resume_one_lwp_throw (lwp, step, signal, info);
4243 }
4244 CATCH (ex, RETURN_MASK_ERROR)
4245 {
4246 if (!check_ptrace_stopped_lwp_gone (lwp))
4247 throw_exception (ex);
4248 }
4249 END_CATCH
4250 }
4251
4252 struct thread_resume_array
4253 {
4254 struct thread_resume *resume;
4255 size_t n;
4256 };
4257
4258 /* This function is called once per thread via find_inferior.
4259 ARG is a pointer to a thread_resume_array struct.
4260 We look up the thread specified by ENTRY in ARG, and mark the thread
4261 with a pointer to the appropriate resume request.
4262
4263 This algorithm is O(threads * resume elements), but the number of
4264 resume elements is small (and will remain small at least until GDB
4265 supports thread suspension). */
4266
4267 static int
4268 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4269 {
4270 struct thread_info *thread = (struct thread_info *) entry;
4271 struct lwp_info *lwp = get_thread_lwp (thread);
4272 int ndx;
4273 struct thread_resume_array *r;
4274
4275 r = (struct thread_resume_array *) arg;
4276
4277 for (ndx = 0; ndx < r->n; ndx++)
4278 {
4279 ptid_t ptid = r->resume[ndx].thread;
4280 if (ptid_equal (ptid, minus_one_ptid)
4281 || ptid_equal (ptid, entry->id)
4282 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4283 of PID'. */
4284 || (ptid_get_pid (ptid) == pid_of (thread)
4285 && (ptid_is_pid (ptid)
4286 || ptid_get_lwp (ptid) == -1)))
4287 {
4288 if (r->resume[ndx].kind == resume_stop
4289 && thread->last_resume_kind == resume_stop)
4290 {
4291 if (debug_threads)
4292 debug_printf ("already %s LWP %ld at GDB's request\n",
4293 (thread->last_status.kind
4294 == TARGET_WAITKIND_STOPPED)
4295 ? "stopped"
4296 : "stopping",
4297 lwpid_of (thread));
4298
4299 continue;
4300 }
4301
4302 lwp->resume = &r->resume[ndx];
4303 thread->last_resume_kind = lwp->resume->kind;
4304
4305 lwp->step_range_start = lwp->resume->step_range_start;
4306 lwp->step_range_end = lwp->resume->step_range_end;
4307
4308 /* If we had a deferred signal to report, dequeue one now.
4309 This can happen if LWP gets more than one signal while
4310 trying to get out of a jump pad. */
4311 if (lwp->stopped
4312 && !lwp->status_pending_p
4313 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4314 {
4315 lwp->status_pending_p = 1;
4316
4317 if (debug_threads)
4318 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4319 "leaving status pending.\n",
4320 WSTOPSIG (lwp->status_pending),
4321 lwpid_of (thread));
4322 }
4323
4324 return 0;
4325 }
4326 }
4327
4328 /* No resume action for this thread. */
4329 lwp->resume = NULL;
4330
4331 return 0;
4332 }
4333
4334 /* find_inferior callback for linux_resume.
4335 Set *FLAG_P if this lwp has an interesting status pending. */
4336
4337 static int
4338 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4339 {
4340 struct thread_info *thread = (struct thread_info *) entry;
4341 struct lwp_info *lwp = get_thread_lwp (thread);
4342
4343 /* LWPs which will not be resumed are not interesting, because
4344 we might not wait for them next time through linux_wait. */
4345 if (lwp->resume == NULL)
4346 return 0;
4347
4348 if (thread_still_has_status_pending_p (thread))
4349 * (int *) flag_p = 1;
4350
4351 return 0;
4352 }
4353
4354 /* Return 1 if this lwp that GDB wants running is stopped at an
4355 internal breakpoint that we need to step over. It assumes that any
4356 required STOP_PC adjustment has already been propagated to the
4357 inferior's regcache. */
4358
4359 static int
4360 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4361 {
4362 struct thread_info *thread = (struct thread_info *) entry;
4363 struct lwp_info *lwp = get_thread_lwp (thread);
4364 struct thread_info *saved_thread;
4365 CORE_ADDR pc;
4366 struct process_info *proc = get_thread_process (thread);
4367
4368 /* GDBserver is skipping the extra traps from the wrapper program;
4369 no step-over is needed. */
4370 if (proc->tdesc == NULL)
4371 return 0;
4372
4373 /* LWPs which will not be resumed are not interesting, because we
4374 might not wait for them next time through linux_wait. */
4375
4376 if (!lwp->stopped)
4377 {
4378 if (debug_threads)
4379 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4380 lwpid_of (thread));
4381 return 0;
4382 }
4383
4384 if (thread->last_resume_kind == resume_stop)
4385 {
4386 if (debug_threads)
4387 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4388 " stopped\n",
4389 lwpid_of (thread));
4390 return 0;
4391 }
4392
4393 gdb_assert (lwp->suspended >= 0);
4394
4395 if (lwp->suspended)
4396 {
4397 if (debug_threads)
4398 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4399 lwpid_of (thread));
4400 return 0;
4401 }
4402
4403 if (!lwp->need_step_over)
4404 {
4405 if (debug_threads)
4406 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4407 }
4408
4409 if (lwp->status_pending_p)
4410 {
4411 if (debug_threads)
4412 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4413 " status.\n",
4414 lwpid_of (thread));
4415 return 0;
4416 }
4417
4418 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4419 or we have. */
4420 pc = get_pc (lwp);
4421
4422 /* If the PC has changed since we stopped, then don't do anything,
4423 and let the breakpoint/tracepoint be hit. This happens if, for
4424 instance, GDB handled the decr_pc_after_break subtraction itself,
4425 GDB is OOL stepping this thread, or the user has issued a "jump"
4426 command, or poked thread's registers herself. */
4427 if (pc != lwp->stop_pc)
4428 {
4429 if (debug_threads)
4430 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4431 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4432 lwpid_of (thread),
4433 paddress (lwp->stop_pc), paddress (pc));
4434
4435 lwp->need_step_over = 0;
4436 return 0;
4437 }
4438
4439 saved_thread = current_thread;
4440 current_thread = thread;
4441
4442 /* We can only step over breakpoints we know about. */
4443 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4444 {
4445 /* Don't step over a breakpoint that GDB expects to hit,
4446 though. If the condition is being evaluated on the target's side
4447 and it evaluates to false, step over this breakpoint as well. */
4448 if (gdb_breakpoint_here (pc)
4449 && gdb_condition_true_at_breakpoint (pc)
4450 && gdb_no_commands_at_breakpoint (pc))
4451 {
4452 if (debug_threads)
4453 debug_printf ("Need step over [LWP %ld]? yes, but found"
4454 " GDB breakpoint at 0x%s; skipping step over\n",
4455 lwpid_of (thread), paddress (pc));
4456
4457 current_thread = saved_thread;
4458 return 0;
4459 }
4460 else
4461 {
4462 if (debug_threads)
4463 debug_printf ("Need step over [LWP %ld]? yes, "
4464 "found breakpoint at 0x%s\n",
4465 lwpid_of (thread), paddress (pc));
4466
4467 /* We've found an lwp that needs stepping over --- return 1 so
4468 that find_inferior stops looking. */
4469 current_thread = saved_thread;
4470
4471 /* If the step over is cancelled, this is set again. */
4472 lwp->need_step_over = 0;
4473 return 1;
4474 }
4475 }
4476
4477 current_thread = saved_thread;
4478
4479 if (debug_threads)
4480 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4481 " at 0x%s\n",
4482 lwpid_of (thread), paddress (pc));
4483
4484 return 0;
4485 }
4486
4487 /* Start a step-over operation on LWP. When LWP stopped at a
4488 breakpoint, to make progress, we need to move the breakpoint out
4489 of the way. If we let other threads run while we do that, they may
4490 pass by the breakpoint location and miss hitting it. To avoid
4491 that, a step-over momentarily stops all threads while LWP is
4492 single-stepped with the breakpoint temporarily uninserted from
4493 the inferior. When the single-step finishes, we reinsert the
4494 breakpoint, and let all threads that are supposed to be running,
4495 run again.
4496
4497 On targets that don't support hardware single-step, we don't
4498 currently support full software single-stepping. Instead, we only
4499 support stepping over the thread event breakpoint, by asking the
4500 low target where to place a reinsert breakpoint. Since this
4501 routine assumes the breakpoint being stepped over is a thread event
4502 breakpoint, it usually assumes the return address of the current
4503 function is a good enough place to set the reinsert breakpoint. */
4504
4505 static int
4506 start_step_over (struct lwp_info *lwp)
4507 {
4508 struct thread_info *thread = get_lwp_thread (lwp);
4509 struct thread_info *saved_thread;
4510 CORE_ADDR pc;
4511 int step;
4512
4513 if (debug_threads)
4514 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4515 lwpid_of (thread));
4516
4517 stop_all_lwps (1, lwp);
4518
4519 if (lwp->suspended != 0)
4520 {
4521 internal_error (__FILE__, __LINE__,
4522 "LWP %ld suspended=%d\n", lwpid_of (thread),
4523 lwp->suspended);
4524 }
4525
4526 if (debug_threads)
4527 debug_printf ("Done stopping all threads for step-over.\n");
4528
4529 /* Note, we should always reach here with an already adjusted PC,
4530 either by GDB (if we're resuming due to GDB's request), or by our
4531 caller, if we just finished handling an internal breakpoint GDB
4532 shouldn't care about. */
4533 pc = get_pc (lwp);
4534
4535 saved_thread = current_thread;
4536 current_thread = thread;
4537
4538 lwp->bp_reinsert = pc;
4539 uninsert_breakpoints_at (pc);
4540 uninsert_fast_tracepoint_jumps_at (pc);
4541
4542 if (can_hardware_single_step ())
4543 {
4544 step = 1;
4545 }
4546 else if (can_software_single_step ())
4547 {
4548 install_software_single_step_breakpoints (lwp);
4549 step = 0;
4550 }
4551 else
4552 {
4553 internal_error (__FILE__, __LINE__,
4554 "stepping is not implemented on this target");
4555 }
4556
4557 current_thread = saved_thread;
4558
4559 linux_resume_one_lwp (lwp, step, 0, NULL);
4560
4561 /* Require next event from this LWP. */
4562 step_over_bkpt = thread->entry.id;
4563 return 1;
4564 }
4565
4566 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4567 start_step_over, if still there, and delete any reinsert
4568 breakpoints we've set, on non-hardware single-step targets. */
4569
4570 static int
4571 finish_step_over (struct lwp_info *lwp)
4572 {
4573 if (lwp->bp_reinsert != 0)
4574 {
4575 if (debug_threads)
4576 debug_printf ("Finished step over.\n");
4577
4578 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4579 may be no breakpoint to reinsert there by now. */
4580 reinsert_breakpoints_at (lwp->bp_reinsert);
4581 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4582
4583 lwp->bp_reinsert = 0;
4584
4585 /* Delete any software-single-step reinsert breakpoints. No
4586 longer needed. We don't have to worry about other threads
4587 hitting this trap, and later not being able to explain it,
4588 because we were stepping over a breakpoint, and we hold all
4589 threads but LWP stopped while doing that. */
4590 if (!can_hardware_single_step ())
4591 delete_reinsert_breakpoints ();
4592
4593 step_over_bkpt = null_ptid;
4594 return 1;
4595 }
4596 else
4597 return 0;
4598 }
4599
4600 /* If there's a step over in progress, wait until all threads stop
4601 (that is, until the stepping thread finishes its step), and
4602 unsuspend all lwps. The stepping thread ends with its status
4603 pending, which is processed later when we get back to processing
4604 events. */
4605
4606 static void
4607 complete_ongoing_step_over (void)
4608 {
4609 if (!ptid_equal (step_over_bkpt, null_ptid))
4610 {
4611 struct lwp_info *lwp;
4612 int wstat;
4613 int ret;
4614
4615 if (debug_threads)
4616 debug_printf ("detach: step over in progress, finish it first\n");
4617
4618 /* Passing NULL_PTID as filter indicates we want all events to
4619 be left pending. Eventually this returns when there are no
4620 unwaited-for children left. */
4621 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4622 &wstat, __WALL);
4623 gdb_assert (ret == -1);
4624
4625 lwp = find_lwp_pid (step_over_bkpt);
4626 if (lwp != NULL)
4627 finish_step_over (lwp);
4628 step_over_bkpt = null_ptid;
4629 unsuspend_all_lwps (lwp);
4630 }
4631 }
4632
4633 /* This function is called once per thread. We check the thread's resume
4634 request, which will tell us whether to resume, step, or leave the thread
4635 stopped; and what signal, if any, it should be sent.
4636
4637 For threads which we aren't explicitly told otherwise, we preserve
4638 the stepping flag; this is used for stepping over gdbserver-placed
4639 breakpoints.
4640
4641 If pending_flags was set in any thread, we queue any needed
4642 signals, since we won't actually resume. We already have a pending
4643 event to report, so we don't need to preserve any step requests;
4644 they should be re-issued if necessary. */
4645
4646 static int
4647 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4648 {
4649 struct thread_info *thread = (struct thread_info *) entry;
4650 struct lwp_info *lwp = get_thread_lwp (thread);
4651 int step;
4652 int leave_all_stopped = * (int *) arg;
4653 int leave_pending;
4654
4655 if (lwp->resume == NULL)
4656 return 0;
4657
4658 if (lwp->resume->kind == resume_stop)
4659 {
4660 if (debug_threads)
4661 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4662
4663 if (!lwp->stopped)
4664 {
4665 if (debug_threads)
4666 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4667
4668 /* Stop the thread, and wait for the event asynchronously,
4669 through the event loop. */
4670 send_sigstop (lwp);
4671 }
4672 else
4673 {
4674 if (debug_threads)
4675 debug_printf ("already stopped LWP %ld\n",
4676 lwpid_of (thread));
4677
4678 /* The LWP may have been stopped in an internal event that
4679 was not meant to be notified back to GDB (e.g., gdbserver
4680 breakpoint), so we should be reporting a stop event in
4681 this case too. */
4682
4683 /* If the thread already has a pending SIGSTOP, this is a
4684 no-op. Otherwise, something later will presumably resume
4685 the thread and this will cause it to cancel any pending
4686 operation, due to last_resume_kind == resume_stop. If
4687 the thread already has a pending status to report, we
4688 will still report it the next time we wait - see
4689 status_pending_p_callback. */
4690
4691 /* If we already have a pending signal to report, then
4692 there's no need to queue a SIGSTOP, as this means we're
4693 midway through moving the LWP out of the jumppad, and we
4694 will report the pending signal as soon as that is
4695 finished. */
4696 if (lwp->pending_signals_to_report == NULL)
4697 send_sigstop (lwp);
4698 }
4699
4700 /* For stop requests, we're done. */
4701 lwp->resume = NULL;
4702 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4703 return 0;
4704 }
4705
4706 /* If this thread which is about to be resumed has a pending status,
4707 then don't resume it - we can just report the pending status.
4708 Likewise if it is suspended, because e.g., another thread is
4709 stepping past a breakpoint. Make sure to queue any signals that
4710 would otherwise be sent. In all-stop mode, we make this decision
4711 based on whether *any* thread has a pending status. If there's a
4712 thread that needs the step-over-breakpoint dance, then don't
4713 resume any other thread but that particular one. */
4714 leave_pending = (lwp->suspended
4715 || lwp->status_pending_p
4716 || leave_all_stopped);
4717
4718 if (!leave_pending)
4719 {
4720 if (debug_threads)
4721 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4722
4723 step = (lwp->resume->kind == resume_step);
4724 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4725 }
4726 else
4727 {
4728 if (debug_threads)
4729 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4730
4731 /* If we have a new signal, enqueue the signal. */
4732 if (lwp->resume->sig != 0)
4733 {
4734 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4735
4736 p_sig->prev = lwp->pending_signals;
4737 p_sig->signal = lwp->resume->sig;
4738
4739 /* If this is the same signal we were previously stopped by,
4740 make sure to queue its siginfo. We can ignore the return
4741 value of ptrace; if it fails, we'll skip
4742 PTRACE_SETSIGINFO. */
4743 if (WIFSTOPPED (lwp->last_status)
4744 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4745 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4746 &p_sig->info);
4747
4748 lwp->pending_signals = p_sig;
4749 }
4750 }
4751
4752 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4753 lwp->resume = NULL;
4754 return 0;
4755 }
4756
4757 static void
4758 linux_resume (struct thread_resume *resume_info, size_t n)
4759 {
4760 struct thread_resume_array array = { resume_info, n };
4761 struct thread_info *need_step_over = NULL;
4762 int any_pending;
4763 int leave_all_stopped;
4764
4765 if (debug_threads)
4766 {
4767 debug_enter ();
4768 debug_printf ("linux_resume:\n");
4769 }
4770
4771 find_inferior (&all_threads, linux_set_resume_request, &array);
4772
4773 /* If there is a thread which would otherwise be resumed, which has
4774 a pending status, then don't resume any threads - we can just
4775 report the pending status. Make sure to queue any signals that
4776 would otherwise be sent. In non-stop mode, we'll apply this
4777 logic to each thread individually. We consume all pending events
4778 before considering to start a step-over (in all-stop). */
4779 any_pending = 0;
4780 if (!non_stop)
4781 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4782
4783 /* If there is a thread which would otherwise be resumed, which is
4784 stopped at a breakpoint that needs stepping over, then don't
4785 resume any threads - have it step over the breakpoint with all
4786 other threads stopped, then resume all threads again. Make sure
4787 to queue any signals that would otherwise be delivered or
4788 queued. */
4789 if (!any_pending && supports_breakpoints ())
4790 need_step_over
4791 = (struct thread_info *) find_inferior (&all_threads,
4792 need_step_over_p, NULL);
4793
4794 leave_all_stopped = (need_step_over != NULL || any_pending);
4795
4796 if (debug_threads)
4797 {
4798 if (need_step_over != NULL)
4799 debug_printf ("Not resuming all, need step over\n");
4800 else if (any_pending)
4801 debug_printf ("Not resuming, all-stop and found "
4802 "an LWP with pending status\n");
4803 else
4804 debug_printf ("Resuming, no pending status or step over needed\n");
4805 }
4806
4807 /* Even if we're leaving threads stopped, queue all signals we'd
4808 otherwise deliver. */
4809 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4810
4811 if (need_step_over)
4812 start_step_over (get_thread_lwp (need_step_over));
4813
4814 if (debug_threads)
4815 {
4816 debug_printf ("linux_resume done\n");
4817 debug_exit ();
4818 }
4819
4820 /* We may have events that were pending that can/should be sent to
4821 the client now. Trigger a linux_wait call. */
4822 if (target_is_async_p ())
4823 async_file_mark ();
4824 }
4825
4826 /* This function is called once per thread. We check the thread's
4827 last resume request, which will tell us whether to resume, step, or
4828 leave the thread stopped. Any signal the client requested to be
4829 delivered has already been enqueued at this point.
4830
4831 If any thread that GDB wants running is stopped at an internal
4832 breakpoint that needs stepping over, we start a step-over operation
4833 on that particular thread, and leave all others stopped. */
4834
4835 static int
4836 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4837 {
4838 struct thread_info *thread = (struct thread_info *) entry;
4839 struct lwp_info *lwp = get_thread_lwp (thread);
4840 int step;
4841
4842 if (lwp == except)
4843 return 0;
4844
4845 if (debug_threads)
4846 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4847
4848 if (!lwp->stopped)
4849 {
4850 if (debug_threads)
4851 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4852 return 0;
4853 }
4854
4855 if (thread->last_resume_kind == resume_stop
4856 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4857 {
4858 if (debug_threads)
4859 debug_printf (" client wants LWP %ld to remain stopped\n",
4860 lwpid_of (thread));
4861 return 0;
4862 }
4863
4864 if (lwp->status_pending_p)
4865 {
4866 if (debug_threads)
4867 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4868 lwpid_of (thread));
4869 return 0;
4870 }
4871
4872 gdb_assert (lwp->suspended >= 0);
4873
4874 if (lwp->suspended)
4875 {
4876 if (debug_threads)
4877 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4878 return 0;
4879 }
4880
4881 if (thread->last_resume_kind == resume_stop
4882 && lwp->pending_signals_to_report == NULL
4883 && lwp->collecting_fast_tracepoint == 0)
4884 {
4885 /* We haven't reported this LWP as stopped yet (otherwise, the
4886 last_status.kind check above would catch it, and we wouldn't
4887 reach here). This LWP may have been momentarily paused by a
4888 stop_all_lwps call while handling, for example, another LWP's
4889 step-over. In that case, the pending expected SIGSTOP signal
4890 that was queued at vCont;t handling time will have already
4891 been consumed by wait_for_sigstop, and so we need to requeue
4892 another one here. Note that if the LWP already has a SIGSTOP
4893 pending, this is a no-op. */
4894
4895 if (debug_threads)
4896 debug_printf ("Client wants LWP %ld to stop. "
4897 "Making sure it has a SIGSTOP pending\n",
4898 lwpid_of (thread));
4899
4900 send_sigstop (lwp);
4901 }
4902
4903 if (thread->last_resume_kind == resume_step)
4904 {
4905 if (debug_threads)
4906 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4907 lwpid_of (thread));
4908 step = 1;
4909 }
4910 else if (lwp->bp_reinsert != 0)
4911 {
4912 if (debug_threads)
4913 debug_printf (" stepping LWP %ld, reinsert set\n",
4914 lwpid_of (thread));
4915 step = 1;
4916 }
4917 else
4918 step = 0;
4919
4920 linux_resume_one_lwp (lwp, step, 0, NULL);
4921 return 0;
4922 }
4923
4924 static int
4925 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4926 {
4927 struct thread_info *thread = (struct thread_info *) entry;
4928 struct lwp_info *lwp = get_thread_lwp (thread);
4929
4930 if (lwp == except)
4931 return 0;
4932
4933 lwp_suspended_decr (lwp);
4934
4935 return proceed_one_lwp (entry, except);
4936 }
4937
4938 /* When we finish a step-over, set threads running again. If there's
4939 another thread that may need a step-over, now's the time to start
4940 it. Eventually, we'll move all threads past their breakpoints. */
4941
4942 static void
4943 proceed_all_lwps (void)
4944 {
4945 struct thread_info *need_step_over;
4946
4947 /* If there is a thread which would otherwise be resumed, which is
4948 stopped at a breakpoint that needs stepping over, then don't
4949 resume any threads - have it step over the breakpoint with all
4950 other threads stopped, then resume all threads again. */
4951
4952 if (supports_breakpoints ())
4953 {
4954 need_step_over
4955 = (struct thread_info *) find_inferior (&all_threads,
4956 need_step_over_p, NULL);
4957
4958 if (need_step_over != NULL)
4959 {
4960 if (debug_threads)
4961 debug_printf ("proceed_all_lwps: found "
4962 "thread %ld needing a step-over\n",
4963 lwpid_of (need_step_over));
4964
4965 start_step_over (get_thread_lwp (need_step_over));
4966 return;
4967 }
4968 }
4969
4970 if (debug_threads)
4971 debug_printf ("Proceeding, no step-over needed\n");
4972
4973 find_inferior (&all_threads, proceed_one_lwp, NULL);
4974 }
4975
4976 /* Stopped LWPs that the client wanted running, and that have no
4977 pending status, are set to run again, except for EXCEPT, if not
4978 NULL. This undoes a stop_all_lwps call. */
4979
4980 static void
4981 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4982 {
4983 if (debug_threads)
4984 {
4985 debug_enter ();
4986 if (except)
4987 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4988 lwpid_of (get_lwp_thread (except)));
4989 else
4990 debug_printf ("unstopping all lwps\n");
4991 }
4992
4993 if (unsuspend)
4994 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4995 else
4996 find_inferior (&all_threads, proceed_one_lwp, except);
4997
4998 if (debug_threads)
4999 {
5000 debug_printf ("unstop_all_lwps done\n");
5001 debug_exit ();
5002 }
5003 }
5004
5005
5006 #ifdef HAVE_LINUX_REGSETS
5007
5008 #define use_linux_regsets 1
5009
5010 /* Returns true if REGSET has been disabled. */
5011
5012 static int
5013 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5014 {
5015 return (info->disabled_regsets != NULL
5016 && info->disabled_regsets[regset - info->regsets]);
5017 }
5018
5019 /* Disable REGSET. */
5020
5021 static void
5022 disable_regset (struct regsets_info *info, struct regset_info *regset)
5023 {
5024 int dr_offset;
5025
5026 dr_offset = regset - info->regsets;
5027 if (info->disabled_regsets == NULL)
5028 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5029 info->disabled_regsets[dr_offset] = 1;
5030 }
5031
5032 static int
5033 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5034 struct regcache *regcache)
5035 {
5036 struct regset_info *regset;
5037 int saw_general_regs = 0;
5038 int pid;
5039 struct iovec iov;
5040
5041 pid = lwpid_of (current_thread);
5042 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5043 {
5044 void *buf, *data;
5045 int nt_type, res;
5046
5047 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5048 continue;
5049
5050 buf = xmalloc (regset->size);
5051
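/* A nonzero NT_* type selects a PTRACE_GETREGSET-style request,
which takes a struct iovec describing the buffer rather than the
buffer pointer itself. */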
5052 nt_type = regset->nt_type;
5053 if (nt_type)
5054 {
5055 iov.iov_base = buf;
5056 iov.iov_len = regset->size;
5057 data = (void *) &iov;
5058 }
5059 else
5060 data = buf;
5061
5062 #ifndef __sparc__
5063 res = ptrace (regset->get_request, pid,
5064 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5065 #else
5066 res = ptrace (regset->get_request, pid, data, nt_type);
5067 #endif
5068 if (res < 0)
5069 {
5070 if (errno == EIO)
5071 {
5072 /* If we get EIO on a regset, do not try it again for
5073 this process mode. */
5074 disable_regset (regsets_info, regset);
5075 }
5076 else if (errno == ENODATA)
5077 {
5078 /* ENODATA may be returned if the regset is currently
5079 not "active". This can happen in normal operation,
5080 so suppress the warning in this case. */
5081 }
5082 else
5083 {
5084 char s[256];
5085 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5086 pid);
5087 perror (s);
5088 }
5089 }
5090 else
5091 {
5092 if (regset->type == GENERAL_REGS)
5093 saw_general_regs = 1;
5094 regset->store_function (regcache, buf);
5095 }
5096 free (buf);
5097 }
5098 if (saw_general_regs)
5099 return 0;
5100 else
5101 return 1;
5102 }
5103
5104 static int
5105 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5106 struct regcache *regcache)
5107 {
5108 struct regset_info *regset;
5109 int saw_general_regs = 0;
5110 int pid;
5111 struct iovec iov;
5112
5113 pid = lwpid_of (current_thread);
5114 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5115 {
5116 void *buf, *data;
5117 int nt_type, res;
5118
5119 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5120 || regset->fill_function == NULL)
5121 continue;
5122
5123 buf = xmalloc (regset->size);
5124
5125 /* First fill the buffer with the current register set contents,
5126 in case there are any items in the kernel's regset that are
5127 not in gdbserver's regcache. */
5128
5129 nt_type = regset->nt_type;
5130 if (nt_type)
5131 {
5132 iov.iov_base = buf;
5133 iov.iov_len = regset->size;
5134 data = (void *) &iov;
5135 }
5136 else
5137 data = buf;
5138
5139 #ifndef __sparc__
5140 res = ptrace (regset->get_request, pid,
5141 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5142 #else
5143 res = ptrace (regset->get_request, pid, data, nt_type);
5144 #endif
5145
5146 if (res == 0)
5147 {
5148 /* Then overlay our cached registers on that. */
5149 regset->fill_function (regcache, buf);
5150
5151 /* Only now do we write the register set. */
5152 #ifndef __sparc__
5153 res = ptrace (regset->set_request, pid,
5154 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5155 #else
5156 res = ptrace (regset->set_request, pid, data, nt_type);
5157 #endif
5158 }
5159
5160 if (res < 0)
5161 {
5162 if (errno == EIO)
5163 {
5164 /* If we get EIO on a regset, do not try it again for
5165 this process mode. */
5166 disable_regset (regsets_info, regset);
5167 }
5168 else if (errno == ESRCH)
5169 {
5170 /* At this point, ESRCH should mean the process is
5171 already gone, in which case we simply ignore attempts
5172 to change its registers. See also the related
5173 comment in linux_resume_one_lwp. */
5174 free (buf);
5175 return 0;
5176 }
5177 else
5178 {
5179 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5180 }
5181 }
5182 else if (regset->type == GENERAL_REGS)
5183 saw_general_regs = 1;
5184 free (buf);
5185 }
5186 if (saw_general_regs)
5187 return 0;
5188 else
5189 return 1;
5190 }
5191
5192 #else /* !HAVE_LINUX_REGSETS */
5193
5194 #define use_linux_regsets 0
5195 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5196 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5197
5198 #endif
5199
5200 /* Return 1 if register REGNO is supported by one of the regset ptrace
5201 calls or 0 if it has to be transferred individually. */
5202
5203 static int
5204 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5205 {
5206 unsigned char mask = 1 << (regno % 8);
5207 size_t index = regno / 8;
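/* E.g., regno 13 maps to bitmap byte 1 with mask 0x20 (1 << 5). */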
5208
5209 return (use_linux_regsets
5210 && (regs_info->regset_bitmap == NULL
5211 || (regs_info->regset_bitmap[index] & mask) != 0));
5212 }
5213
5214 #ifdef HAVE_LINUX_USRREGS
5215
5216 int
5217 register_addr (const struct usrregs_info *usrregs, int regnum)
5218 {
5219 int addr;
5220
5221 if (regnum < 0 || regnum >= usrregs->num_regs)
5222 error ("Invalid register number %d.", regnum);
5223
5224 addr = usrregs->regmap[regnum];
5225
5226 return addr;
5227 }
5228
5229 /* Fetch one register. */
5230 static void
5231 fetch_register (const struct usrregs_info *usrregs,
5232 struct regcache *regcache, int regno)
5233 {
5234 CORE_ADDR regaddr;
5235 int i, size;
5236 char *buf;
5237 int pid;
5238
5239 if (regno >= usrregs->num_regs)
5240 return;
5241 if ((*the_low_target.cannot_fetch_register) (regno))
5242 return;
5243
5244 regaddr = register_addr (usrregs, regno);
5245 if (regaddr == -1)
5246 return;
5247
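/* Round the transfer size up to a whole number of ptrace words;
e.g. a 10-byte register is fetched as 16 bytes when
PTRACE_XFER_TYPE is 8 bytes wide. */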
5248 size = ((register_size (regcache->tdesc, regno)
5249 + sizeof (PTRACE_XFER_TYPE) - 1)
5250 & -sizeof (PTRACE_XFER_TYPE));
5251 buf = (char *) alloca (size);
5252
5253 pid = lwpid_of (current_thread);
5254 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5255 {
5256 errno = 0;
5257 *(PTRACE_XFER_TYPE *) (buf + i) =
5258 ptrace (PTRACE_PEEKUSER, pid,
5259 /* Coerce to a uintptr_t first to avoid potential gcc warning
5260 of coercing an 8 byte integer to a 4 byte pointer. */
5261 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5262 regaddr += sizeof (PTRACE_XFER_TYPE);
5263 if (errno != 0)
5264 error ("reading register %d: %s", regno, strerror (errno));
5265 }
5266
5267 if (the_low_target.supply_ptrace_register)
5268 the_low_target.supply_ptrace_register (regcache, regno, buf);
5269 else
5270 supply_register (regcache, regno, buf);
5271 }
5272
5273 /* Store one register. */
5274 static void
5275 store_register (const struct usrregs_info *usrregs,
5276 struct regcache *regcache, int regno)
5277 {
5278 CORE_ADDR regaddr;
5279 int i, size;
5280 char *buf;
5281 int pid;
5282
5283 if (regno >= usrregs->num_regs)
5284 return;
5285 if ((*the_low_target.cannot_store_register) (regno))
5286 return;
5287
5288 regaddr = register_addr (usrregs, regno);
5289 if (regaddr == -1)
5290 return;
5291
5292 size = ((register_size (regcache->tdesc, regno)
5293 + sizeof (PTRACE_XFER_TYPE) - 1)
5294 & -sizeof (PTRACE_XFER_TYPE));
5295 buf = (char *) alloca (size);
5296 memset (buf, 0, size);
5297
5298 if (the_low_target.collect_ptrace_register)
5299 the_low_target.collect_ptrace_register (regcache, regno, buf);
5300 else
5301 collect_register (regcache, regno, buf);
5302
5303 pid = lwpid_of (current_thread);
5304 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5305 {
5306 errno = 0;
5307 ptrace (PTRACE_POKEUSER, pid,
5308 /* Coerce to a uintptr_t first to avoid potential gcc warning
5309 about coercing an 8 byte integer to a 4 byte pointer. */
5310 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5311 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5312 if (errno != 0)
5313 {
5314 /* At this point, ESRCH should mean the process is
5315 already gone, in which case we simply ignore attempts
5316 to change its registers. See also the related
5317 comment in linux_resume_one_lwp. */
5318 if (errno == ESRCH)
5319 return;
5320
5321 if ((*the_low_target.cannot_store_register) (regno) == 0)
5322 error ("writing register %d: %s", regno, strerror (errno));
5323 }
5324 regaddr += sizeof (PTRACE_XFER_TYPE);
5325 }
5326 }
5327
5328 /* Fetch all registers, or just one, from the child process.
5329 If REGNO is -1, do this for all registers, skipping any that are
5330 assumed to have been retrieved by regsets_fetch_inferior_registers,
5331 unless ALL is non-zero.
5332 Otherwise, REGNO specifies which register (so we can save time). */
5333 static void
5334 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5335 struct regcache *regcache, int regno, int all)
5336 {
5337 struct usrregs_info *usr = regs_info->usrregs;
5338
5339 if (regno == -1)
5340 {
5341 for (regno = 0; regno < usr->num_regs; regno++)
5342 if (all || !linux_register_in_regsets (regs_info, regno))
5343 fetch_register (usr, regcache, regno);
5344 }
5345 else
5346 fetch_register (usr, regcache, regno);
5347 }
5348
5349 /* Store our register values back into the inferior.
5350 If REGNO is -1, do this for all registers, skipping any that are
5351 assumed to have been saved by regsets_store_inferior_registers,
5352 unless ALL is non-zero.
5353 Otherwise, REGNO specifies which register (so we can save time). */
5354 static void
5355 usr_store_inferior_registers (const struct regs_info *regs_info,
5356 struct regcache *regcache, int regno, int all)
5357 {
5358 struct usrregs_info *usr = regs_info->usrregs;
5359
5360 if (regno == -1)
5361 {
5362 for (regno = 0; regno < usr->num_regs; regno++)
5363 if (all || !linux_register_in_regsets (regs_info, regno))
5364 store_register (usr, regcache, regno);
5365 }
5366 else
5367 store_register (usr, regcache, regno);
5368 }
5369
5370 #else /* !HAVE_LINUX_USRREGS */
5371
5372 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5373 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5374
5375 #endif
5376
5377
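/* Fetch register REGNO (all registers if REGNO is -1) into REGCACHE,
trying the target's fetch_register hook first, then regsets, then
PTRACE_PEEKUSER as a fallback. */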
5378 void
5379 linux_fetch_registers (struct regcache *regcache, int regno)
5380 {
5381 int use_regsets;
5382 int all = 0;
5383 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5384
5385 if (regno == -1)
5386 {
5387 if (the_low_target.fetch_register != NULL
5388 && regs_info->usrregs != NULL)
5389 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5390 (*the_low_target.fetch_register) (regcache, regno);
5391
5392 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5393 if (regs_info->usrregs != NULL)
5394 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5395 }
5396 else
5397 {
5398 if (the_low_target.fetch_register != NULL
5399 && (*the_low_target.fetch_register) (regcache, regno))
5400 return;
5401
5402 use_regsets = linux_register_in_regsets (regs_info, regno);
5403 if (use_regsets)
5404 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5405 regcache);
5406 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5407 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5408 }
5409 }
5410
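/* Store register REGNO (all registers if REGNO is -1) from REGCACHE
back into the inferior, mirroring linux_fetch_registers. */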
5411 void
5412 linux_store_registers (struct regcache *regcache, int regno)
5413 {
5414 int use_regsets;
5415 int all = 0;
5416 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5417
5418 if (regno == -1)
5419 {
5420 all = regsets_store_inferior_registers (regs_info->regsets_info,
5421 regcache);
5422 if (regs_info->usrregs != NULL)
5423 usr_store_inferior_registers (regs_info, regcache, regno, all);
5424 }
5425 else
5426 {
5427 use_regsets = linux_register_in_regsets (regs_info, regno);
5428 if (use_regsets)
5429 all = regsets_store_inferior_registers (regs_info->regsets_info,
5430 regcache);
5431 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5432 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5433 }
5434 }
5435
5436
5437 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5438 to debugger memory starting at MYADDR. */
5439
5440 static int
5441 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5442 {
5443 int pid = lwpid_of (current_thread);
5444 register PTRACE_XFER_TYPE *buffer;
5445 register CORE_ADDR addr;
5446 register int count;
5447 char filename[64];
5448 register int i;
5449 int ret;
5450 int fd;
5451
5452 /* Try using /proc. Don't bother for one word. */
5453 if (len >= 3 * sizeof (long))
5454 {
5455 int bytes;
5456
5457 /* We could keep this file open and cache it - possibly one per
5458 thread. That requires some juggling, but is even faster. */
5459 sprintf (filename, "/proc/%d/mem", pid);
5460 fd = open (filename, O_RDONLY | O_LARGEFILE);
5461 if (fd == -1)
5462 goto no_proc;
5463
5464 /* If pread64 is available, use it. It's faster if the kernel
5465 supports it (only one syscall), and it's 64-bit safe even on
5466 32-bit platforms (for instance, SPARC debugging a SPARC64
5467 application). */
5468 #ifdef HAVE_PREAD64
5469 bytes = pread64 (fd, myaddr, len, memaddr);
5470 #else
5471 bytes = -1;
5472 if (lseek (fd, memaddr, SEEK_SET) != -1)
5473 bytes = read (fd, myaddr, len);
5474 #endif
5475
5476 close (fd);
5477 if (bytes == len)
5478 return 0;
5479
5480 /* Some data was read; we'll try to get the rest with ptrace. */
5481 if (bytes > 0)
5482 {
5483 memaddr += bytes;
5484 myaddr += bytes;
5485 len -= bytes;
5486 }
5487 }
5488
5489 no_proc:
5490 /* Round starting address down to longword boundary. */
5491 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5492 /* Round ending address up; get number of longwords that makes. */
5493 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5494 / sizeof (PTRACE_XFER_TYPE));
5495 /* Allocate buffer of that many longwords. */
5496 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5497
5498   /* Read all the longwords.  */
5499 errno = 0;
5500 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5501 {
5502 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5503 about coercing an 8 byte integer to a 4 byte pointer. */
5504 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5505 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5506 (PTRACE_TYPE_ARG4) 0);
5507 if (errno)
5508 break;
5509 }
5510 ret = errno;
5511
5512 /* Copy appropriate bytes out of the buffer. */
5513 if (i > 0)
5514 {
5515 i *= sizeof (PTRACE_XFER_TYPE);
5516 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5517 memcpy (myaddr,
5518 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5519 i < len ? i : len);
5520 }
5521
5522 return ret;
5523 }
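/* Illustrative sketch (not part of gdbserver proper) of the /proc fast
   path above, reduced to a standalone helper: a single pread64 (or
   lseek+read) against /proc/PID/mem replaces a loop of word-sized
   PTRACE_PEEKTEXT calls.  Returns 0 on success, an errno value
   otherwise; assumes <errno.h> is available via server.h.  Guarded
   out since it is documentation only.  */
#if 0
static int
example_proc_mem_read (int pid, unsigned long addr,
                       unsigned char *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return errno;

#ifdef HAVE_PREAD64
  /* One syscall, 64-bit safe even on 32-bit hosts.  */
  bytes = pread64 (fd, buf, len, addr);
#else
  bytes = -1;
  if (lseek (fd, addr, SEEK_SET) != -1)
    bytes = read (fd, buf, len);
#endif

  close (fd);
  return bytes == (ssize_t) len ? 0 : EIO;
}
#endif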
5524
5525 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5526 memory at MEMADDR. On failure (cannot write to the inferior)
5527 returns the value of errno. Always succeeds if LEN is zero. */
5528
5529 static int
5530 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5531 {
5532 register int i;
5533 /* Round starting address down to longword boundary. */
5534 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5535 /* Round ending address up; get number of longwords that makes. */
5536 register int count
5537 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5538 / sizeof (PTRACE_XFER_TYPE);
5539
5540 /* Allocate buffer of that many longwords. */
5541 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5542
5543 int pid = lwpid_of (current_thread);
5544
5545 if (len == 0)
5546 {
5547 /* Zero length write always succeeds. */
5548 return 0;
5549 }
5550
5551 if (debug_threads)
5552 {
5553 /* Dump up to four bytes. */
5554 char str[4 * 2 + 1];
5555 char *p = str;
5556 int dump = len < 4 ? len : 4;
5557
5558 for (i = 0; i < dump; i++)
5559 {
5560 sprintf (p, "%02x", myaddr[i]);
5561 p += 2;
5562 }
5563 *p = '\0';
5564
5565 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5566 str, (long) memaddr, pid);
5567 }
5568
5569 /* Fill start and end extra bytes of buffer with existing memory data. */
5570
5571 errno = 0;
5572 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5573 about coercing an 8 byte integer to a 4 byte pointer. */
5574 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5575 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5576 (PTRACE_TYPE_ARG4) 0);
5577 if (errno)
5578 return errno;
5579
5580 if (count > 1)
5581 {
5582 errno = 0;
5583 buffer[count - 1]
5584 = ptrace (PTRACE_PEEKTEXT, pid,
5585 /* Coerce to a uintptr_t first to avoid potential gcc warning
5586 about coercing an 8 byte integer to a 4 byte pointer. */
5587 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5588 * sizeof (PTRACE_XFER_TYPE)),
5589 (PTRACE_TYPE_ARG4) 0);
5590 if (errno)
5591 return errno;
5592 }
5593
5594 /* Copy data to be written over corresponding part of buffer. */
5595
5596 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5597 myaddr, len);
5598
5599 /* Write the entire buffer. */
5600
5601 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5602 {
5603 errno = 0;
5604 ptrace (PTRACE_POKETEXT, pid,
5605 /* Coerce to a uintptr_t first to avoid potential gcc warning
5606 about coercing an 8 byte integer to a 4 byte pointer. */
5607 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5608 (PTRACE_TYPE_ARG4) buffer[i]);
5609 if (errno)
5610 return errno;
5611 }
5612
5613 return 0;
5614 }
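/* Worked example of the rounding above (illustrative numbers): with
   sizeof (PTRACE_XFER_TYPE) == 4, MEMADDR == 0x1003 and LEN == 6,
   ADDR rounds down to 0x1000 and COUNT becomes
   ((0x1009 - 0x1000) + 3) / 4 == 3.  The first and last words are
   pre-read so that the bytes at 0x1000..0x1002 and 0x1009..0x100b,
   which fall outside the requested range, are written back
   unchanged.  */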
5615
5616 static void
5617 linux_look_up_symbols (void)
5618 {
5619 #ifdef USE_THREAD_DB
5620 struct process_info *proc = current_process ();
5621
5622 if (proc->priv->thread_db != NULL)
5623 return;
5624
5625 thread_db_init ();
5626 #endif
5627 }
5628
5629 static void
5630 linux_request_interrupt (void)
5631 {
5632 extern unsigned long signal_pid;
5633
5634   /* Send a SIGINT to the process group.  This acts just as if the
5635      user had typed a ^C on the controlling terminal.  */
5636 kill (-signal_pid, SIGINT);
5637 }
5638
5639 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5640 to debugger memory starting at MYADDR. */
5641
5642 static int
5643 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5644 {
5645 char filename[PATH_MAX];
5646 int fd, n;
5647 int pid = lwpid_of (current_thread);
5648
5649 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5650
5651 fd = open (filename, O_RDONLY);
5652 if (fd < 0)
5653 return -1;
5654
5655 if (offset != (CORE_ADDR) 0
5656 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5657 n = -1;
5658 else
5659 n = read (fd, myaddr, len);
5660
5661 close (fd);
5662
5663 return n;
5664 }
5665
5666 /* These breakpoint- and watchpoint-related wrapper functions simply
5667    pass the call on to the corresponding function registered by the
5668    target, if any.  */
5669
5670 static int
5671 linux_supports_z_point_type (char z_type)
5672 {
5673 return (the_low_target.supports_z_point_type != NULL
5674 && the_low_target.supports_z_point_type (z_type));
5675 }
5676
5677 static int
5678 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5679 int size, struct raw_breakpoint *bp)
5680 {
5681 if (type == raw_bkpt_type_sw)
5682 return insert_memory_breakpoint (bp);
5683 else if (the_low_target.insert_point != NULL)
5684 return the_low_target.insert_point (type, addr, size, bp);
5685 else
5686 /* Unsupported (see target.h). */
5687 return 1;
5688 }
5689
5690 static int
5691 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5692 int size, struct raw_breakpoint *bp)
5693 {
5694 if (type == raw_bkpt_type_sw)
5695 return remove_memory_breakpoint (bp);
5696 else if (the_low_target.remove_point != NULL)
5697 return the_low_target.remove_point (type, addr, size, bp);
5698 else
5699 /* Unsupported (see target.h). */
5700 return 1;
5701 }
5702
5703 /* Implement the to_stopped_by_sw_breakpoint target_ops
5704 method. */
5705
5706 static int
5707 linux_stopped_by_sw_breakpoint (void)
5708 {
5709 struct lwp_info *lwp = get_thread_lwp (current_thread);
5710
5711 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5712 }
5713
5714 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5715 method. */
5716
5717 static int
5718 linux_supports_stopped_by_sw_breakpoint (void)
5719 {
5720 return USE_SIGTRAP_SIGINFO;
5721 }
5722
5723 /* Implement the to_stopped_by_hw_breakpoint target_ops
5724 method. */
5725
5726 static int
5727 linux_stopped_by_hw_breakpoint (void)
5728 {
5729 struct lwp_info *lwp = get_thread_lwp (current_thread);
5730
5731 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5732 }
5733
5734 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5735 method. */
5736
5737 static int
5738 linux_supports_stopped_by_hw_breakpoint (void)
5739 {
5740 return USE_SIGTRAP_SIGINFO;
5741 }
5742
5743 /* Implement the supports_hardware_single_step target_ops method. */
5744
5745 static int
5746 linux_supports_hardware_single_step (void)
5747 {
5748 return can_hardware_single_step ();
5749 }
5750
5751 static int
5752 linux_supports_software_single_step (void)
5753 {
5754 return can_software_single_step ();
5755 }
5756
5757 static int
5758 linux_stopped_by_watchpoint (void)
5759 {
5760 struct lwp_info *lwp = get_thread_lwp (current_thread);
5761
5762 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5763 }
5764
5765 static CORE_ADDR
5766 linux_stopped_data_address (void)
5767 {
5768 struct lwp_info *lwp = get_thread_lwp (current_thread);
5769
5770 return lwp->stopped_data_address;
5771 }
5772
5773 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5774 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5775 && defined(PT_TEXT_END_ADDR)
5776
5777 /* This is only used for targets that define PT_TEXT_ADDR,
5778    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5779    target presumably has a different way of acquiring this information,
5780    such as loadmaps.  */
5781
5782 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5783 to tell gdb about. */
5784
5785 static int
5786 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5787 {
5788 unsigned long text, text_end, data;
5789 int pid = lwpid_of (current_thread);
5790
5791 errno = 0;
5792
5793 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5794 (PTRACE_TYPE_ARG4) 0);
5795 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5796 (PTRACE_TYPE_ARG4) 0);
5797 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5798 (PTRACE_TYPE_ARG4) 0);
5799
5800 if (errno == 0)
5801 {
5802 /* Both text and data offsets produced at compile-time (and so
5803 used by gdb) are relative to the beginning of the program,
5804 with the data segment immediately following the text segment.
5805 However, the actual runtime layout in memory may put the data
5806 somewhere else, so when we send gdb a data base-address, we
5807 use the real data base address and subtract the compile-time
5808 data base-address from it (which is just the length of the
5809 text segment). BSS immediately follows data in both
5810 cases. */
5811 *text_p = text;
5812 *data_p = data - (text_end - text);
5813
5814 return 1;
5815 }
5816 return 0;
5817 }
5818 #endif
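/* Worked example of the offset computation above (illustrative
   numbers): if the text segment runs from 0x10000000 to 0x10008000
   and the data segment was placed at 0x20000000, then *data_p is
   0x20000000 - 0x8000 == 0x1fff8000, so adding gdb's compile-time
   data offsets (which start at the text length, 0x8000) lands on the
   real runtime data addresses.  */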
5819
5820 static int
5821 linux_qxfer_osdata (const char *annex,
5822 unsigned char *readbuf, unsigned const char *writebuf,
5823 CORE_ADDR offset, int len)
5824 {
5825 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5826 }
5827
5828 /* Convert a native/host siginfo object into/from the siginfo in the
5829    layout of the inferior's architecture.  */
5830
5831 static void
5832 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5833 {
5834 int done = 0;
5835
5836 if (the_low_target.siginfo_fixup != NULL)
5837 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5838
5839 /* If there was no callback, or the callback didn't do anything,
5840 then just do a straight memcpy. */
5841 if (!done)
5842 {
5843 if (direction == 1)
5844 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5845 else
5846 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5847 }
5848 }
5849
5850 static int
5851 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5852 unsigned const char *writebuf, CORE_ADDR offset, int len)
5853 {
5854 int pid;
5855 siginfo_t siginfo;
5856 char inf_siginfo[sizeof (siginfo_t)];
5857
5858 if (current_thread == NULL)
5859 return -1;
5860
5861 pid = lwpid_of (current_thread);
5862
5863 if (debug_threads)
5864 debug_printf ("%s siginfo for lwp %d.\n",
5865 readbuf != NULL ? "Reading" : "Writing",
5866 pid);
5867
5868 if (offset >= sizeof (siginfo))
5869 return -1;
5870
5871 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5872 return -1;
5873
5874 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5875 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5876 inferior with a 64-bit GDBSERVER should look the same as debugging it
5877 with a 32-bit GDBSERVER, we need to convert it. */
5878 siginfo_fixup (&siginfo, inf_siginfo, 0);
5879
5880 if (offset + len > sizeof (siginfo))
5881 len = sizeof (siginfo) - offset;
5882
5883 if (readbuf != NULL)
5884 memcpy (readbuf, inf_siginfo + offset, len);
5885 else
5886 {
5887 memcpy (inf_siginfo + offset, writebuf, len);
5888
5889 /* Convert back to ptrace layout before flushing it out. */
5890 siginfo_fixup (&siginfo, inf_siginfo, 1);
5891
5892 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5893 return -1;
5894 }
5895
5896 return len;
5897 }
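/* Illustrative sketch of what an arch-specific siginfo_fixup callback
   does (the 32-bit layout below is hypothetical; the real converters
   live in the arch files, e.g. linux-x86-low.c): copy field by field
   between the native 64-bit siginfo_t and a packed 32-bit inferior
   view, narrowing pointers.  Guarded out as documentation only.  */
#if 0
struct example_siginfo32
{
  int si_signo;
  int si_errno;
  int si_code;
  unsigned int si_addr32;	/* 32-bit view of si_addr.  */
};

static void
example_siginfo_to_inf (const siginfo_t *native,
                        struct example_siginfo32 *inf)
{
  inf->si_signo = native->si_signo;
  inf->si_errno = native->si_errno;
  inf->si_code = native->si_code;
  /* Narrow the fault address for the 32-bit inferior's layout.  */
  inf->si_addr32 = (unsigned int) (uintptr_t) native->si_addr;
}
#endif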
5898
5899 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5900    it notifies us that a child has changed state; and it acts as the
5901    handler for the sigsuspend in my_waitpid.  */
5902
5903 static void
5904 sigchld_handler (int signo)
5905 {
5906 int old_errno = errno;
5907
5908 if (debug_threads)
5909 {
5910 do
5911 {
5912 /* fprintf is not async-signal-safe, so call write
5913 directly. */
5914 if (write (2, "sigchld_handler\n",
5915 sizeof ("sigchld_handler\n") - 1) < 0)
5916 break; /* just ignore */
5917 } while (0);
5918 }
5919
5920 if (target_is_async_p ())
5921 async_file_mark (); /* trigger a linux_wait */
5922
5923 errno = old_errno;
5924 }
5925
5926 static int
5927 linux_supports_non_stop (void)
5928 {
5929 return 1;
5930 }
5931
5932 static int
5933 linux_async (int enable)
5934 {
5935 int previous = target_is_async_p ();
5936
5937 if (debug_threads)
5938 debug_printf ("linux_async (%d), previous=%d\n",
5939 enable, previous);
5940
5941 if (previous != enable)
5942 {
5943 sigset_t mask;
5944 sigemptyset (&mask);
5945 sigaddset (&mask, SIGCHLD);
5946
5947 sigprocmask (SIG_BLOCK, &mask, NULL);
5948
5949 if (enable)
5950 {
5951 if (pipe (linux_event_pipe) == -1)
5952 {
5953 linux_event_pipe[0] = -1;
5954 linux_event_pipe[1] = -1;
5955 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5956
5957 warning ("creating event pipe failed.");
5958 return previous;
5959 }
5960
5961 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5962 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5963
5964 /* Register the event loop handler. */
5965 add_file_handler (linux_event_pipe[0],
5966 handle_target_event, NULL);
5967
5968 /* Always trigger a linux_wait. */
5969 async_file_mark ();
5970 }
5971 else
5972 {
5973 delete_file_handler (linux_event_pipe[0]);
5974
5975 close (linux_event_pipe[0]);
5976 close (linux_event_pipe[1]);
5977 linux_event_pipe[0] = -1;
5978 linux_event_pipe[1] = -1;
5979 }
5980
5981 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5982 }
5983
5984 return previous;
5985 }
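/* The pipe set up above implements the classic self-pipe trick:
   async_file_mark (defined elsewhere in gdbserver) writes a byte to
   linux_event_pipe[1], which is async-signal-safe, and the event
   loop's wait on linux_event_pipe[0] then wakes up and runs
   handle_target_event.  A minimal sketch of the marking side, with a
   hypothetical name, guarded out as documentation:  */
#if 0
static void
example_async_mark (int write_fd)
{
  /* O_NONBLOCK means a full pipe just drops the byte, which is fine:
     a wakeup is already pending in that case.  */
  if (write (write_fd, "+", 1) < 0)
    ;				/* Ignore; see above.  */
}
#endif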
5986
5987 static int
5988 linux_start_non_stop (int nonstop)
5989 {
5990 /* Register or unregister from event-loop accordingly. */
5991 linux_async (nonstop);
5992
5993 if (target_is_async_p () != (nonstop != 0))
5994 return -1;
5995
5996 return 0;
5997 }
5998
5999 static int
6000 linux_supports_multi_process (void)
6001 {
6002 return 1;
6003 }
6004
6005 /* Check if fork events are supported. */
6006
6007 static int
6008 linux_supports_fork_events (void)
6009 {
6010 return linux_supports_tracefork ();
6011 }
6012
6013 /* Check if vfork events are supported. */
6014
6015 static int
6016 linux_supports_vfork_events (void)
6017 {
6018 return linux_supports_tracefork ();
6019 }
6020
6021 /* Check if exec events are supported. */
6022
6023 static int
6024 linux_supports_exec_events (void)
6025 {
6026 return linux_supports_traceexec ();
6027 }
6028
6029 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6030 options for the specified lwp. */
6031
6032 static int
6033 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6034 void *args)
6035 {
6036 struct thread_info *thread = (struct thread_info *) entry;
6037 struct lwp_info *lwp = get_thread_lwp (thread);
6038
6039 if (!lwp->stopped)
6040 {
6041 /* Stop the lwp so we can modify its ptrace options. */
6042 lwp->must_set_ptrace_flags = 1;
6043 linux_stop_lwp (lwp);
6044 }
6045 else
6046 {
6047 /* Already stopped; go ahead and set the ptrace options. */
6048 struct process_info *proc = find_process_pid (pid_of (thread));
6049 int options = linux_low_ptrace_options (proc->attached);
6050
6051 linux_enable_event_reporting (lwpid_of (thread), options);
6052 lwp->must_set_ptrace_flags = 0;
6053 }
6054
6055 return 0;
6056 }
6057
6058 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6059 ptrace flags for all inferiors. This is in case the new GDB connection
6060 doesn't support the same set of events that the previous one did. */
6061
6062 static void
6063 linux_handle_new_gdb_connection (void)
6064 {
6065 pid_t pid;
6066
6067 /* Request that all the lwps reset their ptrace options. */
6068   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6069 }
6070
6071 static int
6072 linux_supports_disable_randomization (void)
6073 {
6074 #ifdef HAVE_PERSONALITY
6075 return 1;
6076 #else
6077 return 0;
6078 #endif
6079 }
6080
6081 static int
6082 linux_supports_agent (void)
6083 {
6084 return 1;
6085 }
6086
6087 static int
6088 linux_supports_range_stepping (void)
6089 {
6090   if (the_low_target.supports_range_stepping == NULL)
6091 return 0;
6092
6093 return (*the_low_target.supports_range_stepping) ();
6094 }
6095
6096 /* Enumerate spufs IDs for process PID. */
6097 static int
6098 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6099 {
6100 int pos = 0;
6101 int written = 0;
6102 char path[128];
6103 DIR *dir;
6104 struct dirent *entry;
6105
6106 sprintf (path, "/proc/%ld/fd", pid);
6107 dir = opendir (path);
6108 if (!dir)
6109 return -1;
6110
6111 rewinddir (dir);
6112 while ((entry = readdir (dir)) != NULL)
6113 {
6114 struct stat st;
6115 struct statfs stfs;
6116 int fd;
6117
6118 fd = atoi (entry->d_name);
6119 if (!fd)
6120 continue;
6121
6122 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6123 if (stat (path, &st) != 0)
6124 continue;
6125 if (!S_ISDIR (st.st_mode))
6126 continue;
6127
6128 if (statfs (path, &stfs) != 0)
6129 continue;
6130 if (stfs.f_type != SPUFS_MAGIC)
6131 continue;
6132
6133 if (pos >= offset && pos + 4 <= offset + len)
6134 {
6135 *(unsigned int *)(buf + pos - offset) = fd;
6136 written += 4;
6137 }
6138 pos += 4;
6139 }
6140
6141 closedir (dir);
6142 return written;
6143 }
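/* A minimal sketch of the filesystem-magic test used above: statfs
   reports the filesystem type of a path, and comparing f_type against
   SPUFS_MAGIC identifies spufs contexts among the entries of
   /proc/PID/fd.  Guarded out as documentation only.  */
#if 0
static int
example_is_spufs (const char *path)
{
  struct statfs stfs;

  return statfs (path, &stfs) == 0 && stfs.f_type == SPUFS_MAGIC;
}
#endif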
6144
6145 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6146 object type, using the /proc file system. */
6147 static int
6148 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6149 unsigned const char *writebuf,
6150 CORE_ADDR offset, int len)
6151 {
6152 long pid = lwpid_of (current_thread);
6153 char buf[128];
6154 int fd = 0;
6155 int ret = 0;
6156
6157 if (!writebuf && !readbuf)
6158 return -1;
6159
6160 if (!*annex)
6161 {
6162 if (!readbuf)
6163 return -1;
6164 else
6165 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6166 }
6167
6168 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6169   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6170 if (fd <= 0)
6171 return -1;
6172
6173 if (offset != 0
6174 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6175 {
6176 close (fd);
6177 return 0;
6178 }
6179
6180 if (writebuf)
6181 ret = write (fd, writebuf, (size_t) len);
6182 else
6183 ret = read (fd, readbuf, (size_t) len);
6184
6185 close (fd);
6186 return ret;
6187 }
6188
6189 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6190 struct target_loadseg
6191 {
6192 /* Core address to which the segment is mapped. */
6193 Elf32_Addr addr;
6194 /* VMA recorded in the program header. */
6195 Elf32_Addr p_vaddr;
6196 /* Size of this segment in memory. */
6197 Elf32_Word p_memsz;
6198 };
6199
6200 # if defined PT_GETDSBT
6201 struct target_loadmap
6202 {
6203 /* Protocol version number, must be zero. */
6204 Elf32_Word version;
6205 /* Pointer to the DSBT table, its size, and the DSBT index. */
6206 unsigned *dsbt_table;
6207 unsigned dsbt_size, dsbt_index;
6208 /* Number of segments in this map. */
6209 Elf32_Word nsegs;
6210 /* The actual memory map. */
6211 struct target_loadseg segs[/*nsegs*/];
6212 };
6213 # define LINUX_LOADMAP PT_GETDSBT
6214 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6215 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6216 # else
6217 struct target_loadmap
6218 {
6219 /* Protocol version number, must be zero. */
6220 Elf32_Half version;
6221 /* Number of segments in this map. */
6222 Elf32_Half nsegs;
6223 /* The actual memory map. */
6224 struct target_loadseg segs[/*nsegs*/];
6225 };
6226 # define LINUX_LOADMAP PTRACE_GETFDPIC
6227 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6228 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6229 # endif
6230
6231 static int
6232 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6233 unsigned char *myaddr, unsigned int len)
6234 {
6235 int pid = lwpid_of (current_thread);
6236 int addr = -1;
6237 struct target_loadmap *data = NULL;
6238 unsigned int actual_length, copy_length;
6239
6240 if (strcmp (annex, "exec") == 0)
6241 addr = (int) LINUX_LOADMAP_EXEC;
6242 else if (strcmp (annex, "interp") == 0)
6243 addr = (int) LINUX_LOADMAP_INTERP;
6244 else
6245 return -1;
6246
6247 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6248 return -1;
6249
6250 if (data == NULL)
6251 return -1;
6252
6253 actual_length = sizeof (struct target_loadmap)
6254 + sizeof (struct target_loadseg) * data->nsegs;
6255
6256 if (offset < 0 || offset > actual_length)
6257 return -1;
6258
6259 copy_length = actual_length - offset < len ? actual_length - offset : len;
6260 memcpy (myaddr, (char *) data + offset, copy_length);
6261 return copy_length;
6262 }
6263 #else
6264 # define linux_read_loadmap NULL
6265 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6266
6267 static void
6268 linux_process_qsupported (char **features, int count)
6269 {
6270 if (the_low_target.process_qsupported != NULL)
6271 the_low_target.process_qsupported (features, count);
6272 }
6273
6274 static int
6275 linux_supports_tracepoints (void)
6276 {
6277   if (the_low_target.supports_tracepoints == NULL)
6278 return 0;
6279
6280 return (*the_low_target.supports_tracepoints) ();
6281 }
6282
6283 static CORE_ADDR
6284 linux_read_pc (struct regcache *regcache)
6285 {
6286 if (the_low_target.get_pc == NULL)
6287 return 0;
6288
6289 return (*the_low_target.get_pc) (regcache);
6290 }
6291
6292 static void
6293 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6294 {
6295 gdb_assert (the_low_target.set_pc != NULL);
6296
6297 (*the_low_target.set_pc) (regcache, pc);
6298 }
6299
6300 static int
6301 linux_thread_stopped (struct thread_info *thread)
6302 {
6303 return get_thread_lwp (thread)->stopped;
6304 }
6305
6306 /* This exposes stop-all-threads functionality to other modules. */
6307
6308 static void
6309 linux_pause_all (int freeze)
6310 {
6311 stop_all_lwps (freeze, NULL);
6312 }
6313
6314 /* This exposes unstop-all-threads functionality to other gdbserver
6315 modules. */
6316
6317 static void
6318 linux_unpause_all (int unfreeze)
6319 {
6320 unstop_all_lwps (unfreeze, NULL);
6321 }
6322
6323 static int
6324 linux_prepare_to_access_memory (void)
6325 {
6326 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6327 running LWP. */
6328 if (non_stop)
6329 linux_pause_all (1);
6330 return 0;
6331 }
6332
6333 static void
6334 linux_done_accessing_memory (void)
6335 {
6336 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6337 running LWP. */
6338 if (non_stop)
6339 linux_unpause_all (1);
6340 }
6341
6342 static int
6343 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6344 CORE_ADDR collector,
6345 CORE_ADDR lockaddr,
6346 ULONGEST orig_size,
6347 CORE_ADDR *jump_entry,
6348 CORE_ADDR *trampoline,
6349 ULONGEST *trampoline_size,
6350 unsigned char *jjump_pad_insn,
6351 ULONGEST *jjump_pad_insn_size,
6352 CORE_ADDR *adjusted_insn_addr,
6353 CORE_ADDR *adjusted_insn_addr_end,
6354 char *err)
6355 {
6356 return (*the_low_target.install_fast_tracepoint_jump_pad)
6357 (tpoint, tpaddr, collector, lockaddr, orig_size,
6358 jump_entry, trampoline, trampoline_size,
6359 jjump_pad_insn, jjump_pad_insn_size,
6360 adjusted_insn_addr, adjusted_insn_addr_end,
6361 err);
6362 }
6363
6364 static struct emit_ops *
6365 linux_emit_ops (void)
6366 {
6367 if (the_low_target.emit_ops != NULL)
6368 return (*the_low_target.emit_ops) ();
6369 else
6370 return NULL;
6371 }
6372
6373 static int
6374 linux_get_min_fast_tracepoint_insn_len (void)
6375 {
6376 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6377 }
6378
6379 /* Extract &phdr and num_phdr from the inferior's auxv.  Return 0 on success.  */
6380
6381 static int
6382 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6383 CORE_ADDR *phdr_memaddr, int *num_phdr)
6384 {
6385 char filename[PATH_MAX];
6386 int fd;
6387 const int auxv_size = is_elf64
6388 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6389 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6390
6391 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6392
6393 fd = open (filename, O_RDONLY);
6394 if (fd < 0)
6395 return 1;
6396
6397 *phdr_memaddr = 0;
6398 *num_phdr = 0;
6399 while (read (fd, buf, auxv_size) == auxv_size
6400 && (*phdr_memaddr == 0 || *num_phdr == 0))
6401 {
6402 if (is_elf64)
6403 {
6404 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6405
6406 switch (aux->a_type)
6407 {
6408 case AT_PHDR:
6409 *phdr_memaddr = aux->a_un.a_val;
6410 break;
6411 case AT_PHNUM:
6412 *num_phdr = aux->a_un.a_val;
6413 break;
6414 }
6415 }
6416 else
6417 {
6418 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6419
6420 switch (aux->a_type)
6421 {
6422 case AT_PHDR:
6423 *phdr_memaddr = aux->a_un.a_val;
6424 break;
6425 case AT_PHNUM:
6426 *num_phdr = aux->a_un.a_val;
6427 break;
6428 }
6429 }
6430 }
6431
6432 close (fd);
6433
6434 if (*phdr_memaddr == 0 || *num_phdr == 0)
6435 {
6436 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6437 "phdr_memaddr = %ld, phdr_num = %d",
6438 (long) *phdr_memaddr, *num_phdr);
6439 return 2;
6440 }
6441
6442 return 0;
6443 }
6444
6445 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6446
6447 static CORE_ADDR
6448 get_dynamic (const int pid, const int is_elf64)
6449 {
6450 CORE_ADDR phdr_memaddr, relocation;
6451 int num_phdr, i;
6452 unsigned char *phdr_buf;
6453 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6454
6455 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6456 return 0;
6457
6458 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6459 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6460
6461 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6462 return 0;
6463
6464 /* Compute relocation: it is expected to be 0 for "regular" executables,
6465 non-zero for PIE ones. */
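  /* Worked example (illustrative numbers): if this PIE's PT_PHDR
     records p_vaddr == 0x40 but AT_PHDR reported the headers at
     0x555555554040, the relocation computed below is 0x555555554000,
     the load bias to add to every p_vaddr.  */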
6466 relocation = -1;
6467 for (i = 0; relocation == -1 && i < num_phdr; i++)
6468 if (is_elf64)
6469 {
6470 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6471
6472 if (p->p_type == PT_PHDR)
6473 relocation = phdr_memaddr - p->p_vaddr;
6474 }
6475 else
6476 {
6477 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6478
6479 if (p->p_type == PT_PHDR)
6480 relocation = phdr_memaddr - p->p_vaddr;
6481 }
6482
6483 if (relocation == -1)
6484 {
6485       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6486          any real-world executable, including a PIE one, always has PT_PHDR
6487          present.  PT_PHDR is absent from some shared libraries and from fpc
6488          (Free Pascal 2.4) binaries, but neither of those needs or provides
6489          DT_DEBUG anyway (fpc binaries are statically linked).
6490 
6491          Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6492 
6493          GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6494
6495 return 0;
6496 }
6497
6498 for (i = 0; i < num_phdr; i++)
6499 {
6500 if (is_elf64)
6501 {
6502 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6503
6504 if (p->p_type == PT_DYNAMIC)
6505 return p->p_vaddr + relocation;
6506 }
6507 else
6508 {
6509 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6510
6511 if (p->p_type == PT_DYNAMIC)
6512 return p->p_vaddr + relocation;
6513 }
6514 }
6515
6516 return 0;
6517 }
6518
6519 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6520 can be 0 if the inferior does not yet have the library list initialized.
6521 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6522 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6523
6524 static CORE_ADDR
6525 get_r_debug (const int pid, const int is_elf64)
6526 {
6527 CORE_ADDR dynamic_memaddr;
6528 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6529 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6530 CORE_ADDR map = -1;
6531
6532 dynamic_memaddr = get_dynamic (pid, is_elf64);
6533 if (dynamic_memaddr == 0)
6534 return map;
6535
6536 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6537 {
6538 if (is_elf64)
6539 {
6540 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6541 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6542 union
6543 {
6544 Elf64_Xword map;
6545 unsigned char buf[sizeof (Elf64_Xword)];
6546 }
6547 rld_map;
6548 #endif
6549 #ifdef DT_MIPS_RLD_MAP
6550 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6551 {
6552 if (linux_read_memory (dyn->d_un.d_val,
6553 rld_map.buf, sizeof (rld_map.buf)) == 0)
6554 return rld_map.map;
6555 else
6556 break;
6557 }
6558 #endif /* DT_MIPS_RLD_MAP */
6559 #ifdef DT_MIPS_RLD_MAP_REL
6560 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6561 {
6562 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6563 rld_map.buf, sizeof (rld_map.buf)) == 0)
6564 return rld_map.map;
6565 else
6566 break;
6567 }
6568 #endif /* DT_MIPS_RLD_MAP_REL */
6569
6570 if (dyn->d_tag == DT_DEBUG && map == -1)
6571 map = dyn->d_un.d_val;
6572
6573 if (dyn->d_tag == DT_NULL)
6574 break;
6575 }
6576 else
6577 {
6578 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6579 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6580 union
6581 {
6582 Elf32_Word map;
6583 unsigned char buf[sizeof (Elf32_Word)];
6584 }
6585 rld_map;
6586 #endif
6587 #ifdef DT_MIPS_RLD_MAP
6588 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6589 {
6590 if (linux_read_memory (dyn->d_un.d_val,
6591 rld_map.buf, sizeof (rld_map.buf)) == 0)
6592 return rld_map.map;
6593 else
6594 break;
6595 }
6596 #endif /* DT_MIPS_RLD_MAP */
6597 #ifdef DT_MIPS_RLD_MAP_REL
6598 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6599 {
6600 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6601 rld_map.buf, sizeof (rld_map.buf)) == 0)
6602 return rld_map.map;
6603 else
6604 break;
6605 }
6606 #endif /* DT_MIPS_RLD_MAP_REL */
6607
6608 if (dyn->d_tag == DT_DEBUG && map == -1)
6609 map = dyn->d_un.d_val;
6610
6611 if (dyn->d_tag == DT_NULL)
6612 break;
6613 }
6614
6615 dynamic_memaddr += dyn_size;
6616 }
6617
6618 return map;
6619 }
6620
6621 /* Read one pointer from MEMADDR in the inferior. */
6622
6623 static int
6624 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6625 {
6626 int ret;
6627
6628 /* Go through a union so this works on either big or little endian
6629 hosts, when the inferior's pointer size is smaller than the size
6630      of CORE_ADDR.  It is assumed the inferior's endianness is the
6631      same as the superior's.  */
6632 union
6633 {
6634 CORE_ADDR core_addr;
6635 unsigned int ui;
6636 unsigned char uc;
6637 } addr;
6638
6639 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6640 if (ret == 0)
6641 {
6642 if (ptr_size == sizeof (CORE_ADDR))
6643 *ptr = addr.core_addr;
6644 else if (ptr_size == sizeof (unsigned int))
6645 *ptr = addr.ui;
6646 else
6647 gdb_assert_not_reached ("unhandled pointer size");
6648 }
6649 return ret;
6650 }
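/* Concrete case for the union above: on a 64-bit big-endian host
   reading a 4-byte inferior pointer, linux_read_memory fills the
   first four bytes of the union.  Reading them back through addr.ui
   and widening yields the right value, whereas reading straight into
   an 8-byte CORE_ADDR would have left them in its high-order half.  */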
6651
6652 struct link_map_offsets
6653 {
6654   /* Offset of r_debug.r_version.  */
6655 int r_version_offset;
6656
6657   /* Offset of r_debug.r_map.  */
6658 int r_map_offset;
6659
6660 /* Offset to l_addr field in struct link_map. */
6661 int l_addr_offset;
6662
6663 /* Offset to l_name field in struct link_map. */
6664 int l_name_offset;
6665
6666 /* Offset to l_ld field in struct link_map. */
6667 int l_ld_offset;
6668
6669 /* Offset to l_next field in struct link_map. */
6670 int l_next_offset;
6671
6672 /* Offset to l_prev field in struct link_map. */
6673 int l_prev_offset;
6674 };
6675
6676 /* Construct qXfer:libraries-svr4:read reply. */
6677
6678 static int
6679 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6680 unsigned const char *writebuf,
6681 CORE_ADDR offset, int len)
6682 {
6683 char *document;
6684 unsigned document_len;
6685 struct process_info_private *const priv = current_process ()->priv;
6686 char filename[PATH_MAX];
6687 int pid, is_elf64;
6688
6689 static const struct link_map_offsets lmo_32bit_offsets =
6690 {
6691 0, /* r_version offset. */
6692 4, /* r_debug.r_map offset. */
6693 0, /* l_addr offset in link_map. */
6694 4, /* l_name offset in link_map. */
6695 8, /* l_ld offset in link_map. */
6696 12, /* l_next offset in link_map. */
6697 16 /* l_prev offset in link_map. */
6698 };
6699
6700 static const struct link_map_offsets lmo_64bit_offsets =
6701 {
6702 0, /* r_version offset. */
6703 8, /* r_debug.r_map offset. */
6704 0, /* l_addr offset in link_map. */
6705 8, /* l_name offset in link_map. */
6706 16, /* l_ld offset in link_map. */
6707 24, /* l_next offset in link_map. */
6708 32 /* l_prev offset in link_map. */
6709 };
6710 const struct link_map_offsets *lmo;
6711 unsigned int machine;
6712 int ptr_size;
6713 CORE_ADDR lm_addr = 0, lm_prev = 0;
6714 int allocated = 1024;
6715 char *p;
6716 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6717 int header_done = 0;
6718
6719 if (writebuf != NULL)
6720 return -2;
6721 if (readbuf == NULL)
6722 return -1;
6723
6724 pid = lwpid_of (current_thread);
6725 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6726 is_elf64 = elf_64_file_p (filename, &machine);
6727 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6728 ptr_size = is_elf64 ? 8 : 4;
6729
6730 while (annex[0] != '\0')
6731 {
6732 const char *sep;
6733 CORE_ADDR *addrp;
6734 int len;
6735
6736 sep = strchr (annex, '=');
6737 if (sep == NULL)
6738 break;
6739
6740 len = sep - annex;
6741 if (len == 5 && startswith (annex, "start"))
6742 addrp = &lm_addr;
6743 else if (len == 4 && startswith (annex, "prev"))
6744 addrp = &lm_prev;
6745 else
6746 {
6747 annex = strchr (sep, ';');
6748 if (annex == NULL)
6749 break;
6750 annex++;
6751 continue;
6752 }
6753
6754 annex = decode_address_to_semicolon (addrp, sep + 1);
6755 }
6756
6757 if (lm_addr == 0)
6758 {
6759 int r_version = 0;
6760
6761 if (priv->r_debug == 0)
6762 priv->r_debug = get_r_debug (pid, is_elf64);
6763
6764       /* We failed to find DT_DEBUG.  This situation will not change
6765          for this inferior, so do not retry.  Report it to GDB as
6766          E01; see solib-svr4.c on the GDB side for the reasons.  */
6767 if (priv->r_debug == (CORE_ADDR) -1)
6768 return -1;
6769
6770 if (priv->r_debug != 0)
6771 {
6772 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6773 (unsigned char *) &r_version,
6774 sizeof (r_version)) != 0
6775 || r_version != 1)
6776 {
6777 warning ("unexpected r_debug version %d", r_version);
6778 }
6779 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6780 &lm_addr, ptr_size) != 0)
6781 {
6782 warning ("unable to read r_map from 0x%lx",
6783 (long) priv->r_debug + lmo->r_map_offset);
6784 }
6785 }
6786 }
6787
6788 document = (char *) xmalloc (allocated);
6789 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6790 p = document + strlen (document);
6791
6792 while (lm_addr
6793 && read_one_ptr (lm_addr + lmo->l_name_offset,
6794 &l_name, ptr_size) == 0
6795 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6796 &l_addr, ptr_size) == 0
6797 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6798 &l_ld, ptr_size) == 0
6799 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6800 &l_prev, ptr_size) == 0
6801 && read_one_ptr (lm_addr + lmo->l_next_offset,
6802 &l_next, ptr_size) == 0)
6803 {
6804 unsigned char libname[PATH_MAX];
6805
6806 if (lm_prev != l_prev)
6807 {
6808 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6809 (long) lm_prev, (long) l_prev);
6810 break;
6811 }
6812
6813       /* Ignore the first entry even if it has a valid name, as the first
6814          entry corresponds to the main executable.  The first entry should
6815          not be skipped if the dynamic loader was loaded late by a static
6816          executable (see the solib-svr4.c parameter ignore_first), but in
6817          that case the main executable has no PT_DYNAMIC, and this function
6818          has already returned above due to a failed get_r_debug.  */
6819 if (lm_prev == 0)
6820 {
6821 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6822 p = p + strlen (p);
6823 }
6824 else
6825 {
6826 /* Not checking for error because reading may stop before
6827 we've got PATH_MAX worth of characters. */
6828 libname[0] = '\0';
6829 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6830 libname[sizeof (libname) - 1] = '\0';
6831 if (libname[0] != '\0')
6832 {
6833 /* 6x the size for xml_escape_text below. */
6834 size_t len = 6 * strlen ((char *) libname);
6835 char *name;
6836
6837 if (!header_done)
6838 {
6839 /* Terminate `<library-list-svr4'. */
6840 *p++ = '>';
6841 header_done = 1;
6842 }
6843
6844 while (allocated < p - document + len + 200)
6845 {
6846 /* Expand to guarantee sufficient storage. */
6847 uintptr_t document_len = p - document;
6848
6849 document = (char *) xrealloc (document, 2 * allocated);
6850 allocated *= 2;
6851 p = document + document_len;
6852 }
6853
6854 name = xml_escape_text ((char *) libname);
6855 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6856 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6857 name, (unsigned long) lm_addr,
6858 (unsigned long) l_addr, (unsigned long) l_ld);
6859 free (name);
6860 }
6861 }
6862
6863 lm_prev = lm_addr;
6864 lm_addr = l_next;
6865 }
6866
6867 if (!header_done)
6868 {
6869 /* Empty list; terminate `<library-list-svr4'. */
6870 strcpy (p, "/>");
6871 }
6872 else
6873 strcpy (p, "</library-list-svr4>");
6874
6875 document_len = strlen (document);
6876 if (offset < document_len)
6877 document_len -= offset;
6878 else
6879 document_len = 0;
6880 if (len > document_len)
6881 len = document_len;
6882
6883 memcpy (readbuf, document + offset, len);
6884 xfree (document);
6885
6886 return len;
6887 }
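/* For reference, a reply assembled above looks like the following
   (addresses are illustrative; the real document has no extra
   whitespace between elements):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fc3700"
            l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>
   </library-list-svr4>  */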
6888
6889 #ifdef HAVE_LINUX_BTRACE
6890
6891 /* See to_disable_btrace target method. */
6892
6893 static int
6894 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6895 {
6896 enum btrace_error err;
6897
6898 err = linux_disable_btrace (tinfo);
6899 return (err == BTRACE_ERR_NONE ? 0 : -1);
6900 }
6901
6902 /* Encode an Intel(R) Processor Trace configuration. */
6903
6904 static void
6905 linux_low_encode_pt_config (struct buffer *buffer,
6906 const struct btrace_data_pt_config *config)
6907 {
6908 buffer_grow_str (buffer, "<pt-config>\n");
6909
6910 switch (config->cpu.vendor)
6911 {
6912 case CV_INTEL:
6913 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6914 "model=\"%u\" stepping=\"%u\"/>\n",
6915 config->cpu.family, config->cpu.model,
6916 config->cpu.stepping);
6917 break;
6918
6919 default:
6920 break;
6921 }
6922
6923 buffer_grow_str (buffer, "</pt-config>\n");
6924 }
6925
6926 /* Encode a raw buffer. */
6927
6928 static void
6929 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6930 unsigned int size)
6931 {
6932 if (size == 0)
6933 return;
6934
6935 /* We use hex encoding - see common/rsp-low.h. */
6936 buffer_grow_str (buffer, "<raw>\n");
6937
6938 while (size-- > 0)
6939 {
6940 char elem[2];
6941
6942 elem[0] = tohex ((*data >> 4) & 0xf);
6943 elem[1] = tohex (*data++ & 0xf);
6944
6945 buffer_grow (buffer, elem, 2);
6946 }
6947
6948 buffer_grow_str (buffer, "</raw>\n");
6949 }
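/* For example (illustrative), the three raw bytes { 0x02, 0xab, 0x10 }
   are encoded above as the six characters "02ab10".  */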
6950
6951 /* See to_read_btrace target method. */
6952
6953 static int
6954 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6955 enum btrace_read_type type)
6956 {
6957 struct btrace_data btrace;
6958 struct btrace_block *block;
6959 enum btrace_error err;
6960 int i;
6961
6962 btrace_data_init (&btrace);
6963
6964 err = linux_read_btrace (&btrace, tinfo, type);
6965 if (err != BTRACE_ERR_NONE)
6966 {
6967 if (err == BTRACE_ERR_OVERFLOW)
6968 buffer_grow_str0 (buffer, "E.Overflow.");
6969 else
6970 buffer_grow_str0 (buffer, "E.Generic Error.");
6971
6972 goto err;
6973 }
6974
6975 switch (btrace.format)
6976 {
6977 case BTRACE_FORMAT_NONE:
6978 buffer_grow_str0 (buffer, "E.No Trace.");
6979 goto err;
6980
6981 case BTRACE_FORMAT_BTS:
6982 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6983 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6984
6985 for (i = 0;
6986 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6987 i++)
6988 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6989 paddress (block->begin), paddress (block->end));
6990
6991 buffer_grow_str0 (buffer, "</btrace>\n");
6992 break;
6993
6994 case BTRACE_FORMAT_PT:
6995 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6996 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6997 buffer_grow_str (buffer, "<pt>\n");
6998
6999 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7000
7001 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7002 btrace.variant.pt.size);
7003
7004 buffer_grow_str (buffer, "</pt>\n");
7005 buffer_grow_str0 (buffer, "</btrace>\n");
7006 break;
7007
7008 default:
7009 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7010 goto err;
7011 }
7012
7013 btrace_data_fini (&btrace);
7014 return 0;
7015
7016 err:
7017 btrace_data_fini (&btrace);
7018 return -1;
7019 }
7020
7021 /* See to_btrace_conf target method. */
7022
7023 static int
7024 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7025 struct buffer *buffer)
7026 {
7027 const struct btrace_config *conf;
7028
7029 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7030 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7031
7032 conf = linux_btrace_conf (tinfo);
7033 if (conf != NULL)
7034 {
7035 switch (conf->format)
7036 {
7037 case BTRACE_FORMAT_NONE:
7038 break;
7039
7040 case BTRACE_FORMAT_BTS:
7041 buffer_xml_printf (buffer, "<bts");
7042 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7043 buffer_xml_printf (buffer, " />\n");
7044 break;
7045
7046 case BTRACE_FORMAT_PT:
7047 buffer_xml_printf (buffer, "<pt");
7048 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7049 buffer_xml_printf (buffer, "/>\n");
7050 break;
7051 }
7052 }
7053
7054 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7055 return 0;
7056 }
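/* For reference, a configuration document produced above looks like
   the following (size illustrative):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */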
7057 #endif /* HAVE_LINUX_BTRACE */
7058
7059 /* See nat/linux-nat.h. */
7060
7061 ptid_t
7062 current_lwp_ptid (void)
7063 {
7064 return ptid_of (current_thread);
7065 }
7066
7067 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7068
7069 static int
7070 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7071 {
7072 if (the_low_target.breakpoint_kind_from_pc != NULL)
7073 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7074 else
7075 return default_breakpoint_kind_from_pc (pcptr);
7076 }
7077
7078 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7079
7080 static const gdb_byte *
7081 linux_sw_breakpoint_from_kind (int kind, int *size)
7082 {
7083 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7084
7085 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7086 }
7087
7088 /* Implementation of the target_ops method
7089 "breakpoint_kind_from_current_state". */
7090
7091 static int
7092 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7093 {
7094 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7095 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7096 else
7097 return linux_breakpoint_kind_from_pc (pcptr);
7098 }
7099
7100 static struct target_ops linux_target_ops = {
7101 linux_create_inferior,
7102 linux_post_create_inferior,
7103 linux_attach,
7104 linux_kill,
7105 linux_detach,
7106 linux_mourn,
7107 linux_join,
7108 linux_thread_alive,
7109 linux_resume,
7110 linux_wait,
7111 linux_fetch_registers,
7112 linux_store_registers,
7113 linux_prepare_to_access_memory,
7114 linux_done_accessing_memory,
7115 linux_read_memory,
7116 linux_write_memory,
7117 linux_look_up_symbols,
7118 linux_request_interrupt,
7119 linux_read_auxv,
7120 linux_supports_z_point_type,
7121 linux_insert_point,
7122 linux_remove_point,
7123 linux_stopped_by_sw_breakpoint,
7124 linux_supports_stopped_by_sw_breakpoint,
7125 linux_stopped_by_hw_breakpoint,
7126 linux_supports_stopped_by_hw_breakpoint,
7127 linux_supports_hardware_single_step,
7128 linux_stopped_by_watchpoint,
7129 linux_stopped_data_address,
7130 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7131 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7132 && defined(PT_TEXT_END_ADDR)
7133 linux_read_offsets,
7134 #else
7135 NULL,
7136 #endif
7137 #ifdef USE_THREAD_DB
7138 thread_db_get_tls_address,
7139 #else
7140 NULL,
7141 #endif
7142 linux_qxfer_spu,
7143 hostio_last_error_from_errno,
7144 linux_qxfer_osdata,
7145 linux_xfer_siginfo,
7146 linux_supports_non_stop,
7147 linux_async,
7148 linux_start_non_stop,
7149 linux_supports_multi_process,
7150 linux_supports_fork_events,
7151 linux_supports_vfork_events,
7152 linux_supports_exec_events,
7153 linux_handle_new_gdb_connection,
7154 #ifdef USE_THREAD_DB
7155 thread_db_handle_monitor_command,
7156 #else
7157 NULL,
7158 #endif
7159 linux_common_core_of_thread,
7160 linux_read_loadmap,
7161 linux_process_qsupported,
7162 linux_supports_tracepoints,
7163 linux_read_pc,
7164 linux_write_pc,
7165 linux_thread_stopped,
7166 NULL,
7167 linux_pause_all,
7168 linux_unpause_all,
7169 linux_stabilize_threads,
7170 linux_install_fast_tracepoint_jump_pad,
7171 linux_emit_ops,
7172 linux_supports_disable_randomization,
7173 linux_get_min_fast_tracepoint_insn_len,
7174 linux_qxfer_libraries_svr4,
7175 linux_supports_agent,
7176 #ifdef HAVE_LINUX_BTRACE
7177 linux_supports_btrace,
7178 linux_enable_btrace,
7179 linux_low_disable_btrace,
7180 linux_low_read_btrace,
7181 linux_low_btrace_conf,
7182 #else
7183 NULL,
7184 NULL,
7185 NULL,
7186 NULL,
7187 NULL,
7188 #endif
7189 linux_supports_range_stepping,
7190 linux_proc_pid_to_exec_file,
7191 linux_mntns_open_cloexec,
7192 linux_mntns_unlink,
7193 linux_mntns_readlink,
7194 linux_breakpoint_kind_from_pc,
7195 linux_sw_breakpoint_from_kind,
7196 linux_proc_tid_get_name,
7197 linux_breakpoint_kind_from_current_state,
7198 linux_supports_software_single_step
7199 };
7200
7201 #ifdef HAVE_LINUX_REGSETS
7202 void
7203 initialize_regsets_info (struct regsets_info *info)
7204 {
7205 for (info->num_regsets = 0;
7206 info->regsets[info->num_regsets].size >= 0;
7207 info->num_regsets++)
7208 ;
7209 }
7210 #endif
7211
7212 void
7213 initialize_low (void)
7214 {
7215 struct sigaction sigchld_action;
7216
7217 memset (&sigchld_action, 0, sizeof (sigchld_action));
7218 set_target_ops (&linux_target_ops);
7219
7220 linux_ptrace_init_warnings ();
7221
7222 sigchld_action.sa_handler = sigchld_handler;
7223 sigemptyset (&sigchld_action.sa_mask);
7224 sigchld_action.sa_flags = SA_RESTART;
7225 sigaction (SIGCHLD, &sigchld_action, NULL);
7226
7227 initialize_low_arch ();
7228
7229 linux_check_ptrace_features ();
7230 }