1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
  85  /* BFIN has defined these since at least the 2.6.32 kernels.  */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
 111      /* We used to have pointer elements added here.  We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
 126      /* We used to have pointer elements added here.  We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
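     /* Remove the entry for PID from *LISTP, storing its waitpid status
        in *STATUSP.  Return 1 if PID was found and removed, 0 otherwise.  */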
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
256 int *wstat, int options);
257 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
258 static struct lwp_info *add_lwp (ptid_t ptid);
259 static void linux_mourn (struct process_info *process);
260 static int linux_stopped_by_watchpoint (void);
261 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
262 static int lwp_is_marked_dead (struct lwp_info *lwp);
263 static void proceed_all_lwps (void);
264 static int finish_step_over (struct lwp_info *lwp);
265 static int kill_lwp (unsigned long lwpid, int signo);
266 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
267 static void complete_ongoing_step_over (void);
268 static int linux_low_ptrace_options (int attached);
269
270 /* When the event-loop is doing a step-over, this points at the thread
271 being stepped. */
272 ptid_t step_over_bkpt;
273
274 /* True if the low target can hardware single-step. */
275
276 static int
277 can_hardware_single_step (void)
278 {
279 if (the_low_target.supports_hardware_single_step != NULL)
280 return the_low_target.supports_hardware_single_step ();
281 else
282 return 0;
283 }
284
285 /* True if the low target can software single-step. Such targets
286 implement the GET_NEXT_PCS callback. */
287
288 static int
289 can_software_single_step (void)
290 {
291 return (the_low_target.get_next_pcs != NULL);
292 }
293
294 /* True if the low target supports memory breakpoints. If so, we'll
295 have a GET_PC implementation. */
296
297 static int
298 supports_breakpoints (void)
299 {
300 return (the_low_target.get_pc != NULL);
301 }
302
303 /* Returns true if this target can support fast tracepoints. This
304 does not mean that the in-process agent has been loaded in the
305 inferior. */
306
307 static int
308 supports_fast_tracepoints (void)
309 {
310 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
311 }
312
313 /* True if LWP is stopped in its stepping range. */
314
315 static int
316 lwp_in_step_range (struct lwp_info *lwp)
317 {
318 CORE_ADDR pc = lwp->stop_pc;
319
320 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
321 }
322
323 struct pending_signals
324 {
325 int signal;
326 siginfo_t info;
327 struct pending_signals *prev;
328 };
329
 330  /* The read/write ends of the pipe registered as a waitable file in the
331 event loop. */
332 static int linux_event_pipe[2] = { -1, -1 };
333
334 /* True if we're currently in async mode. */
335 #define target_is_async_p() (linux_event_pipe[0] != -1)
336
337 static void send_sigstop (struct lwp_info *lwp);
338 static void wait_for_sigstop (void);
339
 340  /* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, or -1 if not ELF.  */
341
342 static int
343 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
344 {
345 if (header->e_ident[EI_MAG0] == ELFMAG0
346 && header->e_ident[EI_MAG1] == ELFMAG1
347 && header->e_ident[EI_MAG2] == ELFMAG2
348 && header->e_ident[EI_MAG3] == ELFMAG3)
349 {
350 *machine = header->e_machine;
351 return header->e_ident[EI_CLASS] == ELFCLASS64;
352
353 }
354 *machine = EM_NONE;
355 return -1;
356 }
357
 358  /* Return 1 if FILE is a 64-bit ELF file,
359 zero if the file is not a 64-bit ELF file,
360 and -1 if the file is not accessible or doesn't exist. */
361
362 static int
363 elf_64_file_p (const char *file, unsigned int *machine)
364 {
365 Elf64_Ehdr header;
366 int fd;
367
368 fd = open (file, O_RDONLY);
369 if (fd < 0)
370 return -1;
371
372 if (read (fd, &header, sizeof (header)) != sizeof (header))
373 {
374 close (fd);
375 return 0;
376 }
377 close (fd);
378
379 return elf_64_header_p (&header, machine);
380 }
381
 382  /* Accept an integer PID; return true if the executable that PID
 383     is running is a 64-bit ELF file.  */
384
385 int
386 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
387 {
388 char file[PATH_MAX];
389
390 sprintf (file, "/proc/%d/exe", pid);
391 return elf_64_file_p (file, machine);
392 }
393
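     /* Remove LWP's thread from the thread list and free the LWP and its
        architecture-specific data.  */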
394 static void
395 delete_lwp (struct lwp_info *lwp)
396 {
397 struct thread_info *thr = get_lwp_thread (lwp);
398
399 if (debug_threads)
400 debug_printf ("deleting %ld\n", lwpid_of (thr));
401
402 remove_thread (thr);
403 free (lwp->arch_private);
404 free (lwp);
405 }
406
407 /* Add a process to the common process list, and set its private
408 data. */
409
410 static struct process_info *
411 linux_add_process (int pid, int attached)
412 {
413 struct process_info *proc;
414
415 proc = add_process (pid, attached);
416 proc->priv = XCNEW (struct process_info_private);
417
418 if (the_low_target.new_process != NULL)
419 proc->priv->arch_private = the_low_target.new_process ();
420
421 return proc;
422 }
423
424 static CORE_ADDR get_pc (struct lwp_info *lwp);
425
426 /* Call the target arch_setup function on the current thread. */
427
428 static void
429 linux_arch_setup (void)
430 {
431 the_low_target.arch_setup ();
432 }
433
434 /* Call the target arch_setup function on THREAD. */
435
436 static void
437 linux_arch_setup_thread (struct thread_info *thread)
438 {
439 struct thread_info *saved_thread;
440
441 saved_thread = current_thread;
442 current_thread = thread;
443
444 linux_arch_setup ();
445
446 current_thread = saved_thread;
447 }
448
 449  /* Handle a GNU/Linux extended wait response.  If we see a clone,
 450     fork, or vfork event, we need to add the new LWP to our list
 451     (returning 1 when the event is not to be reported to higher
 452     layers, 0 when it is).  If we see an exec event, we will modify
 453     ORIG_EVENT_LWP to point to a new LWP representing the new program.  */
454
455 static int
456 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
457 {
458 struct lwp_info *event_lwp = *orig_event_lwp;
459 int event = linux_ptrace_get_extended_event (wstat);
460 struct thread_info *event_thr = get_lwp_thread (event_lwp);
461 struct lwp_info *new_lwp;
462
463 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
464
465 /* All extended events we currently use are mid-syscall. Only
466 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
467 you have to be using PTRACE_SEIZE to get that. */
468 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
469
470 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
471 || (event == PTRACE_EVENT_CLONE))
472 {
473 ptid_t ptid;
474 unsigned long new_pid;
475 int ret, status;
476
477 /* Get the pid of the new lwp. */
478 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
479 &new_pid);
480
481 /* If we haven't already seen the new PID stop, wait for it now. */
482 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
483 {
484 /* The new child has a pending SIGSTOP. We can't affect it until it
485 hits the SIGSTOP, but we're already attached. */
486
487 ret = my_waitpid (new_pid, &status, __WALL);
488
489 if (ret == -1)
490 perror_with_name ("waiting for new child");
491 else if (ret != new_pid)
492 warning ("wait returned unexpected PID %d", ret);
493 else if (!WIFSTOPPED (status))
494 warning ("wait returned unexpected status 0x%x", status);
495 }
496
497 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
498 {
499 struct process_info *parent_proc;
500 struct process_info *child_proc;
501 struct lwp_info *child_lwp;
502 struct thread_info *child_thr;
503 struct target_desc *tdesc;
504
505 ptid = ptid_build (new_pid, new_pid, 0);
506
507 if (debug_threads)
508 {
509 debug_printf ("HEW: Got fork event from LWP %ld, "
510 "new child is %d\n",
511 ptid_get_lwp (ptid_of (event_thr)),
512 ptid_get_pid (ptid));
513 }
514
515 /* Add the new process to the tables and clone the breakpoint
516 lists of the parent. We need to do this even if the new process
517 will be detached, since we will need the process object and the
518 breakpoints to remove any breakpoints from memory when we
519 detach, and the client side will access registers. */
520 child_proc = linux_add_process (new_pid, 0);
521 gdb_assert (child_proc != NULL);
522 child_lwp = add_lwp (ptid);
523 gdb_assert (child_lwp != NULL);
524 child_lwp->stopped = 1;
525 child_lwp->must_set_ptrace_flags = 1;
526 child_lwp->status_pending_p = 0;
527 child_thr = get_lwp_thread (child_lwp);
528 child_thr->last_resume_kind = resume_stop;
529 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
530
531 /* If we're suspending all threads, leave this one suspended
532 too. */
533 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
534 {
535 if (debug_threads)
536 debug_printf ("HEW: leaving child suspended\n");
537 child_lwp->suspended = 1;
538 }
539
540 parent_proc = get_thread_process (event_thr);
541 child_proc->attached = parent_proc->attached;
542 clone_all_breakpoints (&child_proc->breakpoints,
543 &child_proc->raw_breakpoints,
544 parent_proc->breakpoints);
545
546 tdesc = XNEW (struct target_desc);
547 copy_target_description (tdesc, parent_proc->tdesc);
548 child_proc->tdesc = tdesc;
549
550 /* Clone arch-specific process data. */
551 if (the_low_target.new_fork != NULL)
552 the_low_target.new_fork (parent_proc, child_proc);
553
554 /* Save fork info in the parent thread. */
555 if (event == PTRACE_EVENT_FORK)
556 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
557 else if (event == PTRACE_EVENT_VFORK)
558 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
559
560 event_lwp->waitstatus.value.related_pid = ptid;
561
562 /* The status_pending field contains bits denoting the
563 extended event, so when the pending event is handled,
564 the handler will look at lwp->waitstatus. */
565 event_lwp->status_pending_p = 1;
566 event_lwp->status_pending = wstat;
567
568 /* Report the event. */
569 return 0;
570 }
571
572 if (debug_threads)
573 debug_printf ("HEW: Got clone event "
574 "from LWP %ld, new child is LWP %ld\n",
575 lwpid_of (event_thr), new_pid);
576
577 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
578 new_lwp = add_lwp (ptid);
579
580 /* Either we're going to immediately resume the new thread
581 or leave it stopped. linux_resume_one_lwp is a nop if it
582 thinks the thread is currently running, so set this first
583 before calling linux_resume_one_lwp. */
584 new_lwp->stopped = 1;
585
586 /* If we're suspending all threads, leave this one suspended
587 too. */
588 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
589 new_lwp->suspended = 1;
590
591 /* Normally we will get the pending SIGSTOP. But in some cases
592 we might get another signal delivered to the group first.
593 If we do get another signal, be sure not to lose it. */
594 if (WSTOPSIG (status) != SIGSTOP)
595 {
596 new_lwp->stop_expected = 1;
597 new_lwp->status_pending_p = 1;
598 new_lwp->status_pending = status;
599 }
600 else if (report_thread_events)
601 {
602 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
603 new_lwp->status_pending_p = 1;
604 new_lwp->status_pending = status;
605 }
606
607 /* Don't report the event. */
608 return 1;
609 }
610 else if (event == PTRACE_EVENT_VFORK_DONE)
611 {
612 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
613
614 /* Report the event. */
615 return 0;
616 }
617 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
618 {
619 struct process_info *proc;
620 VEC (int) *syscalls_to_catch;
621 ptid_t event_ptid;
622 pid_t event_pid;
623
624 if (debug_threads)
625 {
626 debug_printf ("HEW: Got exec event from LWP %ld\n",
627 lwpid_of (event_thr));
628 }
629
630 /* Get the event ptid. */
631 event_ptid = ptid_of (event_thr);
632 event_pid = ptid_get_pid (event_ptid);
633
634 /* Save the syscall list from the execing process. */
635 proc = get_thread_process (event_thr);
636 syscalls_to_catch = proc->syscalls_to_catch;
637 proc->syscalls_to_catch = NULL;
638
639 /* Delete the execing process and all its threads. */
640 linux_mourn (proc);
641 current_thread = NULL;
642
643 /* Create a new process/lwp/thread. */
644 proc = linux_add_process (event_pid, 0);
645 event_lwp = add_lwp (event_ptid);
646 event_thr = get_lwp_thread (event_lwp);
647 gdb_assert (current_thread == event_thr);
648 linux_arch_setup_thread (event_thr);
649
650 /* Set the event status. */
651 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
652 event_lwp->waitstatus.value.execd_pathname
653 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
654
655 /* Mark the exec status as pending. */
656 event_lwp->stopped = 1;
657 event_lwp->status_pending_p = 1;
658 event_lwp->status_pending = wstat;
659 event_thr->last_resume_kind = resume_continue;
660 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
661
662 /* Update syscall state in the new lwp, effectively mid-syscall too. */
663 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
664
665 /* Restore the list to catch. Don't rely on the client, which is free
666 to avoid sending a new list when the architecture doesn't change.
667 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
668 proc->syscalls_to_catch = syscalls_to_catch;
669
670 /* Report the event. */
671 *orig_event_lwp = event_lwp;
672 return 0;
673 }
674
675 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
676 }
677
678 /* Return the PC as read from the regcache of LWP, without any
679 adjustment. */
680
681 static CORE_ADDR
682 get_pc (struct lwp_info *lwp)
683 {
684 struct thread_info *saved_thread;
685 struct regcache *regcache;
686 CORE_ADDR pc;
687
688 if (the_low_target.get_pc == NULL)
689 return 0;
690
691 saved_thread = current_thread;
692 current_thread = get_lwp_thread (lwp);
693
694 regcache = get_thread_regcache (current_thread, 1);
695 pc = (*the_low_target.get_pc) (regcache);
696
697 if (debug_threads)
698 debug_printf ("pc is 0x%lx\n", (long) pc);
699
700 current_thread = saved_thread;
701 return pc;
702 }
703
704 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
 705     Fill *SYSNO with the number of the syscall that trapped, and fill
 706     *SYSRET with its return code.  */
707
708 static void
709 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
710 {
711 struct thread_info *saved_thread;
712 struct regcache *regcache;
713
714 if (the_low_target.get_syscall_trapinfo == NULL)
715 {
716 /* If we cannot get the syscall trapinfo, report an unknown
717 system call number and -ENOSYS return value. */
718 *sysno = UNKNOWN_SYSCALL;
719 *sysret = -ENOSYS;
720 return;
721 }
722
723 saved_thread = current_thread;
724 current_thread = get_lwp_thread (lwp);
725
726 regcache = get_thread_regcache (current_thread, 1);
727 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
728
729 if (debug_threads)
730 {
731 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
732 *sysno, *sysret);
733 }
734
735 current_thread = saved_thread;
736 }
737
738 static int check_stopped_by_watchpoint (struct lwp_info *child);
739
740 /* Called when the LWP stopped for a signal/trap. If it stopped for a
 741     trap, check what caused it (breakpoint, watchpoint, trace, etc.),
742 and save the result in the LWP's stop_reason field. If it stopped
743 for a breakpoint, decrement the PC if necessary on the lwp's
744 architecture. Returns true if we now have the LWP's stop PC. */
745
746 static int
747 save_stop_reason (struct lwp_info *lwp)
748 {
749 CORE_ADDR pc;
750 CORE_ADDR sw_breakpoint_pc;
751 struct thread_info *saved_thread;
752 #if USE_SIGTRAP_SIGINFO
753 siginfo_t siginfo;
754 #endif
755
756 if (the_low_target.get_pc == NULL)
757 return 0;
758
759 pc = get_pc (lwp);
760 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
761
762 /* breakpoint_at reads from the current thread. */
763 saved_thread = current_thread;
764 current_thread = get_lwp_thread (lwp);
765
766 #if USE_SIGTRAP_SIGINFO
767 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
768 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
769 {
770 if (siginfo.si_signo == SIGTRAP)
771 {
772 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
773 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
774 {
775 /* The si_code is ambiguous on this arch -- check debug
776 registers. */
777 if (!check_stopped_by_watchpoint (lwp))
778 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
779 }
780 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
781 {
782 /* If we determine the LWP stopped for a SW breakpoint,
783 trust it. Particularly don't check watchpoint
784 registers, because at least on s390, we'd find
785 stopped-by-watchpoint as long as there's a watchpoint
786 set. */
787 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
788 }
789 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
790 {
791 /* This can indicate either a hardware breakpoint or
792 hardware watchpoint. Check debug registers. */
793 if (!check_stopped_by_watchpoint (lwp))
794 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
795 }
796 else if (siginfo.si_code == TRAP_TRACE)
797 {
798 /* We may have single stepped an instruction that
799 triggered a watchpoint. In that case, on some
800 architectures (such as x86), instead of TRAP_HWBKPT,
801 si_code indicates TRAP_TRACE, and we need to check
802 the debug registers separately. */
803 if (!check_stopped_by_watchpoint (lwp))
804 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
805 }
806 }
807 }
808 #else
809 /* We may have just stepped a breakpoint instruction. E.g., in
810 non-stop mode, GDB first tells the thread A to step a range, and
811 then the user inserts a breakpoint inside the range. In that
812 case we need to report the breakpoint PC. */
813 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
814 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
815 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
816
817 if (hardware_breakpoint_inserted_here (pc))
818 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
819
820 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
821 check_stopped_by_watchpoint (lwp);
822 #endif
823
824 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
825 {
826 if (debug_threads)
827 {
828 struct thread_info *thr = get_lwp_thread (lwp);
829
830 debug_printf ("CSBB: %s stopped by software breakpoint\n",
831 target_pid_to_str (ptid_of (thr)));
832 }
833
834 /* Back up the PC if necessary. */
835 if (pc != sw_breakpoint_pc)
836 {
837 struct regcache *regcache
838 = get_thread_regcache (current_thread, 1);
839 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
840 }
841
842 /* Update this so we record the correct stop PC below. */
843 pc = sw_breakpoint_pc;
844 }
845 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
846 {
847 if (debug_threads)
848 {
849 struct thread_info *thr = get_lwp_thread (lwp);
850
851 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
852 target_pid_to_str (ptid_of (thr)));
853 }
854 }
855 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
856 {
857 if (debug_threads)
858 {
859 struct thread_info *thr = get_lwp_thread (lwp);
860
861 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
862 target_pid_to_str (ptid_of (thr)));
863 }
864 }
865 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
866 {
867 if (debug_threads)
868 {
869 struct thread_info *thr = get_lwp_thread (lwp);
870
871 debug_printf ("CSBB: %s stopped by trace\n",
872 target_pid_to_str (ptid_of (thr)));
873 }
874 }
875
876 lwp->stop_pc = pc;
877 current_thread = saved_thread;
878 return 1;
879 }
880
881 static struct lwp_info *
882 add_lwp (ptid_t ptid)
883 {
884 struct lwp_info *lwp;
885
886 lwp = XCNEW (struct lwp_info);
887
888 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
889
890 if (the_low_target.new_thread != NULL)
891 the_low_target.new_thread (lwp);
892
893 lwp->thread = add_thread (ptid, lwp);
894
895 return lwp;
896 }
897
 898  /* Start an inferior process and return its pid.
899 ALLARGS is a vector of program-name and args. */
900
901 static int
902 linux_create_inferior (char *program, char **allargs)
903 {
904 struct lwp_info *new_lwp;
905 int pid;
906 ptid_t ptid;
907 struct cleanup *restore_personality
908 = maybe_disable_address_space_randomization (disable_randomization);
909
910 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
911 pid = vfork ();
912 #else
913 pid = fork ();
914 #endif
915 if (pid < 0)
916 perror_with_name ("fork");
917
918 if (pid == 0)
919 {
920 close_most_fds ();
921 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
922
923 setpgid (0, 0);
924
925 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
926 stdout to stderr so that inferior i/o doesn't corrupt the connection.
927 Also, redirect stdin to /dev/null. */
928 if (remote_connection_is_stdio ())
929 {
930 close (0);
931 open ("/dev/null", O_RDONLY);
932 dup2 (2, 1);
933 if (write (2, "stdin/stdout redirected\n",
934 sizeof ("stdin/stdout redirected\n") - 1) < 0)
935 {
936 /* Errors ignored. */;
937 }
938 }
939
940 execv (program, allargs);
941 if (errno == ENOENT)
942 execvp (program, allargs);
943
944 fprintf (stderr, "Cannot exec %s: %s.\n", program,
945 strerror (errno));
946 fflush (stderr);
947 _exit (0177);
948 }
949
950 do_cleanups (restore_personality);
951
952 linux_add_process (pid, 0);
953
954 ptid = ptid_build (pid, pid, 0);
955 new_lwp = add_lwp (ptid);
956 new_lwp->must_set_ptrace_flags = 1;
957
958 return pid;
959 }
960
961 /* Implement the post_create_inferior target_ops method. */
962
963 static void
964 linux_post_create_inferior (void)
965 {
966 struct lwp_info *lwp = get_thread_lwp (current_thread);
967
968 linux_arch_setup ();
969
970 if (lwp->must_set_ptrace_flags)
971 {
972 struct process_info *proc = current_process ();
973 int options = linux_low_ptrace_options (proc->attached);
974
975 linux_enable_event_reporting (lwpid_of (current_thread), options);
976 lwp->must_set_ptrace_flags = 0;
977 }
978 }
979
980 /* Attach to an inferior process. Returns 0 on success, ERRNO on
981 error. */
982
983 int
984 linux_attach_lwp (ptid_t ptid)
985 {
986 struct lwp_info *new_lwp;
987 int lwpid = ptid_get_lwp (ptid);
988
989 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
990 != 0)
991 return errno;
992
993 new_lwp = add_lwp (ptid);
994
995 /* We need to wait for SIGSTOP before being able to make the next
996 ptrace call on this LWP. */
997 new_lwp->must_set_ptrace_flags = 1;
998
999 if (linux_proc_pid_is_stopped (lwpid))
1000 {
1001 if (debug_threads)
1002 debug_printf ("Attached to a stopped process\n");
1003
1004 /* The process is definitely stopped. It is in a job control
1005 stop, unless the kernel predates the TASK_STOPPED /
1006 TASK_TRACED distinction, in which case it might be in a
1007 ptrace stop. Make sure it is in a ptrace stop; from there we
1008 can kill it, signal it, et cetera.
1009
1010 First make sure there is a pending SIGSTOP. Since we are
 1011     already attached, the process cannot transition from stopped
1012 to running without a PTRACE_CONT; so we know this signal will
1013 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1014 probably already in the queue (unless this kernel is old
1015 enough to use TASK_STOPPED for ptrace stops); but since
1016 SIGSTOP is not an RT signal, it can only be queued once. */
1017 kill_lwp (lwpid, SIGSTOP);
1018
1019 /* Finally, resume the stopped process. This will deliver the
1020 SIGSTOP (or a higher priority signal, just like normal
1021 PTRACE_ATTACH), which we'll catch later on. */
1022 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1023 }
1024
1025 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1026 brings it to a halt.
1027
1028 There are several cases to consider here:
1029
1030 1) gdbserver has already attached to the process and is being notified
1031 of a new thread that is being created.
1032 In this case we should ignore that SIGSTOP and resume the
1033 process. This is handled below by setting stop_expected = 1,
1034 and the fact that add_thread sets last_resume_kind ==
1035 resume_continue.
1036
1037 2) This is the first thread (the process thread), and we're attaching
1038 to it via attach_inferior.
1039 In this case we want the process thread to stop.
1040 This is handled by having linux_attach set last_resume_kind ==
1041 resume_stop after we return.
1042
1043 If the pid we are attaching to is also the tgid, we attach to and
1044 stop all the existing threads. Otherwise, we attach to pid and
1045 ignore any other threads in the same group as this pid.
1046
1047 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1048 existing threads.
1049 In this case we want the thread to stop.
1050 FIXME: This case is currently not properly handled.
 1051     We should wait for the SIGSTOP but don't.  Things apparently work
1052 because enough time passes between when we ptrace (ATTACH) and when
1053 gdb makes the next ptrace call on the thread.
1054
1055 On the other hand, if we are currently trying to stop all threads, we
1056 should treat the new thread as if we had sent it a SIGSTOP. This works
1057 because we are guaranteed that the add_lwp call above added us to the
1058 end of the list, and so the new thread has not yet reached
1059 wait_for_sigstop (but will). */
1060 new_lwp->stop_expected = 1;
1061
1062 return 0;
1063 }
1064
1065 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1066 already attached. Returns true if a new LWP is found, false
1067 otherwise. */
1068
1069 static int
1070 attach_proc_task_lwp_callback (ptid_t ptid)
1071 {
1072 /* Is this a new thread? */
1073 if (find_thread_ptid (ptid) == NULL)
1074 {
1075 int lwpid = ptid_get_lwp (ptid);
1076 int err;
1077
1078 if (debug_threads)
1079 debug_printf ("Found new lwp %d\n", lwpid);
1080
1081 err = linux_attach_lwp (ptid);
1082
1083 /* Be quiet if we simply raced with the thread exiting. EPERM
1084 is returned if the thread's task still exists, and is marked
1085 as exited or zombie, as well as other conditions, so in that
1086 case, confirm the status in /proc/PID/status. */
1087 if (err == ESRCH
1088 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1089 {
1090 if (debug_threads)
1091 {
1092 debug_printf ("Cannot attach to lwp %d: "
1093 "thread is gone (%d: %s)\n",
1094 lwpid, err, strerror (err));
1095 }
1096 }
1097 else if (err != 0)
1098 {
1099 warning (_("Cannot attach to lwp %d: %s"),
1100 lwpid,
1101 linux_ptrace_attach_fail_reason_string (ptid, err));
1102 }
1103
1104 return 1;
1105 }
1106 return 0;
1107 }
1108
1109 static void async_file_mark (void);
1110
1111 /* Attach to PID. If PID is the tgid, attach to it and all
1112 of its threads. */
1113
1114 static int
1115 linux_attach (unsigned long pid)
1116 {
1117 struct process_info *proc;
1118 struct thread_info *initial_thread;
1119 ptid_t ptid = ptid_build (pid, pid, 0);
1120 int err;
1121
1122 /* Attach to PID. We will check for other threads
1123 soon. */
1124 err = linux_attach_lwp (ptid);
1125 if (err != 0)
1126 error ("Cannot attach to process %ld: %s",
1127 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1128
1129 proc = linux_add_process (pid, 1);
1130
1131 /* Don't ignore the initial SIGSTOP if we just attached to this
1132 process. It will be collected by wait shortly. */
1133 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1134 initial_thread->last_resume_kind = resume_stop;
1135
1136 /* We must attach to every LWP. If /proc is mounted, use that to
1137 find them now. On the one hand, the inferior may be using raw
1138 clone instead of using pthreads. On the other hand, even if it
1139 is using pthreads, GDB may not be connected yet (thread_db needs
1140 to do symbol lookups, through qSymbol). Also, thread_db walks
1141 structures in the inferior's address space to find the list of
1142 threads/LWPs, and those structures may well be corrupted. Note
1143 that once thread_db is loaded, we'll still use it to list threads
1144 and associate pthread info with each LWP. */
1145 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1146
1147 /* GDB will shortly read the xml target description for this
1148 process, to figure out the process' architecture. But the target
1149 description is only filled in when the first process/thread in
1150 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1151 that now, otherwise, if GDB is fast enough, it could read the
1152 target description _before_ that initial stop. */
1153 if (non_stop)
1154 {
1155 struct lwp_info *lwp;
1156 int wstat, lwpid;
1157 ptid_t pid_ptid = pid_to_ptid (pid);
1158
1159 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1160 &wstat, __WALL);
1161 gdb_assert (lwpid > 0);
1162
1163 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1164
1165 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1166 {
1167 lwp->status_pending_p = 1;
1168 lwp->status_pending = wstat;
1169 }
1170
1171 initial_thread->last_resume_kind = resume_continue;
1172
1173 async_file_mark ();
1174
1175 gdb_assert (proc->tdesc != NULL);
1176 }
1177
1178 return 0;
1179 }
1180
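     /* Bookkeeping for second_thread_of_pid_p: the pid to match, and the
        number of matching threads seen so far.  */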
1181 struct counter
1182 {
1183 int pid;
1184 int count;
1185 };
1186
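     /* Callback for find_inferior.  ARGS points to a struct counter;
        return 1 once a second thread of that counter's pid is seen.  */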
1187 static int
1188 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1189 {
1190 struct counter *counter = (struct counter *) args;
1191
1192 if (ptid_get_pid (entry->id) == counter->pid)
1193 {
1194 if (++counter->count > 1)
1195 return 1;
1196 }
1197
1198 return 0;
1199 }
1200
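     /* Return 1 if no more than one thread of process PID remains in the
        thread list.  */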
1201 static int
1202 last_thread_of_process_p (int pid)
1203 {
 1204   struct counter counter = { pid, 0 };
1205
1206 return (find_inferior (&all_threads,
1207 second_thread_of_pid_p, &counter) == NULL);
1208 }
1209
1210 /* Kill LWP. */
1211
1212 static void
1213 linux_kill_one_lwp (struct lwp_info *lwp)
1214 {
1215 struct thread_info *thr = get_lwp_thread (lwp);
1216 int pid = lwpid_of (thr);
1217
1218 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1219 there is no signal context, and ptrace(PTRACE_KILL) (or
1220 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1221 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1222 alternative is to kill with SIGKILL. We only need one SIGKILL
 1223     per process, not one for each thread.  But since we still
1224 support debugging programs using raw clone without CLONE_THREAD,
1225 we send one for each thread. For years, we used PTRACE_KILL
1226 only, so we're being a bit paranoid about some old kernels where
1227 PTRACE_KILL might work better (dubious if there are any such, but
1228 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1229 second, and so we're fine everywhere. */
1230
1231 errno = 0;
1232 kill_lwp (pid, SIGKILL);
1233 if (debug_threads)
1234 {
1235 int save_errno = errno;
1236
1237 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1238 target_pid_to_str (ptid_of (thr)),
1239 save_errno ? strerror (save_errno) : "OK");
1240 }
1241
1242 errno = 0;
1243 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1244 if (debug_threads)
1245 {
1246 int save_errno = errno;
1247
1248 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1249 target_pid_to_str (ptid_of (thr)),
1250 save_errno ? strerror (save_errno) : "OK");
1251 }
1252 }
1253
1254 /* Kill LWP and wait for it to die. */
1255
1256 static void
1257 kill_wait_lwp (struct lwp_info *lwp)
1258 {
1259 struct thread_info *thr = get_lwp_thread (lwp);
1260 int pid = ptid_get_pid (ptid_of (thr));
1261 int lwpid = ptid_get_lwp (ptid_of (thr));
1262 int wstat;
1263 int res;
1264
1265 if (debug_threads)
1266 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1267
1268 do
1269 {
1270 linux_kill_one_lwp (lwp);
1271
1272 /* Make sure it died. Notes:
1273
1274 - The loop is most likely unnecessary.
1275
1276 - We don't use linux_wait_for_event as that could delete lwps
1277 while we're iterating over them. We're not interested in
1278 any pending status at this point, only in making sure all
1279 wait status on the kernel side are collected until the
1280 process is reaped.
1281
1282 - We don't use __WALL here as the __WALL emulation relies on
1283 SIGCHLD, and killing a stopped process doesn't generate
1284 one, nor an exit status.
1285 */
1286 res = my_waitpid (lwpid, &wstat, 0);
1287 if (res == -1 && errno == ECHILD)
1288 res = my_waitpid (lwpid, &wstat, __WCLONE);
1289 } while (res > 0 && WIFSTOPPED (wstat));
1290
1291 /* Even if it was stopped, the child may have already disappeared.
1292 E.g., if it was killed by SIGKILL. */
1293 if (res < 0 && errno != ECHILD)
1294 perror_with_name ("kill_wait_lwp");
1295 }
1296
1297 /* Callback for `find_inferior'. Kills an lwp of a given process,
1298 except the leader. */
1299
1300 static int
1301 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1302 {
1303 struct thread_info *thread = (struct thread_info *) entry;
1304 struct lwp_info *lwp = get_thread_lwp (thread);
1305 int pid = * (int *) args;
1306
1307 if (ptid_get_pid (entry->id) != pid)
1308 return 0;
1309
1310 /* We avoid killing the first thread here, because of a Linux kernel (at
1311 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1312 the children get a chance to be reaped, it will remain a zombie
1313 forever. */
1314
1315 if (lwpid_of (thread) == pid)
1316 {
1317 if (debug_threads)
1318 debug_printf ("lkop: is last of process %s\n",
1319 target_pid_to_str (entry->id));
1320 return 0;
1321 }
1322
1323 kill_wait_lwp (lwp);
1324 return 0;
1325 }
1326
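     /* Kill process PID: kill every LWP but the leader first, then the
        leader itself, and mourn the process.  Return 0 on success, or -1
        if PID does not name a known process.  */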
1327 static int
1328 linux_kill (int pid)
1329 {
1330 struct process_info *process;
1331 struct lwp_info *lwp;
1332
1333 process = find_process_pid (pid);
1334 if (process == NULL)
1335 return -1;
1336
1337 /* If we're killing a running inferior, make sure it is stopped
1338 first, as PTRACE_KILL will not work otherwise. */
1339 stop_all_lwps (0, NULL);
1340
 1341   find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1342
1343 /* See the comment in linux_kill_one_lwp. We did not kill the first
1344 thread in the list, so do so now. */
1345 lwp = find_lwp_pid (pid_to_ptid (pid));
1346
1347 if (lwp == NULL)
1348 {
1349 if (debug_threads)
1350 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1351 pid);
1352 }
1353 else
1354 kill_wait_lwp (lwp);
1355
1356 the_target->mourn (process);
1357
1358 /* Since we presently can only stop all lwps of all processes, we
1359 need to unstop lwps of other processes. */
1360 unstop_all_lwps (0, NULL);
1361 return 0;
1362 }
1363
1364 /* Get pending signal of THREAD, for detaching purposes. This is the
1365 signal the thread last stopped for, which we need to deliver to the
1366 thread when detaching, otherwise, it'd be suppressed/lost. */
1367
1368 static int
1369 get_detach_signal (struct thread_info *thread)
1370 {
1371 enum gdb_signal signo = GDB_SIGNAL_0;
1372 int status;
1373 struct lwp_info *lp = get_thread_lwp (thread);
1374
1375 if (lp->status_pending_p)
1376 status = lp->status_pending;
1377 else
1378 {
1379 /* If the thread had been suspended by gdbserver, and it stopped
1380 cleanly, then it'll have stopped with SIGSTOP. But we don't
1381 want to deliver that SIGSTOP. */
1382 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1383 || thread->last_status.value.sig == GDB_SIGNAL_0)
1384 return 0;
1385
1386 /* Otherwise, we may need to deliver the signal we
1387 intercepted. */
1388 status = lp->last_status;
1389 }
1390
1391 if (!WIFSTOPPED (status))
1392 {
1393 if (debug_threads)
1394 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1395 target_pid_to_str (ptid_of (thread)));
1396 return 0;
1397 }
1398
1399 /* Extended wait statuses aren't real SIGTRAPs. */
1400 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1401 {
1402 if (debug_threads)
1403 debug_printf ("GPS: lwp %s had stopped with extended "
1404 "status: no pending signal\n",
1405 target_pid_to_str (ptid_of (thread)));
1406 return 0;
1407 }
1408
1409 signo = gdb_signal_from_host (WSTOPSIG (status));
1410
1411 if (program_signals_p && !program_signals[signo])
1412 {
1413 if (debug_threads)
1414 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1415 target_pid_to_str (ptid_of (thread)),
1416 gdb_signal_to_string (signo));
1417 return 0;
1418 }
1419 else if (!program_signals_p
1420 /* If we have no way to know which signals GDB does not
1421 want to have passed to the program, assume
1422 SIGTRAP/SIGINT, which is GDB's default. */
1423 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1424 {
1425 if (debug_threads)
1426 debug_printf ("GPS: lwp %s had signal %s, "
1427 "but we don't know if we should pass it. "
1428 "Default to not.\n",
1429 target_pid_to_str (ptid_of (thread)),
1430 gdb_signal_to_string (signo));
1431 return 0;
1432 }
1433 else
1434 {
1435 if (debug_threads)
1436 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1437 target_pid_to_str (ptid_of (thread)),
1438 gdb_signal_to_string (signo));
1439
1440 return WSTOPSIG (status);
1441 }
1442 }
1443
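     /* Callback for find_inferior.  Detach from one LWP of process PID:
        discard any pending SIGSTOP, flush its registers, and pass the
        signal it stopped for (if any) along with PTRACE_DETACH.  */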
1444 static int
1445 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1446 {
1447 struct thread_info *thread = (struct thread_info *) entry;
1448 struct lwp_info *lwp = get_thread_lwp (thread);
1449 int pid = * (int *) args;
1450 int sig;
1451
1452 if (ptid_get_pid (entry->id) != pid)
1453 return 0;
1454
1455 /* If there is a pending SIGSTOP, get rid of it. */
1456 if (lwp->stop_expected)
1457 {
1458 if (debug_threads)
1459 debug_printf ("Sending SIGCONT to %s\n",
1460 target_pid_to_str (ptid_of (thread)));
1461
1462 kill_lwp (lwpid_of (thread), SIGCONT);
1463 lwp->stop_expected = 0;
1464 }
1465
1466 /* Flush any pending changes to the process's registers. */
1467 regcache_invalidate_thread (thread);
1468
1469 /* Pass on any pending signal for this thread. */
1470 sig = get_detach_signal (thread);
1471
1472 /* Finally, let it resume. */
1473 if (the_low_target.prepare_to_resume != NULL)
1474 the_low_target.prepare_to_resume (lwp);
1475 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1476 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1477 error (_("Can't detach %s: %s"),
1478 target_pid_to_str (ptid_of (thread)),
1479 strerror (errno));
1480
1481 delete_lwp (lwp);
1482 return 0;
1483 }
1484
1485 static int
1486 linux_detach (int pid)
1487 {
1488 struct process_info *process;
1489
1490 process = find_process_pid (pid);
1491 if (process == NULL)
1492 return -1;
1493
 1494  /* If a step over is already in progress, let it finish first;
 1495     otherwise nesting a stabilize_threads operation on top gets
 1496     really messy.  */
1497 complete_ongoing_step_over ();
1498
1499 /* Stop all threads before detaching. First, ptrace requires that
 1500     the thread is stopped to successfully detach.  Second, thread_db
1501 may need to uninstall thread event breakpoints from memory, which
1502 only works with a stopped process anyway. */
1503 stop_all_lwps (0, NULL);
1504
1505 #ifdef USE_THREAD_DB
1506 thread_db_detach (process);
1507 #endif
1508
1509 /* Stabilize threads (move out of jump pads). */
1510 stabilize_threads ();
1511
1512 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1513
1514 the_target->mourn (process);
1515
1516 /* Since we presently can only stop all lwps of all processes, we
1517 need to unstop lwps of other processes. */
1518 unstop_all_lwps (0, NULL);
1519 return 0;
1520 }
1521
1522 /* Remove all LWPs that belong to process PROC from the lwp list. */
1523
1524 static int
1525 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1526 {
1527 struct thread_info *thread = (struct thread_info *) entry;
1528 struct lwp_info *lwp = get_thread_lwp (thread);
1529 struct process_info *process = (struct process_info *) proc;
1530
1531 if (pid_of (thread) == pid_of (process))
1532 delete_lwp (lwp);
1533
1534 return 0;
1535 }
1536
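     /* Remove all of PROCESS's LWPs from our tables and free the
        process's private data, then remove the process itself.  */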
1537 static void
1538 linux_mourn (struct process_info *process)
1539 {
1540 struct process_info_private *priv;
1541
1542 #ifdef USE_THREAD_DB
1543 thread_db_mourn (process);
1544 #endif
1545
1546 find_inferior (&all_threads, delete_lwp_callback, process);
1547
 1548   /* Free all private data.  */
1549 priv = process->priv;
1550 free (priv->arch_private);
1551 free (priv);
1552 process->priv = NULL;
1553
1554 remove_process (process);
1555 }
1556
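     /* Wait for process PID to exit, looping until waitpid reports an
        exit or termination by signal, or until the child is gone
        (ECHILD).  */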
1557 static void
1558 linux_join (int pid)
1559 {
1560 int status, ret;
1561
1562 do {
1563 ret = my_waitpid (pid, &status, 0);
1564 if (WIFEXITED (status) || WIFSIGNALED (status))
1565 break;
1566 } while (ret != -1 || errno != ECHILD);
1567 }
1568
1569 /* Return nonzero if the given thread is still alive. */
1570 static int
1571 linux_thread_alive (ptid_t ptid)
1572 {
1573 struct lwp_info *lwp = find_lwp_pid (ptid);
1574
1575 /* We assume we always know if a thread exits. If a whole process
1576 exited but we still haven't been able to report it to GDB, we'll
1577 hold on to the last lwp of the dead process. */
1578 if (lwp != NULL)
1579 return !lwp_is_marked_dead (lwp);
1580 else
1581 return 0;
1582 }
1583
1584 /* Return 1 if this lwp still has an interesting status pending. If
1585 not (e.g., it had stopped for a breakpoint that is gone), return
 1586     0.  */
1587
1588 static int
1589 thread_still_has_status_pending_p (struct thread_info *thread)
1590 {
1591 struct lwp_info *lp = get_thread_lwp (thread);
1592
1593 if (!lp->status_pending_p)
1594 return 0;
1595
1596 if (thread->last_resume_kind != resume_stop
1597 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1598 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1599 {
1600 struct thread_info *saved_thread;
1601 CORE_ADDR pc;
1602 int discard = 0;
1603
1604 gdb_assert (lp->last_status != 0);
1605
1606 pc = get_pc (lp);
1607
1608 saved_thread = current_thread;
1609 current_thread = thread;
1610
1611 if (pc != lp->stop_pc)
1612 {
1613 if (debug_threads)
1614 debug_printf ("PC of %ld changed\n",
1615 lwpid_of (thread));
1616 discard = 1;
1617 }
1618
1619 #if !USE_SIGTRAP_SIGINFO
1620 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1621 && !(*the_low_target.breakpoint_at) (pc))
1622 {
1623 if (debug_threads)
1624 debug_printf ("previous SW breakpoint of %ld gone\n",
1625 lwpid_of (thread));
1626 discard = 1;
1627 }
1628 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1629 && !hardware_breakpoint_inserted_here (pc))
1630 {
1631 if (debug_threads)
1632 debug_printf ("previous HW breakpoint of %ld gone\n",
1633 lwpid_of (thread));
1634 discard = 1;
1635 }
1636 #endif
1637
1638 current_thread = saved_thread;
1639
1640 if (discard)
1641 {
1642 if (debug_threads)
1643 debug_printf ("discarding pending breakpoint status\n");
1644 lp->status_pending_p = 0;
1645 return 0;
1646 }
1647 }
1648
1649 return 1;
1650 }
1651
1652 /* Returns true if LWP is resumed from the client's perspective. */
1653
1654 static int
1655 lwp_resumed (struct lwp_info *lwp)
1656 {
1657 struct thread_info *thread = get_lwp_thread (lwp);
1658
1659 if (thread->last_resume_kind != resume_stop)
1660 return 1;
1661
1662 /* Did gdb send us a `vCont;t', but we haven't reported the
1663 corresponding stop to gdb yet? If so, the thread is still
1664 resumed/running from gdb's perspective. */
1665 if (thread->last_resume_kind == resume_stop
1666 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1667 return 1;
1668
1669 return 0;
1670 }
1671
1672 /* Return 1 if this lwp has an interesting status pending. */
1673 static int
1674 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1675 {
1676 struct thread_info *thread = (struct thread_info *) entry;
1677 struct lwp_info *lp = get_thread_lwp (thread);
1678 ptid_t ptid = * (ptid_t *) arg;
1679
1680 /* Check if we're only interested in events from a specific process
1681 or a specific LWP. */
1682 if (!ptid_match (ptid_of (thread), ptid))
1683 return 0;
1684
1685 if (!lwp_resumed (lp))
1686 return 0;
1687
1688 if (lp->status_pending_p
1689 && !thread_still_has_status_pending_p (thread))
1690 {
1691 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1692 return 0;
1693 }
1694
1695 return lp->status_pending_p;
1696 }
1697
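     /* Callback for find_inferior.  Return 1 if ENTRY's lwp id matches
        the ptid in DATA, using the pid when that ptid has no lwp
        component.  */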
1698 static int
1699 same_lwp (struct inferior_list_entry *entry, void *data)
1700 {
1701 ptid_t ptid = *(ptid_t *) data;
1702 int lwp;
1703
1704 if (ptid_get_lwp (ptid) != 0)
1705 lwp = ptid_get_lwp (ptid);
1706 else
1707 lwp = ptid_get_pid (ptid);
1708
1709 if (ptid_get_lwp (entry->id) == lwp)
1710 return 1;
1711
1712 return 0;
1713 }
1714
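     /* Return the lwp_info whose id matches PTID (by lwp id, or by pid
        when PTID has no lwp component), or NULL if there is none.  */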
1715 struct lwp_info *
1716 find_lwp_pid (ptid_t ptid)
1717 {
1718 struct inferior_list_entry *thread
1719 = find_inferior (&all_threads, same_lwp, &ptid);
1720
1721 if (thread == NULL)
1722 return NULL;
1723
1724 return get_thread_lwp ((struct thread_info *) thread);
1725 }
1726
1727 /* Return the number of known LWPs in the tgid given by PID. */
1728
1729 static int
1730 num_lwps (int pid)
1731 {
1732 struct inferior_list_entry *inf, *tmp;
1733 int count = 0;
1734
1735 ALL_INFERIORS (&all_threads, inf, tmp)
1736 {
1737 if (ptid_get_pid (inf->id) == pid)
1738 count++;
1739 }
1740
1741 return count;
1742 }
1743
1744 /* The arguments passed to iterate_over_lwps. */
1745
1746 struct iterate_over_lwps_args
1747 {
1748 /* The FILTER argument passed to iterate_over_lwps. */
1749 ptid_t filter;
1750
1751 /* The CALLBACK argument passed to iterate_over_lwps. */
1752 iterate_over_lwps_ftype *callback;
1753
1754 /* The DATA argument passed to iterate_over_lwps. */
1755 void *data;
1756 };
1757
1758 /* Callback for find_inferior used by iterate_over_lwps to filter
1759 calls to the callback supplied to that function. Returning a
 1760     nonzero value causes find_inferior to stop iterating and return
 1761     the current inferior_list_entry.  Returning zero indicates that
 1762     find_inferior should continue iterating.  */
1763
1764 static int
1765 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1766 {
1767 struct iterate_over_lwps_args *args
1768 = (struct iterate_over_lwps_args *) args_p;
1769
1770 if (ptid_match (entry->id, args->filter))
1771 {
1772 struct thread_info *thr = (struct thread_info *) entry;
1773 struct lwp_info *lwp = get_thread_lwp (thr);
1774
1775 return (*args->callback) (lwp, args->data);
1776 }
1777
1778 return 0;
1779 }
1780
1781 /* See nat/linux-nat.h. */
1782
1783 struct lwp_info *
1784 iterate_over_lwps (ptid_t filter,
1785 iterate_over_lwps_ftype callback,
1786 void *data)
1787 {
1788 struct iterate_over_lwps_args args = {filter, callback, data};
1789 struct inferior_list_entry *entry;
1790
1791 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1792 if (entry == NULL)
1793 return NULL;
1794
1795 return get_thread_lwp ((struct thread_info *) entry);
1796 }
1797
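     /* A minimal usage sketch of iterate_over_lwps, with a hypothetical
        callback that counts the matching LWPs (returning zero from the
        callback keeps the iteration going; nonzero stops it and returns
        that LWP):

          static int
          count_lwps_cb (struct lwp_info *lwp, void *data)
          {
            (*(int *) data)++;
            return 0;
          }

          int n = 0;
          iterate_over_lwps (pid_to_ptid (pid), count_lwps_cb, &n);  */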
1798 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1799 their exits until all other threads in the group have exited. */
1800
1801 static void
1802 check_zombie_leaders (void)
1803 {
1804 struct process_info *proc, *tmp;
1805
1806 ALL_PROCESSES (proc, tmp)
1807 {
1808 pid_t leader_pid = pid_of (proc);
1809 struct lwp_info *leader_lp;
1810
1811 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1812
1813 if (debug_threads)
1814 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1815 "num_lwps=%d, zombie=%d\n",
 1816                    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1817 linux_proc_pid_is_zombie (leader_pid));
1818
1819 if (leader_lp != NULL && !leader_lp->stopped
1820 /* Check if there are other threads in the group, as we may
1821 have raced with the inferior simply exiting. */
1822 && !last_thread_of_process_p (leader_pid)
1823 && linux_proc_pid_is_zombie (leader_pid))
1824 {
1825 /* A leader zombie can mean one of two things:
1826
1827 - It exited, and there's an exit status pending
1828 available, or only the leader exited (not the whole
1829 program). In the latter case, we can't waitpid the
1830 leader's exit status until all other threads are gone.
1831
1832 - There are 3 or more threads in the group, and a thread
1833 other than the leader exec'd. On an exec, the Linux
1834 kernel destroys all other threads (except the execing
1835 one) in the thread group, and resets the execing thread's
1836 tid to the tgid. No exit notification is sent for the
1837 execing thread -- from the ptracer's perspective, it
1838 appears as though the execing thread just vanishes.
1839 Until we reap all other threads except the leader and the
1840 execing thread, the leader will be zombie, and the
1841 execing thread will be in `D (disc sleep)'. As soon as
1842 all other threads are reaped, the execing thread changes
 1843     its tid to the tgid, and the previous (zombie) leader
1844 vanishes, giving place to the "new" leader. We could try
1845 distinguishing the exit and exec cases, by waiting once
1846 more, and seeing if something comes out, but it doesn't
1847 sound useful. The previous leader _does_ go away, and
1848 we'll re-add the new one once we see the exec event
1849 (which is just the same as what would happen if the
1850 previous leader did exit voluntarily before some other
1851 thread execs). */
1852
1853 if (debug_threads)
1854 fprintf (stderr,
1855 "CZL: Thread group leader %d zombie "
1856 "(it exited, or another thread execd).\n",
1857 leader_pid);
1858
1859 delete_lwp (leader_lp);
1860 }
1861 }
1862 }
1863
1864 /* Callback for `find_inferior'. Returns the first LWP that is not
1865 stopped. ARG is a PTID filter. */
1866
1867 static int
1868 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1869 {
1870 struct thread_info *thr = (struct thread_info *) entry;
1871 struct lwp_info *lwp;
1872 ptid_t filter = *(ptid_t *) arg;
1873
1874 if (!ptid_match (ptid_of (thr), filter))
1875 return 0;
1876
1877 lwp = get_thread_lwp (thr);
1878 if (!lwp->stopped)
1879 return 1;
1880
1881 return 0;
1882 }
1883
1884 /* Increment LWP's suspend count. */
1885
1886 static void
1887 lwp_suspended_inc (struct lwp_info *lwp)
1888 {
1889 lwp->suspended++;
1890
1891 if (debug_threads && lwp->suspended > 4)
1892 {
1893 struct thread_info *thread = get_lwp_thread (lwp);
1894
1895 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1896 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1897 }
1898 }
1899
1900 /* Decrement LWP's suspend count. */
1901
1902 static void
1903 lwp_suspended_decr (struct lwp_info *lwp)
1904 {
1905 lwp->suspended--;
1906
1907 if (lwp->suspended < 0)
1908 {
1909 struct thread_info *thread = get_lwp_thread (lwp);
1910
1911 internal_error (__FILE__, __LINE__,
1912 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1913 lwp->suspended);
1914 }
1915 }
1916
1917 /* This function should only be called if the LWP got a SIGTRAP.
1918
1919    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1920    event was handled, 0 otherwise.  */
1921
1922 static int
1923 handle_tracepoints (struct lwp_info *lwp)
1924 {
1925 struct thread_info *tinfo = get_lwp_thread (lwp);
1926 int tpoint_related_event = 0;
1927
1928 gdb_assert (lwp->suspended == 0);
1929
1930 /* If this tracepoint hit causes a tracing stop, we'll immediately
1931 uninsert tracepoints. To do this, we temporarily pause all
1932 threads, unpatch away, and then unpause threads. We need to make
1933 sure the unpausing doesn't resume LWP too. */
1934 lwp_suspended_inc (lwp);
1935
1936 /* And we need to be sure that any all-threads-stopping doesn't try
1937 to move threads out of the jump pads, as it could deadlock the
1938 inferior (LWP could be in the jump pad, maybe even holding the
1939 lock.) */
1940
1941 /* Do any necessary step collect actions. */
1942 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1943
1944 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1945
1946 /* See if we just hit a tracepoint and do its main collect
1947 actions. */
1948 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1949
1950 lwp_suspended_decr (lwp);
1951
1952 gdb_assert (lwp->suspended == 0);
1953 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1954
1955 if (tpoint_related_event)
1956 {
1957 if (debug_threads)
1958 debug_printf ("got a tracepoint event\n");
1959 return 1;
1960 }
1961
1962 return 0;
1963 }
1964
1965 /* Convenience wrapper.  Returns nonzero if LWP is presently
1966    collecting a fast tracepoint.  */
1967
1968 static int
1969 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1970 struct fast_tpoint_collect_status *status)
1971 {
1972 CORE_ADDR thread_area;
1973 struct thread_info *thread = get_lwp_thread (lwp);
1974
1975 if (the_low_target.get_thread_area == NULL)
1976 return 0;
1977
1978 /* Get the thread area address. This is used to recognize which
1979 thread is which when tracing with the in-process agent library.
1980 We don't read anything from the address, and treat it as opaque;
1981 it's the address itself that we assume is unique per-thread. */
1982 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1983 return 0;
1984
1985 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1986 }
1987
1988 /* The reason we resume in the caller is that we want to be able to
1989    pass lwp->status_pending as WSTAT, and we need to clear
1990    status_pending_p before resuming; otherwise, linux_resume_one_lwp
1991    refuses to resume.  */
1992
1993 static int
1994 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1995 {
1996 struct thread_info *saved_thread;
1997
1998 saved_thread = current_thread;
1999 current_thread = get_lwp_thread (lwp);
2000
2001 if ((wstat == NULL
2002 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2003 && supports_fast_tracepoints ()
2004 && agent_loaded_p ())
2005 {
2006 struct fast_tpoint_collect_status status;
2007 int r;
2008
2009 if (debug_threads)
2010 debug_printf ("Checking whether LWP %ld needs to move out of the "
2011 "jump pad.\n",
2012 lwpid_of (current_thread));
2013
2014 r = linux_fast_tracepoint_collecting (lwp, &status);
2015
2016 if (wstat == NULL
2017 || (WSTOPSIG (*wstat) != SIGILL
2018 && WSTOPSIG (*wstat) != SIGFPE
2019 && WSTOPSIG (*wstat) != SIGSEGV
2020 && WSTOPSIG (*wstat) != SIGBUS))
2021 {
2022 lwp->collecting_fast_tracepoint = r;
2023
2024 if (r != 0)
2025 {
2026 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2027 {
2028 /* Haven't executed the original instruction yet.
2029 Set breakpoint there, and wait till it's hit,
2030 then single-step until exiting the jump pad. */
2031 lwp->exit_jump_pad_bkpt
2032 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2033 }
2034
2035 if (debug_threads)
2036 debug_printf ("Checking whether LWP %ld needs to move out of "
2037 "the jump pad...it does\n",
2038 lwpid_of (current_thread));
2039 current_thread = saved_thread;
2040
2041 return 1;
2042 }
2043 }
2044 else
2045 {
2046 /* If we get a synchronous signal while collecting, *and*
2047 while executing the (relocated) original instruction,
2048 reset the PC to point at the tpoint address, before
2049 reporting to GDB. Otherwise, it's an IPA lib bug: just
2050 report the signal to GDB, and pray for the best. */
2051
2052 lwp->collecting_fast_tracepoint = 0;
2053
2054 if (r != 0
2055 && (status.adjusted_insn_addr <= lwp->stop_pc
2056 && lwp->stop_pc < status.adjusted_insn_addr_end))
2057 {
2058 siginfo_t info;
2059 struct regcache *regcache;
2060
2061 /* The si_addr on a few signals references the address
2062 of the faulting instruction. Adjust that as
2063 well. */
2064 if ((WSTOPSIG (*wstat) == SIGILL
2065 || WSTOPSIG (*wstat) == SIGFPE
2066 || WSTOPSIG (*wstat) == SIGBUS
2067 || WSTOPSIG (*wstat) == SIGSEGV)
2068 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2069 (PTRACE_TYPE_ARG3) 0, &info) == 0
2070 /* Final check just to make sure we don't clobber
2071 the siginfo of non-kernel-sent signals. */
2072 && (uintptr_t) info.si_addr == lwp->stop_pc)
2073 {
2074 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2075 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2076 (PTRACE_TYPE_ARG3) 0, &info);
2077 }
2078
2079 regcache = get_thread_regcache (current_thread, 1);
2080 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2081 lwp->stop_pc = status.tpoint_addr;
2082
2083 /* Cancel any fast tracepoint lock this thread was
2084 holding. */
2085 force_unlock_trace_buffer ();
2086 }
2087
2088 if (lwp->exit_jump_pad_bkpt != NULL)
2089 {
2090 if (debug_threads)
2091 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2092 "stopping all threads momentarily.\n");
2093
2094 stop_all_lwps (1, lwp);
2095
2096 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2097 lwp->exit_jump_pad_bkpt = NULL;
2098
2099 unstop_all_lwps (1, lwp);
2100
2101 gdb_assert (lwp->suspended >= 0);
2102 }
2103 }
2104 }
2105
2106 if (debug_threads)
2107 debug_printf ("Checking whether LWP %ld needs to move out of the "
2108 "jump pad...no\n",
2109 lwpid_of (current_thread));
2110
2111 current_thread = saved_thread;
2112 return 0;
2113 }
2114
2115 /* Enqueue one signal in the "signals to report later when out of the
2116 jump pad" list. */
2117
2118 static void
2119 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2120 {
2121 struct pending_signals *p_sig;
2122 struct thread_info *thread = get_lwp_thread (lwp);
2123
2124 if (debug_threads)
2125 debug_printf ("Deferring signal %d for LWP %ld.\n",
2126 WSTOPSIG (*wstat), lwpid_of (thread));
2127
2128 if (debug_threads)
2129 {
2130 struct pending_signals *sig;
2131
2132 for (sig = lwp->pending_signals_to_report;
2133 sig != NULL;
2134 sig = sig->prev)
2135 debug_printf (" Already queued %d\n",
2136 sig->signal);
2137
2138 debug_printf (" (no more currently queued signals)\n");
2139 }
2140
2141 /* Don't enqueue non-RT signals if they are already in the deferred
2142 queue. (SIGSTOP being the easiest signal to see ending up here
2143 twice) */
2144 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2145 {
2146 struct pending_signals *sig;
2147
2148 for (sig = lwp->pending_signals_to_report;
2149 sig != NULL;
2150 sig = sig->prev)
2151 {
2152 if (sig->signal == WSTOPSIG (*wstat))
2153 {
2154 if (debug_threads)
2155 debug_printf ("Not requeuing already queued non-RT signal %d"
2156 " for LWP %ld\n",
2157 sig->signal,
2158 lwpid_of (thread));
2159 return;
2160 }
2161 }
2162 }
2163
2164 p_sig = XCNEW (struct pending_signals);
2165 p_sig->prev = lwp->pending_signals_to_report;
2166 p_sig->signal = WSTOPSIG (*wstat);
2167
2168 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2169 &p_sig->info);
2170
2171 lwp->pending_signals_to_report = p_sig;
2172 }
2173
2174 /* Dequeue one signal from the "signals to report later when out of
2175 the jump pad" list. */
2176
2177 static int
2178 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2179 {
2180 struct thread_info *thread = get_lwp_thread (lwp);
2181
2182 if (lwp->pending_signals_to_report != NULL)
2183 {
2184 struct pending_signals **p_sig;
2185
2186 p_sig = &lwp->pending_signals_to_report;
2187 while ((*p_sig)->prev != NULL)
2188 p_sig = &(*p_sig)->prev;
2189
2190 *wstat = W_STOPCODE ((*p_sig)->signal);
2191 if ((*p_sig)->info.si_signo != 0)
2192 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2193 &(*p_sig)->info);
2194 free (*p_sig);
2195 *p_sig = NULL;
2196
2197 if (debug_threads)
2198 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2199 WSTOPSIG (*wstat), lwpid_of (thread));
2200
2201 if (debug_threads)
2202 {
2203 struct pending_signals *sig;
2204
2205 for (sig = lwp->pending_signals_to_report;
2206 sig != NULL;
2207 sig = sig->prev)
2208 debug_printf (" Still queued %d\n",
2209 sig->signal);
2210
2211 debug_printf (" (no more queued signals)\n");
2212 }
2213
2214 return 1;
2215 }
2216
2217 return 0;
2218 }
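
/* Aside on W_STOPCODE, used above: it synthesizes a wait status that
   looks as if the thread had just stopped with the given signal.
   With the usual glibc definition (((sig) << 8) | 0x7f), WIFSTOPPED
   is true of the result and WSTOPSIG recovers the signal.  A minimal
   sketch of that round trip, for illustration only:  */

static void
example_w_stopcode_round_trip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* Both assertions hold by construction of W_STOPCODE.  */
  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
}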
2219
2220 /* Fetch the possibly triggered data watchpoint info and store it in
2221 CHILD.
2222
2223    On some archs, like x86, that use debug registers to set
2224    watchpoints, the way to know which watched address trapped may be
2225    to check a register that also selects which address to watch.
2226    The problem is that, between setting the watchpoint and reading
2227    back which data address trapped, the user may change the set of
2228    watchpoints, and, as a consequence, GDB changes the debug
2229    registers in the inferior.  To avoid reading back a stale
2230    stopped-data-address when that happens, we cache in CHILD the
2231    fact that a watchpoint trapped, and the corresponding data
2232    address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
2233    changes the debug registers meanwhile, we have the cached data.  */
2234
2235 static int
2236 check_stopped_by_watchpoint (struct lwp_info *child)
2237 {
2238 if (the_low_target.stopped_by_watchpoint != NULL)
2239 {
2240 struct thread_info *saved_thread;
2241
2242 saved_thread = current_thread;
2243 current_thread = get_lwp_thread (child);
2244
2245 if (the_low_target.stopped_by_watchpoint ())
2246 {
2247 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2248
2249 if (the_low_target.stopped_data_address != NULL)
2250 child->stopped_data_address
2251 = the_low_target.stopped_data_address ();
2252 else
2253 child->stopped_data_address = 0;
2254 }
2255
2256 current_thread = saved_thread;
2257 }
2258
2259 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2260 }
2261
2262 /* Return the ptrace options that we want to try to enable. */
2263
2264 static int
2265 linux_low_ptrace_options (int attached)
2266 {
2267 int options = 0;
2268
2269 if (!attached)
2270 options |= PTRACE_O_EXITKILL;
2271
2272 if (report_fork_events)
2273 options |= PTRACE_O_TRACEFORK;
2274
2275 if (report_vfork_events)
2276 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2277
2278 if (report_exec_events)
2279 options |= PTRACE_O_TRACEEXEC;
2280
2281 options |= PTRACE_O_TRACESYSGOOD;
2282
2283 return options;
2284 }
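
/* The options computed above reach the kernel through a
   PTRACE_SETOPTIONS call on a stopped tracee; the real call site is
   linux_enable_event_reporting in nat/linux-ptrace.c.  A simplified
   sketch of that step (the error handling here is an assumption for
   illustration):  */

static void
example_enable_ptrace_options (pid_t pid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (uintptr_t) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}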
2285
2286 /* Do low-level handling of the event, and check if we should go on
2287 and pass it to caller code. Return the affected lwp if we are, or
2288 NULL otherwise. */
2289
2290 static struct lwp_info *
2291 linux_low_filter_event (int lwpid, int wstat)
2292 {
2293 struct lwp_info *child;
2294 struct thread_info *thread;
2295 int have_stop_pc = 0;
2296
2297 child = find_lwp_pid (pid_to_ptid (lwpid));
2298
2299 /* Check for stop events reported by a process we didn't already
2300 know about - anything not already in our LWP list.
2301
2302 If we're expecting to receive stopped processes after
2303 fork, vfork, and clone events, then we'll just add the
2304 new one to our list and go back to waiting for the event
2305 to be reported - the stopped process might be returned
2306 from waitpid before or after the event is.
2307
2308      But note the case of a non-leader thread exec'ing after the
2309      leader has exited, and gone from our lists (because
2310      check_zombie_leaders deleted it).  The non-leader thread
2311      changes its tid to the tgid.  */
2312
2313 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2314 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2315 {
2316 ptid_t child_ptid;
2317
2318 /* A multi-thread exec after we had seen the leader exiting. */
2319 if (debug_threads)
2320 {
2321 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2322 "after exec.\n", lwpid);
2323 }
2324
2325 child_ptid = ptid_build (lwpid, lwpid, 0);
2326 child = add_lwp (child_ptid);
2327 child->stopped = 1;
2328 current_thread = child->thread;
2329 }
2330
2331 /* If we didn't find a process, one of two things presumably happened:
2332 - A process we started and then detached from has exited. Ignore it.
2333 - A process we are controlling has forked and the new child's stop
2334 was reported to us by the kernel. Save its PID. */
2335 if (child == NULL && WIFSTOPPED (wstat))
2336 {
2337 add_to_pid_list (&stopped_pids, lwpid, wstat);
2338 return NULL;
2339 }
2340 else if (child == NULL)
2341 return NULL;
2342
2343 thread = get_lwp_thread (child);
2344
2345 child->stopped = 1;
2346
2347 child->last_status = wstat;
2348
2349 /* Check if the thread has exited. */
2350 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2351 {
2352 if (debug_threads)
2353 debug_printf ("LLFE: %d exited.\n", lwpid);
2354 /* If there is at least one more LWP, then the exit signal was
2355 not the end of the debugged application and should be
2356 ignored, unless GDB wants to hear about thread exits. */
2357 if (report_thread_events
2358 || last_thread_of_process_p (pid_of (thread)))
2359 {
2360 	  /* Since events are serialized to the GDB core, we can't
2361 	     report this one right now.  Leave the status pending for
2362 	     the next time we're able to report it.  */
2363 mark_lwp_dead (child, wstat);
2364 return child;
2365 }
2366 else
2367 {
2368 delete_lwp (child);
2369 return NULL;
2370 }
2371 }
2372
2373 gdb_assert (WIFSTOPPED (wstat));
2374
2375 if (WIFSTOPPED (wstat))
2376 {
2377 struct process_info *proc;
2378
2379 /* Architecture-specific setup after inferior is running. */
2380 proc = find_process_pid (pid_of (thread));
2381 if (proc->tdesc == NULL)
2382 {
2383 if (proc->attached)
2384 {
2385 /* This needs to happen after we have attached to the
2386 inferior and it is stopped for the first time, but
2387 before we access any inferior registers. */
2388 linux_arch_setup_thread (thread);
2389 }
2390 else
2391 {
2392 /* The process is started, but GDBserver will do
2393 architecture-specific setup after the program stops at
2394 the first instruction. */
2395 child->status_pending_p = 1;
2396 child->status_pending = wstat;
2397 return child;
2398 }
2399 }
2400 }
2401
2402 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2403 {
2404 struct process_info *proc = find_process_pid (pid_of (thread));
2405 int options = linux_low_ptrace_options (proc->attached);
2406
2407 linux_enable_event_reporting (lwpid, options);
2408 child->must_set_ptrace_flags = 0;
2409 }
2410
2411 /* Always update syscall_state, even if it will be filtered later. */
2412 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2413 {
2414 child->syscall_state
2415 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2416 ? TARGET_WAITKIND_SYSCALL_RETURN
2417 : TARGET_WAITKIND_SYSCALL_ENTRY);
2418 }
2419 else
2420 {
2421 /* Almost all other ptrace-stops are known to be outside of system
2422 calls, with further exceptions in handle_extended_wait. */
2423 child->syscall_state = TARGET_WAITKIND_IGNORE;
2424 }
2425
2426 /* Be careful to not overwrite stop_pc until save_stop_reason is
2427 called. */
2428 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2429 && linux_is_extended_waitstatus (wstat))
2430 {
2431 child->stop_pc = get_pc (child);
2432 if (handle_extended_wait (&child, wstat))
2433 {
2434 /* The event has been handled, so just return without
2435 reporting it. */
2436 return NULL;
2437 }
2438 }
2439
2440 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2441 {
2442 if (save_stop_reason (child))
2443 have_stop_pc = 1;
2444 }
2445
2446 if (!have_stop_pc)
2447 child->stop_pc = get_pc (child);
2448
2449 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2450 && child->stop_expected)
2451 {
2452 if (debug_threads)
2453 debug_printf ("Expected stop.\n");
2454 child->stop_expected = 0;
2455
2456 if (thread->last_resume_kind == resume_stop)
2457 {
2458 /* We want to report the stop to the core. Treat the
2459 SIGSTOP as a normal event. */
2460 if (debug_threads)
2461 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2462 target_pid_to_str (ptid_of (thread)));
2463 }
2464 else if (stopping_threads != NOT_STOPPING_THREADS)
2465 {
2466 /* Stopping threads. We don't want this SIGSTOP to end up
2467 pending. */
2468 if (debug_threads)
2469 debug_printf ("LLW: SIGSTOP caught for %s "
2470 "while stopping threads.\n",
2471 target_pid_to_str (ptid_of (thread)));
2472 return NULL;
2473 }
2474 else
2475 {
2476 /* This is a delayed SIGSTOP. Filter out the event. */
2477 if (debug_threads)
2478 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2479 child->stepping ? "step" : "continue",
2480 target_pid_to_str (ptid_of (thread)));
2481
2482 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2483 return NULL;
2484 }
2485 }
2486
2487 child->status_pending_p = 1;
2488 child->status_pending = wstat;
2489 return child;
2490 }
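
/* For reference: extended ptrace events such as PTRACE_EVENT_EXEC,
   tested above via linux_ptrace_get_extended_event, travel in the
   high bits of the wait status.  The kernel reports them as a
   SIGTRAP stop with the event number shifted into bits 16 and up, so
   the decoder amounts to this (mirroring nat/linux-ptrace.h,
   simplified):  */

static int
example_extended_event_of_status (int wstat)
{
  return wstat >> 16;
}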
2491
2492 /* Resume LWPs that are currently stopped without any pending status
2493 to report, but are resumed from the core's perspective. */
2494
2495 static void
2496 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2497 {
2498 struct thread_info *thread = (struct thread_info *) entry;
2499 struct lwp_info *lp = get_thread_lwp (thread);
2500
2501 if (lp->stopped
2502 && !lp->suspended
2503 && !lp->status_pending_p
2504 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2505 {
2506 int step = thread->last_resume_kind == resume_step;
2507
2508 if (debug_threads)
2509 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2510 target_pid_to_str (ptid_of (thread)),
2511 paddress (lp->stop_pc),
2512 step);
2513
2514 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2515 }
2516 }
2517
2518 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2519 match FILTER_PTID (leaving others pending). The PTIDs can be:
2520 minus_one_ptid, to specify any child; a pid PTID, specifying all
2521 lwps of a thread group; or a PTID representing a single lwp. Store
2522 the stop status through the status pointer WSTAT. OPTIONS is
2523 passed to the waitpid call. Return 0 if no event was found and
2524 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2525    were found.  Return the PID of the stopped child otherwise.  */
2526
2527 static int
2528 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2529 int *wstatp, int options)
2530 {
2531 struct thread_info *event_thread;
2532 struct lwp_info *event_child, *requested_child;
2533 sigset_t block_mask, prev_mask;
2534
2535 retry:
2536 /* N.B. event_thread points to the thread_info struct that contains
2537 event_child. Keep them in sync. */
2538 event_thread = NULL;
2539 event_child = NULL;
2540 requested_child = NULL;
2541
2542 /* Check for a lwp with a pending status. */
2543
2544 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2545 {
2546 event_thread = (struct thread_info *)
2547 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2548 if (event_thread != NULL)
2549 event_child = get_thread_lwp (event_thread);
2550 if (debug_threads && event_thread)
2551 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2552 }
2553 else if (!ptid_equal (filter_ptid, null_ptid))
2554 {
2555 requested_child = find_lwp_pid (filter_ptid);
2556
2557 if (stopping_threads == NOT_STOPPING_THREADS
2558 && requested_child->status_pending_p
2559 && requested_child->collecting_fast_tracepoint)
2560 {
2561 enqueue_one_deferred_signal (requested_child,
2562 &requested_child->status_pending);
2563 requested_child->status_pending_p = 0;
2564 requested_child->status_pending = 0;
2565 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2566 }
2567
2568 if (requested_child->suspended
2569 && requested_child->status_pending_p)
2570 {
2571 internal_error (__FILE__, __LINE__,
2572 "requesting an event out of a"
2573 " suspended child?");
2574 }
2575
2576 if (requested_child->status_pending_p)
2577 {
2578 event_child = requested_child;
2579 event_thread = get_lwp_thread (event_child);
2580 }
2581 }
2582
2583 if (event_child != NULL)
2584 {
2585 if (debug_threads)
2586 debug_printf ("Got an event from pending child %ld (%04x)\n",
2587 lwpid_of (event_thread), event_child->status_pending);
2588 *wstatp = event_child->status_pending;
2589 event_child->status_pending_p = 0;
2590 event_child->status_pending = 0;
2591 current_thread = event_thread;
2592 return lwpid_of (event_thread);
2593 }
2594
2595 /* But if we don't find a pending event, we'll have to wait.
2596
2597 We only enter this loop if no process has a pending wait status.
2598 Thus any action taken in response to a wait status inside this
2599 loop is responding as soon as we detect the status, not after any
2600 pending events. */
2601
2602 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2603 all signals while here. */
2604 sigfillset (&block_mask);
2605 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2606
2607 /* Always pull all events out of the kernel. We'll randomly select
2608 an event LWP out of all that have events, to prevent
2609 starvation. */
2610 while (event_child == NULL)
2611 {
2612 pid_t ret = 0;
2613
2614       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2615 	 quirks:
2616
2617 - If the thread group leader exits while other threads in the
2618 thread group still exist, waitpid(TGID, ...) hangs. That
2619 waitpid won't return an exit status until the other threads
2620 in the group are reaped.
2621
2622 - When a non-leader thread execs, that thread just vanishes
2623 without reporting an exit (so we'd hang if we waited for it
2624 explicitly in that case). The exec event is reported to
2625 the TGID pid. */
2626 errno = 0;
2627 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2628
2629 if (debug_threads)
2630 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2631 ret, errno ? strerror (errno) : "ERRNO-OK");
2632
2633 if (ret > 0)
2634 {
2635 if (debug_threads)
2636 {
2637 debug_printf ("LLW: waitpid %ld received %s\n",
2638 (long) ret, status_to_str (*wstatp));
2639 }
2640
2641 /* Filter all events. IOW, leave all events pending. We'll
2642 randomly select an event LWP out of all that have events
2643 below. */
2644 linux_low_filter_event (ret, *wstatp);
2645 /* Retry until nothing comes out of waitpid. A single
2646 SIGCHLD can indicate more than one child stopped. */
2647 continue;
2648 }
2649
2650 /* Now that we've pulled all events out of the kernel, resume
2651 LWPs that don't have an interesting event to report. */
2652 if (stopping_threads == NOT_STOPPING_THREADS)
2653 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2654
2655 /* ... and find an LWP with a status to report to the core, if
2656 any. */
2657 event_thread = (struct thread_info *)
2658 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2659 if (event_thread != NULL)
2660 {
2661 event_child = get_thread_lwp (event_thread);
2662 *wstatp = event_child->status_pending;
2663 event_child->status_pending_p = 0;
2664 event_child->status_pending = 0;
2665 break;
2666 }
2667
2668 /* Check for zombie thread group leaders. Those can't be reaped
2669 until all other threads in the thread group are. */
2670 check_zombie_leaders ();
2671
2672 /* If there are no resumed children left in the set of LWPs we
2673 want to wait for, bail. We can't just block in
2674 waitpid/sigsuspend, because lwps might have been left stopped
2675 in trace-stop state, and we'd be stuck forever waiting for
2676 their status to change (which would only happen if we resumed
2677 them). Even if WNOHANG is set, this return code is preferred
2678 over 0 (below), as it is more detailed. */
2679 if ((find_inferior (&all_threads,
2680 not_stopped_callback,
2681 &wait_ptid) == NULL))
2682 {
2683 if (debug_threads)
2684 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2685 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2686 return -1;
2687 }
2688
2689 /* No interesting event to report to the caller. */
2690 if ((options & WNOHANG))
2691 {
2692 if (debug_threads)
2693 debug_printf ("WNOHANG set, no event found\n");
2694
2695 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2696 return 0;
2697 }
2698
2699 /* Block until we get an event reported with SIGCHLD. */
2700 if (debug_threads)
2701 debug_printf ("sigsuspend'ing\n");
2702
2703 sigsuspend (&prev_mask);
2704 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2705 goto retry;
2706 }
2707
2708 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2709
2710 current_thread = event_thread;
2711
2712 return lwpid_of (event_thread);
2713 }
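
/* The "pull all events out of the kernel" discipline above, reduced
   to its essence as a standalone sketch.  A single SIGCHLD can stand
   for several stopped children, so the only safe pattern is to loop
   until waitpid reports nothing left (0) or no children at all (-1).
   The real loop uses my_waitpid, which also retries on EINTR; the
   handling inside the loop is elided here, and
   linux_low_filter_event above shows the real thing.  */

static void
example_drain_wait_events (void)
{
  int wstat;
  pid_t pid;

  while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
    {
      /* A real caller would record or filter the event for PID and
	 WSTAT here.  */
    }
}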
2714
2715 /* Wait for an event from child(ren) PTID. PTIDs can be:
2716 minus_one_ptid, to specify any child; a pid PTID, specifying all
2717 lwps of a thread group; or a PTID representing a single lwp. Store
2718 the stop status through the status pointer WSTAT. OPTIONS is
2719 passed to the waitpid call. Return 0 if no event was found and
2720 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2721    were found.  Return the PID of the stopped child otherwise.  */
2722
2723 static int
2724 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2725 {
2726 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2727 }
2728
2729 /* Count the LWPs that have had events.  */
2730
2731 static int
2732 count_events_callback (struct inferior_list_entry *entry, void *data)
2733 {
2734 struct thread_info *thread = (struct thread_info *) entry;
2735 struct lwp_info *lp = get_thread_lwp (thread);
2736 int *count = (int *) data;
2737
2738 gdb_assert (count != NULL);
2739
2740 /* Count only resumed LWPs that have an event pending. */
2741 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2742 && lp->status_pending_p)
2743 (*count)++;
2744
2745 return 0;
2746 }
2747
2748 /* Select the LWP (if any) that is currently being single-stepped. */
2749
2750 static int
2751 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2752 {
2753 struct thread_info *thread = (struct thread_info *) entry;
2754 struct lwp_info *lp = get_thread_lwp (thread);
2755
2756 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2757 && thread->last_resume_kind == resume_step
2758 && lp->status_pending_p)
2759 return 1;
2760 else
2761 return 0;
2762 }
2763
2764 /* Select the Nth LWP that has had an event. */
2765
2766 static int
2767 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2768 {
2769 struct thread_info *thread = (struct thread_info *) entry;
2770 struct lwp_info *lp = get_thread_lwp (thread);
2771 int *selector = (int *) data;
2772
2773 gdb_assert (selector != NULL);
2774
2775 /* Select only resumed LWPs that have an event pending. */
2776 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2777 && lp->status_pending_p)
2778 if ((*selector)-- == 0)
2779 return 1;
2780
2781 return 0;
2782 }
2783
2784 /* Select one LWP out of those that have events pending. */
2785
2786 static void
2787 select_event_lwp (struct lwp_info **orig_lp)
2788 {
2789 int num_events = 0;
2790 int random_selector;
2791 struct thread_info *event_thread = NULL;
2792
2793 /* In all-stop, give preference to the LWP that is being
2794 single-stepped. There will be at most one, and it's the LWP that
2795 the core is most interested in. If we didn't do this, then we'd
2796 have to handle pending step SIGTRAPs somehow in case the core
2797 later continues the previously-stepped thread, otherwise we'd
2798 report the pending SIGTRAP, and the core, not having stepped the
2799 thread, wouldn't understand what the trap was for, and therefore
2800 would report it to the user as a random signal. */
2801 if (!non_stop)
2802 {
2803 event_thread
2804 = (struct thread_info *) find_inferior (&all_threads,
2805 select_singlestep_lwp_callback,
2806 NULL);
2807 if (event_thread != NULL)
2808 {
2809 if (debug_threads)
2810 debug_printf ("SEL: Select single-step %s\n",
2811 target_pid_to_str (ptid_of (event_thread)));
2812 }
2813 }
2814 if (event_thread == NULL)
2815 {
2816 /* No single-stepping LWP. Select one at random, out of those
2817 which have had events. */
2818
2819 /* First see how many events we have. */
2820 find_inferior (&all_threads, count_events_callback, &num_events);
2821 gdb_assert (num_events > 0);
2822
2823 /* Now randomly pick a LWP out of those that have had
2824 events. */
2825 random_selector = (int)
2826 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2827
2828 if (debug_threads && num_events > 1)
2829 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2830 num_events, random_selector);
2831
2832 event_thread
2833 = (struct thread_info *) find_inferior (&all_threads,
2834 select_event_lwp_callback,
2835 &random_selector);
2836 }
2837
2838 if (event_thread != NULL)
2839 {
2840 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2841
2842 /* Switch the event LWP. */
2843 *orig_lp = event_lp;
2844 }
2845 }
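
/* The fair-selection arithmetic used above, as a standalone helper
   for illustration.  Scaling rand () by (RAND_MAX + 1.0) maps it
   into [0, num_events) without relying on the low-order bits of the
   generator, unlike rand () % num_events would.  */

static int
example_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}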
2846
2847 /* Decrement the suspend count of an LWP. */
2848
2849 static int
2850 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2851 {
2852 struct thread_info *thread = (struct thread_info *) entry;
2853 struct lwp_info *lwp = get_thread_lwp (thread);
2854
2855 /* Ignore EXCEPT. */
2856 if (lwp == except)
2857 return 0;
2858
2859 lwp_suspended_decr (lwp);
2860 return 0;
2861 }
2862
2863 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2864    non-NULL.  */
2865
2866 static void
2867 unsuspend_all_lwps (struct lwp_info *except)
2868 {
2869 find_inferior (&all_threads, unsuspend_one_lwp, except);
2870 }
2871
2872 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2873 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2874 void *data);
2875 static int lwp_running (struct inferior_list_entry *entry, void *data);
2876 static ptid_t linux_wait_1 (ptid_t ptid,
2877 struct target_waitstatus *ourstatus,
2878 int target_options);
2879
2880 /* Stabilize threads (move out of jump pads).
2881
2882 If a thread is midway collecting a fast tracepoint, we need to
2883 finish the collection and move it out of the jump pad before
2884 reporting the signal.
2885
2886 This avoids recursion while collecting (when a signal arrives
2887 midway, and the signal handler itself collects), which would trash
2888 the trace buffer. In case the user set a breakpoint in a signal
2889 handler, this avoids the backtrace showing the jump pad, etc..
2890 Most importantly, there are certain things we can't do safely if
2891    threads are stopped in a jump pad (or in its callees).  For
2892 example:
2893
2894    - starting a new trace run.  A thread still collecting the
2895    previous run could trash the trace buffer when resumed.  The trace
2896    buffer control structures would have been reset but the thread had
2897    no way to tell.  The thread could even be midway through
2898    memcpy'ing to the buffer, which would mean that when resumed, it
2899    would clobber the trace buffer that had been set up for a new run.
2900
2901    - we can't rewrite/reuse the jump pads for new tracepoints
2902    safely.  Say you do tstart while a thread is stopped midway
2903    through collecting.  When the thread is later resumed, it finishes
2904    the collection, and returns to the jump pad, to execute the
2905    original instruction that was under the tracepoint jump at the
2906    time the older run had been started.  If the jump pad had since
2907    been rewritten for something else in the new run, the thread would
2908    now execute wrong / random instructions.  */
2909
2910 static void
2911 linux_stabilize_threads (void)
2912 {
2913 struct thread_info *saved_thread;
2914 struct thread_info *thread_stuck;
2915
2916 thread_stuck
2917 = (struct thread_info *) find_inferior (&all_threads,
2918 stuck_in_jump_pad_callback,
2919 NULL);
2920 if (thread_stuck != NULL)
2921 {
2922 if (debug_threads)
2923 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2924 lwpid_of (thread_stuck));
2925 return;
2926 }
2927
2928 saved_thread = current_thread;
2929
2930 stabilizing_threads = 1;
2931
2932 /* Kick 'em all. */
2933 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2934
2935 /* Loop until all are stopped out of the jump pads. */
2936 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2937 {
2938 struct target_waitstatus ourstatus;
2939 struct lwp_info *lwp;
2940 int wstat;
2941
2942       /* Note that we go through the full wait event loop.  While
2943 	 moving threads out of the jump pad, we need to be able to
2944 	 step over internal breakpoints and such.  */
2945 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2946
2947 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2948 {
2949 lwp = get_thread_lwp (current_thread);
2950
2951 /* Lock it. */
2952 lwp_suspended_inc (lwp);
2953
2954 if (ourstatus.value.sig != GDB_SIGNAL_0
2955 || current_thread->last_resume_kind == resume_stop)
2956 {
2957 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2958 enqueue_one_deferred_signal (lwp, &wstat);
2959 }
2960 }
2961 }
2962
2963 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2964
2965 stabilizing_threads = 0;
2966
2967 current_thread = saved_thread;
2968
2969 if (debug_threads)
2970 {
2971 thread_stuck
2972 = (struct thread_info *) find_inferior (&all_threads,
2973 stuck_in_jump_pad_callback,
2974 NULL);
2975 if (thread_stuck != NULL)
2976 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2977 lwpid_of (thread_stuck));
2978 }
2979 }
2980
2981 /* Convenience function that is called when the kernel reports an
2982 event that is not passed out to GDB. */
2983
2984 static ptid_t
2985 ignore_event (struct target_waitstatus *ourstatus)
2986 {
2987 /* If we got an event, there may still be others, as a single
2988 SIGCHLD can indicate more than one child stopped. This forces
2989 another target_wait call. */
2990 async_file_mark ();
2991
2992 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2993 return null_ptid;
2994 }
2995
2996 /* Convenience function that is called when the kernel reports an exit
2997 event. This decides whether to report the event to GDB as a
2998 process exit event, a thread exit event, or to suppress the
2999 event. */
3000
3001 static ptid_t
3002 filter_exit_event (struct lwp_info *event_child,
3003 struct target_waitstatus *ourstatus)
3004 {
3005 struct thread_info *thread = get_lwp_thread (event_child);
3006 ptid_t ptid = ptid_of (thread);
3007
3008 if (!last_thread_of_process_p (pid_of (thread)))
3009 {
3010 if (report_thread_events)
3011 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3012 else
3013 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3014
3015 delete_lwp (event_child);
3016 }
3017 return ptid;
3018 }
3019
3020 /* Returns 1 if GDB is interested in any event_child syscalls. */
3021
3022 static int
3023 gdb_catching_syscalls_p (struct lwp_info *event_child)
3024 {
3025 struct thread_info *thread = get_lwp_thread (event_child);
3026 struct process_info *proc = get_thread_process (thread);
3027
3028 return !VEC_empty (int, proc->syscalls_to_catch);
3029 }
3030
3031 /* Returns 1 if GDB is interested in the event_child syscall.
3032 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3033
3034 static int
3035 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3036 {
3037 int i, iter;
3038 int sysno, sysret;
3039 struct thread_info *thread = get_lwp_thread (event_child);
3040 struct process_info *proc = get_thread_process (thread);
3041
3042 if (VEC_empty (int, proc->syscalls_to_catch))
3043 return 0;
3044
3045 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3046 return 1;
3047
3048 get_syscall_trapinfo (event_child, &sysno, &sysret);
3049 for (i = 0;
3050 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3051 i++)
3052 if (iter == sysno)
3053 return 1;
3054
3055 return 0;
3056 }
3057
3058 /* Wait for process, returns status. */
3059
3060 static ptid_t
3061 linux_wait_1 (ptid_t ptid,
3062 struct target_waitstatus *ourstatus, int target_options)
3063 {
3064 int w;
3065 struct lwp_info *event_child;
3066 int options;
3067 int pid;
3068 int step_over_finished;
3069 int bp_explains_trap;
3070 int maybe_internal_trap;
3071 int report_to_gdb;
3072 int trace_event;
3073 int in_step_range;
3074 int any_resumed;
3075
3076 if (debug_threads)
3077 {
3078 debug_enter ();
3079 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3080 }
3081
3082 /* Translate generic target options into linux options. */
3083 options = __WALL;
3084 if (target_options & TARGET_WNOHANG)
3085 options |= WNOHANG;
3086
3087 bp_explains_trap = 0;
3088 trace_event = 0;
3089 in_step_range = 0;
3090 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3091
3092 /* Find a resumed LWP, if any. */
3093 if (find_inferior (&all_threads,
3094 status_pending_p_callback,
3095 &minus_one_ptid) != NULL)
3096 any_resumed = 1;
3097 else if ((find_inferior (&all_threads,
3098 not_stopped_callback,
3099 &minus_one_ptid) != NULL))
3100 any_resumed = 1;
3101 else
3102 any_resumed = 0;
3103
3104 if (ptid_equal (step_over_bkpt, null_ptid))
3105 pid = linux_wait_for_event (ptid, &w, options);
3106 else
3107 {
3108 if (debug_threads)
3109 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3110 target_pid_to_str (step_over_bkpt));
3111 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3112 }
3113
3114 if (pid == 0 || (pid == -1 && !any_resumed))
3115 {
3116 gdb_assert (target_options & TARGET_WNOHANG);
3117
3118 if (debug_threads)
3119 {
3120 debug_printf ("linux_wait_1 ret = null_ptid, "
3121 "TARGET_WAITKIND_IGNORE\n");
3122 debug_exit ();
3123 }
3124
3125 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3126 return null_ptid;
3127 }
3128 else if (pid == -1)
3129 {
3130 if (debug_threads)
3131 {
3132 debug_printf ("linux_wait_1 ret = null_ptid, "
3133 "TARGET_WAITKIND_NO_RESUMED\n");
3134 debug_exit ();
3135 }
3136
3137 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3138 return null_ptid;
3139 }
3140
3141 event_child = get_thread_lwp (current_thread);
3142
3143 /* linux_wait_for_event only returns an exit status for the last
3144 child of a process. Report it. */
3145 if (WIFEXITED (w) || WIFSIGNALED (w))
3146 {
3147 if (WIFEXITED (w))
3148 {
3149 ourstatus->kind = TARGET_WAITKIND_EXITED;
3150 ourstatus->value.integer = WEXITSTATUS (w);
3151
3152 if (debug_threads)
3153 {
3154 debug_printf ("linux_wait_1 ret = %s, exited with "
3155 "retcode %d\n",
3156 target_pid_to_str (ptid_of (current_thread)),
3157 WEXITSTATUS (w));
3158 debug_exit ();
3159 }
3160 }
3161 else
3162 {
3163 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3164 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3165
3166 if (debug_threads)
3167 {
3168 debug_printf ("linux_wait_1 ret = %s, terminated with "
3169 "signal %d\n",
3170 target_pid_to_str (ptid_of (current_thread)),
3171 WTERMSIG (w));
3172 debug_exit ();
3173 }
3174 }
3175
3176 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3177 return filter_exit_event (event_child, ourstatus);
3178
3179 return ptid_of (current_thread);
3180 }
3181
3182 /* If step-over executes a breakpoint instruction, in the case of a
3183 hardware single step it means a gdb/gdbserver breakpoint had been
3184 planted on top of a permanent breakpoint, in the case of a software
3185 single step it may just mean that gdbserver hit the reinsert breakpoint.
3186 The PC has been adjusted by save_stop_reason to point at
3187 the breakpoint address.
3188 So in the case of the hardware single step advance the PC manually
3189 past the breakpoint and in the case of software single step advance only
3190 if it's not the reinsert_breakpoint we are hitting.
3191      This prevents the program from trapping on a permanent breakpoint
3192      forever.  */
3193 if (!ptid_equal (step_over_bkpt, null_ptid)
3194 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3195 && (event_child->stepping
3196 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3197 {
3198 int increment_pc = 0;
3199 int breakpoint_kind = 0;
3200 CORE_ADDR stop_pc = event_child->stop_pc;
3201
3202       breakpoint_kind
3203 	= the_target->breakpoint_kind_from_current_state (&stop_pc);
3204 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3205
3206 if (debug_threads)
3207 {
3208 debug_printf ("step-over for %s executed software breakpoint\n",
3209 target_pid_to_str (ptid_of (current_thread)));
3210 }
3211
3212 if (increment_pc != 0)
3213 {
3214 struct regcache *regcache
3215 = get_thread_regcache (current_thread, 1);
3216
3217 event_child->stop_pc += increment_pc;
3218 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3219
3220 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3221 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3222 }
3223 }
3224
3225 /* If this event was not handled before, and is not a SIGTRAP, we
3226 report it. SIGILL and SIGSEGV are also treated as traps in case
3227 a breakpoint is inserted at the current PC. If this target does
3228 not support internal breakpoints at all, we also report the
3229 SIGTRAP without further processing; it's of no concern to us. */
3230 maybe_internal_trap
3231 = (supports_breakpoints ()
3232 && (WSTOPSIG (w) == SIGTRAP
3233 || ((WSTOPSIG (w) == SIGILL
3234 || WSTOPSIG (w) == SIGSEGV)
3235 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3236
3237 if (maybe_internal_trap)
3238 {
3239 /* Handle anything that requires bookkeeping before deciding to
3240 report the event or continue waiting. */
3241
3242 /* First check if we can explain the SIGTRAP with an internal
3243 breakpoint, or if we should possibly report the event to GDB.
3244 Do this before anything that may remove or insert a
3245 breakpoint. */
3246 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3247
3248 /* We have a SIGTRAP, possibly a step-over dance has just
3249 finished. If so, tweak the state machine accordingly,
3250 reinsert breakpoints and delete any reinsert (software
3251 single-step) breakpoints. */
3252 step_over_finished = finish_step_over (event_child);
3253
3254 /* Now invoke the callbacks of any internal breakpoints there. */
3255 check_breakpoints (event_child->stop_pc);
3256
3257 /* Handle tracepoint data collecting. This may overflow the
3258 trace buffer, and cause a tracing stop, removing
3259 breakpoints. */
3260 trace_event = handle_tracepoints (event_child);
3261
3262 if (bp_explains_trap)
3263 {
3264 /* If we stepped or ran into an internal breakpoint, we've
3265 already handled it. So next time we resume (from this
3266 PC), we should step over it. */
3267 if (debug_threads)
3268 debug_printf ("Hit a gdbserver breakpoint.\n");
3269
3270 if (breakpoint_here (event_child->stop_pc))
3271 event_child->need_step_over = 1;
3272 }
3273 }
3274 else
3275 {
3276 /* We have some other signal, possibly a step-over dance was in
3277 progress, and it should be cancelled too. */
3278 step_over_finished = finish_step_over (event_child);
3279 }
3280
3281 /* We have all the data we need. Either report the event to GDB, or
3282 resume threads and keep waiting for more. */
3283
3284 /* If we're collecting a fast tracepoint, finish the collection and
3285 move out of the jump pad before delivering a signal. See
3286 linux_stabilize_threads. */
3287
3288 if (WIFSTOPPED (w)
3289 && WSTOPSIG (w) != SIGTRAP
3290 && supports_fast_tracepoints ()
3291 && agent_loaded_p ())
3292 {
3293 if (debug_threads)
3294 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3295 "to defer or adjust it.\n",
3296 WSTOPSIG (w), lwpid_of (current_thread));
3297
3298 /* Allow debugging the jump pad itself. */
3299 if (current_thread->last_resume_kind != resume_step
3300 && maybe_move_out_of_jump_pad (event_child, &w))
3301 {
3302 enqueue_one_deferred_signal (event_child, &w);
3303
3304 if (debug_threads)
3305 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3306 WSTOPSIG (w), lwpid_of (current_thread));
3307
3308 linux_resume_one_lwp (event_child, 0, 0, NULL);
3309
3310 return ignore_event (ourstatus);
3311 }
3312 }
3313
3314 if (event_child->collecting_fast_tracepoint)
3315 {
3316 if (debug_threads)
3317 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3318 "Check if we're already there.\n",
3319 lwpid_of (current_thread),
3320 event_child->collecting_fast_tracepoint);
3321
3322 trace_event = 1;
3323
3324 event_child->collecting_fast_tracepoint
3325 = linux_fast_tracepoint_collecting (event_child, NULL);
3326
3327 if (event_child->collecting_fast_tracepoint != 1)
3328 {
3329 /* No longer need this breakpoint. */
3330 if (event_child->exit_jump_pad_bkpt != NULL)
3331 {
3332 if (debug_threads)
3333 		debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3334 			      "Stopping all threads momentarily.\n");
3335
3336 /* Other running threads could hit this breakpoint.
3337 We don't handle moribund locations like GDB does,
3338 instead we always pause all threads when removing
3339 breakpoints, so that any step-over or
3340 decr_pc_after_break adjustment is always taken
3341 care of while the breakpoint is still
3342 inserted. */
3343 stop_all_lwps (1, event_child);
3344
3345 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3346 event_child->exit_jump_pad_bkpt = NULL;
3347
3348 unstop_all_lwps (1, event_child);
3349
3350 gdb_assert (event_child->suspended >= 0);
3351 }
3352 }
3353
3354 if (event_child->collecting_fast_tracepoint == 0)
3355 {
3356 if (debug_threads)
3357 debug_printf ("fast tracepoint finished "
3358 "collecting successfully.\n");
3359
3360 /* We may have a deferred signal to report. */
3361 if (dequeue_one_deferred_signal (event_child, &w))
3362 {
3363 if (debug_threads)
3364 debug_printf ("dequeued one signal.\n");
3365 }
3366 else
3367 {
3368 if (debug_threads)
3369 debug_printf ("no deferred signals.\n");
3370
3371 if (stabilizing_threads)
3372 {
3373 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3374 ourstatus->value.sig = GDB_SIGNAL_0;
3375
3376 if (debug_threads)
3377 {
3378 debug_printf ("linux_wait_1 ret = %s, stopped "
3379 "while stabilizing threads\n",
3380 target_pid_to_str (ptid_of (current_thread)));
3381 debug_exit ();
3382 }
3383
3384 return ptid_of (current_thread);
3385 }
3386 }
3387 }
3388 }
3389
3390 /* Check whether GDB would be interested in this event. */
3391
3392 /* Check if GDB is interested in this syscall. */
3393 if (WIFSTOPPED (w)
3394 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3395 && !gdb_catch_this_syscall_p (event_child))
3396 {
3397 if (debug_threads)
3398 {
3399 debug_printf ("Ignored syscall for LWP %ld.\n",
3400 lwpid_of (current_thread));
3401 }
3402
3403 linux_resume_one_lwp (event_child, event_child->stepping,
3404 0, NULL);
3405 return ignore_event (ourstatus);
3406 }
3407
3408 /* If GDB is not interested in this signal, don't stop other
3409 threads, and don't report it to GDB. Just resume the inferior
3410 right away. We do this for threading-related signals as well as
3411 any that GDB specifically requested we ignore. But never ignore
3412 SIGSTOP if we sent it ourselves, and do not ignore signals when
3413 stepping - they may require special handling to skip the signal
3414 handler. Also never ignore signals that could be caused by a
3415 breakpoint. */
3416 if (WIFSTOPPED (w)
3417 && current_thread->last_resume_kind != resume_step
3418 && (
3419 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3420 (current_process ()->priv->thread_db != NULL
3421 && (WSTOPSIG (w) == __SIGRTMIN
3422 || WSTOPSIG (w) == __SIGRTMIN + 1))
3423 ||
3424 #endif
3425 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3426 && !(WSTOPSIG (w) == SIGSTOP
3427 && current_thread->last_resume_kind == resume_stop)
3428 && !linux_wstatus_maybe_breakpoint (w))))
3429 {
3430 siginfo_t info, *info_p;
3431
3432 if (debug_threads)
3433 debug_printf ("Ignored signal %d for LWP %ld.\n",
3434 WSTOPSIG (w), lwpid_of (current_thread));
3435
3436 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3437 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3438 info_p = &info;
3439 else
3440 info_p = NULL;
3441
3442 if (step_over_finished)
3443 {
3444 /* We cancelled this thread's step-over above. We still
3445 need to unsuspend all other LWPs, and set them back
3446 running again while the signal handler runs. */
3447 unsuspend_all_lwps (event_child);
3448
3449 /* Enqueue the pending signal info so that proceed_all_lwps
3450 doesn't lose it. */
3451 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3452
3453 proceed_all_lwps ();
3454 }
3455 else
3456 {
3457 linux_resume_one_lwp (event_child, event_child->stepping,
3458 WSTOPSIG (w), info_p);
3459 }
3460 return ignore_event (ourstatus);
3461 }
3462
3463 /* Note that all addresses are always "out of the step range" when
3464 there's no range to begin with. */
3465 in_step_range = lwp_in_step_range (event_child);
3466
3467 /* If GDB wanted this thread to single step, and the thread is out
3468 of the step range, we always want to report the SIGTRAP, and let
3469 GDB handle it. Watchpoints should always be reported. So should
3470 signals we can't explain. A SIGTRAP we can't explain could be a
3471      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3472      we do, we're able to handle GDB breakpoints on top of internal
3473      breakpoints, by handling the internal breakpoint and still
3474 reporting the event to GDB. If we don't, we're out of luck, GDB
3475 won't see the breakpoint hit. If we see a single-step event but
3476 the thread should be continuing, don't pass the trap to gdb.
3477 That indicates that we had previously finished a single-step but
3478 left the single-step pending -- see
3479 complete_ongoing_step_over. */
3480 report_to_gdb = (!maybe_internal_trap
3481 || (current_thread->last_resume_kind == resume_step
3482 && !in_step_range)
3483 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3484 || (!in_step_range
3485 && !bp_explains_trap
3486 && !trace_event
3487 && !step_over_finished
3488 && !(current_thread->last_resume_kind == resume_continue
3489 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3490 || (gdb_breakpoint_here (event_child->stop_pc)
3491 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3492 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3493 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3494
3495 run_breakpoint_commands (event_child->stop_pc);
3496
3497 /* We found no reason GDB would want us to stop. We either hit one
3498 of our own breakpoints, or finished an internal step GDB
3499 shouldn't know about. */
3500 if (!report_to_gdb)
3501 {
3502 if (debug_threads)
3503 {
3504 if (bp_explains_trap)
3505 debug_printf ("Hit a gdbserver breakpoint.\n");
3506 if (step_over_finished)
3507 debug_printf ("Step-over finished.\n");
3508 if (trace_event)
3509 debug_printf ("Tracepoint event.\n");
3510 if (lwp_in_step_range (event_child))
3511 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3512 paddress (event_child->stop_pc),
3513 paddress (event_child->step_range_start),
3514 paddress (event_child->step_range_end));
3515 }
3516
3517 /* We're not reporting this breakpoint to GDB, so apply the
3518 decr_pc_after_break adjustment to the inferior's regcache
3519 ourselves. */
3520
3521 if (the_low_target.set_pc != NULL)
3522 {
3523 struct regcache *regcache
3524 = get_thread_regcache (current_thread, 1);
3525 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3526 }
3527
3528 /* We may have finished stepping over a breakpoint. If so,
3529 we've stopped and suspended all LWPs momentarily except the
3530 stepping one. This is where we resume them all again. We're
3531 going to keep waiting, so use proceed, which handles stepping
3532 over the next breakpoint. */
3533 if (debug_threads)
3534 debug_printf ("proceeding all threads.\n");
3535
3536 if (step_over_finished)
3537 unsuspend_all_lwps (event_child);
3538
3539 proceed_all_lwps ();
3540 return ignore_event (ourstatus);
3541 }
3542
3543 if (debug_threads)
3544 {
3545 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3546 {
3547 char *str;
3548
3549 str = target_waitstatus_to_string (&event_child->waitstatus);
3550 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3551 lwpid_of (get_lwp_thread (event_child)), str);
3552 xfree (str);
3553 }
3554 if (current_thread->last_resume_kind == resume_step)
3555 {
3556 if (event_child->step_range_start == event_child->step_range_end)
3557 debug_printf ("GDB wanted to single-step, reporting event.\n");
3558 else if (!lwp_in_step_range (event_child))
3559 debug_printf ("Out of step range, reporting event.\n");
3560 }
3561 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3562 debug_printf ("Stopped by watchpoint.\n");
3563 else if (gdb_breakpoint_here (event_child->stop_pc))
3564 debug_printf ("Stopped by GDB breakpoint.\n");
3565       debug_printf ("Hit a non-gdbserver trap event.\n");
3567 }
3568
3569 /* Alright, we're going to report a stop. */
3570
3571 if (!stabilizing_threads)
3572 {
3573 /* In all-stop, stop all threads. */
3574 if (!non_stop)
3575 stop_all_lwps (0, NULL);
3576
3577 /* If we're not waiting for a specific LWP, choose an event LWP
3578 from among those that have had events. Giving equal priority
3579 to all LWPs that have had events helps prevent
3580 starvation. */
3581 if (ptid_equal (ptid, minus_one_ptid))
3582 {
3583 event_child->status_pending_p = 1;
3584 event_child->status_pending = w;
3585
3586 select_event_lwp (&event_child);
3587
3588 /* current_thread and event_child must stay in sync. */
3589 current_thread = get_lwp_thread (event_child);
3590
3591 event_child->status_pending_p = 0;
3592 w = event_child->status_pending;
3593 }
3594
3595 if (step_over_finished)
3596 {
3597 if (!non_stop)
3598 {
3599 /* If we were doing a step-over, all other threads but
3600 the stepping one had been paused in start_step_over,
3601 with their suspend counts incremented. We don't want
3602 to do a full unstop/unpause, because we're in
3603 all-stop mode (so we want threads stopped), but we
3604 still need to unsuspend the other threads, to
3605 decrement their `suspended' count back. */
3606 unsuspend_all_lwps (event_child);
3607 }
3608 else
3609 {
3610 /* If we just finished a step-over, then all threads had
3611 been momentarily paused. In all-stop, that's fine,
3612 we want threads stopped by now anyway. In non-stop,
3613 we need to re-resume threads that GDB wanted to be
3614 running. */
3615 unstop_all_lwps (1, event_child);
3616 }
3617 }
3618
3619 /* Stabilize threads (move out of jump pads). */
3620 if (!non_stop)
3621 stabilize_threads ();
3622 }
3623 else
3624 {
3625 /* If we just finished a step-over, then all threads had been
3626 momentarily paused. In all-stop, that's fine, we want
3627 threads stopped by now anyway. In non-stop, we need to
3628 re-resume threads that GDB wanted to be running. */
3629 if (step_over_finished)
3630 unstop_all_lwps (1, event_child);
3631 }
3632
3633 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3634 {
3635 /* If the reported event is an exit, fork, vfork or exec, let
3636 GDB know. */
3637 *ourstatus = event_child->waitstatus;
3638 /* Clear the event lwp's waitstatus since we handled it already. */
3639 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3640 }
3641 else
3642 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3643
3644 /* Now that we've selected our final event LWP, un-adjust its PC if
3645 it was a software breakpoint, and the client doesn't know we can
3646 adjust the breakpoint ourselves. */
3647 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3648 && !swbreak_feature)
3649 {
3650 int decr_pc = the_low_target.decr_pc_after_break;
3651
3652 if (decr_pc != 0)
3653 {
3654 struct regcache *regcache
3655 = get_thread_regcache (current_thread, 1);
3656 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3657 }
3658 }
3659
3660 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3661 {
3662 int sysret;
3663
3664 get_syscall_trapinfo (event_child,
3665 &ourstatus->value.syscall_number, &sysret);
3666 ourstatus->kind = event_child->syscall_state;
3667 }
3668 else if (current_thread->last_resume_kind == resume_stop
3669 && WSTOPSIG (w) == SIGSTOP)
3670 {
3671 /* GDB requested this thread to stop with vCont;t, and it
3672 stopped cleanly, so report the stop as SIG0. The use of
3673 SIGSTOP is an implementation detail. */
3674 ourstatus->value.sig = GDB_SIGNAL_0;
3675 }
3676 else if (current_thread->last_resume_kind == resume_stop
3677 && WSTOPSIG (w) != SIGSTOP)
3678 {
3679 /* GDB requested this thread to stop with vCont;t, but it
3680 stopped for some other reason; report that signal. */
3681 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3682 }
3683 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3684 {
3685 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3686 }
3687
3688 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3689
3690 if (debug_threads)
3691 {
3692 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3693 target_pid_to_str (ptid_of (current_thread)),
3694 ourstatus->kind, ourstatus->value.sig);
3695 debug_exit ();
3696 }
3697
3698 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3699 return filter_exit_event (event_child, ourstatus);
3700
3701 return ptid_of (current_thread);
3702 }
3703
3704 /* Get rid of any pending event in the pipe. */
3705 static void
3706 async_file_flush (void)
3707 {
3708 int ret;
3709 char buf;
3710
3711 do
3712 ret = read (linux_event_pipe[0], &buf, 1);
3713 while (ret >= 0 || (ret == -1 && errno == EINTR));
3714 }
3715
3716 /* Put something in the pipe, so the event loop wakes up. */
3717 static void
3718 async_file_mark (void)
3719 {
3720 int ret;
3721
3722 async_file_flush ();
3723
3724 do
3725 ret = write (linux_event_pipe[1], "+", 1);
3726 while (ret == 0 || (ret == -1 && errno == EINTR));
3727
3728 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3729 be awakened anyway. */
3730 }
3731
3732 static ptid_t
3733 linux_wait (ptid_t ptid,
3734 struct target_waitstatus *ourstatus, int target_options)
3735 {
3736 ptid_t event_ptid;
3737
3738 /* Flush the async file first. */
3739 if (target_is_async_p ())
3740 async_file_flush ();
3741
3742 do
3743 {
3744 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3745 }
3746 while ((target_options & TARGET_WNOHANG) == 0
3747 && ptid_equal (event_ptid, null_ptid)
3748 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3749
3750 /* If at least one stop was reported, there may be more. A single
3751 SIGCHLD can signal more than one child stop. */
3752 if (target_is_async_p ()
3753 && (target_options & TARGET_WNOHANG) != 0
3754 && !ptid_equal (event_ptid, null_ptid))
3755 async_file_mark ();
3756
3757 return event_ptid;
3758 }
3759
3760 /* Send a signal to an LWP. */
3761
3762 static int
3763 kill_lwp (unsigned long lwpid, int signo)
3764 {
3765 int ret;
3766
3767 errno = 0;
3768 ret = syscall (__NR_tkill, lwpid, signo);
3769 if (errno == ENOSYS)
3770 {
3771 /* If tkill fails, then we are not using nptl threads, a
3772 configuration we no longer support. */
3773 perror_with_name (("tkill"));
3774 }
3775 return ret;
3776 }
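/* Note that unlike kill(2), which addresses a whole thread group
   (process), tkill(2) delivers the signal to one specific kernel
   thread, which is what stopping a single LWP requires.  Linux also
   provides tgkill(2), which additionally guards against thread-id
   reuse; plain tkill is used here, presumably because gdbserver
   already tracks the exact LWP ids it traces.  */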
3777
3778 void
3779 linux_stop_lwp (struct lwp_info *lwp)
3780 {
3781 send_sigstop (lwp);
3782 }
3783
3784 static void
3785 send_sigstop (struct lwp_info *lwp)
3786 {
3787 int pid;
3788
3789 pid = lwpid_of (get_lwp_thread (lwp));
3790
3791 /* If we already have a pending stop signal for this LWP, don't
3792 send another. */
3793 if (lwp->stop_expected)
3794 {
3795 if (debug_threads)
3796 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3797
3798 return;
3799 }
3800
3801 if (debug_threads)
3802 debug_printf ("Sending sigstop to lwp %d\n", pid);
3803
3804 lwp->stop_expected = 1;
3805 kill_lwp (pid, SIGSTOP);
3806 }
3807
3808 static int
3809 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3810 {
3811 struct thread_info *thread = (struct thread_info *) entry;
3812 struct lwp_info *lwp = get_thread_lwp (thread);
3813
3814 /* Ignore EXCEPT. */
3815 if (lwp == except)
3816 return 0;
3817
3818 if (lwp->stopped)
3819 return 0;
3820
3821 send_sigstop (lwp);
3822 return 0;
3823 }
3824
3825 /* Increment the suspend count of an LWP, and stop it if it is not
3826 stopped yet. */
3827 static int
3828 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3829 void *except)
3830 {
3831 struct thread_info *thread = (struct thread_info *) entry;
3832 struct lwp_info *lwp = get_thread_lwp (thread);
3833
3834 /* Ignore EXCEPT. */
3835 if (lwp == except)
3836 return 0;
3837
3838 lwp_suspended_inc (lwp);
3839
3840 return send_sigstop_callback (entry, except);
3841 }
3842
3843 static void
3844 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3845 {
3846 /* Store the exit status for later. */
3847 lwp->status_pending_p = 1;
3848 lwp->status_pending = wstat;
3849
3850 /* Store in waitstatus as well, as there's nothing else to process
3851 for this event. */
3852 if (WIFEXITED (wstat))
3853 {
3854 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3855 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3856 }
3857 else if (WIFSIGNALED (wstat))
3858 {
3859 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3860 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3861 }
3862
3863 /* Prevent trying to stop it. */
3864 lwp->stopped = 1;
3865
3866 /* No further stops are expected from a dead lwp. */
3867 lwp->stop_expected = 0;
3868 }
3869
3870 /* Return true if LWP has exited already, and has a pending exit event
3871 to report to GDB. */
3872
3873 static int
3874 lwp_is_marked_dead (struct lwp_info *lwp)
3875 {
3876 return (lwp->status_pending_p
3877 && (WIFEXITED (lwp->status_pending)
3878 || WIFSIGNALED (lwp->status_pending)));
3879 }
3880
3881 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3882
3883 static void
3884 wait_for_sigstop (void)
3885 {
3886 struct thread_info *saved_thread;
3887 ptid_t saved_tid;
3888 int wstat;
3889 int ret;
3890
3891 saved_thread = current_thread;
3892 if (saved_thread != NULL)
3893 saved_tid = saved_thread->entry.id;
3894 else
3895 saved_tid = null_ptid; /* avoid bogus unused warning */
3896
3897 if (debug_threads)
3898 debug_printf ("wait_for_sigstop: pulling events\n");
3899
3900 /* Passing NULL_PTID as filter indicates we want all events to be
3901 left pending. Eventually this returns when there are no
3902 unwaited-for children left. */
3903 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3904 &wstat, __WALL);
3905 gdb_assert (ret == -1);
3906
3907 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3908 current_thread = saved_thread;
3909 else
3910 {
3911 if (debug_threads)
3912 debug_printf ("Previously current thread died.\n");
3913
3914 /* We can't change the current inferior behind GDB's back,
3915 otherwise, a subsequent command may apply to the wrong
3916 process. */
3917 current_thread = NULL;
3918 }
3919 }
3920
3921 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3922 move it out, because we need to report the stop event to GDB. For
3923 example, if the user puts a breakpoint in the jump pad, it's
3924 because she wants to debug it. */
3925
3926 static int
3927 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3928 {
3929 struct thread_info *thread = (struct thread_info *) entry;
3930 struct lwp_info *lwp = get_thread_lwp (thread);
3931
3932 if (lwp->suspended != 0)
3933 {
3934 internal_error (__FILE__, __LINE__,
3935 "LWP %ld is suspended, suspended=%d\n",
3936 lwpid_of (thread), lwp->suspended);
3937 }
3938 gdb_assert (lwp->stopped);
3939
3940 /* Allow debugging the jump pad, gdb_collect, etc. */
3941 return (supports_fast_tracepoints ()
3942 && agent_loaded_p ()
3943 && (gdb_breakpoint_here (lwp->stop_pc)
3944 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3945 || thread->last_resume_kind == resume_step)
3946 && linux_fast_tracepoint_collecting (lwp, NULL));
3947 }
3948
3949 static void
3950 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3951 {
3952 struct thread_info *thread = (struct thread_info *) entry;
3953 struct thread_info *saved_thread;
3954 struct lwp_info *lwp = get_thread_lwp (thread);
3955 int *wstat;
3956
3957 if (lwp->suspended != 0)
3958 {
3959 internal_error (__FILE__, __LINE__,
3960 "LWP %ld is suspended, suspended=%d\n",
3961 lwpid_of (thread), lwp->suspended);
3962 }
3963 gdb_assert (lwp->stopped);
3964
3965 /* For gdb_breakpoint_here. */
3966 saved_thread = current_thread;
3967 current_thread = thread;
3968
3969 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3970
3971 /* Allow debugging the jump pad, gdb_collect, etc. */
3972 if (!gdb_breakpoint_here (lwp->stop_pc)
3973 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3974 && thread->last_resume_kind != resume_step
3975 && maybe_move_out_of_jump_pad (lwp, wstat))
3976 {
3977 if (debug_threads)
3978 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3979 lwpid_of (thread));
3980
3981 if (wstat)
3982 {
3983 lwp->status_pending_p = 0;
3984 enqueue_one_deferred_signal (lwp, wstat);
3985
3986 if (debug_threads)
3987 debug_printf ("Signal %d for LWP %ld deferred "
3988 "(in jump pad)\n",
3989 WSTOPSIG (*wstat), lwpid_of (thread));
3990 }
3991
3992 linux_resume_one_lwp (lwp, 0, 0, NULL);
3993 }
3994 else
3995 lwp_suspended_inc (lwp);
3996
3997 current_thread = saved_thread;
3998 }
3999
4000 static int
4001 lwp_running (struct inferior_list_entry *entry, void *data)
4002 {
4003 struct thread_info *thread = (struct thread_info *) entry;
4004 struct lwp_info *lwp = get_thread_lwp (thread);
4005
4006 if (lwp_is_marked_dead (lwp))
4007 return 0;
4008 if (lwp->stopped)
4009 return 0;
4010 return 1;
4011 }
4012
4013 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4014 If SUSPEND, then also increase the suspend count of every LWP,
4015 except EXCEPT. */
4016
4017 static void
4018 stop_all_lwps (int suspend, struct lwp_info *except)
4019 {
4020 /* Should not be called recursively. */
4021 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4022
4023 if (debug_threads)
4024 {
4025 debug_enter ();
4026 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4027 suspend ? "stop-and-suspend" : "stop",
4028 except != NULL
4029 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4030 : "none");
4031 }
4032
4033 stopping_threads = (suspend
4034 ? STOPPING_AND_SUSPENDING_THREADS
4035 : STOPPING_THREADS);
4036
4037 if (suspend)
4038 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4039 else
4040 find_inferior (&all_threads, send_sigstop_callback, except);
4041 wait_for_sigstop ();
4042 stopping_threads = NOT_STOPPING_THREADS;
4043
4044 if (debug_threads)
4045 {
4046 debug_printf ("stop_all_lwps done, setting stopping_threads "
4047 "back to !stopping\n");
4048 debug_exit ();
4049 }
4050 }
4051
4052 /* Enqueue one signal in the chain of signals which need to be
4053 delivered to this LWP on next resume. */
4054
4055 static void
4056 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4057 {
4058 struct pending_signals *p_sig = XNEW (struct pending_signals);
4059
4060 p_sig->prev = lwp->pending_signals;
4061 p_sig->signal = signal;
4062 if (info == NULL)
4063 memset (&p_sig->info, 0, sizeof (siginfo_t));
4064 else
4065 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4066 lwp->pending_signals = p_sig;
4067 }
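/* The pending_signals chain is linked newest-first through the
   `prev' pointers; linux_resume_one_lwp_throw consumes it by walking
   to the tail, so signals are re-delivered in the order in which
   they were originally received (FIFO).  */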
4068
4069 /* Install breakpoints for software single stepping. */
4070
4071 static void
4072 install_software_single_step_breakpoints (struct lwp_info *lwp)
4073 {
4074 int i;
4075 CORE_ADDR pc;
4076 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4077 VEC (CORE_ADDR) *next_pcs = NULL;
4078 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4079
4080 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4081
4082 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4083 set_reinsert_breakpoint (pc);
4084
4085 do_cleanups (old_chain);
4086 }
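/* install_software_single_step_breakpoints above emulates hardware
   single-step: the low target's get_next_pcs hook computes every
   address the next instruction could transfer control to (the
   fall-through address, both outcomes of a conditional branch, an
   indirect branch target read from the registers, and so on), and a
   transient "reinsert" breakpoint is planted at each of them.  */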
4087
4088 /* Single step via hardware or software single step.
4089 Return 1 if hardware single stepping, 0 if software single stepping
4090 or can't single step. */
4091
4092 static int
4093 single_step (struct lwp_info* lwp)
4094 {
4095 int step = 0;
4096
4097 if (can_hardware_single_step ())
4098 {
4099 step = 1;
4100 }
4101 else if (can_software_single_step ())
4102 {
4103 install_software_single_step_breakpoints (lwp);
4104 step = 0;
4105 }
4106 else
4107 {
4108 if (debug_threads)
4109 debug_printf ("stepping is not implemented on this target");
4110 }
4111
4112 return step;
4113 }
4114
4115 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4116 SIGNAL is nonzero, give it that signal. */
4117
4118 static void
4119 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4120 int step, int signal, siginfo_t *info)
4121 {
4122 struct thread_info *thread = get_lwp_thread (lwp);
4123 struct thread_info *saved_thread;
4124 int fast_tp_collecting;
4125 int ptrace_request;
4126 struct process_info *proc = get_thread_process (thread);
4127
4128 /* Note that the target description may not be initialised
4129 (proc->tdesc == NULL) at this point, because the program hasn't
4130 stopped at its first instruction yet; GDBserver is still skipping
4131 the extra traps from the wrapper program (see option --wrapper).
4132 Code in this function that requires register access should be
4133 guarded by a check that proc->tdesc is not NULL, or similar. */
4134
4135 if (lwp->stopped == 0)
4136 return;
4137
4138 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4139
4140 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4141
4142 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4143
4144 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4145 user used the "jump" command, or "set $pc = foo"). */
4146 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4147 {
4148 /* Collecting 'while-stepping' actions doesn't make sense
4149 anymore. */
4150 release_while_stepping_state_list (thread);
4151 }
4152
4153 /* If we have pending signals or status, and a new signal, enqueue the
4154 signal. Also enqueue the signal if we are waiting to reinsert a
4155 breakpoint; it will be picked up again below. */
4156 if (signal != 0
4157 && (lwp->status_pending_p
4158 || lwp->pending_signals != NULL
4159 || lwp->bp_reinsert != 0
4160 || fast_tp_collecting))
4161 enqueue_pending_signal (lwp, signal, info);
4172
4173 if (lwp->status_pending_p)
4174 {
4175 if (debug_threads)
4176 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4177 " has pending status\n",
4178 lwpid_of (thread), step ? "step" : "continue", signal,
4179 lwp->stop_expected ? "expected" : "not expected");
4180 return;
4181 }
4182
4183 saved_thread = current_thread;
4184 current_thread = thread;
4185
4186 if (debug_threads)
4187 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4188 lwpid_of (thread), step ? "step" : "continue", signal,
4189 lwp->stop_expected ? "expected" : "not expected");
4190
4191 /* This bit needs some thinking about. If we get a signal that
4192 we must report while a single-step reinsert is still pending,
4193 we often end up resuming the thread. It might be better to
4194 (ew) allow a stack of pending events; then we could be sure that
4195 the reinsert happened right away and not lose any signals.
4196
4197 Making this stack would also shrink the window in which breakpoints are
4198 uninserted (see comment in linux_wait_for_lwp) but not enough for
4199 complete correctness, so it won't solve that problem. It may be
4200 worthwhile just to solve this one, however. */
4201 if (lwp->bp_reinsert != 0)
4202 {
4203 if (debug_threads)
4204 debug_printf (" pending reinsert at 0x%s\n",
4205 paddress (lwp->bp_reinsert));
4206
4207 if (can_hardware_single_step ())
4208 {
4209 if (fast_tp_collecting == 0)
4210 {
4211 if (step == 0)
4212 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4213 if (lwp->suspended)
4214 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4215 lwp->suspended);
4216 }
4217
4218 step = 1;
4219 }
4220
4221 /* Postpone any pending signal. It was enqueued above. */
4222 signal = 0;
4223 }
4224
4225 if (fast_tp_collecting == 1)
4226 {
4227 if (debug_threads)
4228 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4229 " (exit-jump-pad-bkpt)\n",
4230 lwpid_of (thread));
4231
4232 /* Postpone any pending signal. It was enqueued above. */
4233 signal = 0;
4234 }
4235 else if (fast_tp_collecting == 2)
4236 {
4237 if (debug_threads)
4238 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4239 " single-stepping\n",
4240 lwpid_of (thread));
4241
4242 if (can_hardware_single_step ())
4243 step = 1;
4244 else
4245 {
4246 internal_error (__FILE__, __LINE__,
4247 "moving out of jump pad single-stepping"
4248 " not implemented on this target");
4249 }
4250
4251 /* Postpone any pending signal. It was enqueued above. */
4252 signal = 0;
4253 }
4254
4255 /* If we have while-stepping actions in this thread, set it stepping.
4256 If we have a signal to deliver, it may or may not be set to
4257 SIG_IGN, we don't know. Assume so, and allow collecting
4258 while-stepping into a signal handler. A possible smart thing to
4259 do would be to set an internal breakpoint at the signal return
4260 address, continue, and carry on catching this while-stepping
4261 action only when that breakpoint is hit. A future
4262 enhancement. */
4263 if (thread->while_stepping != NULL)
4264 {
4265 if (debug_threads)
4266 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4267 lwpid_of (thread));
4268
4269 step = single_step (lwp);
4270 }
4271
4272 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4273 {
4274 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4275
4276 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4277
4278 if (debug_threads)
4279 {
4280 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4281 (long) lwp->stop_pc);
4282 }
4283 }
4284
4285 /* If we have pending signals, consume one unless we are trying to
4286 reinsert a breakpoint or we're trying to finish a fast tracepoint
4287 collect. */
4288 if (lwp->pending_signals != NULL
4289 && lwp->bp_reinsert == 0
4290 && fast_tp_collecting == 0)
4291 {
4292 struct pending_signals **p_sig;
4293
4294 p_sig = &lwp->pending_signals;
4295 while ((*p_sig)->prev != NULL)
4296 p_sig = &(*p_sig)->prev;
4297
4298 signal = (*p_sig)->signal;
4299 if ((*p_sig)->info.si_signo != 0)
4300 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4301 &(*p_sig)->info);
4302
4303 free (*p_sig);
4304 *p_sig = NULL;
4305 }
4306
4307 if (the_low_target.prepare_to_resume != NULL)
4308 the_low_target.prepare_to_resume (lwp);
4309
4310 regcache_invalidate_thread (thread);
4311 errno = 0;
4312 lwp->stepping = step;
4313 if (step)
4314 ptrace_request = PTRACE_SINGLESTEP;
4315 else if (gdb_catching_syscalls_p (lwp))
4316 ptrace_request = PTRACE_SYSCALL;
4317 else
4318 ptrace_request = PTRACE_CONT;
4319 ptrace (ptrace_request,
4320 lwpid_of (thread),
4321 (PTRACE_TYPE_ARG3) 0,
4322 /* Coerce to a uintptr_t first to avoid potential gcc warning
4323 of coercing an 8 byte integer to a 4 byte pointer. */
4324 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4325
4326 current_thread = saved_thread;
4327 if (errno)
4328 perror_with_name ("resuming thread");
4329
4330 /* Successfully resumed. Clear state that no longer makes sense,
4331 and mark the LWP as running. Must not do this before resuming
4332 otherwise if that fails other code will be confused. E.g., we'd
4333 later try to stop the LWP and hang forever waiting for a stop
4334 status. Note that we must not throw after this is cleared,
4335 otherwise handle_zombie_lwp_error would get confused. */
4336 lwp->stopped = 0;
4337 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4338 }
4339
4340 /* Called when we try to resume a stopped LWP and that errors out. If
4341 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4342 or about to become one), discard the error, clear any pending status
4343 the LWP may have, and return true (we'll collect the exit status
4344 soon enough). Otherwise, return false. */
4345
4346 static int
4347 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4348 {
4349 struct thread_info *thread = get_lwp_thread (lp);
4350
4351 /* If we get an error after resuming the LWP successfully, we'd
4352 confuse !T state for the LWP being gone. */
4353 gdb_assert (lp->stopped);
4354
4355 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4356 because even if ptrace failed with ESRCH, the tracee may be "not
4357 yet fully dead", but already refusing ptrace requests. In that
4358 case the tracee has 'R (Running)' state for a little bit
4359 (observed in Linux 3.18). See also the note on ESRCH in the
4360 ptrace(2) man page. Instead, check whether the LWP has any state
4361 other than ptrace-stopped. */
4362
4363 /* Don't assume anything if /proc/PID/status can't be read. */
4364 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4365 {
4366 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4367 lp->status_pending_p = 0;
4368 return 1;
4369 }
4370 return 0;
4371 }
4372
4373 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4374 disappears while we try to resume it. */
4375
4376 static void
4377 linux_resume_one_lwp (struct lwp_info *lwp,
4378 int step, int signal, siginfo_t *info)
4379 {
4380 TRY
4381 {
4382 linux_resume_one_lwp_throw (lwp, step, signal, info);
4383 }
4384 CATCH (ex, RETURN_MASK_ERROR)
4385 {
4386 if (!check_ptrace_stopped_lwp_gone (lwp))
4387 throw_exception (ex);
4388 }
4389 END_CATCH
4390 }
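/* TRY, CATCH and END_CATCH are GDB's portable exception-handling
   macros from common/common-exceptions.h; RETURN_MASK_ERROR catches
   error()-style exceptions while letting Quit exceptions
   propagate.  */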
4391
4392 struct thread_resume_array
4393 {
4394 struct thread_resume *resume;
4395 size_t n;
4396 };
4397
4398 /* This function is called once per thread via find_inferior.
4399 ARG is a pointer to a thread_resume_array struct.
4400 We look up the thread specified by ENTRY in ARG, and mark the thread
4401 with a pointer to the appropriate resume request.
4402
4403 This algorithm is O(threads * resume elements), but the number of
4404 resume elements is small (and will remain so at least until GDB
4405 supports thread suspension). */
4406
4407 static int
4408 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4409 {
4410 struct thread_info *thread = (struct thread_info *) entry;
4411 struct lwp_info *lwp = get_thread_lwp (thread);
4412 int ndx;
4413 struct thread_resume_array *r;
4414
4415 r = (struct thread_resume_array *) arg;
4416
4417 for (ndx = 0; ndx < r->n; ndx++)
4418 {
4419 ptid_t ptid = r->resume[ndx].thread;
4420 if (ptid_equal (ptid, minus_one_ptid)
4421 || ptid_equal (ptid, entry->id)
4422 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4423 of PID'. */
4424 || (ptid_get_pid (ptid) == pid_of (thread)
4425 && (ptid_is_pid (ptid)
4426 || ptid_get_lwp (ptid) == -1)))
4427 {
4428 if (r->resume[ndx].kind == resume_stop
4429 && thread->last_resume_kind == resume_stop)
4430 {
4431 if (debug_threads)
4432 debug_printf ("already %s LWP %ld at GDB's request\n",
4433 (thread->last_status.kind
4434 == TARGET_WAITKIND_STOPPED)
4435 ? "stopped"
4436 : "stopping",
4437 lwpid_of (thread));
4438
4439 continue;
4440 }
4441
4442 lwp->resume = &r->resume[ndx];
4443 thread->last_resume_kind = lwp->resume->kind;
4444
4445 lwp->step_range_start = lwp->resume->step_range_start;
4446 lwp->step_range_end = lwp->resume->step_range_end;
4447
4448 /* If we had a deferred signal to report, dequeue one now.
4449 This can happen if LWP gets more than one signal while
4450 trying to get out of a jump pad. */
4451 if (lwp->stopped
4452 && !lwp->status_pending_p
4453 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4454 {
4455 lwp->status_pending_p = 1;
4456
4457 if (debug_threads)
4458 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4459 "leaving status pending.\n",
4460 WSTOPSIG (lwp->status_pending),
4461 lwpid_of (thread));
4462 }
4463
4464 return 0;
4465 }
4466 }
4467
4468 /* No resume action for this thread. */
4469 lwp->resume = NULL;
4470
4471 return 0;
4472 }
4473
4474 /* find_inferior callback for linux_resume.
4475 Set *FLAG_P if this lwp has an interesting status pending. */
4476
4477 static int
4478 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4479 {
4480 struct thread_info *thread = (struct thread_info *) entry;
4481 struct lwp_info *lwp = get_thread_lwp (thread);
4482
4483 /* LWPs which will not be resumed are not interesting, because
4484 we might not wait for them next time through linux_wait. */
4485 if (lwp->resume == NULL)
4486 return 0;
4487
4488 if (thread_still_has_status_pending_p (thread))
4489 * (int *) flag_p = 1;
4490
4491 return 0;
4492 }
4493
4494 /* Return 1 if this lwp that GDB wants running is stopped at an
4495 internal breakpoint that we need to step over. It assumes that any
4496 required STOP_PC adjustment has already been propagated to the
4497 inferior's regcache. */
4498
4499 static int
4500 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4501 {
4502 struct thread_info *thread = (struct thread_info *) entry;
4503 struct lwp_info *lwp = get_thread_lwp (thread);
4504 struct thread_info *saved_thread;
4505 CORE_ADDR pc;
4506 struct process_info *proc = get_thread_process (thread);
4507
4508 /* GDBserver is skipping the extra traps from the wrapper program
4509 (see option --wrapper); no step-over is needed. */
4510 if (proc->tdesc == NULL)
4511 return 0;
4512
4513 /* LWPs which will not be resumed are not interesting, because we
4514 might not wait for them next time through linux_wait. */
4515
4516 if (!lwp->stopped)
4517 {
4518 if (debug_threads)
4519 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4520 lwpid_of (thread));
4521 return 0;
4522 }
4523
4524 if (thread->last_resume_kind == resume_stop)
4525 {
4526 if (debug_threads)
4527 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4528 " stopped\n",
4529 lwpid_of (thread));
4530 return 0;
4531 }
4532
4533 gdb_assert (lwp->suspended >= 0);
4534
4535 if (lwp->suspended)
4536 {
4537 if (debug_threads)
4538 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4539 lwpid_of (thread));
4540 return 0;
4541 }
4542
4543 if (!lwp->need_step_over)
4544 {
4545 if (debug_threads)
4546 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4547 }
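/* Note that when the flag is clear we do not return early here: the
   flag is only a hint recorded when the original stop was processed,
   and a breakpoint may have been inserted at this PC since then, so
   the PC/breakpoint checks below are what actually decide whether a
   step-over is needed.  */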
4548
4549 if (lwp->status_pending_p)
4550 {
4551 if (debug_threads)
4552 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4553 " status.\n",
4554 lwpid_of (thread));
4555 return 0;
4556 }
4557
4558 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4559 or we have. */
4560 pc = get_pc (lwp);
4561
4562 /* If the PC has changed since we stopped, then don't do anything,
4563 and let the breakpoint/tracepoint be hit. This happens if, for
4564 instance, GDB handled the decr_pc_after_break subtraction itself,
4565 GDB is OOL stepping this thread, or the user has issued a "jump"
4566 command, or poked thread's registers herself. */
4567 if (pc != lwp->stop_pc)
4568 {
4569 if (debug_threads)
4570 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4571 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4572 lwpid_of (thread),
4573 paddress (lwp->stop_pc), paddress (pc));
4574
4575 lwp->need_step_over = 0;
4576 return 0;
4577 }
4578
4579 saved_thread = current_thread;
4580 current_thread = thread;
4581
4582 /* We can only step over breakpoints we know about. */
4583 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4584 {
4585 /* Don't step over a breakpoint that GDB expects to hit,
4586 though. But if the condition is being evaluated on the target's
4587 side and it evaluates to false, step over this breakpoint as well. */
4588 if (gdb_breakpoint_here (pc)
4589 && gdb_condition_true_at_breakpoint (pc)
4590 && gdb_no_commands_at_breakpoint (pc))
4591 {
4592 if (debug_threads)
4593 debug_printf ("Need step over [LWP %ld]? yes, but found"
4594 " GDB breakpoint at 0x%s; skipping step over\n",
4595 lwpid_of (thread), paddress (pc));
4596
4597 current_thread = saved_thread;
4598 return 0;
4599 }
4600 else
4601 {
4602 if (debug_threads)
4603 debug_printf ("Need step over [LWP %ld]? yes, "
4604 "found breakpoint at 0x%s\n",
4605 lwpid_of (thread), paddress (pc));
4606
4607 /* We've found an lwp that needs stepping over --- return 1 so
4608 that find_inferior stops looking. */
4609 current_thread = saved_thread;
4610
4611 /* If the step over is cancelled, this is set again. */
4612 lwp->need_step_over = 0;
4613 return 1;
4614 }
4615 }
4616
4617 current_thread = saved_thread;
4618
4619 if (debug_threads)
4620 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4621 " at 0x%s\n",
4622 lwpid_of (thread), paddress (pc));
4623
4624 return 0;
4625 }
4626
4627 /* Start a step-over operation on LWP. When LWP stops at a
4628 breakpoint, we need to move the breakpoint out of the way to make
4629 progress. If we let other threads run while we do that, they may
4630 pass by the breakpoint location and miss hitting it. To avoid
4631 that, a step-over momentarily stops all threads while LWP is
4632 single-stepped with the breakpoint temporarily uninserted from
4633 the inferior. When the single-step finishes, we reinsert the
4634 breakpoint and let all threads that are supposed to be running
4635 run again.
4636
4637 On targets that don't support hardware single-step, we don't
4638 currently support full software single-stepping. Instead, we only
4639 support stepping over the thread event breakpoint, by asking the
4640 low target where to place a reinsert breakpoint. Since this
4641 routine assumes the breakpoint being stepped over is a thread event
4642 breakpoint, it usually assumes the return address of the current
4643 function is a good enough place to set the reinsert breakpoint. */
4644
4645 static int
4646 start_step_over (struct lwp_info *lwp)
4647 {
4648 struct thread_info *thread = get_lwp_thread (lwp);
4649 struct thread_info *saved_thread;
4650 CORE_ADDR pc;
4651 int step;
4652
4653 if (debug_threads)
4654 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4655 lwpid_of (thread));
4656
4657 stop_all_lwps (1, lwp);
4658
4659 if (lwp->suspended != 0)
4660 {
4661 internal_error (__FILE__, __LINE__,
4662 "LWP %ld suspended=%d\n", lwpid_of (thread),
4663 lwp->suspended);
4664 }
4665
4666 if (debug_threads)
4667 debug_printf ("Done stopping all threads for step-over.\n");
4668
4669 /* Note, we should always reach here with an already adjusted PC,
4670 either by GDB (if we're resuming due to GDB's request), or by our
4671 caller, if we just finished handling an internal breakpoint GDB
4672 shouldn't care about. */
4673 pc = get_pc (lwp);
4674
4675 saved_thread = current_thread;
4676 current_thread = thread;
4677
4678 lwp->bp_reinsert = pc;
4679 uninsert_breakpoints_at (pc);
4680 uninsert_fast_tracepoint_jumps_at (pc);
4681
4682 step = single_step (lwp);
4683
4684 current_thread = saved_thread;
4685
4686 linux_resume_one_lwp (lwp, step, 0, NULL);
4687
4688 /* Require next event from this LWP. */
4689 step_over_bkpt = thread->entry.id;
4690 return 1;
4691 }
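/* The overall step-over sequence implemented by start_step_over and
   finish_step_over, as an annotated sketch of the calls above:

     stop_all_lwps (1, lwp);              (pause and suspend the others)
     lwp->bp_reinsert = pc;
     uninsert_breakpoints_at (pc);        (lift the breakpoint at PC)
     linux_resume_one_lwp (lwp, step, 0, NULL);
                                          (hw single-step, or run to a
                                           software reinsert breakpoint)
     step_over_bkpt = thread->entry.id;   (expect the next event here)
       ... the step event arrives in linux_wait_1 ...
     finish_step_over: reinsert_breakpoints_at (lwp->bp_reinsert);
     and the remaining threads are unsuspended/unstopped.  */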
4692
4693 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4694 start_step_over, if still there, and delete any reinsert
4695 breakpoints we've set, on non-hardware single-step targets. */
4696
4697 static int
4698 finish_step_over (struct lwp_info *lwp)
4699 {
4700 if (lwp->bp_reinsert != 0)
4701 {
4702 if (debug_threads)
4703 debug_printf ("Finished step over.\n");
4704
4705 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4706 may be no breakpoint to reinsert there by now. */
4707 reinsert_breakpoints_at (lwp->bp_reinsert);
4708 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4709
4710 lwp->bp_reinsert = 0;
4711
4712 /* Delete any software-single-step reinsert breakpoints. No
4713 longer needed. We don't have to worry about other threads
4714 hitting this trap, and later not being able to explain it,
4715 because we were stepping over a breakpoint, and we hold all
4716 threads but LWP stopped while doing that. */
4717 if (!can_hardware_single_step ())
4718 delete_reinsert_breakpoints ();
4719
4720 step_over_bkpt = null_ptid;
4721 return 1;
4722 }
4723 else
4724 return 0;
4725 }
4726
4727 /* If there's a step over in progress, wait until all threads stop
4728 (that is, until the stepping thread finishes its step), and
4729 unsuspend all lwps. The stepping thread ends with its status
4730 pending, which is processed later when we get back to processing
4731 events. */
4732
4733 static void
4734 complete_ongoing_step_over (void)
4735 {
4736 if (!ptid_equal (step_over_bkpt, null_ptid))
4737 {
4738 struct lwp_info *lwp;
4739 int wstat;
4740 int ret;
4741
4742 if (debug_threads)
4743 debug_printf ("detach: step over in progress, finish it first\n");
4744
4745 /* Passing NULL_PTID as filter indicates we want all events to
4746 be left pending. Eventually this returns when there are no
4747 unwaited-for children left. */
4748 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4749 &wstat, __WALL);
4750 gdb_assert (ret == -1);
4751
4752 lwp = find_lwp_pid (step_over_bkpt);
4753 if (lwp != NULL)
4754 finish_step_over (lwp);
4755 step_over_bkpt = null_ptid;
4756 unsuspend_all_lwps (lwp);
4757 }
4758 }
4759
4760 /* This function is called once per thread. We check the thread's resume
4761 request, which will tell us whether to resume, step, or leave the thread
4762 stopped; and what signal, if any, it should be sent.
4763
4764 For threads which we aren't explicitly told otherwise, we preserve
4765 the stepping flag; this is used for stepping over gdbserver-placed
4766 breakpoints.
4767
4768 If pending_flags was set in any thread, we queue any needed
4769 signals, since we won't actually resume. We already have a pending
4770 event to report, so we don't need to preserve any step requests;
4771 they should be re-issued if necessary. */
4772
4773 static int
4774 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4775 {
4776 struct thread_info *thread = (struct thread_info *) entry;
4777 struct lwp_info *lwp = get_thread_lwp (thread);
4778 int step;
4779 int leave_all_stopped = * (int *) arg;
4780 int leave_pending;
4781
4782 if (lwp->resume == NULL)
4783 return 0;
4784
4785 if (lwp->resume->kind == resume_stop)
4786 {
4787 if (debug_threads)
4788 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4789
4790 if (!lwp->stopped)
4791 {
4792 if (debug_threads)
4793 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4794
4795 /* Stop the thread, and wait for the event asynchronously,
4796 through the event loop. */
4797 send_sigstop (lwp);
4798 }
4799 else
4800 {
4801 if (debug_threads)
4802 debug_printf ("already stopped LWP %ld\n",
4803 lwpid_of (thread));
4804
4805 /* The LWP may have been stopped in an internal event that
4806 was not meant to be notified back to GDB (e.g., gdbserver
4807 breakpoint), so we should be reporting a stop event in
4808 this case too. */
4809
4810 /* If the thread already has a pending SIGSTOP, this is a
4811 no-op. Otherwise, something later will presumably resume
4812 the thread and this will cause it to cancel any pending
4813 operation, due to last_resume_kind == resume_stop. If
4814 the thread already has a pending status to report, we
4815 will still report it the next time we wait - see
4816 status_pending_p_callback. */
4817
4818 /* If we already have a pending signal to report, then
4819 there's no need to queue a SIGSTOP, as this means we're
4820 midway through moving the LWP out of the jumppad, and we
4821 will report the pending signal as soon as that is
4822 finished. */
4823 if (lwp->pending_signals_to_report == NULL)
4824 send_sigstop (lwp);
4825 }
4826
4827 /* For stop requests, we're done. */
4828 lwp->resume = NULL;
4829 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4830 return 0;
4831 }
4832
4833 /* If this thread which is about to be resumed has a pending status,
4834 then don't resume it - we can just report the pending status.
4835 Likewise if it is suspended, because e.g., another thread is
4836 stepping past a breakpoint. Make sure to queue any signals that
4837 would otherwise be sent. In all-stop mode, we base this decision
4838 on whether *any* thread has a pending status. If there's a
4839 thread that needs the step-over-breakpoint dance, then don't
4840 resume any other thread but that particular one. */
4841 leave_pending = (lwp->suspended
4842 || lwp->status_pending_p
4843 || leave_all_stopped);
4844
4845 if (!leave_pending)
4846 {
4847 if (debug_threads)
4848 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4849
4850 step = (lwp->resume->kind == resume_step);
4851 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4852 }
4853 else
4854 {
4855 if (debug_threads)
4856 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4857
4858 /* If we have a new signal, enqueue the signal. */
4859 if (lwp->resume->sig != 0)
4860 {
4861 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4862
4863 p_sig->prev = lwp->pending_signals;
4864 p_sig->signal = lwp->resume->sig;
4865
4866 /* If this is the same signal we were previously stopped by,
4867 make sure to queue its siginfo. We can ignore the return
4868 value of ptrace; if it fails, we'll skip
4869 PTRACE_SETSIGINFO. */
4870 if (WIFSTOPPED (lwp->last_status)
4871 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4872 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4873 &p_sig->info);
4874
4875 lwp->pending_signals = p_sig;
4876 }
4877 }
4878
4879 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4880 lwp->resume = NULL;
4881 return 0;
4882 }
4883
4884 static void
4885 linux_resume (struct thread_resume *resume_info, size_t n)
4886 {
4887 struct thread_resume_array array = { resume_info, n };
4888 struct thread_info *need_step_over = NULL;
4889 int any_pending;
4890 int leave_all_stopped;
4891
4892 if (debug_threads)
4893 {
4894 debug_enter ();
4895 debug_printf ("linux_resume:\n");
4896 }
4897
4898 find_inferior (&all_threads, linux_set_resume_request, &array);
4899
4900 /* If there is a thread which would otherwise be resumed, which has
4901 a pending status, then don't resume any threads - we can just
4902 report the pending status. Make sure to queue any signals that
4903 would otherwise be sent. In non-stop mode, we'll apply this
4904 logic to each thread individually. We consume all pending events
4905 before considering starting a step-over (in all-stop). */
4906 any_pending = 0;
4907 if (!non_stop)
4908 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4909
4910 /* If there is a thread which would otherwise be resumed, which is
4911 stopped at a breakpoint that needs stepping over, then don't
4912 resume any threads - have it step over the breakpoint with all
4913 other threads stopped, then resume all threads again. Make sure
4914 to queue any signals that would otherwise be delivered or
4915 queued. */
4916 if (!any_pending && supports_breakpoints ())
4917 need_step_over
4918 = (struct thread_info *) find_inferior (&all_threads,
4919 need_step_over_p, NULL);
4920
4921 leave_all_stopped = (need_step_over != NULL || any_pending);
4922
4923 if (debug_threads)
4924 {
4925 if (need_step_over != NULL)
4926 debug_printf ("Not resuming all, need step over\n");
4927 else if (any_pending)
4928 debug_printf ("Not resuming, all-stop and found "
4929 "an LWP with pending status\n");
4930 else
4931 debug_printf ("Resuming, no pending status or step over needed\n");
4932 }
4933
4934 /* Even if we're leaving threads stopped, queue all signals we'd
4935 otherwise deliver. */
4936 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4937
4938 if (need_step_over)
4939 start_step_over (get_thread_lwp (need_step_over));
4940
4941 if (debug_threads)
4942 {
4943 debug_printf ("linux_resume done\n");
4944 debug_exit ();
4945 }
4946
4947 /* We may have events that were pending that can/should be sent to
4948 the client now. Trigger a linux_wait call. */
4949 if (target_is_async_p ())
4950 async_file_mark ();
4951 }
4952
4953 /* This function is called once per thread. We check the thread's
4954 last resume request, which will tell us whether to resume, step, or
4955 leave the thread stopped. Any signal the client requested to be
4956 delivered has already been enqueued at this point.
4957
4958 If any thread that GDB wants running is stopped at an internal
4959 breakpoint that needs stepping over, we start a step-over operation
4960 on that particular thread, and leave all others stopped. */
4961
4962 static int
4963 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4964 {
4965 struct thread_info *thread = (struct thread_info *) entry;
4966 struct lwp_info *lwp = get_thread_lwp (thread);
4967 int step;
4968
4969 if (lwp == except)
4970 return 0;
4971
4972 if (debug_threads)
4973 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4974
4975 if (!lwp->stopped)
4976 {
4977 if (debug_threads)
4978 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4979 return 0;
4980 }
4981
4982 if (thread->last_resume_kind == resume_stop
4983 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4984 {
4985 if (debug_threads)
4986 debug_printf (" client wants LWP %ld to remain stopped\n",
4987 lwpid_of (thread));
4988 return 0;
4989 }
4990
4991 if (lwp->status_pending_p)
4992 {
4993 if (debug_threads)
4994 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4995 lwpid_of (thread));
4996 return 0;
4997 }
4998
4999 gdb_assert (lwp->suspended >= 0);
5000
5001 if (lwp->suspended)
5002 {
5003 if (debug_threads)
5004 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5005 return 0;
5006 }
5007
5008 if (thread->last_resume_kind == resume_stop
5009 && lwp->pending_signals_to_report == NULL
5010 && lwp->collecting_fast_tracepoint == 0)
5011 {
5012 /* We haven't reported this LWP as stopped yet (otherwise, the
5013 last_status.kind check above would catch it, and we wouldn't
5014 reach here). This LWP may have been momentarily paused by a
5015 stop_all_lwps call while handling, for example, another LWP's
5016 step-over. In that case, the pending expected SIGSTOP signal
5017 that was queued at vCont;t handling time will have already
5018 been consumed by wait_for_sigstop, so we need to requeue
5019 another one here. Note that if the LWP already has a SIGSTOP
5020 pending, this is a no-op. */
5021
5022 if (debug_threads)
5023 debug_printf ("Client wants LWP %ld to stop. "
5024 "Making sure it has a SIGSTOP pending\n",
5025 lwpid_of (thread));
5026
5027 send_sigstop (lwp);
5028 }
5029
5030 if (thread->last_resume_kind == resume_step)
5031 {
5032 if (debug_threads)
5033 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5034 lwpid_of (thread));
5035 step = 1;
5036 }
5037 else if (lwp->bp_reinsert != 0)
5038 {
5039 if (debug_threads)
5040 debug_printf (" stepping LWP %ld, reinsert set\n",
5041 lwpid_of (thread));
5042 step = 1;
5043 }
5044 else
5045 step = 0;
5046
5047 linux_resume_one_lwp (lwp, step, 0, NULL);
5048 return 0;
5049 }
5050
5051 static int
5052 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5053 {
5054 struct thread_info *thread = (struct thread_info *) entry;
5055 struct lwp_info *lwp = get_thread_lwp (thread);
5056
5057 if (lwp == except)
5058 return 0;
5059
5060 lwp_suspended_decr (lwp);
5061
5062 return proceed_one_lwp (entry, except);
5063 }
5064
5065 /* When we finish a step-over, set threads running again. If there's
5066 another thread that may need a step-over, now's the time to start
5067 it. Eventually, we'll move all threads past their breakpoints. */
5068
5069 static void
5070 proceed_all_lwps (void)
5071 {
5072 struct thread_info *need_step_over;
5073
5074 /* If there is a thread which would otherwise be resumed, which is
5075 stopped at a breakpoint that needs stepping over, then don't
5076 resume any threads - have it step over the breakpoint with all
5077 other threads stopped, then resume all threads again. */
5078
5079 if (supports_breakpoints ())
5080 {
5081 need_step_over
5082 = (struct thread_info *) find_inferior (&all_threads,
5083 need_step_over_p, NULL);
5084
5085 if (need_step_over != NULL)
5086 {
5087 if (debug_threads)
5088 debug_printf ("proceed_all_lwps: found "
5089 "thread %ld needing a step-over\n",
5090 lwpid_of (need_step_over));
5091
5092 start_step_over (get_thread_lwp (need_step_over));
5093 return;
5094 }
5095 }
5096
5097 if (debug_threads)
5098 debug_printf ("Proceeding, no step-over needed\n");
5099
5100 find_inferior (&all_threads, proceed_one_lwp, NULL);
5101 }
5102
5103 /* Stopped LWPs that the client wanted to be running, that don't have
5104 pending statuses, are set to run again, except for EXCEPT, if not
5105 NULL. This undoes a stop_all_lwps call. */
5106
5107 static void
5108 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5109 {
5110 if (debug_threads)
5111 {
5112 debug_enter ();
5113 if (except)
5114 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5115 lwpid_of (get_lwp_thread (except)));
5116 else
5117 debug_printf ("unstopping all lwps\n");
5118 }
5119
5120 if (unsuspend)
5121 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5122 else
5123 find_inferior (&all_threads, proceed_one_lwp, except);
5124
5125 if (debug_threads)
5126 {
5127 debug_printf ("unstop_all_lwps done\n");
5128 debug_exit ();
5129 }
5130 }
5131
5132
5133 #ifdef HAVE_LINUX_REGSETS
5134
5135 #define use_linux_regsets 1
5136
5137 /* Returns true if REGSET has been disabled. */
5138
5139 static int
5140 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5141 {
5142 return (info->disabled_regsets != NULL
5143 && info->disabled_regsets[regset - info->regsets]);
5144 }
5145
5146 /* Disable REGSET. */
5147
5148 static void
5149 disable_regset (struct regsets_info *info, struct regset_info *regset)
5150 {
5151 int dr_offset;
5152
5153 dr_offset = regset - info->regsets;
5154 if (info->disabled_regsets == NULL)
5155 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5156 info->disabled_regsets[dr_offset] = 1;
5157 }
5158
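/* Regsets with a non-zero nt_type are transferred with the modern
   PTRACE_GETREGSET/PTRACE_SETREGSET interface, which takes the NT_*
   note type in the address argument and a struct iovec describing
   the buffer in the data argument; on success the kernel updates
   iov_len to the number of bytes actually transferred.  For
   illustration only:

     struct iovec iov = { buf, size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   Regsets without an nt_type use the legacy fixed-layout requests
   (PTRACE_GETREGS, PTRACE_GETFPREGS, ...) recorded in their
   get_request/set_request fields.  */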
5159 static int
5160 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5161 struct regcache *regcache)
5162 {
5163 struct regset_info *regset;
5164 int saw_general_regs = 0;
5165 int pid;
5166 struct iovec iov;
5167
5168 pid = lwpid_of (current_thread);
5169 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5170 {
5171 void *buf, *data;
5172 int nt_type, res;
5173
5174 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5175 continue;
5176
5177 buf = xmalloc (regset->size);
5178
5179 nt_type = regset->nt_type;
5180 if (nt_type)
5181 {
5182 iov.iov_base = buf;
5183 iov.iov_len = regset->size;
5184 data = (void *) &iov;
5185 }
5186 else
5187 data = buf;
5188
5189 #ifndef __sparc__
5190 res = ptrace (regset->get_request, pid,
5191 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5192 #else
5193 res = ptrace (regset->get_request, pid, data, nt_type);
5194 #endif
5195 if (res < 0)
5196 {
5197 if (errno == EIO)
5198 {
5199 /* If we get EIO on a regset, do not try it again for
5200 this process mode. */
5201 disable_regset (regsets_info, regset);
5202 }
5203 else if (errno == ENODATA)
5204 {
5205 /* ENODATA may be returned if the regset is currently
5206 not "active". This can happen in normal operation,
5207 so suppress the warning in this case. */
5208 }
5209 else
5210 {
5211 char s[256];
5212 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5213 pid);
5214 perror (s);
5215 }
5216 }
5217 else
5218 {
5219 if (regset->type == GENERAL_REGS)
5220 saw_general_regs = 1;
5221 regset->store_function (regcache, buf);
5222 }
5223 free (buf);
5224 }
5225 if (saw_general_regs)
5226 return 0;
5227 else
5228 return 1;
5229 }
5230
5231 static int
5232 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5233 struct regcache *regcache)
5234 {
5235 struct regset_info *regset;
5236 int saw_general_regs = 0;
5237 int pid;
5238 struct iovec iov;
5239
5240 pid = lwpid_of (current_thread);
5241 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5242 {
5243 void *buf, *data;
5244 int nt_type, res;
5245
5246 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5247 || regset->fill_function == NULL)
5248 continue;
5249
5250 buf = xmalloc (regset->size);
5251
5252 /* First fill the buffer with the current register set contents,
5253 in case there are any items in the kernel's regset that are
5254 not in gdbserver's regcache. */
5255
5256 nt_type = regset->nt_type;
5257 if (nt_type)
5258 {
5259 iov.iov_base = buf;
5260 iov.iov_len = regset->size;
5261 data = (void *) &iov;
5262 }
5263 else
5264 data = buf;
5265
5266 #ifndef __sparc__
5267 res = ptrace (regset->get_request, pid,
5268 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5269 #else
5270 res = ptrace (regset->get_request, pid, data, nt_type);
5271 #endif
5272
5273 if (res == 0)
5274 {
5275 /* Then overlay our cached registers on that. */
5276 regset->fill_function (regcache, buf);
5277
5278 /* Only now do we write the register set. */
5279 #ifndef __sparc__
5280 res = ptrace (regset->set_request, pid,
5281 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5282 #else
5283 res = ptrace (regset->set_request, pid, data, nt_type);
5284 #endif
5285 }
5286
5287 if (res < 0)
5288 {
5289 if (errno == EIO)
5290 {
5291 /* If we get EIO on a regset, do not try it again for
5292 this process mode. */
5293 disable_regset (regsets_info, regset);
5294 }
5295 else if (errno == ESRCH)
5296 {
5297 /* At this point, ESRCH should mean the process is
5298 already gone, in which case we simply ignore attempts
5299 to change its registers. See also the related
5300 comment in linux_resume_one_lwp. */
5301 free (buf);
5302 return 0;
5303 }
5304 else
5305 {
5306 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5307 }
5308 }
5309 else if (regset->type == GENERAL_REGS)
5310 saw_general_regs = 1;
5311 free (buf);
5312 }
5313 if (saw_general_regs)
5314 return 0;
5315 else
5316 return 1;
5317 }
5318
5319 #else /* !HAVE_LINUX_REGSETS */
5320
5321 #define use_linux_regsets 0
5322 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5323 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5324
5325 #endif
5326
5327 /* Return 1 if register REGNO is supported by one of the regset ptrace
5328 calls or 0 if it has to be transferred individually. */
5329
5330 static int
5331 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5332 {
5333 unsigned char mask = 1 << (regno % 8);
5334 size_t index = regno / 8;
5335
5336 return (use_linux_regsets
5337 && (regs_info->regset_bitmap == NULL
5338 || (regs_info->regset_bitmap[index] & mask) != 0));
5339 }
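/* For example, regno 10 tests bit 2 (mask 0x04) of regset_bitmap[1],
   and a NULL regset_bitmap means every register is covered by some
   regset.  */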
5340
5341 #ifdef HAVE_LINUX_USRREGS
5342
5343 static int
5344 register_addr (const struct usrregs_info *usrregs, int regnum)
5345 {
5346 int addr;
5347
5348 if (regnum < 0 || regnum >= usrregs->num_regs)
5349 error ("Invalid register number %d.", regnum);
5350
5351 addr = usrregs->regmap[regnum];
5352
5353 return addr;
5354 }
5355
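/* PTRACE_PEEKUSER and PTRACE_POKEUSER transfer one word
   (PTRACE_XFER_TYPE) per call, so fetch_register and store_register
   below round the register size up to a whole number of words with
   (size + sizeof (word) - 1) & -sizeof (word); e.g. a 10-byte
   register with 8-byte words is transferred as 16 bytes.  */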
5356 /* Fetch one register. */
5357 static void
5358 fetch_register (const struct usrregs_info *usrregs,
5359 struct regcache *regcache, int regno)
5360 {
5361 CORE_ADDR regaddr;
5362 int i, size;
5363 char *buf;
5364 int pid;
5365
5366 if (regno >= usrregs->num_regs)
5367 return;
5368 if ((*the_low_target.cannot_fetch_register) (regno))
5369 return;
5370
5371 regaddr = register_addr (usrregs, regno);
5372 if (regaddr == -1)
5373 return;
5374
5375 size = ((register_size (regcache->tdesc, regno)
5376 + sizeof (PTRACE_XFER_TYPE) - 1)
5377 & -sizeof (PTRACE_XFER_TYPE));
5378 buf = (char *) alloca (size);
5379
5380 pid = lwpid_of (current_thread);
5381 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5382 {
5383 errno = 0;
5384 *(PTRACE_XFER_TYPE *) (buf + i) =
5385 ptrace (PTRACE_PEEKUSER, pid,
5386 /* Coerce to a uintptr_t first to avoid potential gcc warning
5387 of coercing an 8 byte integer to a 4 byte pointer. */
5388 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5389 regaddr += sizeof (PTRACE_XFER_TYPE);
5390 if (errno != 0)
5391 error ("reading register %d: %s", regno, strerror (errno));
5392 }
5393
5394 if (the_low_target.supply_ptrace_register)
5395 the_low_target.supply_ptrace_register (regcache, regno, buf);
5396 else
5397 supply_register (regcache, regno, buf);
5398 }
5399
5400 /* Store one register. */
5401 static void
5402 store_register (const struct usrregs_info *usrregs,
5403 struct regcache *regcache, int regno)
5404 {
5405 CORE_ADDR regaddr;
5406 int i, size;
5407 char *buf;
5408 int pid;
5409
5410 if (regno >= usrregs->num_regs)
5411 return;
5412 if ((*the_low_target.cannot_store_register) (regno))
5413 return;
5414
5415 regaddr = register_addr (usrregs, regno);
5416 if (regaddr == -1)
5417 return;
5418
5419 size = ((register_size (regcache->tdesc, regno)
5420 + sizeof (PTRACE_XFER_TYPE) - 1)
5421 & -sizeof (PTRACE_XFER_TYPE));
5422 buf = (char *) alloca (size);
5423 memset (buf, 0, size);
5424
5425 if (the_low_target.collect_ptrace_register)
5426 the_low_target.collect_ptrace_register (regcache, regno, buf);
5427 else
5428 collect_register (regcache, regno, buf);
5429
5430 pid = lwpid_of (current_thread);
5431 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5432 {
5433 errno = 0;
5434 ptrace (PTRACE_POKEUSER, pid,
5435 /* Coerce to a uintptr_t first to avoid potential gcc warning
5436 about coercing an 8 byte integer to a 4 byte pointer. */
5437 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5438 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5439 if (errno != 0)
5440 {
5441 /* At this point, ESRCH should mean the process is
5442 already gone, in which case we simply ignore attempts
5443 to change its registers. See also the related
5444 comment in linux_resume_one_lwp. */
5445 if (errno == ESRCH)
5446 return;
5447
5448 if ((*the_low_target.cannot_store_register) (regno) == 0)
5449 error ("writing register %d: %s", regno, strerror (errno));
5450 }
5451 regaddr += sizeof (PTRACE_XFER_TYPE);
5452 }
5453 }
5454
5455 /* Fetch all registers, or just one, from the child process.
5456 If REGNO is -1, do this for all registers, skipping any that are
5457 assumed to have been retrieved by regsets_fetch_inferior_registers,
5458 unless ALL is non-zero.
5459 Otherwise, REGNO specifies which register (so we can save time). */
5460 static void
5461 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5462 struct regcache *regcache, int regno, int all)
5463 {
5464 struct usrregs_info *usr = regs_info->usrregs;
5465
5466 if (regno == -1)
5467 {
5468 for (regno = 0; regno < usr->num_regs; regno++)
5469 if (all || !linux_register_in_regsets (regs_info, regno))
5470 fetch_register (usr, regcache, regno);
5471 }
5472 else
5473 fetch_register (usr, regcache, regno);
5474 }
5475
5476 /* Store our register values back into the inferior.
5477 If REGNO is -1, do this for all registers, skipping any that are
5478 assumed to have been saved by regsets_store_inferior_registers,
5479 unless ALL is non-zero.
5480 Otherwise, REGNO specifies which register (so we can save time). */
5481 static void
5482 usr_store_inferior_registers (const struct regs_info *regs_info,
5483 struct regcache *regcache, int regno, int all)
5484 {
5485 struct usrregs_info *usr = regs_info->usrregs;
5486
5487 if (regno == -1)
5488 {
5489 for (regno = 0; regno < usr->num_regs; regno++)
5490 if (all || !linux_register_in_regsets (regs_info, regno))
5491 store_register (usr, regcache, regno);
5492 }
5493 else
5494 store_register (usr, regcache, regno);
5495 }
5496
5497 #else /* !HAVE_LINUX_USRREGS */
5498
5499 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5500 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5501
5502 #endif
5503
5504
5505 static void
5506 linux_fetch_registers (struct regcache *regcache, int regno)
5507 {
5508 int use_regsets;
5509 int all = 0;
5510 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5511
5512 if (regno == -1)
5513 {
5514 if (the_low_target.fetch_register != NULL
5515 && regs_info->usrregs != NULL)
5516 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5517 (*the_low_target.fetch_register) (regcache, regno);
5518
5519 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5520 if (regs_info->usrregs != NULL)
5521 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5522 }
5523 else
5524 {
5525 if (the_low_target.fetch_register != NULL
5526 && (*the_low_target.fetch_register) (regcache, regno))
5527 return;
5528
5529 use_regsets = linux_register_in_regsets (regs_info, regno);
5530 if (use_regsets)
5531 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5532 regcache);
5533 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5534 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5535 }
5536 }
5537
5538 static void
5539 linux_store_registers (struct regcache *regcache, int regno)
5540 {
5541 int use_regsets;
5542 int all = 0;
5543 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5544
5545 if (regno == -1)
5546 {
5547 all = regsets_store_inferior_registers (regs_info->regsets_info,
5548 regcache);
5549 if (regs_info->usrregs != NULL)
5550 usr_store_inferior_registers (regs_info, regcache, regno, all);
5551 }
5552 else
5553 {
5554 use_regsets = linux_register_in_regsets (regs_info, regno);
5555 if (use_regsets)
5556 all = regsets_store_inferior_registers (regs_info->regsets_info,
5557 regcache);
5558 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5559 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5560 }
5561 }
5562
5563
5564 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5565 to debugger memory starting at MYADDR. */
5566
5567 static int
5568 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5569 {
5570 int pid = lwpid_of (current_thread);
5571 register PTRACE_XFER_TYPE *buffer;
5572 register CORE_ADDR addr;
5573 register int count;
5574 char filename[64];
5575 register int i;
5576 int ret;
5577 int fd;
5578
5579   /* Try using /proc.  Don't bother for short reads of fewer than
   three words.  */
5580 if (len >= 3 * sizeof (long))
5581 {
5582 int bytes;
5583
5584 /* We could keep this file open and cache it - possibly one per
5585 thread. That requires some juggling, but is even faster. */
5586 sprintf (filename, "/proc/%d/mem", pid);
5587 fd = open (filename, O_RDONLY | O_LARGEFILE);
5588 if (fd == -1)
5589 goto no_proc;
5590
5591 /* If pread64 is available, use it. It's faster if the kernel
5592 supports it (only one syscall), and it's 64-bit safe even on
5593 32-bit platforms (for instance, SPARC debugging a SPARC64
5594 application). */
5595 #ifdef HAVE_PREAD64
5596 bytes = pread64 (fd, myaddr, len, memaddr);
5597 #else
5598 bytes = -1;
5599 if (lseek (fd, memaddr, SEEK_SET) != -1)
5600 bytes = read (fd, myaddr, len);
5601 #endif
5602
5603 close (fd);
5604 if (bytes == len)
5605 return 0;
5606
5607       /* Some data was read; we'll try to get the rest with ptrace.  */
5608 if (bytes > 0)
5609 {
5610 memaddr += bytes;
5611 myaddr += bytes;
5612 len -= bytes;
5613 }
5614 }
5615
5616 no_proc:
5617 /* Round starting address down to longword boundary. */
5618 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5619 /* Round ending address up; get number of longwords that makes. */
5620 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5621 / sizeof (PTRACE_XFER_TYPE));
5622 /* Allocate buffer of that many longwords. */
5623 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5624
5625   /* Read all the longwords.  */
5626 errno = 0;
5627 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5628 {
5629 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5630 about coercing an 8 byte integer to a 4 byte pointer. */
5631 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5632 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5633 (PTRACE_TYPE_ARG4) 0);
5634 if (errno)
5635 break;
5636 }
5637 ret = errno;
5638
5639 /* Copy appropriate bytes out of the buffer. */
5640 if (i > 0)
5641 {
5642 i *= sizeof (PTRACE_XFER_TYPE);
5643 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5644 memcpy (myaddr,
5645 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5646 i < len ? i : len);
5647 }
5648
5649 return ret;
5650 }
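#if 0
/* A standalone sketch (not part of gdbserver) of the /proc/PID/mem
   fast path above: one pread replaces a long run of PTRACE_PEEKTEXT
   calls.  It assumes an LP64 Linux host (32-bit hosts want pread64 and
   O_LARGEFILE, as noted above) and that the caller has already
   ptrace-stopped the tracee; the helper name is illustrative.  */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static ssize_t
proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif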
5651
5652 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5653 memory at MEMADDR. On failure (cannot write to the inferior)
5654 returns the value of errno. Always succeeds if LEN is zero. */
5655
5656 static int
5657 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5658 {
5659 register int i;
5660 /* Round starting address down to longword boundary. */
5661 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5662 /* Round ending address up; get number of longwords that makes. */
5663 register int count
5664 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5665 / sizeof (PTRACE_XFER_TYPE);
5666
5667 /* Allocate buffer of that many longwords. */
5668 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5669
5670 int pid = lwpid_of (current_thread);
5671
5672 if (len == 0)
5673 {
5674 /* Zero length write always succeeds. */
5675 return 0;
5676 }
5677
5678 if (debug_threads)
5679 {
5680 /* Dump up to four bytes. */
5681 char str[4 * 2 + 1];
5682 char *p = str;
5683 int dump = len < 4 ? len : 4;
5684
5685 for (i = 0; i < dump; i++)
5686 {
5687 sprintf (p, "%02x", myaddr[i]);
5688 p += 2;
5689 }
5690 *p = '\0';
5691
5692 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5693 str, (long) memaddr, pid);
5694 }
5695
5696 /* Fill start and end extra bytes of buffer with existing memory data. */
5697
5698 errno = 0;
5699 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5700 about coercing an 8 byte integer to a 4 byte pointer. */
5701 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5702 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5703 (PTRACE_TYPE_ARG4) 0);
5704 if (errno)
5705 return errno;
5706
5707 if (count > 1)
5708 {
5709 errno = 0;
5710 buffer[count - 1]
5711 = ptrace (PTRACE_PEEKTEXT, pid,
5712 /* Coerce to a uintptr_t first to avoid potential gcc warning
5713 about coercing an 8 byte integer to a 4 byte pointer. */
5714 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5715 * sizeof (PTRACE_XFER_TYPE)),
5716 (PTRACE_TYPE_ARG4) 0);
5717 if (errno)
5718 return errno;
5719 }
5720
5721 /* Copy data to be written over corresponding part of buffer. */
5722
5723 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5724 myaddr, len);
5725
5726 /* Write the entire buffer. */
5727
5728 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5729 {
5730 errno = 0;
5731 ptrace (PTRACE_POKETEXT, pid,
5732 /* Coerce to a uintptr_t first to avoid potential gcc warning
5733 about coercing an 8 byte integer to a 4 byte pointer. */
5734 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5735 (PTRACE_TYPE_ARG4) buffer[i]);
5736 if (errno)
5737 return errno;
5738 }
5739
5740 return 0;
5741 }
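/* A worked instance of the rounding above, assuming an 8-byte
   PTRACE_XFER_TYPE: writing LEN = 5 bytes at MEMADDR = 0x1003 gives
   addr = 0x1003 & ~7 = 0x1000 and
   count = ((0x1008 - 0x1000) + 7) / 8 = 1, so a single word is
   peeked, patched at byte offset 3, and poked back.  */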
5742
5743 static void
5744 linux_look_up_symbols (void)
5745 {
5746 #ifdef USE_THREAD_DB
5747 struct process_info *proc = current_process ();
5748
5749 if (proc->priv->thread_db != NULL)
5750 return;
5751
5752 thread_db_init ();
5753 #endif
5754 }
5755
5756 static void
5757 linux_request_interrupt (void)
5758 {
5759 extern unsigned long signal_pid;
5760
5761 /* Send a SIGINT to the process group. This acts just like the user
5762 typed a ^C on the controlling terminal. */
5763 kill (-signal_pid, SIGINT);
5764 }
5765
5766 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5767 to debugger memory starting at MYADDR. */
5768
5769 static int
5770 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5771 {
5772 char filename[PATH_MAX];
5773 int fd, n;
5774 int pid = lwpid_of (current_thread);
5775
5776 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5777
5778 fd = open (filename, O_RDONLY);
5779 if (fd < 0)
5780 return -1;
5781
5782 if (offset != (CORE_ADDR) 0
5783 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5784 n = -1;
5785 else
5786 n = read (fd, myaddr, len);
5787
5788 close (fd);
5789
5790 return n;
5791 }
5792
5793 /* These breakpoint and watchpoint related wrapper functions simply
5794 pass on the function call if the target has registered a
5795 corresponding function. */
5796
5797 static int
5798 linux_supports_z_point_type (char z_type)
5799 {
5800 return (the_low_target.supports_z_point_type != NULL
5801 && the_low_target.supports_z_point_type (z_type));
5802 }
5803
5804 static int
5805 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5806 int size, struct raw_breakpoint *bp)
5807 {
5808 if (type == raw_bkpt_type_sw)
5809 return insert_memory_breakpoint (bp);
5810 else if (the_low_target.insert_point != NULL)
5811 return the_low_target.insert_point (type, addr, size, bp);
5812 else
5813 /* Unsupported (see target.h). */
5814 return 1;
5815 }
5816
5817 static int
5818 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5819 int size, struct raw_breakpoint *bp)
5820 {
5821 if (type == raw_bkpt_type_sw)
5822 return remove_memory_breakpoint (bp);
5823 else if (the_low_target.remove_point != NULL)
5824 return the_low_target.remove_point (type, addr, size, bp);
5825 else
5826 /* Unsupported (see target.h). */
5827 return 1;
5828 }
5829
5830 /* Implement the to_stopped_by_sw_breakpoint target_ops
5831 method. */
5832
5833 static int
5834 linux_stopped_by_sw_breakpoint (void)
5835 {
5836 struct lwp_info *lwp = get_thread_lwp (current_thread);
5837
5838 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5839 }
5840
5841 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5842 method. */
5843
5844 static int
5845 linux_supports_stopped_by_sw_breakpoint (void)
5846 {
5847 return USE_SIGTRAP_SIGINFO;
5848 }
5849
5850 /* Implement the to_stopped_by_hw_breakpoint target_ops
5851 method. */
5852
5853 static int
5854 linux_stopped_by_hw_breakpoint (void)
5855 {
5856 struct lwp_info *lwp = get_thread_lwp (current_thread);
5857
5858 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5859 }
5860
5861 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5862 method. */
5863
5864 static int
5865 linux_supports_stopped_by_hw_breakpoint (void)
5866 {
5867 return USE_SIGTRAP_SIGINFO;
5868 }
5869
5870 /* Implement the supports_hardware_single_step target_ops method. */
5871
5872 static int
5873 linux_supports_hardware_single_step (void)
5874 {
5875 return can_hardware_single_step ();
5876 }
5877
5878 static int
5879 linux_supports_software_single_step (void)
5880 {
5881 return can_software_single_step ();
5882 }
5883
5884 static int
5885 linux_stopped_by_watchpoint (void)
5886 {
5887 struct lwp_info *lwp = get_thread_lwp (current_thread);
5888
5889 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5890 }
5891
5892 static CORE_ADDR
5893 linux_stopped_data_address (void)
5894 {
5895 struct lwp_info *lwp = get_thread_lwp (current_thread);
5896
5897 return lwp->stopped_data_address;
5898 }
5899
5900 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5901 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5902 && defined(PT_TEXT_END_ADDR)
5903
5904 /* This is only used for targets that define PT_TEXT_ADDR,
5905 PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5906 target presumably has other ways of acquiring this information,
5907 such as loadmaps.  */
5908
5909 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5910 to tell gdb about. */
5911
5912 static int
5913 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5914 {
5915 unsigned long text, text_end, data;
5916 int pid = lwpid_of (current_thread);
5917
5918 errno = 0;
5919
5920 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5921 (PTRACE_TYPE_ARG4) 0);
5922 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5923 (PTRACE_TYPE_ARG4) 0);
5924 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5925 (PTRACE_TYPE_ARG4) 0);
5926
5927 if (errno == 0)
5928 {
5929 /* Both text and data offsets produced at compile-time (and so
5930 used by gdb) are relative to the beginning of the program,
5931 with the data segment immediately following the text segment.
5932 However, the actual runtime layout in memory may put the data
5933 somewhere else, so when we send gdb a data base-address, we
5934 use the real data base address and subtract the compile-time
5935 data base-address from it (which is just the length of the
5936 text segment). BSS immediately follows data in both
5937 cases. */
5938 *text_p = text;
5939 *data_p = data - (text_end - text);
5940
5941 return 1;
5942 }
5943 return 0;
5944 }
5945 #endif
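/* A worked instance of the offset math above, under the comment's
   assumption that data immediately follows text at link time: with a
   runtime layout of text = 0x40000000, text_end = 0x40008000 and
   data = 0x50000000, the reported data base is
   0x50000000 - 0x8000 = 0x4fff8000, so that adding back the
   compile-time data offset (0x8000, the text size) yields the real
   load address of the data segment.  */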
5946
5947 static int
5948 linux_qxfer_osdata (const char *annex,
5949 unsigned char *readbuf, unsigned const char *writebuf,
5950 CORE_ADDR offset, int len)
5951 {
5952 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5953 }
5954
5955 /* Convert a native/host siginfo object, into/from the siginfo in the
5956 layout of the inferiors' architecture. */
5957
5958 static void
5959 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5960 {
5961 int done = 0;
5962
5963 if (the_low_target.siginfo_fixup != NULL)
5964 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5965
5966 /* If there was no callback, or the callback didn't do anything,
5967 then just do a straight memcpy. */
5968 if (!done)
5969 {
5970 if (direction == 1)
5971 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5972 else
5973 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5974 }
5975 }
5976
5977 static int
5978 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5979 unsigned const char *writebuf, CORE_ADDR offset, int len)
5980 {
5981 int pid;
5982 siginfo_t siginfo;
5983 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5984
5985 if (current_thread == NULL)
5986 return -1;
5987
5988 pid = lwpid_of (current_thread);
5989
5990 if (debug_threads)
5991 debug_printf ("%s siginfo for lwp %d.\n",
5992 readbuf != NULL ? "Reading" : "Writing",
5993 pid);
5994
5995 if (offset >= sizeof (siginfo))
5996 return -1;
5997
5998 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5999 return -1;
6000
6001 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6002 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6003 inferior with a 64-bit GDBSERVER should look the same as debugging it
6004 with a 32-bit GDBSERVER, we need to convert it. */
6005 siginfo_fixup (&siginfo, inf_siginfo, 0);
6006
6007 if (offset + len > sizeof (siginfo))
6008 len = sizeof (siginfo) - offset;
6009
6010 if (readbuf != NULL)
6011 memcpy (readbuf, inf_siginfo + offset, len);
6012 else
6013 {
6014 memcpy (inf_siginfo + offset, writebuf, len);
6015
6016 /* Convert back to ptrace layout before flushing it out. */
6017 siginfo_fixup (&siginfo, inf_siginfo, 1);
6018
6019 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6020 return -1;
6021 }
6022
6023 return len;
6024 }
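#if 0
/* A standalone sketch (not part of gdbserver) of the
   PTRACE_GETSIGINFO call that linux_xfer_siginfo above is built on.
   It assumes a tracee already in a ptrace signal stop; the helper
   name is illustrative.  */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>
#include <stdio.h>

static int
print_pending_signal (pid_t pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, &si) != 0)
    return -1;
  printf ("lwp %d stopped with signal %d (code %d)\n",
          (int) pid, si.si_signo, si.si_code);
  return 0;
}
#endif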
6025
6026 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6027 it lets us notice when children change state; and it serves as the
6028 handler for the sigsuspend in my_waitpid.  */
6029
6030 static void
6031 sigchld_handler (int signo)
6032 {
6033 int old_errno = errno;
6034
6035 if (debug_threads)
6036 {
6037 do
6038 {
6039 /* fprintf is not async-signal-safe, so call write
6040 directly. */
6041 if (write (2, "sigchld_handler\n",
6042 sizeof ("sigchld_handler\n") - 1) < 0)
6043 break; /* just ignore */
6044 } while (0);
6045 }
6046
6047 if (target_is_async_p ())
6048 async_file_mark (); /* trigger a linux_wait */
6049
6050 errno = old_errno;
6051 }
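#if 0
/* A standalone sketch (not part of gdbserver) of the same
   async-signal-safe pattern: the handler only write()s a byte into a
   pipe and preserves errno; the event loop drains the pipe later.
   The pipe_fds array stands in for linux_event_pipe and is assumed
   to have been filled in by pipe () at startup.  */
#include <unistd.h>
#include <errno.h>

static int pipe_fds[2];

static void
sketch_sigchld_handler (int signo)
{
  int saved_errno = errno;      /* write may clobber errno.  */

  (void) signo;
  (void) write (pipe_fds[1], "+", 1);
  errno = saved_errno;
}
#endif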
6052
6053 static int
6054 linux_supports_non_stop (void)
6055 {
6056 return 1;
6057 }
6058
6059 static int
6060 linux_async (int enable)
6061 {
6062 int previous = target_is_async_p ();
6063
6064 if (debug_threads)
6065 debug_printf ("linux_async (%d), previous=%d\n",
6066 enable, previous);
6067
6068 if (previous != enable)
6069 {
6070 sigset_t mask;
6071 sigemptyset (&mask);
6072 sigaddset (&mask, SIGCHLD);
6073
6074 sigprocmask (SIG_BLOCK, &mask, NULL);
6075
6076 if (enable)
6077 {
6078 if (pipe (linux_event_pipe) == -1)
6079 {
6080 linux_event_pipe[0] = -1;
6081 linux_event_pipe[1] = -1;
6082 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6083
6084 warning ("creating event pipe failed.");
6085 return previous;
6086 }
6087
6088 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6089 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6090
6091 /* Register the event loop handler. */
6092 add_file_handler (linux_event_pipe[0],
6093 handle_target_event, NULL);
6094
6095 /* Always trigger a linux_wait. */
6096 async_file_mark ();
6097 }
6098 else
6099 {
6100 delete_file_handler (linux_event_pipe[0]);
6101
6102 close (linux_event_pipe[0]);
6103 close (linux_event_pipe[1]);
6104 linux_event_pipe[0] = -1;
6105 linux_event_pipe[1] = -1;
6106 }
6107
6108 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6109 }
6110
6111 return previous;
6112 }
6113
6114 static int
6115 linux_start_non_stop (int nonstop)
6116 {
6117 /* Register or unregister from event-loop accordingly. */
6118 linux_async (nonstop);
6119
6120 if (target_is_async_p () != (nonstop != 0))
6121 return -1;
6122
6123 return 0;
6124 }
6125
6126 static int
6127 linux_supports_multi_process (void)
6128 {
6129 return 1;
6130 }
6131
6132 /* Check if fork events are supported. */
6133
6134 static int
6135 linux_supports_fork_events (void)
6136 {
6137 return linux_supports_tracefork ();
6138 }
6139
6140 /* Check if vfork events are supported. */
6141
6142 static int
6143 linux_supports_vfork_events (void)
6144 {
6145 return linux_supports_tracefork ();
6146 }
6147
6148 /* Check if exec events are supported. */
6149
6150 static int
6151 linux_supports_exec_events (void)
6152 {
6153 return linux_supports_traceexec ();
6154 }
6155
6156 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6157 options for the specified lwp. */
6158
6159 static int
6160 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6161 void *args)
6162 {
6163 struct thread_info *thread = (struct thread_info *) entry;
6164 struct lwp_info *lwp = get_thread_lwp (thread);
6165
6166 if (!lwp->stopped)
6167 {
6168 /* Stop the lwp so we can modify its ptrace options. */
6169 lwp->must_set_ptrace_flags = 1;
6170 linux_stop_lwp (lwp);
6171 }
6172 else
6173 {
6174 /* Already stopped; go ahead and set the ptrace options. */
6175 struct process_info *proc = find_process_pid (pid_of (thread));
6176 int options = linux_low_ptrace_options (proc->attached);
6177
6178 linux_enable_event_reporting (lwpid_of (thread), options);
6179 lwp->must_set_ptrace_flags = 0;
6180 }
6181
6182 return 0;
6183 }
6184
6185 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6186 ptrace flags for all inferiors. This is in case the new GDB connection
6187 doesn't support the same set of events that the previous one did. */
6188
6189 static void
6190 linux_handle_new_gdb_connection (void)
6191 {
6192 pid_t pid;
6193
6194 /* Request that all the lwps reset their ptrace options. */
6195   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6196 }
6197
6198 static int
6199 linux_supports_disable_randomization (void)
6200 {
6201 #ifdef HAVE_PERSONALITY
6202 return 1;
6203 #else
6204 return 0;
6205 #endif
6206 }
6207
6208 static int
6209 linux_supports_agent (void)
6210 {
6211 return 1;
6212 }
6213
6214 static int
6215 linux_supports_range_stepping (void)
6216 {
6217 if (*the_low_target.supports_range_stepping == NULL)
6218 return 0;
6219
6220 return (*the_low_target.supports_range_stepping) ();
6221 }
6222
6223 /* Enumerate spufs IDs for process PID. */
6224 static int
6225 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6226 {
6227 int pos = 0;
6228 int written = 0;
6229 char path[128];
6230 DIR *dir;
6231 struct dirent *entry;
6232
6233 sprintf (path, "/proc/%ld/fd", pid);
6234 dir = opendir (path);
6235 if (!dir)
6236 return -1;
6237
6238 rewinddir (dir);
6239 while ((entry = readdir (dir)) != NULL)
6240 {
6241 struct stat st;
6242 struct statfs stfs;
6243 int fd;
6244
6245 fd = atoi (entry->d_name);
6246 if (!fd)
6247 continue;
6248
6249 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6250 if (stat (path, &st) != 0)
6251 continue;
6252 if (!S_ISDIR (st.st_mode))
6253 continue;
6254
6255 if (statfs (path, &stfs) != 0)
6256 continue;
6257 if (stfs.f_type != SPUFS_MAGIC)
6258 continue;
6259
6260 if (pos >= offset && pos + 4 <= offset + len)
6261 {
6262 *(unsigned int *)(buf + pos - offset) = fd;
6263 written += 4;
6264 }
6265 pos += 4;
6266 }
6267
6268 closedir (dir);
6269 return written;
6270 }
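#if 0
/* A standalone sketch (not part of gdbserver) of the statfs f_type
   test used above to recognize spufs mounts, here checking whether
   /proc is procfs; PROC_SUPER_MAGIC from <linux/magic.h> plays the
   role of SPUFS_MAGIC.  */
#include <sys/vfs.h>
#include <linux/magic.h>
#include <stdio.h>

int
main (void)
{
  struct statfs stfs;

  if (statfs ("/proc", &stfs) == 0)
    printf ("/proc is %sprocfs\n",
            stfs.f_type == PROC_SUPER_MAGIC ? "" : "not ");
  return 0;
}
#endif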
6271
6272 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6273 object type, using the /proc file system. */
6274 static int
6275 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6276 unsigned const char *writebuf,
6277 CORE_ADDR offset, int len)
6278 {
6279 long pid = lwpid_of (current_thread);
6280 char buf[128];
6281 int fd = 0;
6282 int ret = 0;
6283
6284 if (!writebuf && !readbuf)
6285 return -1;
6286
6287 if (!*annex)
6288 {
6289 if (!readbuf)
6290 return -1;
6291 else
6292 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6293 }
6294
6295 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6296   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6297 if (fd <= 0)
6298 return -1;
6299
6300 if (offset != 0
6301 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6302 {
6303 close (fd);
6304 return 0;
6305 }
6306
6307 if (writebuf)
6308 ret = write (fd, writebuf, (size_t) len);
6309 else
6310 ret = read (fd, readbuf, (size_t) len);
6311
6312 close (fd);
6313 return ret;
6314 }
6315
6316 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6317 struct target_loadseg
6318 {
6319 /* Core address to which the segment is mapped. */
6320 Elf32_Addr addr;
6321 /* VMA recorded in the program header. */
6322 Elf32_Addr p_vaddr;
6323 /* Size of this segment in memory. */
6324 Elf32_Word p_memsz;
6325 };
6326
6327 # if defined PT_GETDSBT
6328 struct target_loadmap
6329 {
6330 /* Protocol version number, must be zero. */
6331 Elf32_Word version;
6332 /* Pointer to the DSBT table, its size, and the DSBT index. */
6333 unsigned *dsbt_table;
6334 unsigned dsbt_size, dsbt_index;
6335 /* Number of segments in this map. */
6336 Elf32_Word nsegs;
6337 /* The actual memory map. */
6338 struct target_loadseg segs[/*nsegs*/];
6339 };
6340 # define LINUX_LOADMAP PT_GETDSBT
6341 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6342 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6343 # else
6344 struct target_loadmap
6345 {
6346 /* Protocol version number, must be zero. */
6347 Elf32_Half version;
6348 /* Number of segments in this map. */
6349 Elf32_Half nsegs;
6350 /* The actual memory map. */
6351 struct target_loadseg segs[/*nsegs*/];
6352 };
6353 # define LINUX_LOADMAP PTRACE_GETFDPIC
6354 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6355 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6356 # endif
6357
6358 static int
6359 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6360 unsigned char *myaddr, unsigned int len)
6361 {
6362 int pid = lwpid_of (current_thread);
6363 int addr = -1;
6364 struct target_loadmap *data = NULL;
6365 unsigned int actual_length, copy_length;
6366
6367 if (strcmp (annex, "exec") == 0)
6368 addr = (int) LINUX_LOADMAP_EXEC;
6369 else if (strcmp (annex, "interp") == 0)
6370 addr = (int) LINUX_LOADMAP_INTERP;
6371 else
6372 return -1;
6373
6374 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6375 return -1;
6376
6377 if (data == NULL)
6378 return -1;
6379
6380 actual_length = sizeof (struct target_loadmap)
6381 + sizeof (struct target_loadseg) * data->nsegs;
6382
6383 if (offset < 0 || offset > actual_length)
6384 return -1;
6385
6386 copy_length = actual_length - offset < len ? actual_length - offset : len;
6387 memcpy (myaddr, (char *) data + offset, copy_length);
6388 return copy_length;
6389 }
6390 #else
6391 # define linux_read_loadmap NULL
6392 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6393
6394 static void
6395 linux_process_qsupported (char **features, int count)
6396 {
6397 if (the_low_target.process_qsupported != NULL)
6398 the_low_target.process_qsupported (features, count);
6399 }
6400
6401 static int
6402 linux_supports_catch_syscall (void)
6403 {
6404 return (the_low_target.get_syscall_trapinfo != NULL
6405 && linux_supports_tracesysgood ());
6406 }
6407
6408 static int
6409 linux_get_ipa_tdesc_idx (void)
6410 {
6411 if (the_low_target.get_ipa_tdesc_idx == NULL)
6412 return 0;
6413
6414 return (*the_low_target.get_ipa_tdesc_idx) ();
6415 }
6416
6417 static int
6418 linux_supports_tracepoints (void)
6419 {
6420 if (*the_low_target.supports_tracepoints == NULL)
6421 return 0;
6422
6423 return (*the_low_target.supports_tracepoints) ();
6424 }
6425
6426 static CORE_ADDR
6427 linux_read_pc (struct regcache *regcache)
6428 {
6429 if (the_low_target.get_pc == NULL)
6430 return 0;
6431
6432 return (*the_low_target.get_pc) (regcache);
6433 }
6434
6435 static void
6436 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6437 {
6438 gdb_assert (the_low_target.set_pc != NULL);
6439
6440 (*the_low_target.set_pc) (regcache, pc);
6441 }
6442
6443 static int
6444 linux_thread_stopped (struct thread_info *thread)
6445 {
6446 return get_thread_lwp (thread)->stopped;
6447 }
6448
6449 /* This exposes stop-all-threads functionality to other modules. */
6450
6451 static void
6452 linux_pause_all (int freeze)
6453 {
6454 stop_all_lwps (freeze, NULL);
6455 }
6456
6457 /* This exposes unstop-all-threads functionality to other gdbserver
6458 modules. */
6459
6460 static void
6461 linux_unpause_all (int unfreeze)
6462 {
6463 unstop_all_lwps (unfreeze, NULL);
6464 }
6465
6466 static int
6467 linux_prepare_to_access_memory (void)
6468 {
6469   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6470      running LWP.  */
6471 if (non_stop)
6472 linux_pause_all (1);
6473 return 0;
6474 }
6475
6476 static void
6477 linux_done_accessing_memory (void)
6478 {
6479   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6480      running LWP.  */
6481 if (non_stop)
6482 linux_unpause_all (1);
6483 }
6484
6485 static int
6486 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6487 CORE_ADDR collector,
6488 CORE_ADDR lockaddr,
6489 ULONGEST orig_size,
6490 CORE_ADDR *jump_entry,
6491 CORE_ADDR *trampoline,
6492 ULONGEST *trampoline_size,
6493 unsigned char *jjump_pad_insn,
6494 ULONGEST *jjump_pad_insn_size,
6495 CORE_ADDR *adjusted_insn_addr,
6496 CORE_ADDR *adjusted_insn_addr_end,
6497 char *err)
6498 {
6499 return (*the_low_target.install_fast_tracepoint_jump_pad)
6500 (tpoint, tpaddr, collector, lockaddr, orig_size,
6501 jump_entry, trampoline, trampoline_size,
6502 jjump_pad_insn, jjump_pad_insn_size,
6503 adjusted_insn_addr, adjusted_insn_addr_end,
6504 err);
6505 }
6506
6507 static struct emit_ops *
6508 linux_emit_ops (void)
6509 {
6510 if (the_low_target.emit_ops != NULL)
6511 return (*the_low_target.emit_ops) ();
6512 else
6513 return NULL;
6514 }
6515
6516 static int
6517 linux_get_min_fast_tracepoint_insn_len (void)
6518 {
6519 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6520 }
6521
6522 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6523
6524 static int
6525 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6526 CORE_ADDR *phdr_memaddr, int *num_phdr)
6527 {
6528 char filename[PATH_MAX];
6529 int fd;
6530 const int auxv_size = is_elf64
6531 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6532 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6533
6534 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6535
6536 fd = open (filename, O_RDONLY);
6537 if (fd < 0)
6538 return 1;
6539
6540 *phdr_memaddr = 0;
6541 *num_phdr = 0;
6542 while (read (fd, buf, auxv_size) == auxv_size
6543 && (*phdr_memaddr == 0 || *num_phdr == 0))
6544 {
6545 if (is_elf64)
6546 {
6547 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6548
6549 switch (aux->a_type)
6550 {
6551 case AT_PHDR:
6552 *phdr_memaddr = aux->a_un.a_val;
6553 break;
6554 case AT_PHNUM:
6555 *num_phdr = aux->a_un.a_val;
6556 break;
6557 }
6558 }
6559 else
6560 {
6561 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6562
6563 switch (aux->a_type)
6564 {
6565 case AT_PHDR:
6566 *phdr_memaddr = aux->a_un.a_val;
6567 break;
6568 case AT_PHNUM:
6569 *num_phdr = aux->a_un.a_val;
6570 break;
6571 }
6572 }
6573 }
6574
6575 close (fd);
6576
6577 if (*phdr_memaddr == 0 || *num_phdr == 0)
6578 {
6579 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6580 "phdr_memaddr = %ld, phdr_num = %d",
6581 (long) *phdr_memaddr, *num_phdr);
6582 return 2;
6583 }
6584
6585 return 0;
6586 }
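#if 0
/* For the *current* process, the same AT_PHDR/AT_PHNUM lookup that
   get_phdr_phnum_from_proc_auxv above performs by parsing
   /proc/PID/auxv can be done with glibc's getauxval (glibc >= 2.16);
   a sketch for comparison, not usable on a remote inferior.  */
#include <sys/auxv.h>
#include <stdio.h>

int
main (void)
{
  printf ("phdr  = %#lx\n", (unsigned long) getauxval (AT_PHDR));
  printf ("phnum = %lu\n", (unsigned long) getauxval (AT_PHNUM));
  return 0;
}
#endif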
6587
6588 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6589
6590 static CORE_ADDR
6591 get_dynamic (const int pid, const int is_elf64)
6592 {
6593 CORE_ADDR phdr_memaddr, relocation;
6594 int num_phdr, i;
6595 unsigned char *phdr_buf;
6596 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6597
6598 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6599 return 0;
6600
6601 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6602 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6603
6604 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6605 return 0;
6606
6607 /* Compute relocation: it is expected to be 0 for "regular" executables,
6608 non-zero for PIE ones. */
6609 relocation = -1;
6610 for (i = 0; relocation == -1 && i < num_phdr; i++)
6611 if (is_elf64)
6612 {
6613 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6614
6615 if (p->p_type == PT_PHDR)
6616 relocation = phdr_memaddr - p->p_vaddr;
6617 }
6618 else
6619 {
6620 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6621
6622 if (p->p_type == PT_PHDR)
6623 relocation = phdr_memaddr - p->p_vaddr;
6624 }
6625
6626 if (relocation == -1)
6627 {
6628       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6629          real-world executables, including PIE executables, always have
6630          PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6631          from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6632          provides DT_DEBUG anyway (fpc binaries are statically linked).
6633 
6634          Therefore, if DT_DEBUG exists, PT_PHDR exists as well.
6635 
6636          GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6637
6638 return 0;
6639 }
6640
6641 for (i = 0; i < num_phdr; i++)
6642 {
6643 if (is_elf64)
6644 {
6645 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6646
6647 if (p->p_type == PT_DYNAMIC)
6648 return p->p_vaddr + relocation;
6649 }
6650 else
6651 {
6652 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6653
6654 if (p->p_type == PT_DYNAMIC)
6655 return p->p_vaddr + relocation;
6656 }
6657 }
6658
6659 return 0;
6660 }
6661
6662 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6663 can be 0 if the inferior does not yet have the library list initialized.
6664 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6665 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6666
6667 static CORE_ADDR
6668 get_r_debug (const int pid, const int is_elf64)
6669 {
6670 CORE_ADDR dynamic_memaddr;
6671 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6672 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6673 CORE_ADDR map = -1;
6674
6675 dynamic_memaddr = get_dynamic (pid, is_elf64);
6676 if (dynamic_memaddr == 0)
6677 return map;
6678
6679 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6680 {
6681 if (is_elf64)
6682 {
6683 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6684 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6685 union
6686 {
6687 Elf64_Xword map;
6688 unsigned char buf[sizeof (Elf64_Xword)];
6689 }
6690 rld_map;
6691 #endif
6692 #ifdef DT_MIPS_RLD_MAP
6693 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6694 {
6695 if (linux_read_memory (dyn->d_un.d_val,
6696 rld_map.buf, sizeof (rld_map.buf)) == 0)
6697 return rld_map.map;
6698 else
6699 break;
6700 }
6701 #endif /* DT_MIPS_RLD_MAP */
6702 #ifdef DT_MIPS_RLD_MAP_REL
6703 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6704 {
6705 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6706 rld_map.buf, sizeof (rld_map.buf)) == 0)
6707 return rld_map.map;
6708 else
6709 break;
6710 }
6711 #endif /* DT_MIPS_RLD_MAP_REL */
6712
6713 if (dyn->d_tag == DT_DEBUG && map == -1)
6714 map = dyn->d_un.d_val;
6715
6716 if (dyn->d_tag == DT_NULL)
6717 break;
6718 }
6719 else
6720 {
6721 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6722 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6723 union
6724 {
6725 Elf32_Word map;
6726 unsigned char buf[sizeof (Elf32_Word)];
6727 }
6728 rld_map;
6729 #endif
6730 #ifdef DT_MIPS_RLD_MAP
6731 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6732 {
6733 if (linux_read_memory (dyn->d_un.d_val,
6734 rld_map.buf, sizeof (rld_map.buf)) == 0)
6735 return rld_map.map;
6736 else
6737 break;
6738 }
6739 #endif /* DT_MIPS_RLD_MAP */
6740 #ifdef DT_MIPS_RLD_MAP_REL
6741 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6742 {
6743 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6744 rld_map.buf, sizeof (rld_map.buf)) == 0)
6745 return rld_map.map;
6746 else
6747 break;
6748 }
6749 #endif /* DT_MIPS_RLD_MAP_REL */
6750
6751 if (dyn->d_tag == DT_DEBUG && map == -1)
6752 map = dyn->d_un.d_val;
6753
6754 if (dyn->d_tag == DT_NULL)
6755 break;
6756 }
6757
6758 dynamic_memaddr += dyn_size;
6759 }
6760
6761 return map;
6762 }
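#if 0
/* A standalone sketch (not part of gdbserver): the same DT_DEBUG scan
   applied to the debugger's own image via the _DYNAMIC array that
   glibc's <link.h> declares, instead of reading inferior memory.
   Assumes a dynamically linked executable on a glibc system.  */
#include <link.h>
#include <stdio.h>

int
main (void)
{
  ElfW(Dyn) *dyn;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      printf ("r_debug at %#lx\n", (unsigned long) dyn->d_un.d_ptr);
  return 0;
}
#endif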
6763
6764 /* Read one pointer from MEMADDR in the inferior. */
6765
6766 static int
6767 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6768 {
6769 int ret;
6770
6771 /* Go through a union so this works on either big or little endian
6772 hosts, when the inferior's pointer size is smaller than the size
6773      of CORE_ADDR.  The inferior's endianness is assumed to be the
6774      same as the superior's.  */
6775 union
6776 {
6777 CORE_ADDR core_addr;
6778 unsigned int ui;
6779 unsigned char uc;
6780 } addr;
6781
6782 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6783 if (ret == 0)
6784 {
6785 if (ptr_size == sizeof (CORE_ADDR))
6786 *ptr = addr.core_addr;
6787 else if (ptr_size == sizeof (unsigned int))
6788 *ptr = addr.ui;
6789 else
6790 gdb_assert_not_reached ("unhandled pointer size");
6791 }
6792 return ret;
6793 }
6794
6795 struct link_map_offsets
6796 {
6797 /* Offset and size of r_debug.r_version. */
6798 int r_version_offset;
6799
6800 /* Offset and size of r_debug.r_map. */
6801 int r_map_offset;
6802
6803 /* Offset to l_addr field in struct link_map. */
6804 int l_addr_offset;
6805
6806 /* Offset to l_name field in struct link_map. */
6807 int l_name_offset;
6808
6809 /* Offset to l_ld field in struct link_map. */
6810 int l_ld_offset;
6811
6812 /* Offset to l_next field in struct link_map. */
6813 int l_next_offset;
6814
6815 /* Offset to l_prev field in struct link_map. */
6816 int l_prev_offset;
6817 };
6818
6819 /* Construct qXfer:libraries-svr4:read reply. */
6820
6821 static int
6822 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6823 unsigned const char *writebuf,
6824 CORE_ADDR offset, int len)
6825 {
6826 char *document;
6827 unsigned document_len;
6828 struct process_info_private *const priv = current_process ()->priv;
6829 char filename[PATH_MAX];
6830 int pid, is_elf64;
6831
6832 static const struct link_map_offsets lmo_32bit_offsets =
6833 {
6834 0, /* r_version offset. */
6835 4, /* r_debug.r_map offset. */
6836 0, /* l_addr offset in link_map. */
6837 4, /* l_name offset in link_map. */
6838 8, /* l_ld offset in link_map. */
6839 12, /* l_next offset in link_map. */
6840 16 /* l_prev offset in link_map. */
6841 };
6842
6843 static const struct link_map_offsets lmo_64bit_offsets =
6844 {
6845 0, /* r_version offset. */
6846 8, /* r_debug.r_map offset. */
6847 0, /* l_addr offset in link_map. */
6848 8, /* l_name offset in link_map. */
6849 16, /* l_ld offset in link_map. */
6850 24, /* l_next offset in link_map. */
6851 32 /* l_prev offset in link_map. */
6852 };
6853 const struct link_map_offsets *lmo;
6854 unsigned int machine;
6855 int ptr_size;
6856 CORE_ADDR lm_addr = 0, lm_prev = 0;
6857 int allocated = 1024;
6858 char *p;
6859 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6860 int header_done = 0;
6861
6862 if (writebuf != NULL)
6863 return -2;
6864 if (readbuf == NULL)
6865 return -1;
6866
6867 pid = lwpid_of (current_thread);
6868 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6869 is_elf64 = elf_64_file_p (filename, &machine);
6870 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6871 ptr_size = is_elf64 ? 8 : 4;
6872
6873 while (annex[0] != '\0')
6874 {
6875 const char *sep;
6876 CORE_ADDR *addrp;
6877 int len;
6878
6879 sep = strchr (annex, '=');
6880 if (sep == NULL)
6881 break;
6882
6883 len = sep - annex;
6884 if (len == 5 && startswith (annex, "start"))
6885 addrp = &lm_addr;
6886 else if (len == 4 && startswith (annex, "prev"))
6887 addrp = &lm_prev;
6888 else
6889 {
6890 annex = strchr (sep, ';');
6891 if (annex == NULL)
6892 break;
6893 annex++;
6894 continue;
6895 }
6896
6897 annex = decode_address_to_semicolon (addrp, sep + 1);
6898 }
6899
6900 if (lm_addr == 0)
6901 {
6902 int r_version = 0;
6903
6904 if (priv->r_debug == 0)
6905 priv->r_debug = get_r_debug (pid, is_elf64);
6906
6907       /* We failed to find DT_DEBUG.  This situation will not change
6908          for this inferior - do not retry it.  Report it to GDB as
6909          E01; see GDB's solib-svr4.c for the reasons.  */
6910 if (priv->r_debug == (CORE_ADDR) -1)
6911 return -1;
6912
6913 if (priv->r_debug != 0)
6914 {
6915 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6916 (unsigned char *) &r_version,
6917 sizeof (r_version)) != 0
6918 || r_version != 1)
6919 {
6920 warning ("unexpected r_debug version %d", r_version);
6921 }
6922 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6923 &lm_addr, ptr_size) != 0)
6924 {
6925 warning ("unable to read r_map from 0x%lx",
6926 (long) priv->r_debug + lmo->r_map_offset);
6927 }
6928 }
6929 }
6930
6931 document = (char *) xmalloc (allocated);
6932 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6933 p = document + strlen (document);
6934
6935 while (lm_addr
6936 && read_one_ptr (lm_addr + lmo->l_name_offset,
6937 &l_name, ptr_size) == 0
6938 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6939 &l_addr, ptr_size) == 0
6940 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6941 &l_ld, ptr_size) == 0
6942 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6943 &l_prev, ptr_size) == 0
6944 && read_one_ptr (lm_addr + lmo->l_next_offset,
6945 &l_next, ptr_size) == 0)
6946 {
6947 unsigned char libname[PATH_MAX];
6948
6949 if (lm_prev != l_prev)
6950 {
6951 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6952 (long) lm_prev, (long) l_prev);
6953 break;
6954 }
6955
6956       /* Ignore the first entry even if it has a valid name, as the first
6957          entry corresponds to the main executable.  The first entry should
6958          not be skipped if the dynamic loader was loaded late by a static
6959          executable (see the solib-svr4.c parameter ignore_first).  But in
6960          that case the main executable has no PT_DYNAMIC, and this function
6961          has already exited above because get_r_debug failed.  */
6962 if (lm_prev == 0)
6963 {
6964 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6965 p = p + strlen (p);
6966 }
6967 else
6968 {
6969 /* Not checking for error because reading may stop before
6970 we've got PATH_MAX worth of characters. */
6971 libname[0] = '\0';
6972 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6973 libname[sizeof (libname) - 1] = '\0';
6974 if (libname[0] != '\0')
6975 {
6976 /* 6x the size for xml_escape_text below. */
6977 size_t len = 6 * strlen ((char *) libname);
6978 char *name;
6979
6980 if (!header_done)
6981 {
6982 /* Terminate `<library-list-svr4'. */
6983 *p++ = '>';
6984 header_done = 1;
6985 }
6986
6987 while (allocated < p - document + len + 200)
6988 {
6989 /* Expand to guarantee sufficient storage. */
6990 uintptr_t document_len = p - document;
6991
6992 document = (char *) xrealloc (document, 2 * allocated);
6993 allocated *= 2;
6994 p = document + document_len;
6995 }
6996
6997 name = xml_escape_text ((char *) libname);
6998 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6999 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7000 name, (unsigned long) lm_addr,
7001 (unsigned long) l_addr, (unsigned long) l_ld);
7002 free (name);
7003 }
7004 }
7005
7006 lm_prev = lm_addr;
7007 lm_addr = l_next;
7008 }
7009
7010 if (!header_done)
7011 {
7012 /* Empty list; terminate `<library-list-svr4'. */
7013 strcpy (p, "/>");
7014 }
7015 else
7016 strcpy (p, "</library-list-svr4>");
7017
7018 document_len = strlen (document);
7019 if (offset < document_len)
7020 document_len -= offset;
7021 else
7022 document_len = 0;
7023 if (len > document_len)
7024 len = document_len;
7025
7026 memcpy (readbuf, document + offset, len);
7027 xfree (document);
7028
7029 return len;
7030 }
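#if 0
/* A standalone sketch (not part of gdbserver) of the r_debug ->
   link_map walk that the function above performs over the remote
   protocol, here done in-process via glibc's _r_debug from
   <link.h>.  The first entry, with an empty l_name, is normally the
   main executable.  */
#include <link.h>
#include <stdio.h>

int
main (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    printf ("l_addr=%#lx l_ld=%p name=%s\n",
            (unsigned long) lm->l_addr, (void *) lm->l_ld,
            (lm->l_name != NULL && lm->l_name[0] != '\0')
            ? lm->l_name : "(main)");
  return 0;
}
#endif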
7031
7032 #ifdef HAVE_LINUX_BTRACE
7033
7034 /* See to_disable_btrace target method. */
7035
7036 static int
7037 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7038 {
7039 enum btrace_error err;
7040
7041 err = linux_disable_btrace (tinfo);
7042 return (err == BTRACE_ERR_NONE ? 0 : -1);
7043 }
7044
7045 /* Encode an Intel Processor Trace configuration. */
7046
7047 static void
7048 linux_low_encode_pt_config (struct buffer *buffer,
7049 const struct btrace_data_pt_config *config)
7050 {
7051 buffer_grow_str (buffer, "<pt-config>\n");
7052
7053 switch (config->cpu.vendor)
7054 {
7055 case CV_INTEL:
7056 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7057 "model=\"%u\" stepping=\"%u\"/>\n",
7058 config->cpu.family, config->cpu.model,
7059 config->cpu.stepping);
7060 break;
7061
7062 default:
7063 break;
7064 }
7065
7066 buffer_grow_str (buffer, "</pt-config>\n");
7067 }
7068
7069 /* Encode a raw buffer. */
7070
7071 static void
7072 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7073 unsigned int size)
7074 {
7075 if (size == 0)
7076 return;
7077
7078 /* We use hex encoding - see common/rsp-low.h. */
7079 buffer_grow_str (buffer, "<raw>\n");
7080
7081 while (size-- > 0)
7082 {
7083 char elem[2];
7084
7085 elem[0] = tohex ((*data >> 4) & 0xf);
7086 elem[1] = tohex (*data++ & 0xf);
7087
7088 buffer_grow (buffer, elem, 2);
7089 }
7090
7091 buffer_grow_str (buffer, "</raw>\n");
7092 }
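#if 0
/* A self-contained version (not part of gdbserver) of the
   nibble-to-hex loop above, using a lookup table instead of tohex;
   OUT must hold 2 * SIZE + 1 bytes.  */
static void
encode_hex (const unsigned char *data, unsigned int size, char *out)
{
  static const char hex[] = "0123456789abcdef";

  while (size-- > 0)
    {
      *out++ = hex[(*data >> 4) & 0xf];
      *out++ = hex[*data++ & 0xf];
    }
  *out = '\0';
}
#endif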
7093
7094 /* See to_read_btrace target method. */
7095
7096 static int
7097 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7098 enum btrace_read_type type)
7099 {
7100 struct btrace_data btrace;
7101 struct btrace_block *block;
7102 enum btrace_error err;
7103 int i;
7104
7105 btrace_data_init (&btrace);
7106
7107 err = linux_read_btrace (&btrace, tinfo, type);
7108 if (err != BTRACE_ERR_NONE)
7109 {
7110 if (err == BTRACE_ERR_OVERFLOW)
7111 buffer_grow_str0 (buffer, "E.Overflow.");
7112 else
7113 buffer_grow_str0 (buffer, "E.Generic Error.");
7114
7115 goto err;
7116 }
7117
7118 switch (btrace.format)
7119 {
7120 case BTRACE_FORMAT_NONE:
7121 buffer_grow_str0 (buffer, "E.No Trace.");
7122 goto err;
7123
7124 case BTRACE_FORMAT_BTS:
7125 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7126 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7127
7128 for (i = 0;
7129 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7130 i++)
7131 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7132 paddress (block->begin), paddress (block->end));
7133
7134 buffer_grow_str0 (buffer, "</btrace>\n");
7135 break;
7136
7137 case BTRACE_FORMAT_PT:
7138 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7139 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7140 buffer_grow_str (buffer, "<pt>\n");
7141
7142 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7143
7144 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7145 btrace.variant.pt.size);
7146
7147 buffer_grow_str (buffer, "</pt>\n");
7148 buffer_grow_str0 (buffer, "</btrace>\n");
7149 break;
7150
7151 default:
7152 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7153 goto err;
7154 }
7155
7156 btrace_data_fini (&btrace);
7157 return 0;
7158
7159 err:
7160 btrace_data_fini (&btrace);
7161 return -1;
7162 }
7163
7164 /* See to_btrace_conf target method. */
7165
7166 static int
7167 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7168 struct buffer *buffer)
7169 {
7170 const struct btrace_config *conf;
7171
7172 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7173 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7174
7175 conf = linux_btrace_conf (tinfo);
7176 if (conf != NULL)
7177 {
7178 switch (conf->format)
7179 {
7180 case BTRACE_FORMAT_NONE:
7181 break;
7182
7183 case BTRACE_FORMAT_BTS:
7184 buffer_xml_printf (buffer, "<bts");
7185 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7186 buffer_xml_printf (buffer, " />\n");
7187 break;
7188
7189 case BTRACE_FORMAT_PT:
7190 buffer_xml_printf (buffer, "<pt");
7191 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7192 buffer_xml_printf (buffer, "/>\n");
7193 break;
7194 }
7195 }
7196
7197 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7198 return 0;
7199 }
7200 #endif /* HAVE_LINUX_BTRACE */
7201
7202 /* See nat/linux-nat.h. */
7203
7204 ptid_t
7205 current_lwp_ptid (void)
7206 {
7207 return ptid_of (current_thread);
7208 }
7209
7210 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7211
7212 static int
7213 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7214 {
7215 if (the_low_target.breakpoint_kind_from_pc != NULL)
7216 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7217 else
7218 return default_breakpoint_kind_from_pc (pcptr);
7219 }
7220
7221 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7222
7223 static const gdb_byte *
7224 linux_sw_breakpoint_from_kind (int kind, int *size)
7225 {
7226 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7227
7228 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7229 }
7230
7231 /* Implementation of the target_ops method
7232 "breakpoint_kind_from_current_state". */
7233
7234 static int
7235 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7236 {
7237 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7238 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7239 else
7240 return linux_breakpoint_kind_from_pc (pcptr);
7241 }
7242
7243 /* Default implementation of linux_target_ops method "set_pc" for
7244 32-bit pc register which is literally named "pc". */
7245
7246 void
7247 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7248 {
7249 uint32_t newpc = pc;
7250
7251 supply_register_by_name (regcache, "pc", &newpc);
7252 }
7253
7254 /* Default implementation of linux_target_ops method "get_pc" for
7255 32-bit pc register which is literally named "pc". */
7256
7257 CORE_ADDR
7258 linux_get_pc_32bit (struct regcache *regcache)
7259 {
7260 uint32_t pc;
7261
7262 collect_register_by_name (regcache, "pc", &pc);
7263 if (debug_threads)
7264 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7265 return pc;
7266 }
7267
7268 /* Default implementation of linux_target_ops method "set_pc" for
7269 64-bit pc register which is literally named "pc". */
7270
7271 void
7272 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7273 {
7274 uint64_t newpc = pc;
7275
7276 supply_register_by_name (regcache, "pc", &newpc);
7277 }
7278
7279 /* Default implementation of linux_target_ops method "get_pc" for
7280 64-bit pc register which is literally named "pc". */
7281
7282 CORE_ADDR
7283 linux_get_pc_64bit (struct regcache *regcache)
7284 {
7285 uint64_t pc;
7286
7287 collect_register_by_name (regcache, "pc", &pc);
7288 if (debug_threads)
7289 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7290 return pc;
7291 }
7292
7293
7294 static struct target_ops linux_target_ops = {
7295 linux_create_inferior,
7296 linux_post_create_inferior,
7297 linux_attach,
7298 linux_kill,
7299 linux_detach,
7300 linux_mourn,
7301 linux_join,
7302 linux_thread_alive,
7303 linux_resume,
7304 linux_wait,
7305 linux_fetch_registers,
7306 linux_store_registers,
7307 linux_prepare_to_access_memory,
7308 linux_done_accessing_memory,
7309 linux_read_memory,
7310 linux_write_memory,
7311 linux_look_up_symbols,
7312 linux_request_interrupt,
7313 linux_read_auxv,
7314 linux_supports_z_point_type,
7315 linux_insert_point,
7316 linux_remove_point,
7317 linux_stopped_by_sw_breakpoint,
7318 linux_supports_stopped_by_sw_breakpoint,
7319 linux_stopped_by_hw_breakpoint,
7320 linux_supports_stopped_by_hw_breakpoint,
7321 linux_supports_hardware_single_step,
7322 linux_stopped_by_watchpoint,
7323 linux_stopped_data_address,
7324 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7325 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7326 && defined(PT_TEXT_END_ADDR)
7327 linux_read_offsets,
7328 #else
7329 NULL,
7330 #endif
7331 #ifdef USE_THREAD_DB
7332 thread_db_get_tls_address,
7333 #else
7334 NULL,
7335 #endif
7336 linux_qxfer_spu,
7337 hostio_last_error_from_errno,
7338 linux_qxfer_osdata,
7339 linux_xfer_siginfo,
7340 linux_supports_non_stop,
7341 linux_async,
7342 linux_start_non_stop,
7343 linux_supports_multi_process,
7344 linux_supports_fork_events,
7345 linux_supports_vfork_events,
7346 linux_supports_exec_events,
7347 linux_handle_new_gdb_connection,
7348 #ifdef USE_THREAD_DB
7349 thread_db_handle_monitor_command,
7350 #else
7351 NULL,
7352 #endif
7353 linux_common_core_of_thread,
7354 linux_read_loadmap,
7355 linux_process_qsupported,
7356 linux_supports_tracepoints,
7357 linux_read_pc,
7358 linux_write_pc,
7359 linux_thread_stopped,
7360 NULL,
7361 linux_pause_all,
7362 linux_unpause_all,
7363 linux_stabilize_threads,
7364 linux_install_fast_tracepoint_jump_pad,
7365 linux_emit_ops,
7366 linux_supports_disable_randomization,
7367 linux_get_min_fast_tracepoint_insn_len,
7368 linux_qxfer_libraries_svr4,
7369 linux_supports_agent,
7370 #ifdef HAVE_LINUX_BTRACE
7371 linux_supports_btrace,
7372 linux_enable_btrace,
7373 linux_low_disable_btrace,
7374 linux_low_read_btrace,
7375 linux_low_btrace_conf,
7376 #else
7377 NULL,
7378 NULL,
7379 NULL,
7380 NULL,
7381 NULL,
7382 #endif
7383 linux_supports_range_stepping,
7384 linux_proc_pid_to_exec_file,
7385 linux_mntns_open_cloexec,
7386 linux_mntns_unlink,
7387 linux_mntns_readlink,
7388 linux_breakpoint_kind_from_pc,
7389 linux_sw_breakpoint_from_kind,
7390 linux_proc_tid_get_name,
7391 linux_breakpoint_kind_from_current_state,
7392 linux_supports_software_single_step,
7393 linux_supports_catch_syscall,
7394 linux_get_ipa_tdesc_idx,
7395 };
7396
7397 #ifdef HAVE_LINUX_REGSETS
7398 void
7399 initialize_regsets_info (struct regsets_info *info)
7400 {
7401 for (info->num_regsets = 0;
7402 info->regsets[info->num_regsets].size >= 0;
7403 info->num_regsets++)
7404 ;
7405 }
7406 #endif
7407
7408 void
7409 initialize_low (void)
7410 {
7411 struct sigaction sigchld_action;
7412
7413 memset (&sigchld_action, 0, sizeof (sigchld_action));
7414 set_target_ops (&linux_target_ops);
7415
7416 linux_ptrace_init_warnings ();
7417
7418 sigchld_action.sa_handler = sigchld_handler;
7419 sigemptyset (&sigchld_action.sa_mask);
7420 sigchld_action.sa_flags = SA_RESTART;
7421 sigaction (SIGCHLD, &sigchld_action, NULL);
7422
7423 initialize_low_arch ();
7424
7425 linux_check_ptrace_features ();
7426 }