/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

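/* Remove the first entry in *LISTP whose pid is PID, storing its
   waitpid status in *STATUSP.  Return 1 if such an entry was found,
   0 otherwise.  */
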
static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

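/* Remove LWP's thread from the thread list, and free the lwp_info
   structure itself along with its architecture-specific data.  */
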
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case, we need to report the breakpoint PC.  But, when we're
     trying to step past one of our own breakpoints, that happens to
     have been placed on top of a permanent breakpoint instruction, we
     shouldn't adjust the PC, otherwise the program would keep
     trapping the permanent breakpoint forever.  */
  if ((!lwp->stepping
       || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
	   && lwp->stop_pc == sw_breakpoint_pc))
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  current_thread = saved_thread;
  return 0;
}

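/* Allocate and initialize a new lwp_info for PTID, attach any
   architecture-specific per-thread data, and register the
   corresponding thread in the thread list.  */
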
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

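/* Callback for find_inferior.  Count threads belonging to
   COUNTER->pid; return non-zero (ending the iteration) as soon as a
   second one is seen.  */
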
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

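/* Return non-zero if no more than one thread of process PID is on the
   thread list.  */
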
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

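/* Kill process PID and all of its LWPs, then mourn it.  Returns 0 on
   success, or -1 if PID is not a known process.  */
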
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

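/* Callback for find_inferior.  Detach from one LWP of the process
   whose pid is pointed to by ARGS: clear any pending SIGSTOP, flush
   its registers, detach with the signal it last stopped for, and
   delete the LWP from our lists.  */
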
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

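/* Detach from process PID: stop all of its LWPs, detach from each of
   them, and mourn the process.  Returns 0 on success, or -1 if PID is
   not a known process.  */
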
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

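/* Forget everything about PROCESS: delete all of its LWPs, free its
   private data, and remove it from the process list.  */
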
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

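/* Wait until process PID has exited or been killed, collecting exit
   statuses via waitpid until there are no more children to wait
   for.  */
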
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

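/* Callback for find_inferior.  Return non-zero if ENTRY's lwp id
   matches the ptid pointed to by DATA, falling back to the ptid's pid
   when it has no lwp component.  */
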
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

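/* Find the lwp_info for the LWP specified by PTID, or NULL if none is
   found.  */
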
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Return true if the event indicated by WSTATUS may be caused by a
   breakpoint.  */

static int
wstatus_maybe_breakpoint (int wstatus)
{
  return (WIFSTOPPED (wstatus)
	  && (WSTOPSIG (wstatus) == SIGTRAP
	      /* SIGILL and SIGSEGV are also treated as traps in case a
		 breakpoint is inserted at the current PC.  */
	      || WSTOPSIG (wstatus) == SIGILL
	      || WSTOPSIG (wstatus) == SIGSEGV));
}

/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
}

/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->private->new_inferior)
	{
	  struct thread_info *saved_thread;

	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->private->new_inferior = 0;
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));

      linux_enable_event_reporting (lwpid, proc->attached);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      handle_extended_wait (child, wstat);
      return NULL;
    }

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && check_stopped_by_watchpoint (child))
    ;
  else if (WIFSTOPPED (wstat) && wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}

1941 /* Resume LWPs that are currently stopped without any pending status
1942 to report, but are resumed from the core's perspective. */
1943
1944 static void
1945 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
1946 {
1947 struct thread_info *thread = (struct thread_info *) entry;
1948 struct lwp_info *lp = get_thread_lwp (thread);
1949
1950 if (lp->stopped
1951 && !lp->status_pending_p
1952 && thread->last_resume_kind != resume_stop
1953 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1954 {
1955 int step = thread->last_resume_kind == resume_step;
1956
1957 if (debug_threads)
1958 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
1959 target_pid_to_str (ptid_of (thread)),
1960 paddress (lp->stop_pc),
1961 step);
1962
1963 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
1964 }
1965 }
1966
1967 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1968 match FILTER_PTID (leaving others pending). The PTIDs can be:
1969 minus_one_ptid, to specify any child; a pid PTID, specifying all
1970 lwps of a thread group; or a PTID representing a single lwp. Store
1971 the stop status through the status pointer WSTAT. OPTIONS is
1972 passed to the waitpid call. Return 0 if no event was found and
1973 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1974 were found. Return the PID of the stopped child otherwise. */
1975
1976 static int
1977 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1978 int *wstatp, int options)
1979 {
1980 struct thread_info *event_thread;
1981 struct lwp_info *event_child, *requested_child;
1982 sigset_t block_mask, prev_mask;
1983
1984 retry:
1985 /* N.B. event_thread points to the thread_info struct that contains
1986 event_child. Keep them in sync. */
1987 event_thread = NULL;
1988 event_child = NULL;
1989 requested_child = NULL;
1990
1991 /* Check for a lwp with a pending status. */
1992
1993 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
1994 {
1995 event_thread = (struct thread_info *)
1996 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
1997 if (event_thread != NULL)
1998 event_child = get_thread_lwp (event_thread);
1999 if (debug_threads && event_thread)
2000 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2001 }
2002 else if (!ptid_equal (filter_ptid, null_ptid))
2003 {
2004 requested_child = find_lwp_pid (filter_ptid);
2005
2006 if (stopping_threads == NOT_STOPPING_THREADS
2007 && requested_child->status_pending_p
2008 && requested_child->collecting_fast_tracepoint)
2009 {
2010 enqueue_one_deferred_signal (requested_child,
2011 &requested_child->status_pending);
2012 requested_child->status_pending_p = 0;
2013 requested_child->status_pending = 0;
2014 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2015 }
2016
2017 if (requested_child->suspended
2018 && requested_child->status_pending_p)
2019 {
2020 internal_error (__FILE__, __LINE__,
2021 "requesting an event out of a"
2022 " suspended child?");
2023 }
2024
2025 if (requested_child->status_pending_p)
2026 {
2027 event_child = requested_child;
2028 event_thread = get_lwp_thread (event_child);
2029 }
2030 }
2031
2032 if (event_child != NULL)
2033 {
2034 if (debug_threads)
2035 debug_printf ("Got an event from pending child %ld (%04x)\n",
2036 lwpid_of (event_thread), event_child->status_pending);
2037 *wstatp = event_child->status_pending;
2038 event_child->status_pending_p = 0;
2039 event_child->status_pending = 0;
2040 current_thread = event_thread;
2041 return lwpid_of (event_thread);
2042 }
2043
2044 /* But if we don't find a pending event, we'll have to wait.
2045
2046 We only enter this loop if no process has a pending wait status.
2047 Thus any action taken in response to a wait status inside this
2048 loop is responding as soon as we detect the status, not after any
2049 pending events. */
2050
2051 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2052 all signals while here. */
2053 sigfillset (&block_mask);
2054 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2055
2056 /* Always pull all events out of the kernel. We'll randomly select
2057 an event LWP out of all that have events, to prevent
2058 starvation. */
2059 while (event_child == NULL)
2060 {
2061 pid_t ret = 0;
2062
2063 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2064 quirks:
2065
2066 - If the thread group leader exits while other threads in the
2067 thread group still exist, waitpid(TGID, ...) hangs. That
2068 waitpid won't return an exit status until the other threads
2069 in the group are reaped.
2070
2071 - When a non-leader thread execs, that thread just vanishes
2072 without reporting an exit (so we'd hang if we waited for it
2073 explicitly in that case). The exec event is reported to
2074 the TGID pid (although we don't currently enable exec
2075 events). */
2076 errno = 0;
2077 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2078
2079 if (debug_threads)
2080 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2081 ret, errno ? strerror (errno) : "ERRNO-OK");
2082
2083 if (ret > 0)
2084 {
2085 if (debug_threads)
2086 {
2087 debug_printf ("LLW: waitpid %ld received %s\n",
2088 (long) ret, status_to_str (*wstatp));
2089 }
2090
2091 /* Filter all events. IOW, leave all events pending. We'll
2092 randomly select an event LWP out of all that have events
2093 below. */
2094 linux_low_filter_event (ret, *wstatp);
2095 /* Retry until nothing comes out of waitpid. A single
2096 SIGCHLD can indicate more than one child stopped. */
2097 continue;
2098 }
2099
2100 /* Now that we've pulled all events out of the kernel, resume
2101 LWPs that don't have an interesting event to report. */
2102 if (stopping_threads == NOT_STOPPING_THREADS)
2103 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2104
2105 /* ... and find an LWP with a status to report to the core, if
2106 any. */
2107 event_thread = (struct thread_info *)
2108 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2109 if (event_thread != NULL)
2110 {
2111 event_child = get_thread_lwp (event_thread);
2112 *wstatp = event_child->status_pending;
2113 event_child->status_pending_p = 0;
2114 event_child->status_pending = 0;
2115 break;
2116 }
2117
2118 /* Check for zombie thread group leaders. Those can't be reaped
2119 until all other threads in the thread group are. */
2120 check_zombie_leaders ();
2121
2122 /* If there are no resumed children left in the set of LWPs we
2123 want to wait for, bail. We can't just block in
2124 waitpid/sigsuspend, because lwps might have been left stopped
2125 in trace-stop state, and we'd be stuck forever waiting for
2126 their status to change (which would only happen if we resumed
2127 them). Even if WNOHANG is set, this return code is preferred
2128 over 0 (below), as it is more detailed. */
2129 if ((find_inferior (&all_threads,
2130 not_stopped_callback,
2131 &wait_ptid) == NULL))
2132 {
2133 if (debug_threads)
2134 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2135 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2136 return -1;
2137 }
2138
2139 /* No interesting event to report to the caller. */
2140 if ((options & WNOHANG))
2141 {
2142 if (debug_threads)
2143 debug_printf ("WNOHANG set, no event found\n");
2144
2145 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2146 return 0;
2147 }
2148
2149 /* Block until we get an event reported with SIGCHLD. */
2150 if (debug_threads)
2151 debug_printf ("sigsuspend'ing\n");
2152
2153 sigsuspend (&prev_mask);
2154 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2155 goto retry;
2156 }
2157
2158 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2159
2160 current_thread = event_thread;
2161
2162 /* Check for thread exit. */
2163 if (! WIFSTOPPED (*wstatp))
2164 {
2165 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2166
2167 if (debug_threads)
2168 debug_printf ("LWP %d is the last lwp of process. "
2169 "Process %ld exiting.\n",
2170 pid_of (event_thread), lwpid_of (event_thread));
2171 return lwpid_of (event_thread);
2172 }
2173
2174 return lwpid_of (event_thread);
2175 }
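/* Standalone sketch (not built) of the race-free wait pattern the
   loop above relies on: block SIGCHLD, drain waitpid with WNOHANG,
   then atomically restore the old mask and sleep in sigsuspend, so a
   SIGCHLD delivered between the two calls is not lost.  All
   "example_" names are hypothetical; this assumes SIGCHLD is not
   blocked in the caller's signal mask.  The real loop above also
   passes __WALL.  */
#if 0
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
example_wait_once (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      /* Non-blocking poll; 0 means no child has changed state.  */
      ret = waitpid (-1, wstatp, WNOHANG);
      if (ret != 0)
	break;

      /* Atomically unblock and wait.  A SIGCHLD that arrived while
	 blocked is still pending, so this returns immediately.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
#endif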
2176
2177 /* Wait for an event from child(ren) PTID. PTIDs can be:
2178 minus_one_ptid, to specify any child; a pid PTID, specifying all
2179 lwps of a thread group; or a PTID representing a single lwp. Store
2180 the stop status through the status pointer WSTAT. OPTIONS is
2181 passed to the waitpid call. Return 0 if no event was found and
2182 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2183 were found. Return the PID of the stopped child otherwise. */
2184
2185 static int
2186 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2187 {
2188 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2189 }
2190
2191 /* Count the LWPs that have had events. */
2192
2193 static int
2194 count_events_callback (struct inferior_list_entry *entry, void *data)
2195 {
2196 struct thread_info *thread = (struct thread_info *) entry;
2197 int *count = data;
2198
2199 gdb_assert (count != NULL);
2200
2201 /* Count only resumed LWPs that have an event pending. */
2202 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2203 && thread->last_resume_kind != resume_stop
2204 && thread->status_pending_p)
2205 (*count)++;
2206
2207 return 0;
2208 }
2209
2210 /* Select the LWP (if any) that is currently being single-stepped. */
2211
2212 static int
2213 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2214 {
2215 struct thread_info *thread = (struct thread_info *) entry;
2216 struct lwp_info *lp = get_thread_lwp (thread);
2217
2218 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2219 && thread->last_resume_kind == resume_step
2220 && lp->status_pending_p)
2221 return 1;
2222 else
2223 return 0;
2224 }
2225
2226 /* Select the Nth LWP that has had a SIGTRAP event that should be
2227 reported to GDB. */
2228
2229 static int
2230 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2231 {
2232 struct thread_info *thread = (struct thread_info *) entry;
2233 int *selector = data;
2234
2235 gdb_assert (selector != NULL);
2236
2237 /* Select only resumed LWPs that have an event pending. */
2238 if (thread->last_resume_kind != resume_stop
2239 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2240 && thread->status_pending_p)
2241 if ((*selector)-- == 0)
2242 return 1;
2243
2244 return 0;
2245 }
2246
2247 /* Select one LWP out of those that have events pending. */
2248
2249 static void
2250 select_event_lwp (struct lwp_info **orig_lp)
2251 {
2252 int num_events = 0;
2253 int random_selector;
2254 struct thread_info *event_thread = NULL;
2255
2256 /* In all-stop, give preference to the LWP that is being
2257 single-stepped. There will be at most one, and it's the LWP that
2258 the core is most interested in. If we didn't do this, then we'd
2259 have to handle pending step SIGTRAPs somehow in case the core
2260 later continues the previously-stepped thread, otherwise we'd
2261 report the pending SIGTRAP, and the core, not having stepped the
2262 thread, wouldn't understand what the trap was for, and therefore
2263 would report it to the user as a random signal. */
2264 if (!non_stop)
2265 {
2266 event_thread
2267 = (struct thread_info *) find_inferior (&all_threads,
2268 select_singlestep_lwp_callback,
2269 NULL);
2270 if (event_thread != NULL)
2271 {
2272 if (debug_threads)
2273 debug_printf ("SEL: Select single-step %s\n",
2274 target_pid_to_str (ptid_of (event_thread)));
2275 }
2276 }
2277 if (event_thread == NULL)
2278 {
2279 /* No single-stepping LWP. Select one at random, out of those
2280 which have had SIGTRAP events. */
2281
2282 /* First see how many SIGTRAP events we have. */
2283 find_inferior (&all_threads, count_events_callback, &num_events);
2284
2285 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2286 random_selector = (int)
2287 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2288
2289 if (debug_threads && num_events > 1)
2290 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2291 num_events, random_selector);
2292
2293 event_thread
2294 = (struct thread_info *) find_inferior (&all_threads,
2295 select_event_lwp_callback,
2296 &random_selector);
2297 }
2298
2299 if (event_thread != NULL)
2300 {
2301 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2302
2303 /* Switch the event LWP. */
2304 *orig_lp = event_lp;
2305 }
2306 }
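/* Standalone sketch (not built) of the selection arithmetic used
   above.  rand () yields [0, RAND_MAX], so dividing by RAND_MAX + 1.0
   gives a value in [0, 1), and scaling by N gives an index in [0, N)
   that is less biased than the low-order-bits approach "rand () % N".
   The "example_" names are hypothetical.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

static int
example_pick (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}

int
main (void)
{
  int hits[4] = { 0, 0, 0, 0 };
  int i;

  for (i = 0; i < 100000; i++)
    hits[example_pick (4)]++;

  /* Each bucket should land near 25000.  */
  for (i = 0; i < 4; i++)
    printf ("bucket %d: %d\n", i, hits[i]);
  return 0;
}
#endif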
2307
2308 /* Decrement the suspend count of an LWP. */
2309
2310 static int
2311 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2312 {
2313 struct thread_info *thread = (struct thread_info *) entry;
2314 struct lwp_info *lwp = get_thread_lwp (thread);
2315
2316 /* Ignore EXCEPT. */
2317 if (lwp == except)
2318 return 0;
2319
2320 lwp->suspended--;
2321
2322 gdb_assert (lwp->suspended >= 0);
2323 return 0;
2324 }
2325
2326 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2327 non-NULL. */
2328
2329 static void
2330 unsuspend_all_lwps (struct lwp_info *except)
2331 {
2332 find_inferior (&all_threads, unsuspend_one_lwp, except);
2333 }
2334
2335 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2336 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2337 void *data);
2338 static int lwp_running (struct inferior_list_entry *entry, void *data);
2339 static ptid_t linux_wait_1 (ptid_t ptid,
2340 struct target_waitstatus *ourstatus,
2341 int target_options);
2342
2343 /* Stabilize threads (move out of jump pads).
2344
2345 If a thread is midway collecting a fast tracepoint, we need to
2346 finish the collection and move it out of the jump pad before
2347 reporting the signal.
2348
2349 This avoids recursion while collecting (when a signal arrives
2350 midway, and the signal handler itself collects), which would trash
2351 the trace buffer. In case the user set a breakpoint in a signal
2352 handler, this avoids the backtrace showing the jump pad, etc.
2353 Most importantly, there are certain things we can't do safely if
2354 threads are stopped in a jump pad (or in its callees). For
2355 example:
2356
2357 - starting a new trace run. A thread still collecting the
2358 previous run could trash the trace buffer when resumed. The trace
2359 buffer control structures would have been reset but the thread had
2360 no way to tell. The thread could even be midway through memcpy'ing
2361 to the buffer, which would mean that when resumed, it would clobber
2362 the trace buffer that had been set up for a new run.
2363
2364 - we can't rewrite/reuse the jump pads for new tracepoints
2365 safely. Say you do tstart while a thread is stopped midway through
2366 a collection. When the thread is later resumed, it finishes the
2367 collection, and returns to the jump pad, to execute the original
2368 instruction that was under the tracepoint jump at the time the
2369 older run had been started. If the jump pad has been rewritten
2370 since then for something else in the new run, the thread would now
2371 execute wrong / random instructions. */
2372
2373 static void
2374 linux_stabilize_threads (void)
2375 {
2376 struct thread_info *saved_thread;
2377 struct thread_info *thread_stuck;
2378
2379 thread_stuck
2380 = (struct thread_info *) find_inferior (&all_threads,
2381 stuck_in_jump_pad_callback,
2382 NULL);
2383 if (thread_stuck != NULL)
2384 {
2385 if (debug_threads)
2386 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2387 lwpid_of (thread_stuck));
2388 return;
2389 }
2390
2391 saved_thread = current_thread;
2392
2393 stabilizing_threads = 1;
2394
2395 /* Kick 'em all. */
2396 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2397
2398 /* Loop until all are stopped out of the jump pads. */
2399 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2400 {
2401 struct target_waitstatus ourstatus;
2402 struct lwp_info *lwp;
2403 int wstat;
2404
2405 /* Note that we go through the full wait event loop. While
2406 moving threads out of the jump pad, we need to be able to step
2407 over internal breakpoints and such. */
2408 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2409
2410 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2411 {
2412 lwp = get_thread_lwp (current_thread);
2413
2414 /* Lock it. */
2415 lwp->suspended++;
2416
2417 if (ourstatus.value.sig != GDB_SIGNAL_0
2418 || current_thread->last_resume_kind == resume_stop)
2419 {
2420 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2421 enqueue_one_deferred_signal (lwp, &wstat);
2422 }
2423 }
2424 }
2425
2426 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2427
2428 stabilizing_threads = 0;
2429
2430 current_thread = saved_thread;
2431
2432 if (debug_threads)
2433 {
2434 thread_stuck
2435 = (struct thread_info *) find_inferior (&all_threads,
2436 stuck_in_jump_pad_callback,
2437 NULL);
2438 if (thread_stuck != NULL)
2439 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2440 lwpid_of (thread_stuck));
2441 }
2442 }
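/* Small, standalone illustration (not built) of the W_STOPCODE
   round-trip used above when re-queueing a signal as a wait status:
   W_STOPCODE(sig) expands to ((sig) << 8 | 0x7f), the same encoding
   waitpid uses for a stopped child, so the standard decoding macros
   get the signal back.  The local fallback define below is only for
   making this sketch self-contained.  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
  return 0;
}
#endif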
2443
2444 static void async_file_mark (void);
2445
2446 /* Convenience function that is called when the kernel reports an
2447 event that is not passed out to GDB. */
2448
2449 static ptid_t
2450 ignore_event (struct target_waitstatus *ourstatus)
2451 {
2452 /* If we got an event, there may still be others, as a single
2453 SIGCHLD can indicate more than one child stopped. This forces
2454 another target_wait call. */
2455 async_file_mark ();
2456
2457 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2458 return null_ptid;
2459 }
2460
2461 /* Wait for an event from the inferior process(es), and return its status. */
2462
2463 static ptid_t
2464 linux_wait_1 (ptid_t ptid,
2465 struct target_waitstatus *ourstatus, int target_options)
2466 {
2467 int w;
2468 struct lwp_info *event_child;
2469 int options;
2470 int pid;
2471 int step_over_finished;
2472 int bp_explains_trap;
2473 int maybe_internal_trap;
2474 int report_to_gdb;
2475 int trace_event;
2476 int in_step_range;
2477
2478 if (debug_threads)
2479 {
2480 debug_enter ();
2481 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2482 }
2483
2484 /* Translate generic target options into linux options. */
2485 options = __WALL;
2486 if (target_options & TARGET_WNOHANG)
2487 options |= WNOHANG;
2488
2489 bp_explains_trap = 0;
2490 trace_event = 0;
2491 in_step_range = 0;
2492 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2493
2494 if (ptid_equal (step_over_bkpt, null_ptid))
2495 pid = linux_wait_for_event (ptid, &w, options);
2496 else
2497 {
2498 if (debug_threads)
2499 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2500 target_pid_to_str (step_over_bkpt));
2501 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2502 }
2503
2504 if (pid == 0)
2505 {
2506 gdb_assert (target_options & TARGET_WNOHANG);
2507
2508 if (debug_threads)
2509 {
2510 debug_printf ("linux_wait_1 ret = null_ptid, "
2511 "TARGET_WAITKIND_IGNORE\n");
2512 debug_exit ();
2513 }
2514
2515 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2516 return null_ptid;
2517 }
2518 else if (pid == -1)
2519 {
2520 if (debug_threads)
2521 {
2522 debug_printf ("linux_wait_1 ret = null_ptid, "
2523 "TARGET_WAITKIND_NO_RESUMED\n");
2524 debug_exit ();
2525 }
2526
2527 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2528 return null_ptid;
2529 }
2530
2531 event_child = get_thread_lwp (current_thread);
2532
2533 /* linux_wait_for_event only returns an exit status for the last
2534 child of a process. Report it. */
2535 if (WIFEXITED (w) || WIFSIGNALED (w))
2536 {
2537 if (WIFEXITED (w))
2538 {
2539 ourstatus->kind = TARGET_WAITKIND_EXITED;
2540 ourstatus->value.integer = WEXITSTATUS (w);
2541
2542 if (debug_threads)
2543 {
2544 debug_printf ("linux_wait_1 ret = %s, exited with "
2545 "retcode %d\n",
2546 target_pid_to_str (ptid_of (current_thread)),
2547 WEXITSTATUS (w));
2548 debug_exit ();
2549 }
2550 }
2551 else
2552 {
2553 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2554 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2555
2556 if (debug_threads)
2557 {
2558 debug_printf ("linux_wait_1 ret = %s, terminated with "
2559 "signal %d\n",
2560 target_pid_to_str (ptid_of (current_thread)),
2561 WTERMSIG (w));
2562 debug_exit ();
2563 }
2564 }
2565
2566 return ptid_of (current_thread);
2567 }
2568
2569 /* If this event was not handled before, and is not a SIGTRAP, we
2570 report it. SIGILL and SIGSEGV are also treated as traps in case
2571 a breakpoint is inserted at the current PC. If this target does
2572 not support internal breakpoints at all, we also report the
2573 SIGTRAP without further processing; it's of no concern to us. */
2574 maybe_internal_trap
2575 = (supports_breakpoints ()
2576 && (WSTOPSIG (w) == SIGTRAP
2577 || ((WSTOPSIG (w) == SIGILL
2578 || WSTOPSIG (w) == SIGSEGV)
2579 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2580
2581 if (maybe_internal_trap)
2582 {
2583 /* Handle anything that requires bookkeeping before deciding to
2584 report the event or continue waiting. */
2585
2586 /* First check if we can explain the SIGTRAP with an internal
2587 breakpoint, or if we should possibly report the event to GDB.
2588 Do this before anything that may remove or insert a
2589 breakpoint. */
2590 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2591
2592 /* We have a SIGTRAP, possibly a step-over dance has just
2593 finished. If so, tweak the state machine accordingly,
2594 reinsert breakpoints and delete any reinsert (software
2595 single-step) breakpoints. */
2596 step_over_finished = finish_step_over (event_child);
2597
2598 /* Now invoke the callbacks of any internal breakpoints there. */
2599 check_breakpoints (event_child->stop_pc);
2600
2601 /* Handle tracepoint data collecting. This may overflow the
2602 trace buffer, and cause a tracing stop, removing
2603 breakpoints. */
2604 trace_event = handle_tracepoints (event_child);
2605
2606 if (bp_explains_trap)
2607 {
2608 /* If we stepped or ran into an internal breakpoint, we've
2609 already handled it. So next time we resume (from this
2610 PC), we should step over it. */
2611 if (debug_threads)
2612 debug_printf ("Hit a gdbserver breakpoint.\n");
2613
2614 if (breakpoint_here (event_child->stop_pc))
2615 event_child->need_step_over = 1;
2616 }
2617 }
2618 else
2619 {
2620 /* We have some other signal, possibly a step-over dance was in
2621 progress, and it should be cancelled too. */
2622 step_over_finished = finish_step_over (event_child);
2623 }
2624
2625 /* We have all the data we need. Either report the event to GDB, or
2626 resume threads and keep waiting for more. */
2627
2628 /* If we're collecting a fast tracepoint, finish the collection and
2629 move out of the jump pad before delivering a signal. See
2630 linux_stabilize_threads. */
2631
2632 if (WIFSTOPPED (w)
2633 && WSTOPSIG (w) != SIGTRAP
2634 && supports_fast_tracepoints ()
2635 && agent_loaded_p ())
2636 {
2637 if (debug_threads)
2638 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2639 "to defer or adjust it.\n",
2640 WSTOPSIG (w), lwpid_of (current_thread));
2641
2642 /* Allow debugging the jump pad itself. */
2643 if (current_thread->last_resume_kind != resume_step
2644 && maybe_move_out_of_jump_pad (event_child, &w))
2645 {
2646 enqueue_one_deferred_signal (event_child, &w);
2647
2648 if (debug_threads)
2649 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2650 WSTOPSIG (w), lwpid_of (current_thread));
2651
2652 linux_resume_one_lwp (event_child, 0, 0, NULL);
2653
2654 return ignore_event (ourstatus);
2655 }
2656 }
2657
2658 if (event_child->collecting_fast_tracepoint)
2659 {
2660 if (debug_threads)
2661 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2662 "Check if we're already there.\n",
2663 lwpid_of (current_thread),
2664 event_child->collecting_fast_tracepoint);
2665
2666 trace_event = 1;
2667
2668 event_child->collecting_fast_tracepoint
2669 = linux_fast_tracepoint_collecting (event_child, NULL);
2670
2671 if (event_child->collecting_fast_tracepoint != 1)
2672 {
2673 /* No longer need this breakpoint. */
2674 if (event_child->exit_jump_pad_bkpt != NULL)
2675 {
2676 if (debug_threads)
2677 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2678 "stopping all threads momentarily.\n");
2679
2680 /* Other running threads could hit this breakpoint.
2681 We don't handle moribund locations like GDB does,
2682 instead we always pause all threads when removing
2683 breakpoints, so that any step-over or
2684 decr_pc_after_break adjustment is always taken
2685 care of while the breakpoint is still
2686 inserted. */
2687 stop_all_lwps (1, event_child);
2688
2689 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2690 event_child->exit_jump_pad_bkpt = NULL;
2691
2692 unstop_all_lwps (1, event_child);
2693
2694 gdb_assert (event_child->suspended >= 0);
2695 }
2696 }
2697
2698 if (event_child->collecting_fast_tracepoint == 0)
2699 {
2700 if (debug_threads)
2701 debug_printf ("fast tracepoint finished "
2702 "collecting successfully.\n");
2703
2704 /* We may have a deferred signal to report. */
2705 if (dequeue_one_deferred_signal (event_child, &w))
2706 {
2707 if (debug_threads)
2708 debug_printf ("dequeued one signal.\n");
2709 }
2710 else
2711 {
2712 if (debug_threads)
2713 debug_printf ("no deferred signals.\n");
2714
2715 if (stabilizing_threads)
2716 {
2717 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2718 ourstatus->value.sig = GDB_SIGNAL_0;
2719
2720 if (debug_threads)
2721 {
2722 debug_printf ("linux_wait_1 ret = %s, stopped "
2723 "while stabilizing threads\n",
2724 target_pid_to_str (ptid_of (current_thread)));
2725 debug_exit ();
2726 }
2727
2728 return ptid_of (current_thread);
2729 }
2730 }
2731 }
2732 }
2733
2734 /* Check whether GDB would be interested in this event. */
2735
2736 /* If GDB is not interested in this signal, don't stop other
2737 threads, and don't report it to GDB. Just resume the inferior
2738 right away. We do this for threading-related signals as well as
2739 any that GDB specifically requested we ignore. But never ignore
2740 SIGSTOP if we sent it ourselves, and do not ignore signals when
2741 stepping - they may require special handling to skip the signal
2742 handler. */
2743 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2744 thread library? */
2745 if (WIFSTOPPED (w)
2746 && current_thread->last_resume_kind != resume_step
2747 && (
2748 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2749 (current_process ()->private->thread_db != NULL
2750 && (WSTOPSIG (w) == __SIGRTMIN
2751 || WSTOPSIG (w) == __SIGRTMIN + 1))
2752 ||
2753 #endif
2754 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2755 && !(WSTOPSIG (w) == SIGSTOP
2756 && current_thread->last_resume_kind == resume_stop))))
2757 {
2758 siginfo_t info, *info_p;
2759
2760 if (debug_threads)
2761 debug_printf ("Ignored signal %d for LWP %ld.\n",
2762 WSTOPSIG (w), lwpid_of (current_thread));
2763
2764 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2765 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2766 info_p = &info;
2767 else
2768 info_p = NULL;
2769 linux_resume_one_lwp (event_child, event_child->stepping,
2770 WSTOPSIG (w), info_p);
2771 return ignore_event (ourstatus);
2772 }
2773
2774 /* Note that all addresses are always "out of the step range" when
2775 there's no range to begin with. */
2776 in_step_range = lwp_in_step_range (event_child);
2777
2778 /* If GDB wanted this thread to single step, and the thread is out
2779 of the step range, we always want to report the SIGTRAP, and let
2780 GDB handle it. Watchpoints should always be reported. So should
2781 signals we can't explain. A SIGTRAP we can't explain could be a
2782 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2783 do, we'll be able to handle GDB breakpoints on top of internal
2784 breakpoints, by handling the internal breakpoint and still
2785 reporting the event to GDB. If we don't, we're out of luck, GDB
2786 won't see the breakpoint hit. */
2787 report_to_gdb = (!maybe_internal_trap
2788 || (current_thread->last_resume_kind == resume_step
2789 && !in_step_range)
2790 || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
2791 || (!step_over_finished && !in_step_range
2792 && !bp_explains_trap && !trace_event)
2793 || (gdb_breakpoint_here (event_child->stop_pc)
2794 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2795 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2796
2797 run_breakpoint_commands (event_child->stop_pc);
2798
2799 /* We found no reason GDB would want us to stop. We either hit one
2800 of our own breakpoints, or finished an internal step GDB
2801 shouldn't know about. */
2802 if (!report_to_gdb)
2803 {
2804 if (debug_threads)
2805 {
2806 if (bp_explains_trap)
2807 debug_printf ("Hit a gdbserver breakpoint.\n");
2808 if (step_over_finished)
2809 debug_printf ("Step-over finished.\n");
2810 if (trace_event)
2811 debug_printf ("Tracepoint event.\n");
2812 if (lwp_in_step_range (event_child))
2813 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2814 paddress (event_child->stop_pc),
2815 paddress (event_child->step_range_start),
2816 paddress (event_child->step_range_end));
2817 }
2818
2819 /* We're not reporting this breakpoint to GDB, so apply the
2820 decr_pc_after_break adjustment to the inferior's regcache
2821 ourselves. */
2822
2823 if (the_low_target.set_pc != NULL)
2824 {
2825 struct regcache *regcache
2826 = get_thread_regcache (current_thread, 1);
2827 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2828 }
2829
2830 /* We may have finished stepping over a breakpoint. If so,
2831 we've stopped and suspended all LWPs momentarily except the
2832 stepping one. This is where we resume them all again. We're
2833 going to keep waiting, so use proceed, which handles stepping
2834 over the next breakpoint. */
2835 if (debug_threads)
2836 debug_printf ("proceeding all threads.\n");
2837
2838 if (step_over_finished)
2839 unsuspend_all_lwps (event_child);
2840
2841 proceed_all_lwps ();
2842 return ignore_event (ourstatus);
2843 }
2844
2845 if (debug_threads)
2846 {
2847 if (current_thread->last_resume_kind == resume_step)
2848 {
2849 if (event_child->step_range_start == event_child->step_range_end)
2850 debug_printf ("GDB wanted to single-step, reporting event.\n");
2851 else if (!lwp_in_step_range (event_child))
2852 debug_printf ("Out of step range, reporting event.\n");
2853 }
2854 if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
2855 debug_printf ("Stopped by watchpoint.\n");
2856 else if (gdb_breakpoint_here (event_child->stop_pc))
2857 debug_printf ("Stopped by GDB breakpoint.\n");
2858 debug_printf ("Hit a non-gdbserver trap event.\n");
2860 }
2861
2862 /* Alright, we're going to report a stop. */
2863
2864 if (!stabilizing_threads)
2865 {
2866 /* In all-stop, stop all threads. */
2867 if (!non_stop)
2868 stop_all_lwps (0, NULL);
2869
2870 /* If we're not waiting for a specific LWP, choose an event LWP
2871 from among those that have had events. Giving equal priority
2872 to all LWPs that have had events helps prevent
2873 starvation. */
2874 if (ptid_equal (ptid, minus_one_ptid))
2875 {
2876 event_child->status_pending_p = 1;
2877 event_child->status_pending = w;
2878
2879 select_event_lwp (&event_child);
2880
2881 /* current_thread and event_child must stay in sync. */
2882 current_thread = get_lwp_thread (event_child);
2883
2884 event_child->status_pending_p = 0;
2885 w = event_child->status_pending;
2886 }
2887
2888 if (step_over_finished)
2889 {
2890 if (!non_stop)
2891 {
2892 /* If we were doing a step-over, all other threads but
2893 the stepping one had been paused in start_step_over,
2894 with their suspend counts incremented. We don't want
2895 to do a full unstop/unpause, because we're in
2896 all-stop mode (so we want threads stopped), but we
2897 still need to unsuspend the other threads, to
2898 decrement their `suspended' count back. */
2899 unsuspend_all_lwps (event_child);
2900 }
2901 else
2902 {
2903 /* If we just finished a step-over, then all threads had
2904 been momentarily paused. In all-stop, that's fine,
2905 we want threads stopped by now anyway. In non-stop,
2906 we need to re-resume threads that GDB wanted to be
2907 running. */
2908 unstop_all_lwps (1, event_child);
2909 }
2910 }
2911
2912 /* Stabilize threads (move out of jump pads). */
2913 if (!non_stop)
2914 stabilize_threads ();
2915 }
2916 else
2917 {
2918 /* If we just finished a step-over, then all threads had been
2919 momentarily paused. In all-stop, that's fine, we want
2920 threads stopped by now anyway. In non-stop, we need to
2921 re-resume threads that GDB wanted to be running. */
2922 if (step_over_finished)
2923 unstop_all_lwps (1, event_child);
2924 }
2925
2926 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2927
2928 /* Now that we've selected our final event LWP, un-adjust its PC if
2929 it was a software breakpoint. */
2930 if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
2931 {
2932 int decr_pc = the_low_target.decr_pc_after_break;
2933
2934 if (decr_pc != 0)
2935 {
2936 struct regcache *regcache
2937 = get_thread_regcache (current_thread, 1);
2938 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
2939 }
2940 }
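/* Worked example (illustrative) of the un-adjustment just above, on
   an x86-like target where the breakpoint instruction is one byte
   and the trap reports the address after it (decr_pc_after_break
   == 1), assuming stop_pc was recorded as the breakpoint address:

     breakpoint planted at:        0x1000
     PC reported by the trap:      0x1001
     stop_pc recorded earlier:     0x1000  (0x1001 - decr_pc)
     PC written back to regcache:  0x1001  (stop_pc + decr_pc)  */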
2941
2942 if (current_thread->last_resume_kind == resume_stop
2943 && WSTOPSIG (w) == SIGSTOP)
2944 {
2945 /* A thread that has been requested to stop by GDB with vCont;t
2946 stopped cleanly, so report it as SIG0. The use of
2947 SIGSTOP is an implementation detail. */
2948 ourstatus->value.sig = GDB_SIGNAL_0;
2949 }
2950 else if (current_thread->last_resume_kind == resume_stop
2951 && WSTOPSIG (w) != SIGSTOP)
2952 {
2953 /* A thread that has been requested to stop by GDB with vCont;t,
2954 but it stopped for other reasons. */
2955 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2956 }
2957 else
2958 {
2959 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2960 }
2961
2962 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2963
2964 if (debug_threads)
2965 {
2966 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2967 target_pid_to_str (ptid_of (current_thread)),
2968 ourstatus->kind, ourstatus->value.sig);
2969 debug_exit ();
2970 }
2971
2972 return ptid_of (current_thread);
2973 }
2974
2975 /* Get rid of any pending event in the pipe. */
2976 static void
2977 async_file_flush (void)
2978 {
2979 int ret;
2980 char buf;
2981
2982 do
2983 ret = read (linux_event_pipe[0], &buf, 1);
2984 while (ret >= 0 || (ret == -1 && errno == EINTR));
2985 }
2986
2987 /* Put something in the pipe, so the event loop wakes up. */
2988 static void
2989 async_file_mark (void)
2990 {
2991 int ret;
2992
2993 async_file_flush ();
2994
2995 do
2996 ret = write (linux_event_pipe[1], "+", 1);
2997 while (ret == 0 || (ret == -1 && errno == EINTR));
2998
2999 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3000 be awakened anyway. */
3001 }
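/* Hypothetical sketch (not built) of how such an event pipe can be
   set up so the helpers above never block: both ends non-blocking,
   making the flush loop's read () return -1/EAGAIN once drained, and
   mark's write () fail harmlessly with EAGAIN when the pipe is
   already full.  The "example_" name is not gdbserver's.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
example_setup_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    return -1;

  return 0;
}
#endif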
3002
3003 static ptid_t
3004 linux_wait (ptid_t ptid,
3005 struct target_waitstatus *ourstatus, int target_options)
3006 {
3007 ptid_t event_ptid;
3008
3009 /* Flush the async file first. */
3010 if (target_is_async_p ())
3011 async_file_flush ();
3012
3013 do
3014 {
3015 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3016 }
3017 while ((target_options & TARGET_WNOHANG) == 0
3018 && ptid_equal (event_ptid, null_ptid)
3019 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3020
3021 /* If at least one stop was reported, there may be more. A single
3022 SIGCHLD can signal more than one child stop. */
3023 if (target_is_async_p ()
3024 && (target_options & TARGET_WNOHANG) != 0
3025 && !ptid_equal (event_ptid, null_ptid))
3026 async_file_mark ();
3027
3028 return event_ptid;
3029 }
3030
3031 /* Send a signal to an LWP. */
3032
3033 static int
3034 kill_lwp (unsigned long lwpid, int signo)
3035 {
3036 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3037 fails, then we are not using nptl threads and we should be using kill. */
3038
3039 #ifdef __NR_tkill
3040 {
3041 static int tkill_failed;
3042
3043 if (!tkill_failed)
3044 {
3045 int ret;
3046
3047 errno = 0;
3048 ret = syscall (__NR_tkill, lwpid, signo);
3049 if (errno != ENOSYS)
3050 return ret;
3051 tkill_failed = 1;
3052 }
3053 }
3054 #endif
3055
3056 return kill (lwpid, signo);
3057 }
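/* Related sketch (not built), an aside rather than what gdbserver
   does here: on kernels that provide it, tgkill is the safer
   thread-directed signal call, since it also checks the thread-group
   id and so cannot hit a recycled TID in some other process.  The
   "example_" name is hypothetical.  */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long
example_tgkill (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif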
3058
3059 void
3060 linux_stop_lwp (struct lwp_info *lwp)
3061 {
3062 send_sigstop (lwp);
3063 }
3064
3065 static void
3066 send_sigstop (struct lwp_info *lwp)
3067 {
3068 int pid;
3069
3070 pid = lwpid_of (get_lwp_thread (lwp));
3071
3072 /* If we already have a pending stop signal for this LWP, don't
3073 send another. */
3074 if (lwp->stop_expected)
3075 {
3076 if (debug_threads)
3077 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3078
3079 return;
3080 }
3081
3082 if (debug_threads)
3083 debug_printf ("Sending sigstop to lwp %d\n", pid);
3084
3085 lwp->stop_expected = 1;
3086 kill_lwp (pid, SIGSTOP);
3087 }
3088
3089 static int
3090 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3091 {
3092 struct thread_info *thread = (struct thread_info *) entry;
3093 struct lwp_info *lwp = get_thread_lwp (thread);
3094
3095 /* Ignore EXCEPT. */
3096 if (lwp == except)
3097 return 0;
3098
3099 if (lwp->stopped)
3100 return 0;
3101
3102 send_sigstop (lwp);
3103 return 0;
3104 }
3105
3106 /* Increment the suspend count of an LWP, and stop it, if not stopped
3107 yet. */
3108 static int
3109 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3110 void *except)
3111 {
3112 struct thread_info *thread = (struct thread_info *) entry;
3113 struct lwp_info *lwp = get_thread_lwp (thread);
3114
3115 /* Ignore EXCEPT. */
3116 if (lwp == except)
3117 return 0;
3118
3119 lwp->suspended++;
3120
3121 return send_sigstop_callback (entry, except);
3122 }
3123
3124 static void
3125 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3126 {
3127 /* It's dead, really. */
3128 lwp->dead = 1;
3129
3130 /* Store the exit status for later. */
3131 lwp->status_pending_p = 1;
3132 lwp->status_pending = wstat;
3133
3134 /* Prevent trying to stop it. */
3135 lwp->stopped = 1;
3136
3137 /* No further stops are expected from a dead lwp. */
3138 lwp->stop_expected = 0;
3139 }
3140
3141 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3142
3143 static void
3144 wait_for_sigstop (void)
3145 {
3146 struct thread_info *saved_thread;
3147 ptid_t saved_tid;
3148 int wstat;
3149 int ret;
3150
3151 saved_thread = current_thread;
3152 if (saved_thread != NULL)
3153 saved_tid = saved_thread->entry.id;
3154 else
3155 saved_tid = null_ptid; /* avoid bogus unused warning */
3156
3157 if (debug_threads)
3158 debug_printf ("wait_for_sigstop: pulling events\n");
3159
3160 /* Passing NULL_PTID as filter indicates we want all events to be
3161 left pending. Eventually this returns when there are no
3162 unwaited-for children left. */
3163 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3164 &wstat, __WALL);
3165 gdb_assert (ret == -1);
3166
3167 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3168 current_thread = saved_thread;
3169 else
3170 {
3171 if (debug_threads)
3172 debug_printf ("Previously current thread died.\n");
3173
3174 if (non_stop)
3175 {
3176 /* We can't change the current inferior behind GDB's back,
3177 otherwise, a subsequent command may apply to the wrong
3178 process. */
3179 current_thread = NULL;
3180 }
3181 else
3182 {
3183 /* Set a valid thread as current. */
3184 set_desired_thread (0);
3185 }
3186 }
3187 }
3188
3189 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3190 move it out, because we need to report the stop event to GDB. For
3191 example, if the user puts a breakpoint in the jump pad, it's
3192 because she wants to debug it. */
3193
3194 static int
3195 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3196 {
3197 struct thread_info *thread = (struct thread_info *) entry;
3198 struct lwp_info *lwp = get_thread_lwp (thread);
3199
3200 gdb_assert (lwp->suspended == 0);
3201 gdb_assert (lwp->stopped);
3202
3203 /* Allow debugging the jump pad, gdb_collect, etc. */
3204 return (supports_fast_tracepoints ()
3205 && agent_loaded_p ()
3206 && (gdb_breakpoint_here (lwp->stop_pc)
3207 || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
3208 || thread->last_resume_kind == resume_step)
3209 && linux_fast_tracepoint_collecting (lwp, NULL));
3210 }
3211
3212 static void
3213 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3214 {
3215 struct thread_info *thread = (struct thread_info *) entry;
3216 struct lwp_info *lwp = get_thread_lwp (thread);
3217 int *wstat;
3218
3219 gdb_assert (lwp->suspended == 0);
3220 gdb_assert (lwp->stopped);
3221
3222 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3223
3224 /* Allow debugging the jump pad, gdb_collect, etc. */
3225 if (!gdb_breakpoint_here (lwp->stop_pc)
3226 && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
3227 && thread->last_resume_kind != resume_step
3228 && maybe_move_out_of_jump_pad (lwp, wstat))
3229 {
3230 if (debug_threads)
3231 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3232 lwpid_of (thread));
3233
3234 if (wstat)
3235 {
3236 lwp->status_pending_p = 0;
3237 enqueue_one_deferred_signal (lwp, wstat);
3238
3239 if (debug_threads)
3240 debug_printf ("Signal %d for LWP %ld deferred "
3241 "(in jump pad)\n",
3242 WSTOPSIG (*wstat), lwpid_of (thread));
3243 }
3244
3245 linux_resume_one_lwp (lwp, 0, 0, NULL);
3246 }
3247 else
3248 lwp->suspended++;
3249 }
3250
3251 static int
3252 lwp_running (struct inferior_list_entry *entry, void *data)
3253 {
3254 struct thread_info *thread = (struct thread_info *) entry;
3255 struct lwp_info *lwp = get_thread_lwp (thread);
3256
3257 if (lwp->dead)
3258 return 0;
3259 if (lwp->stopped)
3260 return 0;
3261 return 1;
3262 }
3263
3264 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3265 If SUSPEND, then also increase the suspend count of every LWP,
3266 except EXCEPT. */
3267
3268 static void
3269 stop_all_lwps (int suspend, struct lwp_info *except)
3270 {
3271 /* Should not be called recursively. */
3272 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3273
3274 if (debug_threads)
3275 {
3276 debug_enter ();
3277 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3278 suspend ? "stop-and-suspend" : "stop",
3279 except != NULL
3280 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3281 : "none");
3282 }
3283
3284 stopping_threads = (suspend
3285 ? STOPPING_AND_SUSPENDING_THREADS
3286 : STOPPING_THREADS);
3287
3288 if (suspend)
3289 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3290 else
3291 find_inferior (&all_threads, send_sigstop_callback, except);
3292 wait_for_sigstop ();
3293 stopping_threads = NOT_STOPPING_THREADS;
3294
3295 if (debug_threads)
3296 {
3297 debug_printf ("stop_all_lwps done, setting stopping_threads "
3298 "back to !stopping\n");
3299 debug_exit ();
3300 }
3301 }
3302
3303 /* Resume execution of the inferior process.
3304 If STEP is nonzero, single-step it.
3305 If SIGNAL is nonzero, give it that signal. */
3306
3307 static void
3308 linux_resume_one_lwp (struct lwp_info *lwp,
3309 int step, int signal, siginfo_t *info)
3310 {
3311 struct thread_info *thread = get_lwp_thread (lwp);
3312 struct thread_info *saved_thread;
3313 int fast_tp_collecting;
3314
3315 if (lwp->stopped == 0)
3316 return;
3317
3318 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3319
3320 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3321
3322 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3323 user used the "jump" command, or "set $pc = foo"). */
3324 if (lwp->stop_pc != get_pc (lwp))
3325 {
3326 /* Collecting 'while-stepping' actions doesn't make sense
3327 anymore. */
3328 release_while_stepping_state_list (thread);
3329 }
3330
3331 /* If we have pending signals or status, and a new signal, enqueue the
3332 signal. Also enqueue the signal if we are waiting to reinsert a
3333 breakpoint; it will be picked up again below. */
3334 if (signal != 0
3335 && (lwp->status_pending_p
3336 || lwp->pending_signals != NULL
3337 || lwp->bp_reinsert != 0
3338 || fast_tp_collecting))
3339 {
3340 struct pending_signals *p_sig;
3341 p_sig = xmalloc (sizeof (*p_sig));
3342 p_sig->prev = lwp->pending_signals;
3343 p_sig->signal = signal;
3344 if (info == NULL)
3345 memset (&p_sig->info, 0, sizeof (siginfo_t));
3346 else
3347 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3348 lwp->pending_signals = p_sig;
3349 }
3350
3351 if (lwp->status_pending_p)
3352 {
3353 if (debug_threads)
3354 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3355 " has pending status\n",
3356 lwpid_of (thread), step ? "step" : "continue", signal,
3357 lwp->stop_expected ? "expected" : "not expected");
3358 return;
3359 }
3360
3361 saved_thread = current_thread;
3362 current_thread = thread;
3363
3364 if (debug_threads)
3365 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3366 lwpid_of (thread), step ? "step" : "continue", signal,
3367 lwp->stop_expected ? "expected" : "not expected");
3368
3369 /* This bit needs some thinking about. If we get a signal that
3370 we must report while a single-step reinsert is still pending,
3371 we often end up resuming the thread. It might be better to
3372 (ew) allow a stack of pending events; then we could be sure that
3373 the reinsert happened right away and not lose any signals.
3374
3375 Making this stack would also shrink the window in which breakpoints are
3376 uninserted (see comment in linux_wait_for_lwp) but not enough for
3377 complete correctness, so it won't solve that problem. It may be
3378 worthwhile just to solve this one, however. */
3379 if (lwp->bp_reinsert != 0)
3380 {
3381 if (debug_threads)
3382 debug_printf (" pending reinsert at 0x%s\n",
3383 paddress (lwp->bp_reinsert));
3384
3385 if (can_hardware_single_step ())
3386 {
3387 if (fast_tp_collecting == 0)
3388 {
3389 if (step == 0)
3390 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3391 if (lwp->suspended)
3392 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3393 lwp->suspended);
3394 }
3395
3396 step = 1;
3397 }
3398
3399 /* Postpone any pending signal. It was enqueued above. */
3400 signal = 0;
3401 }
3402
3403 if (fast_tp_collecting == 1)
3404 {
3405 if (debug_threads)
3406 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3407 " (exit-jump-pad-bkpt)\n",
3408 lwpid_of (thread));
3409
3410 /* Postpone any pending signal. It was enqueued above. */
3411 signal = 0;
3412 }
3413 else if (fast_tp_collecting == 2)
3414 {
3415 if (debug_threads)
3416 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3417 " single-stepping\n",
3418 lwpid_of (thread));
3419
3420 if (can_hardware_single_step ())
3421 step = 1;
3422 else
3423 {
3424 internal_error (__FILE__, __LINE__,
3425 "moving out of jump pad single-stepping"
3426 " not implemented on this target");
3427 }
3428
3429 /* Postpone any pending signal. It was enqueued above. */
3430 signal = 0;
3431 }
3432
3433 /* If we have while-stepping actions in this thread, set it stepping.
3434 If we have a signal to deliver, it may or may not be set to
3435 SIG_IGN; we don't know. Assume so, and allow collecting
3436 while-stepping into a signal handler. A possible smart thing to
3437 do would be to set an internal breakpoint at the signal return
3438 address, continue, and carry on catching this while-stepping
3439 action only when that breakpoint is hit. A future
3440 enhancement. */
3441 if (thread->while_stepping != NULL
3442 && can_hardware_single_step ())
3443 {
3444 if (debug_threads)
3445 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3446 lwpid_of (thread));
3447 step = 1;
3448 }
3449
3450 if (the_low_target.get_pc != NULL)
3451 {
3452 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3453
3454 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3455
3456 if (debug_threads)
3457 {
3458 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3459 (long) lwp->stop_pc);
3460 }
3461 }
3462
3463 /* If we have pending signals, consume one unless we are trying to
3464 reinsert a breakpoint or we're trying to finish a fast tracepoint
3465 collect. */
3466 if (lwp->pending_signals != NULL
3467 && lwp->bp_reinsert == 0
3468 && fast_tp_collecting == 0)
3469 {
3470 struct pending_signals **p_sig;
3471
3472 p_sig = &lwp->pending_signals;
3473 while ((*p_sig)->prev != NULL)
3474 p_sig = &(*p_sig)->prev;
3475
3476 signal = (*p_sig)->signal;
3477 if ((*p_sig)->info.si_signo != 0)
3478 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3479 &(*p_sig)->info);
3480
3481 free (*p_sig);
3482 *p_sig = NULL;
3483 }
3484
3485 if (the_low_target.prepare_to_resume != NULL)
3486 the_low_target.prepare_to_resume (lwp);
3487
3488 regcache_invalidate_thread (thread);
3489 errno = 0;
3490 lwp->stopped = 0;
3491 lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
3492 lwp->stepping = step;
3493 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3494 (PTRACE_TYPE_ARG3) 0,
3495 /* Coerce to a uintptr_t first to avoid potential gcc warning
3496 of coercing an 8 byte integer to a 4 byte pointer. */
3497 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3498
3499 current_thread = saved_thread;
3500 if (errno)
3501 {
3502 /* ESRCH from ptrace either means that the thread was already
3503 running (an error) or that it is gone (a race condition). If
3504 it's gone, we will get a notification the next time we wait,
3505 so we can ignore the error. We could differentiate these
3506 two, but it's tricky without waiting; the thread still exists
3507 as a zombie, so sending it signal 0 would succeed. So just
3508 ignore ESRCH. */
3509 if (errno == ESRCH)
3510 return;
3511
3512 perror_with_name ("ptrace");
3513 }
3514 }
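/* Standalone sketch (not built) of the pending-signals queue
   discipline used above: entries are pushed at the head through the
   PREV link, and the consumer walks to the tail, so signals are
   re-delivered in arrival (FIFO) order.  Simplified: the real
   struct pending_signals also carries a siginfo_t, and the
   "example_" names are hypothetical.  */
#if 0
#include <stdlib.h>

struct example_sig
{
  int signal;
  struct example_sig *prev;
};

static void
example_enqueue (struct example_sig **head, int signal)
{
  struct example_sig *p = malloc (sizeof (*p));

  p->signal = signal;
  p->prev = *head;		/* Newest entry becomes the head.  */
  *head = p;
}

/* Caller must ensure the queue is non-empty.  */
static int
example_dequeue_oldest (struct example_sig **head)
{
  struct example_sig **p = head;
  int signal;

  while ((*p)->prev != NULL)	/* Walk to the oldest entry.  */
    p = &(*p)->prev;

  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}
#endif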
3515
3516 struct thread_resume_array
3517 {
3518 struct thread_resume *resume;
3519 size_t n;
3520 };
3521
3522 /* This function is called once per thread via find_inferior.
3523 ARG is a pointer to a thread_resume_array struct.
3524 We look up the thread specified by ENTRY in ARG, and mark the thread
3525 with a pointer to the appropriate resume request.
3526
3527 This algorithm is O(threads * resume elements), but the number of
3528 resume elements is small (and will remain small at least until GDB
3529 supports thread suspension). */
3530
3531 static int
3532 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3533 {
3534 struct thread_info *thread = (struct thread_info *) entry;
3535 struct lwp_info *lwp = get_thread_lwp (thread);
3536 int ndx;
3537 struct thread_resume_array *r;
3538
3539 r = arg;
3540
3541 for (ndx = 0; ndx < r->n; ndx++)
3542 {
3543 ptid_t ptid = r->resume[ndx].thread;
3544 if (ptid_equal (ptid, minus_one_ptid)
3545 || ptid_equal (ptid, entry->id)
3546 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3547 of PID'. */
3548 || (ptid_get_pid (ptid) == pid_of (thread)
3549 && (ptid_is_pid (ptid)
3550 || ptid_get_lwp (ptid) == -1)))
3551 {
3552 if (r->resume[ndx].kind == resume_stop
3553 && thread->last_resume_kind == resume_stop)
3554 {
3555 if (debug_threads)
3556 debug_printf ("already %s LWP %ld at GDB's request\n",
3557 (thread->last_status.kind
3558 == TARGET_WAITKIND_STOPPED)
3559 ? "stopped"
3560 : "stopping",
3561 lwpid_of (thread));
3562
3563 continue;
3564 }
3565
3566 lwp->resume = &r->resume[ndx];
3567 thread->last_resume_kind = lwp->resume->kind;
3568
3569 lwp->step_range_start = lwp->resume->step_range_start;
3570 lwp->step_range_end = lwp->resume->step_range_end;
3571
3572 /* If we had a deferred signal to report, dequeue one now.
3573 This can happen if LWP gets more than one signal while
3574 trying to get out of a jump pad. */
3575 if (lwp->stopped
3576 && !lwp->status_pending_p
3577 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3578 {
3579 lwp->status_pending_p = 1;
3580
3581 if (debug_threads)
3582 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3583 "leaving status pending.\n",
3584 WSTOPSIG (lwp->status_pending),
3585 lwpid_of (thread));
3586 }
3587
3588 return 0;
3589 }
3590 }
3591
3592 /* No resume action for this thread. */
3593 lwp->resume = NULL;
3594
3595 return 0;
3596 }
3597
3598 /* find_inferior callback for linux_resume.
3599 Set *FLAG_P if this lwp has an interesting status pending. */
3600
3601 static int
3602 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3603 {
3604 struct thread_info *thread = (struct thread_info *) entry;
3605 struct lwp_info *lwp = get_thread_lwp (thread);
3606
3607 /* LWPs which will not be resumed are not interesting, because
3608 we might not wait for them next time through linux_wait. */
3609 if (lwp->resume == NULL)
3610 return 0;
3611
3612 if (thread_still_has_status_pending_p (thread))
3613 * (int *) flag_p = 1;
3614
3615 return 0;
3616 }
3617
3618 /* Return 1 if this lwp that GDB wants running is stopped at an
3619 internal breakpoint that we need to step over. It assumes that any
3620 required STOP_PC adjustment has already been propagated to the
3621 inferior's regcache. */
3622
3623 static int
3624 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3625 {
3626 struct thread_info *thread = (struct thread_info *) entry;
3627 struct lwp_info *lwp = get_thread_lwp (thread);
3628 struct thread_info *saved_thread;
3629 CORE_ADDR pc;
3630
3631 /* LWPs which will not be resumed are not interesting, because we
3632 might not wait for them next time through linux_wait. */
3633
3634 if (!lwp->stopped)
3635 {
3636 if (debug_threads)
3637 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3638 lwpid_of (thread));
3639 return 0;
3640 }
3641
3642 if (thread->last_resume_kind == resume_stop)
3643 {
3644 if (debug_threads)
3645 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3646 " stopped\n",
3647 lwpid_of (thread));
3648 return 0;
3649 }
3650
3651 gdb_assert (lwp->suspended >= 0);
3652
3653 if (lwp->suspended)
3654 {
3655 if (debug_threads)
3656 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3657 lwpid_of (thread));
3658 return 0;
3659 }
3660
3661 if (!lwp->need_step_over)
3662 {
3663 if (debug_threads)
3664 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3665 }
3666
3667 if (lwp->status_pending_p)
3668 {
3669 if (debug_threads)
3670 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3671 " status.\n",
3672 lwpid_of (thread));
3673 return 0;
3674 }
3675
3676 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3677 or we have. */
3678 pc = get_pc (lwp);
3679
3680 /* If the PC has changed since we stopped, then don't do anything,
3681 and let the breakpoint/tracepoint be hit. This happens if, for
3682 instance, GDB handled the decr_pc_after_break subtraction itself,
3683 GDB is OOL stepping this thread, or the user has issued a "jump"
3684 command, or poked the thread's registers herself. */
3685 if (pc != lwp->stop_pc)
3686 {
3687 if (debug_threads)
3688 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3689 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3690 lwpid_of (thread),
3691 paddress (lwp->stop_pc), paddress (pc));
3692
3693 lwp->need_step_over = 0;
3694 return 0;
3695 }
3696
3697 saved_thread = current_thread;
3698 current_thread = thread;
3699
3700 /* We can only step over breakpoints we know about. */
3701 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3702 {
3703 /* Don't step over a breakpoint that GDB expects to hit
3704 though. If the condition is being evaluated on the target's side
3705 and it evaluates to false, step over this breakpoint as well. */
3706 if (gdb_breakpoint_here (pc)
3707 && gdb_condition_true_at_breakpoint (pc)
3708 && gdb_no_commands_at_breakpoint (pc))
3709 {
3710 if (debug_threads)
3711 debug_printf ("Need step over [LWP %ld]? yes, but found"
3712 " GDB breakpoint at 0x%s; skipping step over\n",
3713 lwpid_of (thread), paddress (pc));
3714
3715 current_thread = saved_thread;
3716 return 0;
3717 }
3718 else
3719 {
3720 if (debug_threads)
3721 debug_printf ("Need step over [LWP %ld]? yes, "
3722 "found breakpoint at 0x%s\n",
3723 lwpid_of (thread), paddress (pc));
3724
3725 /* We've found an lwp that needs stepping over --- return 1 so
3726 that find_inferior stops looking. */
3727 current_thread = saved_thread;
3728
3729 /* If the step over is cancelled, this is set again. */
3730 lwp->need_step_over = 0;
3731 return 1;
3732 }
3733 }
3734
3735 current_thread = saved_thread;
3736
3737 if (debug_threads)
3738 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3739 " at 0x%s\n",
3740 lwpid_of (thread), paddress (pc));
3741
3742 return 0;
3743 }
3744
3745 /* Start a step-over operation on LWP. When LWP has stopped at a
3746 breakpoint, to make progress, we need to move the breakpoint out
3747 of the way. If we let other threads run while we do that, they may
3748 pass by the breakpoint location and miss hitting it. To avoid
3749 that, a step-over momentarily stops all threads while LWP is
3750 single-stepped with the breakpoint temporarily uninserted from
3751 the inferior. When the single-step finishes, we reinsert the
3752 breakpoint and let all threads that are supposed to be running
3753 run again.
3754
3755 On targets that don't support hardware single-step, we don't
3756 currently support full software single-stepping. Instead, we only
3757 support stepping over the thread event breakpoint, by asking the
3758 low target where to place a reinsert breakpoint. Since this
3759 routine assumes the breakpoint being stepped over is a thread event
3760 breakpoint, it usually assumes the return address of the current
3761 function is a good enough place to set the reinsert breakpoint. */
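
/* Editorial sketch, not part of the original source: the step-over
   sequence that start_step_over and finish_step_over implement is
   roughly, using only calls that appear in this file:

     stop_all_lwps (1, lwp);                  // stop/suspend all but LWP
     uninsert_breakpoints_at (pc);            // lift the breakpoint at PC
     linux_resume_one_lwp (lwp, 1, 0, NULL);  // single-step LWP
     ... the wait machinery later observes the step finishing ...
     finish_step_over (lwp);                  // reinsert breakpoints at PC
     unstop_all_lwps (1, lwp);                // unsuspend and resume others  */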
3762
3763 static int
3764 start_step_over (struct lwp_info *lwp)
3765 {
3766 struct thread_info *thread = get_lwp_thread (lwp);
3767 struct thread_info *saved_thread;
3768 CORE_ADDR pc;
3769 int step;
3770
3771 if (debug_threads)
3772 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3773 lwpid_of (thread));
3774
3775 stop_all_lwps (1, lwp);
3776 gdb_assert (lwp->suspended == 0);
3777
3778 if (debug_threads)
3779 debug_printf ("Done stopping all threads for step-over.\n");
3780
3781 /* Note, we should always reach here with an already adjusted PC,
3782 either by GDB (if we're resuming due to GDB's request), or by our
3783 caller, if we just finished handling an internal breakpoint GDB
3784 shouldn't care about. */
3785 pc = get_pc (lwp);
3786
3787 saved_thread = current_thread;
3788 current_thread = thread;
3789
3790 lwp->bp_reinsert = pc;
3791 uninsert_breakpoints_at (pc);
3792 uninsert_fast_tracepoint_jumps_at (pc);
3793
3794 if (can_hardware_single_step ())
3795 {
3796 step = 1;
3797 }
3798 else
3799 {
3800 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3801 set_reinsert_breakpoint (raddr);
3802 step = 0;
3803 }
3804
3805 current_thread = saved_thread;
3806
3807 linux_resume_one_lwp (lwp, step, 0, NULL);
3808
3809 /* Require next event from this LWP. */
3810 step_over_bkpt = thread->entry.id;
3811 return 1;
3812 }
3813
3814 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3815 start_step_over, if still there, and delete any reinsert
3816 breakpoints we've set, on non-hardware single-step targets. */
3817
3818 static int
3819 finish_step_over (struct lwp_info *lwp)
3820 {
3821 if (lwp->bp_reinsert != 0)
3822 {
3823 if (debug_threads)
3824 debug_printf ("Finished step over.\n");
3825
3826 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3827 may be no breakpoint to reinsert there by now. */
3828 reinsert_breakpoints_at (lwp->bp_reinsert);
3829 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3830
3831 lwp->bp_reinsert = 0;
3832
3833 /* Delete any software-single-step reinsert breakpoints. No
3834 longer needed. We don't have to worry about other threads
3835 hitting this trap, and later not being able to explain it,
3836 because we were stepping over a breakpoint, and we hold all
3837 threads but LWP stopped while doing that. */
3838 if (!can_hardware_single_step ())
3839 delete_reinsert_breakpoints ();
3840
3841 step_over_bkpt = null_ptid;
3842 return 1;
3843 }
3844 else
3845 return 0;
3846 }
3847
3848 /* This function is called once per thread. We check the thread's resume
3849 request, which will tell us whether to resume, step, or leave the thread
3850 stopped; and what signal, if any, it should be sent.
3851
3852 For threads for which we aren't explicitly told otherwise, we preserve
3853 the stepping flag; this is used for stepping over gdbserver-placed
3854 breakpoints.
3855
3856 If pending_flags was set in any thread, we queue any needed
3857 signals, since we won't actually resume. We already have a pending
3858 event to report, so we don't need to preserve any step requests;
3859 they should be re-issued if necessary. */
3860
3861 static int
3862 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3863 {
3864 struct thread_info *thread = (struct thread_info *) entry;
3865 struct lwp_info *lwp = get_thread_lwp (thread);
3866 int step;
3867 int leave_all_stopped = * (int *) arg;
3868 int leave_pending;
3869
3870 if (lwp->resume == NULL)
3871 return 0;
3872
3873 if (lwp->resume->kind == resume_stop)
3874 {
3875 if (debug_threads)
3876 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3877
3878 if (!lwp->stopped)
3879 {
3880 if (debug_threads)
3881 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3882
3883 /* Stop the thread, and wait for the event asynchronously,
3884 through the event loop. */
3885 send_sigstop (lwp);
3886 }
3887 else
3888 {
3889 if (debug_threads)
3890 debug_printf ("already stopped LWP %ld\n",
3891 lwpid_of (thread));
3892
3893 /* The LWP may have been stopped in an internal event that
3894 was not meant to be notified back to GDB (e.g., gdbserver
3895 breakpoint), so we should be reporting a stop event in
3896 this case too. */
3897
3898 /* If the thread already has a pending SIGSTOP, this is a
3899 no-op. Otherwise, something later will presumably resume
3900 the thread and this will cause it to cancel any pending
3901 operation, due to last_resume_kind == resume_stop. If
3902 the thread already has a pending status to report, we
3903 will still report it the next time we wait - see
3904 status_pending_p_callback. */
3905
3906 /* If we already have a pending signal to report, then
3907 there's no need to queue a SIGSTOP, as this means we're
3908 midway through moving the LWP out of the jumppad, and we
3909 will report the pending signal as soon as that is
3910 finished. */
3911 if (lwp->pending_signals_to_report == NULL)
3912 send_sigstop (lwp);
3913 }
3914
3915 /* For stop requests, we're done. */
3916 lwp->resume = NULL;
3917 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3918 return 0;
3919 }
3920
3921 /* If this thread, which is about to be resumed, has a pending status,
3922 then don't resume any threads - we can just report the pending
3923 status. Make sure to queue any signals that would otherwise be
3924 sent. In all-stop mode, we make this decision based on whether *any*
3925 thread has a pending status. If there's a thread that needs the
3926 step-over-breakpoint dance, then don't resume any other thread
3927 but that particular one. */
3928 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3929
3930 if (!leave_pending)
3931 {
3932 if (debug_threads)
3933 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3934
3935 step = (lwp->resume->kind == resume_step);
3936 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3937 }
3938 else
3939 {
3940 if (debug_threads)
3941 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3942
3943 /* If we have a new signal, enqueue the signal. */
3944 if (lwp->resume->sig != 0)
3945 {
3946 struct pending_signals *p_sig;
3947 p_sig = xmalloc (sizeof (*p_sig));
3948 p_sig->prev = lwp->pending_signals;
3949 p_sig->signal = lwp->resume->sig;
3950 memset (&p_sig->info, 0, sizeof (siginfo_t));
3951
3952 /* If this is the same signal we were previously stopped by,
3953 make sure to queue its siginfo. We can ignore the return
3954 value of ptrace; if it fails, we'll skip
3955 PTRACE_SETSIGINFO. */
3956 if (WIFSTOPPED (lwp->last_status)
3957 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3958 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3959 &p_sig->info);
3960
3961 lwp->pending_signals = p_sig;
3962 }
3963 }
3964
3965 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3966 lwp->resume = NULL;
3967 return 0;
3968 }
3969
3970 static void
3971 linux_resume (struct thread_resume *resume_info, size_t n)
3972 {
3973 struct thread_resume_array array = { resume_info, n };
3974 struct thread_info *need_step_over = NULL;
3975 int any_pending;
3976 int leave_all_stopped;
3977
3978 if (debug_threads)
3979 {
3980 debug_enter ();
3981 debug_printf ("linux_resume:\n");
3982 }
3983
3984 find_inferior (&all_threads, linux_set_resume_request, &array);
3985
3986 /* If there is a thread which would otherwise be resumed, which has
3987 a pending status, then don't resume any threads - we can just
3988 report the pending status. Make sure to queue any signals that
3989 would otherwise be sent. In non-stop mode, we'll apply this
3990 logic to each thread individually. We consume all pending events
3991 before considering to start a step-over (in all-stop). */
3992 any_pending = 0;
3993 if (!non_stop)
3994 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3995
3996 /* If there is a thread which would otherwise be resumed, which is
3997 stopped at a breakpoint that needs stepping over, then don't
3998 resume any threads - have it step over the breakpoint with all
3999 other threads stopped, then resume all threads again. Make sure
4000 to queue any signals that would otherwise be delivered or
4001 queued. */
4002 if (!any_pending && supports_breakpoints ())
4003 need_step_over
4004 = (struct thread_info *) find_inferior (&all_threads,
4005 need_step_over_p, NULL);
4006
4007 leave_all_stopped = (need_step_over != NULL || any_pending);
4008
4009 if (debug_threads)
4010 {
4011 if (need_step_over != NULL)
4012 debug_printf ("Not resuming all, need step over\n");
4013 else if (any_pending)
4014 debug_printf ("Not resuming, all-stop and found "
4015 "an LWP with pending status\n");
4016 else
4017 debug_printf ("Resuming, no pending status or step over needed\n");
4018 }
4019
4020 /* Even if we're leaving threads stopped, queue all signals we'd
4021 otherwise deliver. */
4022 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4023
4024 if (need_step_over)
4025 start_step_over (get_thread_lwp (need_step_over));
4026
4027 if (debug_threads)
4028 {
4029 debug_printf ("linux_resume done\n");
4030 debug_exit ();
4031 }
4032 }
4033
4034 /* This function is called once per thread. We check the thread's
4035 last resume request, which will tell us whether to resume, step, or
4036 leave the thread stopped. Any signal the client requested to be
4037 delivered has already been enqueued at this point.
4038
4039 If any thread that GDB wants running is stopped at an internal
4040 breakpoint that needs stepping over, we start a step-over operation
4041 on that particular thread, and leave all others stopped. */
4042
4043 static int
4044 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4045 {
4046 struct thread_info *thread = (struct thread_info *) entry;
4047 struct lwp_info *lwp = get_thread_lwp (thread);
4048 int step;
4049
4050 if (lwp == except)
4051 return 0;
4052
4053 if (debug_threads)
4054 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4055
4056 if (!lwp->stopped)
4057 {
4058 if (debug_threads)
4059 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4060 return 0;
4061 }
4062
4063 if (thread->last_resume_kind == resume_stop
4064 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4065 {
4066 if (debug_threads)
4067 debug_printf (" client wants LWP %ld to remain stopped\n",
4068 lwpid_of (thread));
4069 return 0;
4070 }
4071
4072 if (lwp->status_pending_p)
4073 {
4074 if (debug_threads)
4075 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4076 lwpid_of (thread));
4077 return 0;
4078 }
4079
4080 gdb_assert (lwp->suspended >= 0);
4081
4082 if (lwp->suspended)
4083 {
4084 if (debug_threads)
4085 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4086 return 0;
4087 }
4088
4089 if (thread->last_resume_kind == resume_stop
4090 && lwp->pending_signals_to_report == NULL
4091 && lwp->collecting_fast_tracepoint == 0)
4092 {
4093 /* We haven't reported this LWP as stopped yet (otherwise, the
4094 last_status.kind check above would catch it, and we wouldn't
4095 reach here). This LWP may have been momentarily paused by a
4096 stop_all_lwps call while handling, for example, another LWP's
4097 step-over. In that case, the pending expected SIGSTOP signal
4098 that was queued at vCont;t handling time will have already
4099 been consumed by wait_for_sigstop, and so we need to requeue
4100 another one here. Note that if the LWP already has a SIGSTOP
4101 pending, this is a no-op. */
4102
4103 if (debug_threads)
4104 debug_printf ("Client wants LWP %ld to stop. "
4105 "Making sure it has a SIGSTOP pending\n",
4106 lwpid_of (thread));
4107
4108 send_sigstop (lwp);
4109 }
4110
4111 step = thread->last_resume_kind == resume_step;
4112 linux_resume_one_lwp (lwp, step, 0, NULL);
4113 return 0;
4114 }
4115
4116 static int
4117 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4118 {
4119 struct thread_info *thread = (struct thread_info *) entry;
4120 struct lwp_info *lwp = get_thread_lwp (thread);
4121
4122 if (lwp == except)
4123 return 0;
4124
4125 lwp->suspended--;
4126 gdb_assert (lwp->suspended >= 0);
4127
4128 return proceed_one_lwp (entry, except);
4129 }
4130
4131 /* When we finish a step-over, set threads running again. If there's
4132 another thread that may need a step-over, now's the time to start
4133 it. Eventually, we'll move all threads past their breakpoints. */
4134
4135 static void
4136 proceed_all_lwps (void)
4137 {
4138 struct thread_info *need_step_over;
4139
4140 /* If there is a thread which would otherwise be resumed, which is
4141 stopped at a breakpoint that needs stepping over, then don't
4142 resume any threads - have it step over the breakpoint with all
4143 other threads stopped, then resume all threads again. */
4144
4145 if (supports_breakpoints ())
4146 {
4147 need_step_over
4148 = (struct thread_info *) find_inferior (&all_threads,
4149 need_step_over_p, NULL);
4150
4151 if (need_step_over != NULL)
4152 {
4153 if (debug_threads)
4154 debug_printf ("proceed_all_lwps: found "
4155 "thread %ld needing a step-over\n",
4156 lwpid_of (need_step_over));
4157
4158 start_step_over (get_thread_lwp (need_step_over));
4159 return;
4160 }
4161 }
4162
4163 if (debug_threads)
4164 debug_printf ("Proceeding, no step-over needed\n");
4165
4166 find_inferior (&all_threads, proceed_one_lwp, NULL);
4167 }
4168
4169 /* Stopped LWPs that the client wanted to be running, that don't have
4170 pending statuses, are set to run again, except for EXCEPT, if not
4171 NULL. This undoes a stop_all_lwps call. */
4172
4173 static void
4174 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4175 {
4176 if (debug_threads)
4177 {
4178 debug_enter ();
4179 if (except)
4180 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4181 lwpid_of (get_lwp_thread (except)));
4182 else
4183 debug_printf ("unstopping all lwps\n");
4184 }
4185
4186 if (unsuspend)
4187 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4188 else
4189 find_inferior (&all_threads, proceed_one_lwp, except);
4190
4191 if (debug_threads)
4192 {
4193 debug_printf ("unstop_all_lwps done\n");
4194 debug_exit ();
4195 }
4196 }
4197
4198
4199 #ifdef HAVE_LINUX_REGSETS
4200
4201 #define use_linux_regsets 1
4202
4203 /* Returns true if REGSET has been disabled. */
4204
4205 static int
4206 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4207 {
4208 return (info->disabled_regsets != NULL
4209 && info->disabled_regsets[regset - info->regsets]);
4210 }
4211
4212 /* Disable REGSET. */
4213
4214 static void
4215 disable_regset (struct regsets_info *info, struct regset_info *regset)
4216 {
4217 int dr_offset;
4218
4219 dr_offset = regset - info->regsets;
4220 if (info->disabled_regsets == NULL)
4221 info->disabled_regsets = xcalloc (1, info->num_regsets);
4222 info->disabled_regsets[dr_offset] = 1;
4223 }
4224
4225 static int
4226 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4227 struct regcache *regcache)
4228 {
4229 struct regset_info *regset;
4230 int saw_general_regs = 0;
4231 int pid;
4232 struct iovec iov;
4233
4234 pid = lwpid_of (current_thread);
4235 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4236 {
4237 void *buf, *data;
4238 int nt_type, res;
4239
4240 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4241 continue;
4242
4243 buf = xmalloc (regset->size);
4244
4245 nt_type = regset->nt_type;
4246 if (nt_type)
4247 {
4248 iov.iov_base = buf;
4249 iov.iov_len = regset->size;
4250 data = (void *) &iov;
4251 }
4252 else
4253 data = buf;
4254
4255 #ifndef __sparc__
4256 res = ptrace (regset->get_request, pid,
4257 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4258 #else
4259 res = ptrace (regset->get_request, pid, data, nt_type);
4260 #endif
4261 if (res < 0)
4262 {
4263 if (errno == EIO)
4264 {
4265 /* If we get EIO on a regset, do not try it again for
4266 this process mode. */
4267 disable_regset (regsets_info, regset);
4268 }
4269 else if (errno == ENODATA)
4270 {
4271 /* ENODATA may be returned if the regset is currently
4272 not "active". This can happen in normal operation,
4273 so suppress the warning in this case. */
4274 }
4275 else
4276 {
4277 char s[256];
4278 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4279 pid);
4280 perror (s);
4281 }
4282 }
4283 else
4284 {
4285 if (regset->type == GENERAL_REGS)
4286 saw_general_regs = 1;
4287 regset->store_function (regcache, buf);
4288 }
4289 free (buf);
4290 }
4291 if (saw_general_regs)
4292 return 0;
4293 else
4294 return 1;
4295 }
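
/* Editorial note, not in the original source: when REGSET->NT_TYPE is
   nonzero (e.g. NT_PRSTATUS), the get/set requests above take the
   iovec-based regset form, roughly:

     struct iovec iov = { buf, regset->size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   which is exactly what the loop builds by hand in DATA.  */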
4296
4297 static int
4298 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4299 struct regcache *regcache)
4300 {
4301 struct regset_info *regset;
4302 int saw_general_regs = 0;
4303 int pid;
4304 struct iovec iov;
4305
4306 pid = lwpid_of (current_thread);
4307 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4308 {
4309 void *buf, *data;
4310 int nt_type, res;
4311
4312 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4313 || regset->fill_function == NULL)
4314 continue;
4315
4316 buf = xmalloc (regset->size);
4317
4318 /* First fill the buffer with the current register set contents,
4319 in case there are any items in the kernel's regset that are
4320 not in gdbserver's regcache. */
4321
4322 nt_type = regset->nt_type;
4323 if (nt_type)
4324 {
4325 iov.iov_base = buf;
4326 iov.iov_len = regset->size;
4327 data = (void *) &iov;
4328 }
4329 else
4330 data = buf;
4331
4332 #ifndef __sparc__
4333 res = ptrace (regset->get_request, pid,
4334 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4335 #else
4336 res = ptrace (regset->get_request, pid, data, nt_type);
4337 #endif
4338
4339 if (res == 0)
4340 {
4341 /* Then overlay our cached registers on that. */
4342 regset->fill_function (regcache, buf);
4343
4344 /* Only now do we write the register set. */
4345 #ifndef __sparc__
4346 res = ptrace (regset->set_request, pid,
4347 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4348 #else
4349 res = ptrace (regset->set_request, pid, data, nt_type);
4350 #endif
4351 }
4352
4353 if (res < 0)
4354 {
4355 if (errno == EIO)
4356 {
4357 /* If we get EIO on a regset, do not try it again for
4358 this process mode. */
4359 disable_regset (regsets_info, regset);
4360 }
4361 else if (errno == ESRCH)
4362 {
4363 /* At this point, ESRCH should mean the process is
4364 already gone, in which case we simply ignore attempts
4365 to change its registers. See also the related
4366 comment in linux_resume_one_lwp. */
4367 free (buf);
4368 return 0;
4369 }
4370 else
4371 {
4372 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4373 }
4374 }
4375 else if (regset->type == GENERAL_REGS)
4376 saw_general_regs = 1;
4377 free (buf);
4378 }
4379 if (saw_general_regs)
4380 return 0;
4381 else
4382 return 1;
4383 }
4384
4385 #else /* !HAVE_LINUX_REGSETS */
4386
4387 #define use_linux_regsets 0
4388 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4389 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4390
4391 #endif
4392
4393 /* Return 1 if register REGNO is supported by one of the regset ptrace
4394 calls or 0 if it has to be transferred individually. */
4395
4396 static int
4397 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4398 {
4399 unsigned char mask = 1 << (regno % 8);
4400 size_t index = regno / 8;
4401
4402 return (use_linux_regsets
4403 && (regs_info->regset_bitmap == NULL
4404 || (regs_info->regset_bitmap[index] & mask) != 0));
4405 }
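
/* Editorial example, not in the original source: for REGNO == 13 the
   check above inspects regset_bitmap[1] under mask 0x20 (1 << 5),
   i.e. bit 13 of the bitmap, counted LSB-first within each byte.  */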
4406
4407 #ifdef HAVE_LINUX_USRREGS
4408
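/* Editorial note: register_addr returns the ptrace USER-area offset
   recorded for REGNUM in USRREGS->regmap, erroring out on an
   out-of-range REGNUM.  */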
4409 int
4410 register_addr (const struct usrregs_info *usrregs, int regnum)
4411 {
4412 int addr;
4413
4414 if (regnum < 0 || regnum >= usrregs->num_regs)
4415 error ("Invalid register number %d.", regnum);
4416
4417 addr = usrregs->regmap[regnum];
4418
4419 return addr;
4420 }
4421
4422 /* Fetch one register. */
4423 static void
4424 fetch_register (const struct usrregs_info *usrregs,
4425 struct regcache *regcache, int regno)
4426 {
4427 CORE_ADDR regaddr;
4428 int i, size;
4429 char *buf;
4430 int pid;
4431
4432 if (regno >= usrregs->num_regs)
4433 return;
4434 if ((*the_low_target.cannot_fetch_register) (regno))
4435 return;
4436
4437 regaddr = register_addr (usrregs, regno);
4438 if (regaddr == -1)
4439 return;
4440
4441 size = ((register_size (regcache->tdesc, regno)
4442 + sizeof (PTRACE_XFER_TYPE) - 1)
4443 & -sizeof (PTRACE_XFER_TYPE));
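  /* Editorial note: SIZE is the register size rounded up to a whole
     number of ptrace transfer words; e.g. a 10-byte register with an
     8-byte PTRACE_XFER_TYPE yields SIZE == 16.  */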
4444 buf = alloca (size);
4445
4446 pid = lwpid_of (current_thread);
4447 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4448 {
4449 errno = 0;
4450 *(PTRACE_XFER_TYPE *) (buf + i) =
4451 ptrace (PTRACE_PEEKUSER, pid,
4452 /* Coerce to a uintptr_t first to avoid potential gcc warning
4453 about coercing an 8 byte integer to a 4 byte pointer. */
4454 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4455 regaddr += sizeof (PTRACE_XFER_TYPE);
4456 if (errno != 0)
4457 error ("reading register %d: %s", regno, strerror (errno));
4458 }
4459
4460 if (the_low_target.supply_ptrace_register)
4461 the_low_target.supply_ptrace_register (regcache, regno, buf);
4462 else
4463 supply_register (regcache, regno, buf);
4464 }
4465
4466 /* Store one register. */
4467 static void
4468 store_register (const struct usrregs_info *usrregs,
4469 struct regcache *regcache, int regno)
4470 {
4471 CORE_ADDR regaddr;
4472 int i, size;
4473 char *buf;
4474 int pid;
4475
4476 if (regno >= usrregs->num_regs)
4477 return;
4478 if ((*the_low_target.cannot_store_register) (regno))
4479 return;
4480
4481 regaddr = register_addr (usrregs, regno);
4482 if (regaddr == -1)
4483 return;
4484
4485 size = ((register_size (regcache->tdesc, regno)
4486 + sizeof (PTRACE_XFER_TYPE) - 1)
4487 & -sizeof (PTRACE_XFER_TYPE));
4488 buf = alloca (size);
4489 memset (buf, 0, size);
4490
4491 if (the_low_target.collect_ptrace_register)
4492 the_low_target.collect_ptrace_register (regcache, regno, buf);
4493 else
4494 collect_register (regcache, regno, buf);
4495
4496 pid = lwpid_of (current_thread);
4497 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4498 {
4499 errno = 0;
4500 ptrace (PTRACE_POKEUSER, pid,
4501 /* Coerce to a uintptr_t first to avoid potential gcc warning
4502 about coercing an 8 byte integer to a 4 byte pointer. */
4503 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4504 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4505 if (errno != 0)
4506 {
4507 /* At this point, ESRCH should mean the process is
4508 already gone, in which case we simply ignore attempts
4509 to change its registers. See also the related
4510 comment in linux_resume_one_lwp. */
4511 if (errno == ESRCH)
4512 return;
4513
4514 if ((*the_low_target.cannot_store_register) (regno) == 0)
4515 error ("writing register %d: %s", regno, strerror (errno));
4516 }
4517 regaddr += sizeof (PTRACE_XFER_TYPE);
4518 }
4519 }
4520
4521 /* Fetch all registers, or just one, from the child process.
4522 If REGNO is -1, do this for all registers, skipping any that are
4523 assumed to have been retrieved by regsets_fetch_inferior_registers,
4524 unless ALL is non-zero.
4525 Otherwise, REGNO specifies which register (so we can save time). */
4526 static void
4527 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4528 struct regcache *regcache, int regno, int all)
4529 {
4530 struct usrregs_info *usr = regs_info->usrregs;
4531
4532 if (regno == -1)
4533 {
4534 for (regno = 0; regno < usr->num_regs; regno++)
4535 if (all || !linux_register_in_regsets (regs_info, regno))
4536 fetch_register (usr, regcache, regno);
4537 }
4538 else
4539 fetch_register (usr, regcache, regno);
4540 }
4541
4542 /* Store our register values back into the inferior.
4543 If REGNO is -1, do this for all registers, skipping any that are
4544 assumed to have been saved by regsets_store_inferior_registers,
4545 unless ALL is non-zero.
4546 Otherwise, REGNO specifies which register (so we can save time). */
4547 static void
4548 usr_store_inferior_registers (const struct regs_info *regs_info,
4549 struct regcache *regcache, int regno, int all)
4550 {
4551 struct usrregs_info *usr = regs_info->usrregs;
4552
4553 if (regno == -1)
4554 {
4555 for (regno = 0; regno < usr->num_regs; regno++)
4556 if (all || !linux_register_in_regsets (regs_info, regno))
4557 store_register (usr, regcache, regno);
4558 }
4559 else
4560 store_register (usr, regcache, regno);
4561 }
4562
4563 #else /* !HAVE_LINUX_USRREGS */
4564
4565 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4566 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4567
4568 #endif
4569
4570
4571 void
4572 linux_fetch_registers (struct regcache *regcache, int regno)
4573 {
4574 int use_regsets;
4575 int all = 0;
4576 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4577
4578 if (regno == -1)
4579 {
4580 if (the_low_target.fetch_register != NULL
4581 && regs_info->usrregs != NULL)
4582 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4583 (*the_low_target.fetch_register) (regcache, regno);
4584
4585 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4586 if (regs_info->usrregs != NULL)
4587 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4588 }
4589 else
4590 {
4591 if (the_low_target.fetch_register != NULL
4592 && (*the_low_target.fetch_register) (regcache, regno))
4593 return;
4594
4595 use_regsets = linux_register_in_regsets (regs_info, regno);
4596 if (use_regsets)
4597 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4598 regcache);
4599 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4600 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4601 }
4602 }
4603
4604 void
4605 linux_store_registers (struct regcache *regcache, int regno)
4606 {
4607 int use_regsets;
4608 int all = 0;
4609 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4610
4611 if (regno == -1)
4612 {
4613 all = regsets_store_inferior_registers (regs_info->regsets_info,
4614 regcache);
4615 if (regs_info->usrregs != NULL)
4616 usr_store_inferior_registers (regs_info, regcache, regno, all);
4617 }
4618 else
4619 {
4620 use_regsets = linux_register_in_regsets (regs_info, regno);
4621 if (use_regsets)
4622 all = regsets_store_inferior_registers (regs_info->regsets_info,
4623 regcache);
4624 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4625 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4626 }
4627 }
4628
4629
4630 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4631 to debugger memory starting at MYADDR. */
4632
4633 static int
4634 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4635 {
4636 int pid = lwpid_of (current_thread);
4637 register PTRACE_XFER_TYPE *buffer;
4638 register CORE_ADDR addr;
4639 register int count;
4640 char filename[64];
4641 register int i;
4642 int ret;
4643 int fd;
4644
4645 /* Try using /proc. Don't bother for one word. */
4646 if (len >= 3 * sizeof (long))
4647 {
4648 int bytes;
4649
4650 /* We could keep this file open and cache it - possibly one per
4651 thread. That requires some juggling, but is even faster. */
4652 sprintf (filename, "/proc/%d/mem", pid);
4653 fd = open (filename, O_RDONLY | O_LARGEFILE);
4654 if (fd == -1)
4655 goto no_proc;
4656
4657 /* If pread64 is available, use it. It's faster if the kernel
4658 supports it (only one syscall), and it's 64-bit safe even on
4659 32-bit platforms (for instance, SPARC debugging a SPARC64
4660 application). */
4661 #ifdef HAVE_PREAD64
4662 bytes = pread64 (fd, myaddr, len, memaddr);
4663 #else
4664 bytes = -1;
4665 if (lseek (fd, memaddr, SEEK_SET) != -1)
4666 bytes = read (fd, myaddr, len);
4667 #endif
4668
4669 close (fd);
4670 if (bytes == len)
4671 return 0;
4672
4673 /* Some data was read; we'll try to get the rest with ptrace. */
4674 if (bytes > 0)
4675 {
4676 memaddr += bytes;
4677 myaddr += bytes;
4678 len -= bytes;
4679 }
4680 }
4681
4682 no_proc:
4683 /* Round starting address down to longword boundary. */
4684 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4685 /* Round ending address up; get number of longwords that makes. */
4686 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4687 / sizeof (PTRACE_XFER_TYPE));
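  /* Editorial example: MEMADDR == 0x1003 and LEN == 10 with an 8-byte
     PTRACE_XFER_TYPE give ADDR == 0x1000 and COUNT == 2, covering the
     two words at 0x1000 and 0x1008.  */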
4688 /* Allocate buffer of that many longwords. */
4689 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4690
4691 /* Read all the longwords */
4692 errno = 0;
4693 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4694 {
4695 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4696 about coercing an 8 byte integer to a 4 byte pointer. */
4697 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4698 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4699 (PTRACE_TYPE_ARG4) 0);
4700 if (errno)
4701 break;
4702 }
4703 ret = errno;
4704
4705 /* Copy appropriate bytes out of the buffer. */
4706 if (i > 0)
4707 {
4708 i *= sizeof (PTRACE_XFER_TYPE);
4709 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4710 memcpy (myaddr,
4711 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4712 i < len ? i : len);
4713 }
4714
4715 return ret;
4716 }
4717
4718 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4719 memory at MEMADDR. On failure (cannot write to the inferior)
4720 returns the value of errno. Always succeeds if LEN is zero. */
4721
4722 static int
4723 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4724 {
4725 register int i;
4726 /* Round starting address down to longword boundary. */
4727 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4728 /* Round ending address up; get number of longwords that makes. */
4729 register int count
4730 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4731 / sizeof (PTRACE_XFER_TYPE);
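  /* Editorial example: MEMADDR == 0x2006 and LEN == 4 with an 8-byte
     PTRACE_XFER_TYPE span the two words at 0x2000 and 0x2008; both are
     read back first so that only the four target bytes change.  */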
4732
4733 /* Allocate buffer of that many longwords. */
4734 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4735 alloca (count * sizeof (PTRACE_XFER_TYPE));
4736
4737 int pid = lwpid_of (current_thread);
4738
4739 if (len == 0)
4740 {
4741 /* Zero length write always succeeds. */
4742 return 0;
4743 }
4744
4745 if (debug_threads)
4746 {
4747 /* Dump up to four bytes. */
4748 unsigned int val = * (unsigned int *) myaddr;
4749 if (len == 1)
4750 val = val & 0xff;
4751 else if (len == 2)
4752 val = val & 0xffff;
4753 else if (len == 3)
4754 val = val & 0xffffff;
4755 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4756 val, (long)memaddr);
4757 }
4758
4759 /* Fill start and end extra bytes of buffer with existing memory data. */
4760
4761 errno = 0;
4762 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4763 about coercing an 8 byte integer to a 4 byte pointer. */
4764 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4765 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4766 (PTRACE_TYPE_ARG4) 0);
4767 if (errno)
4768 return errno;
4769
4770 if (count > 1)
4771 {
4772 errno = 0;
4773 buffer[count - 1]
4774 = ptrace (PTRACE_PEEKTEXT, pid,
4775 /* Coerce to a uintptr_t first to avoid potential gcc warning
4776 about coercing an 8 byte integer to a 4 byte pointer. */
4777 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4778 * sizeof (PTRACE_XFER_TYPE)),
4779 (PTRACE_TYPE_ARG4) 0);
4780 if (errno)
4781 return errno;
4782 }
4783
4784 /* Copy data to be written over corresponding part of buffer. */
4785
4786 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4787 myaddr, len);
4788
4789 /* Write the entire buffer. */
4790
4791 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4792 {
4793 errno = 0;
4794 ptrace (PTRACE_POKETEXT, pid,
4795 /* Coerce to a uintptr_t first to avoid potential gcc warning
4796 about coercing an 8 byte integer to a 4 byte pointer. */
4797 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4798 (PTRACE_TYPE_ARG4) buffer[i]);
4799 if (errno)
4800 return errno;
4801 }
4802
4803 return 0;
4804 }
4805
4806 static void
4807 linux_look_up_symbols (void)
4808 {
4809 #ifdef USE_THREAD_DB
4810 struct process_info *proc = current_process ();
4811
4812 if (proc->private->thread_db != NULL)
4813 return;
4814
4815 /* If the kernel supports tracing clones, then we don't need to
4816 use the magic thread event breakpoint to learn about
4817 threads. */
4818 thread_db_init (!linux_supports_traceclone ());
4819 #endif
4820 }
4821
4822 static void
4823 linux_request_interrupt (void)
4824 {
4825 extern unsigned long signal_pid;
4826
4827 /* Send a SIGINT to the process group. This acts just like the user
4828 typed a ^C on the controlling terminal. */
4829 kill (-signal_pid, SIGINT);
4830 }
4831
4832 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4833 to debugger memory starting at MYADDR. */
4834
4835 static int
4836 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4837 {
4838 char filename[PATH_MAX];
4839 int fd, n;
4840 int pid = lwpid_of (current_thread);
4841
4842 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4843
4844 fd = open (filename, O_RDONLY);
4845 if (fd < 0)
4846 return -1;
4847
4848 if (offset != (CORE_ADDR) 0
4849 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4850 n = -1;
4851 else
4852 n = read (fd, myaddr, len);
4853
4854 close (fd);
4855
4856 return n;
4857 }
4858
4859 /* These breakpoint- and watchpoint-related wrapper functions simply
4860 pass on the function call if the target has registered a
4861 corresponding function. */
4862
4863 static int
4864 linux_supports_z_point_type (char z_type)
4865 {
4866 return (the_low_target.supports_z_point_type != NULL
4867 && the_low_target.supports_z_point_type (z_type));
4868 }
4869
4870 static int
4871 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4872 int size, struct raw_breakpoint *bp)
4873 {
4874 if (the_low_target.insert_point != NULL)
4875 return the_low_target.insert_point (type, addr, size, bp);
4876 else
4877 /* Unsupported (see target.h). */
4878 return 1;
4879 }
4880
4881 static int
4882 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4883 int size, struct raw_breakpoint *bp)
4884 {
4885 if (the_low_target.remove_point != NULL)
4886 return the_low_target.remove_point (type, addr, size, bp);
4887 else
4888 /* Unsupported (see target.h). */
4889 return 1;
4890 }
4891
4892 static int
4893 linux_stopped_by_watchpoint (void)
4894 {
4895 struct lwp_info *lwp = get_thread_lwp (current_thread);
4896
4897 return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
4898 }
4899
4900 static CORE_ADDR
4901 linux_stopped_data_address (void)
4902 {
4903 struct lwp_info *lwp = get_thread_lwp (current_thread);
4904
4905 return lwp->stopped_data_address;
4906 }
4907
4908 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4909 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4910 && defined(PT_TEXT_END_ADDR)
4911
4912 /* This is only used for targets that define PT_TEXT_ADDR,
4913 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4914 the target has different ways of acquiring this information, like
4915 loadmaps. */
4916
4917 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4918 to tell gdb about. */
4919
4920 static int
4921 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4922 {
4923 unsigned long text, text_end, data;
4924 int pid = lwpid_of (get_thread_lwp (current_thread));
4925
4926 errno = 0;
4927
4928 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4929 (PTRACE_TYPE_ARG4) 0);
4930 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4931 (PTRACE_TYPE_ARG4) 0);
4932 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4933 (PTRACE_TYPE_ARG4) 0);
4934
4935 if (errno == 0)
4936 {
4937 /* Both text and data offsets produced at compile-time (and so
4938 used by gdb) are relative to the beginning of the program,
4939 with the data segment immediately following the text segment.
4940 However, the actual runtime layout in memory may put the data
4941 somewhere else, so when we send gdb a data base-address, we
4942 use the real data base address and subtract the compile-time
4943 data base-address from it (which is just the length of the
4944 text segment). BSS immediately follows data in both
4945 cases. */
4946 *text_p = text;
4947 *data_p = data - (text_end - text);
4948
4949 return 1;
4950 }
4951 return 0;
4952 }
4953 #endif
4954
4955 static int
4956 linux_qxfer_osdata (const char *annex,
4957 unsigned char *readbuf, unsigned const char *writebuf,
4958 CORE_ADDR offset, int len)
4959 {
4960 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4961 }
4962
4963 /* Convert a native/host siginfo object into/from the siginfo in the
4964 layout of the inferior's architecture. */
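
/* Editorial note: DIRECTION == 1 converts INF_SIGINFO (inferior layout)
   into SIGINFO (native layout), and DIRECTION == 0 converts the other
   way, as the memcpy fallback below shows.  */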
4965
4966 static void
4967 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4968 {
4969 int done = 0;
4970
4971 if (the_low_target.siginfo_fixup != NULL)
4972 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4973
4974 /* If there was no callback, or the callback didn't do anything,
4975 then just do a straight memcpy. */
4976 if (!done)
4977 {
4978 if (direction == 1)
4979 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4980 else
4981 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4982 }
4983 }
4984
4985 static int
4986 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4987 unsigned const char *writebuf, CORE_ADDR offset, int len)
4988 {
4989 int pid;
4990 siginfo_t siginfo;
4991 char inf_siginfo[sizeof (siginfo_t)];
4992
4993 if (current_thread == NULL)
4994 return -1;
4995
4996 pid = lwpid_of (current_thread);
4997
4998 if (debug_threads)
4999 debug_printf ("%s siginfo for lwp %d.\n",
5000 readbuf != NULL ? "Reading" : "Writing",
5001 pid);
5002
5003 if (offset >= sizeof (siginfo))
5004 return -1;
5005
5006 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5007 return -1;
5008
5009 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5010 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5011 inferior with a 64-bit GDBSERVER should look the same as debugging it
5012 with a 32-bit GDBSERVER, we need to convert it. */
5013 siginfo_fixup (&siginfo, inf_siginfo, 0);
5014
5015 if (offset + len > sizeof (siginfo))
5016 len = sizeof (siginfo) - offset;
5017
5018 if (readbuf != NULL)
5019 memcpy (readbuf, inf_siginfo + offset, len);
5020 else
5021 {
5022 memcpy (inf_siginfo + offset, writebuf, len);
5023
5024 /* Convert back to ptrace layout before flushing it out. */
5025 siginfo_fixup (&siginfo, inf_siginfo, 1);
5026
5027 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5028 return -1;
5029 }
5030
5031 return len;
5032 }
5033
5034 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5035 it lets us notice when children change state; and it acts as the
5036 handler for the sigsuspend in my_waitpid. */
5037
5038 static void
5039 sigchld_handler (int signo)
5040 {
5041 int old_errno = errno;
5042
5043 if (debug_threads)
5044 {
5045 do
5046 {
5047 /* fprintf is not async-signal-safe, so call write
5048 directly. */
5049 if (write (2, "sigchld_handler\n",
5050 sizeof ("sigchld_handler\n") - 1) < 0)
5051 break; /* just ignore */
5052 } while (0);
5053 }
5054
5055 if (target_is_async_p ())
5056 async_file_mark (); /* trigger a linux_wait */
5057
5058 errno = old_errno;
5059 }
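
/* Editorial note: async_file_mark, defined earlier in this file, makes
   the read end of the event pipe readable (the classic self-pipe
   pattern), so the SIGCHLD turns into an event-loop wakeup serviced by
   handle_target_event.  */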
5060
5061 static int
5062 linux_supports_non_stop (void)
5063 {
5064 return 1;
5065 }
5066
5067 static int
5068 linux_async (int enable)
5069 {
5070 int previous = target_is_async_p ();
5071
5072 if (debug_threads)
5073 debug_printf ("linux_async (%d), previous=%d\n",
5074 enable, previous);
5075
5076 if (previous != enable)
5077 {
5078 sigset_t mask;
5079 sigemptyset (&mask);
5080 sigaddset (&mask, SIGCHLD);
5081
5082 sigprocmask (SIG_BLOCK, &mask, NULL);
5083
5084 if (enable)
5085 {
5086 if (pipe (linux_event_pipe) == -1)
5087 {
5088 linux_event_pipe[0] = -1;
5089 linux_event_pipe[1] = -1;
5090 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5091
5092 warning ("creating event pipe failed.");
5093 return previous;
5094 }
5095
5096 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5097 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5098
5099 /* Register the event loop handler. */
5100 add_file_handler (linux_event_pipe[0],
5101 handle_target_event, NULL);
5102
5103 /* Always trigger a linux_wait. */
5104 async_file_mark ();
5105 }
5106 else
5107 {
5108 delete_file_handler (linux_event_pipe[0]);
5109
5110 close (linux_event_pipe[0]);
5111 close (linux_event_pipe[1]);
5112 linux_event_pipe[0] = -1;
5113 linux_event_pipe[1] = -1;
5114 }
5115
5116 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5117 }
5118
5119 return previous;
5120 }
5121
5122 static int
5123 linux_start_non_stop (int nonstop)
5124 {
5125 /* Register or unregister from event-loop accordingly. */
5126 linux_async (nonstop);
5127
5128 if (target_is_async_p () != (nonstop != 0))
5129 return -1;
5130
5131 return 0;
5132 }
5133
5134 static int
5135 linux_supports_multi_process (void)
5136 {
5137 return 1;
5138 }
5139
5140 static int
5141 linux_supports_disable_randomization (void)
5142 {
5143 #ifdef HAVE_PERSONALITY
5144 return 1;
5145 #else
5146 return 0;
5147 #endif
5148 }
5149
5150 static int
5151 linux_supports_agent (void)
5152 {
5153 return 1;
5154 }
5155
5156 static int
5157 linux_supports_range_stepping (void)
5158 {
5159 if (*the_low_target.supports_range_stepping == NULL)
5160 return 0;
5161
5162 return (*the_low_target.supports_range_stepping) ();
5163 }
5164
5165 /* Enumerate spufs IDs for process PID. */
5166 static int
5167 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5168 {
5169 int pos = 0;
5170 int written = 0;
5171 char path[128];
5172 DIR *dir;
5173 struct dirent *entry;
5174
5175 sprintf (path, "/proc/%ld/fd", pid);
5176 dir = opendir (path);
5177 if (!dir)
5178 return -1;
5179
5180 rewinddir (dir);
5181 while ((entry = readdir (dir)) != NULL)
5182 {
5183 struct stat st;
5184 struct statfs stfs;
5185 int fd;
5186
5187 fd = atoi (entry->d_name);
5188 if (!fd)
5189 continue;
5190
5191 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5192 if (stat (path, &st) != 0)
5193 continue;
5194 if (!S_ISDIR (st.st_mode))
5195 continue;
5196
5197 if (statfs (path, &stfs) != 0)
5198 continue;
5199 if (stfs.f_type != SPUFS_MAGIC)
5200 continue;
5201
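	  /* Editorial example: with OFFSET == 4 and LEN == 8, only the
	     second and third ids found are stored into BUF.  */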
5202 if (pos >= offset && pos + 4 <= offset + len)
5203 {
5204 *(unsigned int *)(buf + pos - offset) = fd;
5205 written += 4;
5206 }
5207 pos += 4;
5208 }
5209
5210 closedir (dir);
5211 return written;
5212 }
5213
5214 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5215 object type, using the /proc file system. */
5216 static int
5217 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5218 unsigned const char *writebuf,
5219 CORE_ADDR offset, int len)
5220 {
5221 long pid = lwpid_of (current_thread);
5222 char buf[128];
5223 int fd = 0;
5224 int ret = 0;
5225
5226 if (!writebuf && !readbuf)
5227 return -1;
5228
5229 if (!*annex)
5230 {
5231 if (!readbuf)
5232 return -1;
5233 else
5234 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5235 }
5236
5237 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5238 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5239 if (fd <= 0)
5240 return -1;
5241
5242 if (offset != 0
5243 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5244 {
5245 close (fd);
5246 return 0;
5247 }
5248
5249 if (writebuf)
5250 ret = write (fd, writebuf, (size_t) len);
5251 else
5252 ret = read (fd, readbuf, (size_t) len);
5253
5254 close (fd);
5255 return ret;
5256 }
5257
5258 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5259 struct target_loadseg
5260 {
5261 /* Core address to which the segment is mapped. */
5262 Elf32_Addr addr;
5263 /* VMA recorded in the program header. */
5264 Elf32_Addr p_vaddr;
5265 /* Size of this segment in memory. */
5266 Elf32_Word p_memsz;
5267 };
5268
5269 # if defined PT_GETDSBT
5270 struct target_loadmap
5271 {
5272 /* Protocol version number, must be zero. */
5273 Elf32_Word version;
5274 /* Pointer to the DSBT table, its size, and the DSBT index. */
5275 unsigned *dsbt_table;
5276 unsigned dsbt_size, dsbt_index;
5277 /* Number of segments in this map. */
5278 Elf32_Word nsegs;
5279 /* The actual memory map. */
5280 struct target_loadseg segs[/*nsegs*/];
5281 };
5282 # define LINUX_LOADMAP PT_GETDSBT
5283 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5284 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5285 # else
5286 struct target_loadmap
5287 {
5288 /* Protocol version number, must be zero. */
5289 Elf32_Half version;
5290 /* Number of segments in this map. */
5291 Elf32_Half nsegs;
5292 /* The actual memory map. */
5293 struct target_loadseg segs[/*nsegs*/];
5294 };
5295 # define LINUX_LOADMAP PTRACE_GETFDPIC
5296 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5297 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5298 # endif
5299
5300 static int
5301 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5302 unsigned char *myaddr, unsigned int len)
5303 {
5304 int pid = lwpid_of (current_thread);
5305 int addr = -1;
5306 struct target_loadmap *data = NULL;
5307 unsigned int actual_length, copy_length;
5308
5309 if (strcmp (annex, "exec") == 0)
5310 addr = (int) LINUX_LOADMAP_EXEC;
5311 else if (strcmp (annex, "interp") == 0)
5312 addr = (int) LINUX_LOADMAP_INTERP;
5313 else
5314 return -1;
5315
5316 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5317 return -1;
5318
5319 if (data == NULL)
5320 return -1;
5321
5322 actual_length = sizeof (struct target_loadmap)
5323 + sizeof (struct target_loadseg) * data->nsegs;
5324
5325 if (offset < 0 || offset > actual_length)
5326 return -1;
5327
5328 copy_length = actual_length - offset < len ? actual_length - offset : len;
5329 memcpy (myaddr, (char *) data + offset, copy_length);
5330 return copy_length;
5331 }
5332 #else
5333 # define linux_read_loadmap NULL
5334 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5335
5336 static void
5337 linux_process_qsupported (const char *query)
5338 {
5339 if (the_low_target.process_qsupported != NULL)
5340 the_low_target.process_qsupported (query);
5341 }
5342
5343 static int
5344 linux_supports_tracepoints (void)
5345 {
5346 if (*the_low_target.supports_tracepoints == NULL)
5347 return 0;
5348
5349 return (*the_low_target.supports_tracepoints) ();
5350 }
5351
5352 static CORE_ADDR
5353 linux_read_pc (struct regcache *regcache)
5354 {
5355 if (the_low_target.get_pc == NULL)
5356 return 0;
5357
5358 return (*the_low_target.get_pc) (regcache);
5359 }
5360
5361 static void
5362 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5363 {
5364 gdb_assert (the_low_target.set_pc != NULL);
5365
5366 (*the_low_target.set_pc) (regcache, pc);
5367 }
5368
5369 static int
5370 linux_thread_stopped (struct thread_info *thread)
5371 {
5372 return get_thread_lwp (thread)->stopped;
5373 }
5374
5375 /* This exposes stop-all-threads functionality to other modules. */
5376
5377 static void
5378 linux_pause_all (int freeze)
5379 {
5380 stop_all_lwps (freeze, NULL);
5381 }
5382
5383 /* This exposes unstop-all-threads functionality to other gdbserver
5384 modules. */
5385
5386 static void
5387 linux_unpause_all (int unfreeze)
5388 {
5389 unstop_all_lwps (unfreeze, NULL);
5390 }
5391
5392 static int
5393 linux_prepare_to_access_memory (void)
5394 {
5395 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5396 running LWP. */
5397 if (non_stop)
5398 linux_pause_all (1);
5399 return 0;
5400 }
5401
5402 static void
5403 linux_done_accessing_memory (void)
5404 {
5405 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5406 running LWP. */
5407 if (non_stop)
5408 linux_unpause_all (1);
5409 }
5410
5411 static int
5412 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5413 CORE_ADDR collector,
5414 CORE_ADDR lockaddr,
5415 ULONGEST orig_size,
5416 CORE_ADDR *jump_entry,
5417 CORE_ADDR *trampoline,
5418 ULONGEST *trampoline_size,
5419 unsigned char *jjump_pad_insn,
5420 ULONGEST *jjump_pad_insn_size,
5421 CORE_ADDR *adjusted_insn_addr,
5422 CORE_ADDR *adjusted_insn_addr_end,
5423 char *err)
5424 {
5425 return (*the_low_target.install_fast_tracepoint_jump_pad)
5426 (tpoint, tpaddr, collector, lockaddr, orig_size,
5427 jump_entry, trampoline, trampoline_size,
5428 jjump_pad_insn, jjump_pad_insn_size,
5429 adjusted_insn_addr, adjusted_insn_addr_end,
5430 err);
5431 }
5432
5433 static struct emit_ops *
5434 linux_emit_ops (void)
5435 {
5436 if (the_low_target.emit_ops != NULL)
5437 return (*the_low_target.emit_ops) ();
5438 else
5439 return NULL;
5440 }
5441
5442 static int
5443 linux_get_min_fast_tracepoint_insn_len (void)
5444 {
5445 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5446 }
5447
5448 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
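
/* Editorial note on the auxv records parsed below: each entry is a
   (type, value) pair; glibc's elf.h declares the 64-bit variant as

     typedef struct { uint64_t a_type;
                      union { uint64_t a_val; } a_un; } Elf64_auxv_t;

   so AT_PHDR and AT_PHNUM each arrive as one such pair.  */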
5449
5450 static int
5451 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5452 CORE_ADDR *phdr_memaddr, int *num_phdr)
5453 {
5454 char filename[PATH_MAX];
5455 int fd;
5456 const int auxv_size = is_elf64
5457 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5458 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5459
5460 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5461
5462 fd = open (filename, O_RDONLY);
5463 if (fd < 0)
5464 return 1;
5465
5466 *phdr_memaddr = 0;
5467 *num_phdr = 0;
5468 while (read (fd, buf, auxv_size) == auxv_size
5469 && (*phdr_memaddr == 0 || *num_phdr == 0))
5470 {
5471 if (is_elf64)
5472 {
5473 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5474
5475 switch (aux->a_type)
5476 {
5477 case AT_PHDR:
5478 *phdr_memaddr = aux->a_un.a_val;
5479 break;
5480 case AT_PHNUM:
5481 *num_phdr = aux->a_un.a_val;
5482 break;
5483 }
5484 }
5485 else
5486 {
5487 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5488
5489 switch (aux->a_type)
5490 {
5491 case AT_PHDR:
5492 *phdr_memaddr = aux->a_un.a_val;
5493 break;
5494 case AT_PHNUM:
5495 *num_phdr = aux->a_un.a_val;
5496 break;
5497 }
5498 }
5499 }
5500
5501 close (fd);
5502
5503 if (*phdr_memaddr == 0 || *num_phdr == 0)
5504 {
5505 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5506 "phdr_memaddr = %ld, phdr_num = %d",
5507 (long) *phdr_memaddr, *num_phdr);
5508 return 2;
5509 }
5510
5511 return 0;
5512 }
5513
5514 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5515
5516 static CORE_ADDR
5517 get_dynamic (const int pid, const int is_elf64)
5518 {
5519 CORE_ADDR phdr_memaddr, relocation;
5520 int num_phdr, i;
5521 unsigned char *phdr_buf;
5522 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5523
5524 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5525 return 0;
5526
5527 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5528 phdr_buf = alloca (num_phdr * phdr_size);
5529
5530 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5531 return 0;
5532
5533 /* Compute relocation: it is expected to be 0 for "regular" executables,
5534 non-zero for PIE ones. */
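  /* Editorial example with illustrative addresses: if AT_PHDR yields
     PHDR_MEMADDR == 0x555555554040 while PT_PHDR records
     p_vaddr == 0x40, the load bias is 0x555555554000; that bias is
     added to PT_DYNAMIC's p_vaddr below.  */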
5535 relocation = -1;
5536 for (i = 0; relocation == -1 && i < num_phdr; i++)
5537 if (is_elf64)
5538 {
5539 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5540
5541 if (p->p_type == PT_PHDR)
5542 relocation = phdr_memaddr - p->p_vaddr;
5543 }
5544 else
5545 {
5546 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5547
5548 if (p->p_type == PT_PHDR)
5549 relocation = phdr_memaddr - p->p_vaddr;
5550 }
5551
5552 if (relocation == -1)
5553 {
5554 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5555 any real-world executable, including PIE executables, always has
5556 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5557 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5558 provides DT_DEBUG anyway (fpc binaries are statically linked).
5559
5560 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
5561
5562 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5563
5564 return 0;
5565 }
5566
5567 for (i = 0; i < num_phdr; i++)
5568 {
5569 if (is_elf64)
5570 {
5571 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5572
5573 if (p->p_type == PT_DYNAMIC)
5574 return p->p_vaddr + relocation;
5575 }
5576 else
5577 {
5578 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5579
5580 if (p->p_type == PT_DYNAMIC)
5581 return p->p_vaddr + relocation;
5582 }
5583 }
5584
5585 return 0;
5586 }
5587
5588 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5589 can be 0 if the inferior does not yet have the library list initialized.
5590 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5591 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5592
5593 static CORE_ADDR
5594 get_r_debug (const int pid, const int is_elf64)
5595 {
5596 CORE_ADDR dynamic_memaddr;
5597 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5598 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5599 CORE_ADDR map = -1;
5600
5601 dynamic_memaddr = get_dynamic (pid, is_elf64);
5602 if (dynamic_memaddr == 0)
5603 return map;
5604
5605 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5606 {
5607 if (is_elf64)
5608 {
5609 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5610 #ifdef DT_MIPS_RLD_MAP
5611 union
5612 {
5613 Elf64_Xword map;
5614 unsigned char buf[sizeof (Elf64_Xword)];
5615 }
5616 rld_map;
5617
5618 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5619 {
5620 if (linux_read_memory (dyn->d_un.d_val,
5621 rld_map.buf, sizeof (rld_map.buf)) == 0)
5622 return rld_map.map;
5623 else
5624 break;
5625 }
5626 #endif /* DT_MIPS_RLD_MAP */
5627
5628 if (dyn->d_tag == DT_DEBUG && map == -1)
5629 map = dyn->d_un.d_val;
5630
5631 if (dyn->d_tag == DT_NULL)
5632 break;
5633 }
      else
        {
          Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf32_Word map;
              unsigned char buf[sizeof (Elf32_Word)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif  /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }

      dynamic_memaddr += dyn_size;
    }

  return map;
}

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big- or little-endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  The inferior's endianness is assumed to be the
     same as the superior's.  */
  union
    {
      CORE_ADDR core_addr;
      unsigned int ui;
      unsigned char uc;
    } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
        *ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
        *ptr = addr.ui;
      else
        gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}

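/* Field offsets within the inferior's struct r_debug and struct
   link_map.  These differ between the 32-bit and 64-bit SVR4 layouts,
   so they are kept in a table rather than hard-coded.  */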
struct link_map_offsets
{
  /* Offset of r_debug.r_version.  */
  int r_version_offset;

  /* Offset of r_debug.r_map.  */
  int r_map_offset;

  /* Offset of l_addr field in struct link_map.  */
  int l_addr_offset;

  /* Offset of l_name field in struct link_map.  */
  int l_name_offset;

  /* Offset of l_ld field in struct link_map.  */
  int l_ld_offset;

  /* Offset of l_next field in struct link_map.  */
  int l_next_offset;

  /* Offset of l_prev field in struct link_map.  */
  int l_prev_offset;
};

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

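  /* The annex is a sequence of "name=value;" pairs: "start" gives the
     link_map entry to resume the walk from, and "prev" gives the
     l_prev value expected for that entry.  Unrecognized pairs are
     skipped.  */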
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
        break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
        addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
        addrp = &lm_prev;
      else
        {
          annex = strchr (sep, ';');
          if (annex == NULL)
            break;
          annex++;
          continue;
        }

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

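  /* No starting point was supplied in the annex, so find the head of
     the link_map list through r_debug.r_map.  */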
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
        priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  That will not change for this
         inferior, so do not retry.  Report it to GDB as E01; see GDB's
         solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
        return -1;

      if (priv->r_debug != 0)
        {
          if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                                 (unsigned char *) &r_version,
                                 sizeof (r_version)) != 0
              || r_version != 1)
            {
              warning ("unexpected r_debug version %d", r_version);
            }
          else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                                 &lm_addr, ptr_size) != 0)
            {
              warning ("unable to read r_map from 0x%lx",
                       (long) priv->r_debug + lmo->r_map_offset);
            }
        }
    }

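  /* Start the reply document.  The opening tag is deliberately left
     unterminated here: it is closed with '>' only when the first
     <library> element is about to be emitted, or with "/>" if the
     list turns out to be empty.  */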
  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
         && read_one_ptr (lm_addr + lmo->l_name_offset,
                          &l_name, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_addr_offset,
                          &l_addr, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_ld_offset,
                          &l_ld, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_prev_offset,
                          &l_prev, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_next_offset,
                          &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
        {
          warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                   (long) lm_prev, (long) l_prev);
          break;
        }

      /* Ignore the first entry even if it has a valid name, as it
         corresponds to the main executable.  The first entry should
         not be skipped if the dynamic loader was loaded late by a
         static executable (see the ignore_first parameter in
         solib-svr4.c), but in that case the main executable has no
         PT_DYNAMIC, and this function has already returned above
         because get_r_debug failed.  */
      if (lm_prev == 0)
        {
          sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
          p += strlen (p);
        }
      else
        {
          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                               "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
        }

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

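  /* Return only the slice of the document selected by OFFSET and LEN,
     as the qXfer partial-transfer protocol expects.  */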
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

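      /* Record the inferior's pointer size, in bits, taken from the
         size of register 0 in its target description.  */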
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
        buffer_grow_str0 (buffer, "E.Overflow.");
      else
        buffer_grow_str0 (buffer, "E.Generic Error.");

      btrace_data_fini (&btrace);
      return -1;
    }

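  /* Encode the trace in the XML form the qXfer:btrace:read reply
     expects (see the btrace.dtd reference emitted below).  */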
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      break;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
           VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
           i++)
        buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                           paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unknown Trace Format.");

      btrace_data_fini (&btrace);
      return -1;
    }

  btrace_data_fini (&btrace);
  return 0;
}
#endif /* HAVE_LINUX_BTRACE */

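/* The Linux target vector.  Each entry wires a generic gdbserver
   target operation to its Linux implementation, or is NULL where the
   operation is unsupported in this configuration.  */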
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__  /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
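  /* The regsets array is terminated by a sentinel entry with a
     negative size; count the real entries preceding it.  */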
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

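  /* Install the SIGCHLD handler with SA_RESTART so that interrupted
     system calls are restarted rather than failing with EINTR.  */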
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}