]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Step over exit with reinsert breakpoints
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
618f726f 2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
276d4552 49#include <inttypes.h>
957f3f49
DE
50#ifndef ELFMAG0
51/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55#include <elf.h>
56#endif
14d2069a 57#include "nat/linux-namespaces.h"
efcbbd14
UW
58
59#ifndef SPUFS_MAGIC
60#define SPUFS_MAGIC 0x23c9b64e
61#endif
da6d8c04 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
db0dfaa0
LM
74/* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77#if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80#if defined(__mcoldfire__)
81/* These are still undefined in 3.10 kernels. */
82#define PT_TEXT_ADDR 49*4
83#define PT_DATA_ADDR 50*4
84#define PT_TEXT_END_ADDR 51*4
85/* BFIN already defines these since at least 2.6.32 kernels. */
86#elif defined(BFIN)
87#define PT_TEXT_ADDR 220
88#define PT_TEXT_END_ADDR 224
89#define PT_DATA_ADDR 228
90/* These are still undefined in 3.10 kernels. */
91#elif defined(__TMS320C6X__)
92#define PT_TEXT_ADDR (0x10000*4)
93#define PT_DATA_ADDR (0x10004*4)
94#define PT_TEXT_END_ADDR (0x10008*4)
95#endif
96#endif
97
9accd112 98#ifdef HAVE_LINUX_BTRACE
125f8a3d 99# include "nat/linux-btrace.h"
734b0e4b 100# include "btrace-common.h"
9accd112
MM
101#endif
102
8365dcf5
TJB
103#ifndef HAVE_ELF32_AUXV_T
104/* Copied from glibc's elf.h. */
105typedef struct
106{
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We use to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115} Elf32_auxv_t;
116#endif
117
118#ifndef HAVE_ELF64_AUXV_T
119/* Copied from glibc's elf.h. */
120typedef struct
121{
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We use to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130} Elf64_auxv_t;
131#endif
132
ded48a5e
YQ
133/* Does the current host support PTRACE_GETREGSET? */
134int have_ptrace_getregset = -1;
135
cff068da
GB
136/* LWP accessors. */
137
138/* See nat/linux-nat.h. */
139
140ptid_t
141ptid_of_lwp (struct lwp_info *lwp)
142{
143 return ptid_of (get_lwp_thread (lwp));
144}
145
146/* See nat/linux-nat.h. */
147
4b134ca1
GB
148void
149lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151{
152 lwp->arch_private = info;
153}
154
155/* See nat/linux-nat.h. */
156
157struct arch_lwp_info *
158lwp_arch_private_info (struct lwp_info *lwp)
159{
160 return lwp->arch_private;
161}
162
163/* See nat/linux-nat.h. */
164
cff068da
GB
165int
166lwp_is_stopped (struct lwp_info *lwp)
167{
168 return lwp->stopped;
169}
170
171/* See nat/linux-nat.h. */
172
173enum target_stop_reason
174lwp_stop_reason (struct lwp_info *lwp)
175{
176 return lwp->stop_reason;
177}
178
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of unclaimed stopped children; NULL when empty.  */
struct simple_pid_list *stopped_pids;
195
196/* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199static void
200add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201{
8d749320 202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208}
209
210static int
211pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212{
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226}
24a09b5f 227
bde24c0a
PA
/* The kinds of all-threads stop operations that may be in progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
242
243/* FIXME make into a target method? */
24a09b5f 244int using_threads = 1;
24a09b5f 245
fa593d66
PA
246/* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248static int stabilizing_threads;
249
2acc282a 250static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 251 int step, int signal, siginfo_t *info);
2bd7c093 252static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
253static void stop_all_lwps (int suspend, struct lwp_info *except);
254static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
f50bf8e5 255static void unsuspend_all_lwps (struct lwp_info *except);
fa96cb38
PA
256static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
95954743 258static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 259static struct lwp_info *add_lwp (ptid_t ptid);
94585166 260static void linux_mourn (struct process_info *process);
c35fafde 261static int linux_stopped_by_watchpoint (void);
95954743 262static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 263static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 264static void proceed_all_lwps (void);
d50171e4 265static int finish_step_over (struct lwp_info *lwp);
d50171e4 266static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
267static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268static void complete_ongoing_step_over (void);
ece66d65 269static int linux_low_ptrace_options (int attached);
d50171e4 270
582511be
PA
271/* When the event-loop is doing a step-over, this points at the thread
272 being stepped. */
273ptid_t step_over_bkpt;
274
7d00775e 275/* True if the low target can hardware single-step. */
d50171e4
PA
276
277static int
278can_hardware_single_step (void)
279{
7d00775e
AT
280 if (the_low_target.supports_hardware_single_step != NULL)
281 return the_low_target.supports_hardware_single_step ();
282 else
283 return 0;
284}
285
286/* True if the low target can software single-step. Such targets
fa5308bd 287 implement the GET_NEXT_PCS callback. */
7d00775e
AT
288
289static int
290can_software_single_step (void)
291{
fa5308bd 292 return (the_low_target.get_next_pcs != NULL);
d50171e4
PA
293}
294
295/* True if the low target supports memory breakpoints. If so, we'll
296 have a GET_PC implementation. */
297
298static int
299supports_breakpoints (void)
300{
301 return (the_low_target.get_pc != NULL);
302}
0d62e5e8 303
fa593d66
PA
304/* Returns true if this target can support fast tracepoints. This
305 does not mean that the in-process agent has been loaded in the
306 inferior. */
307
308static int
309supports_fast_tracepoints (void)
310{
311 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
312}
313
c2d6af84
PA
314/* True if LWP is stopped in its stepping range. */
315
316static int
317lwp_in_step_range (struct lwp_info *lwp)
318{
319 CORE_ADDR pc = lwp->stop_pc;
320
321 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
322}
323
0d62e5e8
DJ
/* A signal queued for re-delivery to an LWP when it is next resumed.
   Entries form a singly linked stack through PREV.  */

struct pending_signals
{
  /* Signal number to deliver.  */
  int signal;
  /* Full siginfo to deliver along with the signal.  */
  siginfo_t info;
  /* Previously queued signal, or NULL.  */
  struct pending_signals *prev;
};
611cb4a5 330
bd99dc85
PA
331/* The read/write ends of the pipe registered as waitable file in the
332 event loop. */
333static int linux_event_pipe[2] = { -1, -1 };
334
335/* True if we're currently in async mode. */
336#define target_is_async_p() (linux_event_pipe[0] != -1)
337
02fc4de7 338static void send_sigstop (struct lwp_info *lwp);
fa96cb38 339static void wait_for_sigstop (void);
bd99dc85 340
d0722149
DE
/* Return 1 if HEADER is a 64-bit ELF header, 0 if it is ELF but not
   64-bit, and -1 if it is not an ELF header at all.  *MACHINE is set
   to the e_machine value for ELF headers, EM_NONE otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;
  int is_elf = (ident[EI_MAG0] == ELFMAG0
		&& ident[EI_MAG1] == ELFMAG1
		&& ident[EI_MAG2] == ELFMAG2
		&& ident[EI_MAG3] == ELFMAG3);

  if (!is_elf)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64 ? 1 : 0;
}
358
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      /* Too short to even hold an ELF header.  */
      close (fd);
      return 0;
    }

  close (fd);
  return elf_64_header_p (&header, machine);
}
382
be07f1a2
PA
383/* Accepts an integer PID; Returns true if the executable PID is
384 running is a 64-bit ELF file.. */
385
386int
214d508e 387linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 388{
d8d2a3ee 389 char file[PATH_MAX];
be07f1a2
PA
390
391 sprintf (file, "/proc/%d/exe", pid);
214d508e 392 return elf_64_file_p (file, machine);
be07f1a2
PA
393}
394
bd99dc85
PA
395static void
396delete_lwp (struct lwp_info *lwp)
397{
fa96cb38
PA
398 struct thread_info *thr = get_lwp_thread (lwp);
399
400 if (debug_threads)
401 debug_printf ("deleting %ld\n", lwpid_of (thr));
402
403 remove_thread (thr);
aa5ca48f 404 free (lwp->arch_private);
bd99dc85
PA
405 free (lwp);
406}
407
95954743
PA
408/* Add a process to the common process list, and set its private
409 data. */
410
411static struct process_info *
412linux_add_process (int pid, int attached)
413{
414 struct process_info *proc;
415
95954743 416 proc = add_process (pid, attached);
8d749320 417 proc->priv = XCNEW (struct process_info_private);
95954743 418
aa5ca48f 419 if (the_low_target.new_process != NULL)
fe978cb0 420 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 421
95954743
PA
422 return proc;
423}
424
582511be
PA
425static CORE_ADDR get_pc (struct lwp_info *lwp);
426
ece66d65 427/* Call the target arch_setup function on the current thread. */
94585166
DB
428
429static void
430linux_arch_setup (void)
431{
432 the_low_target.arch_setup ();
433}
434
435/* Call the target arch_setup function on THREAD. */
436
437static void
438linux_arch_setup_thread (struct thread_info *thread)
439{
440 struct thread_info *saved_thread;
441
442 saved_thread = current_thread;
443 current_thread = thread;
444
445 linux_arch_setup ();
446
447 current_thread = saved_thread;
448}
449
/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  Returns 0 when the
   event should be reported to GDB, 1 when it was consumed here.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
684
d50171e4
PA
685/* Return the PC as read from the regcache of LWP, without any
686 adjustment. */
687
688static CORE_ADDR
689get_pc (struct lwp_info *lwp)
690{
0bfdf32f 691 struct thread_info *saved_thread;
d50171e4
PA
692 struct regcache *regcache;
693 CORE_ADDR pc;
694
695 if (the_low_target.get_pc == NULL)
696 return 0;
697
0bfdf32f
GB
698 saved_thread = current_thread;
699 current_thread = get_lwp_thread (lwp);
d50171e4 700
0bfdf32f 701 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
702 pc = (*the_low_target.get_pc) (regcache);
703
704 if (debug_threads)
87ce2a04 705 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 706
0bfdf32f 707 current_thread = saved_thread;
d50171e4
PA
708 return pc;
709}
710
82075af2
JS
711/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
712 Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
713 return code. */
714
715static void
716get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
717{
718 struct thread_info *saved_thread;
719 struct regcache *regcache;
720
721 if (the_low_target.get_syscall_trapinfo == NULL)
722 {
723 /* If we cannot get the syscall trapinfo, report an unknown
724 system call number and -ENOSYS return value. */
725 *sysno = UNKNOWN_SYSCALL;
726 *sysret = -ENOSYS;
727 return;
728 }
729
730 saved_thread = current_thread;
731 current_thread = get_lwp_thread (lwp);
732
733 regcache = get_thread_regcache (current_thread, 1);
734 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
735
736 if (debug_threads)
737 {
738 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
739 *sysno, *sysret);
740 }
741
742 current_thread = saved_thread;
743}
744
e7ad2f14 745static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 746
e7ad2f14
PA
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  /* Without a get_pc hook we cannot determine the stop PC at all.  */
  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  /* Address a software breakpoint would have been placed at, given
     that the trap reports the PC *after* the breakpoint insn on some
     architectures.  */
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: classify the trap from the kernel-provided
     siginfo si_code.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* Fallback path without usable siginfo: probe breakpoint tables and
     debug registers directly.  We may have just stepped a breakpoint
     instruction.  E.g., in non-stop mode, GDB first tells the thread
     A to step a range, and then the user inserts a breakpoint inside
     the range.  In that case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}
ce3a066d 887
b3312d80 888static struct lwp_info *
95954743 889add_lwp (ptid_t ptid)
611cb4a5 890{
54a0b537 891 struct lwp_info *lwp;
0d62e5e8 892
8d749320 893 lwp = XCNEW (struct lwp_info);
00db26fa
PA
894
895 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 896
aa5ca48f 897 if (the_low_target.new_thread != NULL)
34c703da 898 the_low_target.new_thread (lwp);
aa5ca48f 899
f7667f0d 900 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 901
54a0b537 902 return lwp;
0d62e5e8 903}
611cb4a5 904
da6d8c04
DJ
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  In the parent,
   registers the new process and its initial LWP; the child traces
   itself with PTRACE_TRACEME and execs PROGRAM.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; restored below.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; vfork shares the address space until
     the exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: arrange to be traced, detach from gdbserver's
	 controlling terminal/process group, and exec.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try PROGRAM as given first; fall back to a PATH search only
	 when the file itself was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: restore our own personality settings.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
967
ece66d65
JS
968/* Implement the post_create_inferior target_ops method. */
969
970static void
971linux_post_create_inferior (void)
972{
973 struct lwp_info *lwp = get_thread_lwp (current_thread);
974
975 linux_arch_setup ();
976
977 if (lwp->must_set_ptrace_flags)
978 {
979 struct process_info *proc = current_process ();
980 int options = linux_low_ptrace_options (proc->attached);
981
982 linux_enable_event_reporting (lwpid_of (current_thread), options);
983 lwp->must_set_ptrace_flags = 0;
984 }
985}
986
8784d563
PA
987/* Attach to an inferior process. Returns 0 on success, ERRNO on
988 error. */
da6d8c04 989
7ae1a6a6
PA
/* Attach to the LWP specified by PTID.  Returns 0 on success, or an
   errno value from the failed PTRACE_ATTACH.  Registers the new LWP
   and arranges for its initial SIGSTOP to be handled on the next
   wait.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1071
8784d563
PA
1072/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1073 already attached. Returns true if a new LWP is found, false
1074 otherwise. */
1075
1076static int
1077attach_proc_task_lwp_callback (ptid_t ptid)
1078{
1079 /* Is this a new thread? */
1080 if (find_thread_ptid (ptid) == NULL)
1081 {
1082 int lwpid = ptid_get_lwp (ptid);
1083 int err;
1084
1085 if (debug_threads)
1086 debug_printf ("Found new lwp %d\n", lwpid);
1087
1088 err = linux_attach_lwp (ptid);
1089
1090 /* Be quiet if we simply raced with the thread exiting. EPERM
1091 is returned if the thread's task still exists, and is marked
1092 as exited or zombie, as well as other conditions, so in that
1093 case, confirm the status in /proc/PID/status. */
1094 if (err == ESRCH
1095 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1096 {
1097 if (debug_threads)
1098 {
1099 debug_printf ("Cannot attach to lwp %d: "
1100 "thread is gone (%d: %s)\n",
1101 lwpid, err, strerror (err));
1102 }
1103 }
1104 else if (err != 0)
1105 {
1106 warning (_("Cannot attach to lwp %d: %s"),
1107 lwpid,
1108 linux_ptrace_attach_fail_reason_string (ptid, err));
1109 }
1110
1111 return 1;
1112 }
1113 return 0;
1114}
1115
500c1d85
PA
1116static void async_file_mark (void);
1117
e3deef73
LM
1118/* Attach to PID. If PID is the tgid, attach to it and all
1119 of its threads. */
1120
/* Attach to PID.  If PID is the tgid, attach to it and all of its
   threads.  Returns 0 on success; raises an error if the initial
   attach fails.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      /* Anything other than the expected initial SIGSTOP is left
	 pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1187
/* State for second_thread_of_pid_p: which thread group to look at,
   and how many of its threads have been seen so far.  */

struct counter
{
  int pid;	/* Thread group id being counted.  */
  int count;	/* Matching threads seen so far.  */
};
1193
1194static int
1195second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1196{
9a3c8263 1197 struct counter *counter = (struct counter *) args;
95954743
PA
1198
1199 if (ptid_get_pid (entry->id) == counter->pid)
1200 {
1201 if (++counter->count > 1)
1202 return 1;
1203 }
d61ddec4 1204
da6d8c04
DJ
1205 return 0;
1206}
1207
95954743 1208static int
fa96cb38 1209last_thread_of_process_p (int pid)
95954743 1210{
95954743 1211 struct counter counter = { pid , 0 };
da6d8c04 1212
95954743
PA
1213 return (find_inferior (&all_threads,
1214 second_thread_of_pid_p, &counter) == NULL);
1215}
1216
da84f473
PA
1217/* Kill LWP. */
1218
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL as a fallback; see
   the comment below for why both are used.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1260
e76126e8
PA
1261/* Kill LWP and wait for it to die. */
1262
/* Kill LWP and wait for it to die, reaping every intermediate stop
   status until the exit status is collected.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1303
da84f473
PA
1304/* Callback for `find_inferior'. Kills an lwp of a given process,
1305 except the leader. */
95954743
PA
1306
1307static int
da84f473 1308kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 1309{
0d62e5e8 1310 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1311 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1312 int pid = * (int *) args;
1313
1314 if (ptid_get_pid (entry->id) != pid)
1315 return 0;
0d62e5e8 1316
fd500816
DJ
1317 /* We avoid killing the first thread here, because of a Linux kernel (at
1318 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1319 the children get a chance to be reaped, it will remain a zombie
1320 forever. */
95954743 1321
d86d4aaf 1322 if (lwpid_of (thread) == pid)
95954743
PA
1323 {
1324 if (debug_threads)
87ce2a04
DE
1325 debug_printf ("lkop: is last of process %s\n",
1326 target_pid_to_str (entry->id));
95954743
PA
1327 return 0;
1328 }
fd500816 1329
e76126e8 1330 kill_wait_lwp (lwp);
95954743 1331 return 0;
da6d8c04
DJ
1332}
1333
95954743
PA
/* Kill process PID and all its LWPs.  Returns 0 on success, -1 if PID
   is not a known process.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Can happen if the leader was already reaped; just note it.  */
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1370
9b224c5e
PA
1371/* Get pending signal of THREAD, for detaching purposes. This is the
1372 signal the thread last stopped for, which we need to deliver to the
1373 thread when detaching, otherwise, it'd be suppressed/lost. */
1374
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal that should be passed on.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      /* GDB told us explicitly not to pass this signal.  */
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1450
95954743
PA
/* Callback for find_inferior.  Detaches from one LWP of the process
   whose pid is in ARGS: clears any pending SIGSTOP, flushes register
   changes, and PTRACE_DETACHes with the appropriate pending signal
   (see get_detach_signal).  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1491
95954743
PA
/* Detach from process PID and all of its LWPs.  Returns 0 on success,
   -1 if PID is not a known process.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1528
1529/* Remove all LWPs that belong to process PROC from the lwp list. */
1530
/* Callback used by linux_mourn: deletes ENTRY's LWP when it belongs
   to the process PROC.  Always returns 0 so iteration continues.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct process_info *mourned = (struct process_info *) proc;

  if (pid_of (thr) == pid_of (mourned))
    delete_lwp (get_thread_lwp (thr));

  return 0;
}
1543
8336d594
PA
1544static void
1545linux_mourn (struct process_info *process)
1546{
1547 struct process_info_private *priv;
1548
1549#ifdef USE_THREAD_DB
1550 thread_db_mourn (process);
1551#endif
1552
d86d4aaf 1553 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1554
8336d594 1555 /* Freeing all private data. */
fe978cb0 1556 priv = process->priv;
8336d594
PA
1557 free (priv->arch_private);
1558 free (priv);
fe978cb0 1559 process->priv = NULL;
505106cd
PA
1560
1561 remove_process (process);
8336d594
PA
1562}
1563
/* Wait (join) for process PID to exit, looping over waitpid until it
   reports an exit or a termination by signal, or until the child is
   gone (ECHILD).  */

static void
linux_join (int pid)
{
  int status;
  int ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);

      /* Only inspect STATUS when waitpid actually collected one; on
	 failure (ret == -1, e.g. ECHILD) STATUS is not written, and
	 reading it would be undefined behavior.  */
      if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
	break;
    }
  while (ret != -1 || errno != ECHILD);
}
1575
6ad8ae5c 1576/* Return nonzero if the given thread is still alive. */
0d62e5e8 1577static int
95954743 1578linux_thread_alive (ptid_t ptid)
0d62e5e8 1579{
95954743
PA
1580 struct lwp_info *lwp = find_lwp_pid (ptid);
1581
1582 /* We assume we always know if a thread exits. If a whole process
1583 exited but we still haven't been able to report it to GDB, we'll
1584 hold on to the last lwp of the dead process. */
1585 if (lwp != NULL)
00db26fa 1586 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1587 else
1588 return 0;
1589}
1590
582511be
PA
1591/* Return 1 if this lwp still has an interesting status pending. If
1592 not (e.g., it had stopped for a breakpoint that is gone), return
1593 false. */
1594
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* A breakpoint stop is only still interesting if the breakpoint (or
     the PC) hasn't changed since the stop was recorded.  Explicit
     resume_stop requests are always reported.  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the current thread; get_pc and the
	 breakpoint queries below operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, double-check that the
	 breakpoint that caused the stop still exists.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1658
a681f9c9
PA
1659/* Returns true if LWP is resumed from the client's perspective. */
1660
1661static int
1662lwp_resumed (struct lwp_info *lwp)
1663{
1664 struct thread_info *thread = get_lwp_thread (lwp);
1665
1666 if (thread->last_resume_kind != resume_stop)
1667 return 1;
1668
1669 /* Did gdb send us a `vCont;t', but we haven't reported the
1670 corresponding stop to gdb yet? If so, the thread is still
1671 resumed/running from gdb's perspective. */
1672 if (thread->last_resume_kind == resume_stop
1673 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1674 return 1;
1675
1676 return 0;
1677}
1678
6bf5e0ba 1679/* Return 1 if this lwp has an interesting status pending. */
/* Return 1 if this lwp has an interesting status pending.  ARG is a
   ptid filter; only threads matching it are considered.  Note: as a
   side effect, a stale breakpoint status is discarded and the LWP is
   re-resumed here.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The recorded stop is no longer interesting (e.g., breakpoint
	 removed); resume the LWP as it was previously running.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1704
95954743
PA
1705static int
1706same_lwp (struct inferior_list_entry *entry, void *data)
1707{
1708 ptid_t ptid = *(ptid_t *) data;
1709 int lwp;
1710
1711 if (ptid_get_lwp (ptid) != 0)
1712 lwp = ptid_get_lwp (ptid);
1713 else
1714 lwp = ptid_get_pid (ptid);
1715
1716 if (ptid_get_lwp (entry->id) == lwp)
1717 return 1;
1718
1719 return 0;
1720}
1721
1722struct lwp_info *
1723find_lwp_pid (ptid_t ptid)
1724{
d86d4aaf
DE
1725 struct inferior_list_entry *thread
1726 = find_inferior (&all_threads, same_lwp, &ptid);
1727
1728 if (thread == NULL)
1729 return NULL;
1730
1731 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1732}
1733
fa96cb38 1734/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1735
fa96cb38
PA
1736static int
1737num_lwps (int pid)
1738{
1739 struct inferior_list_entry *inf, *tmp;
1740 int count = 0;
0d62e5e8 1741
fa96cb38 1742 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1743 {
fa96cb38
PA
1744 if (ptid_get_pid (inf->id) == pid)
1745 count++;
24a09b5f 1746 }
3aee8918 1747
fa96cb38
PA
1748 return count;
1749}
d61ddec4 1750
6d4ee8c6
GB
1751/* The arguments passed to iterate_over_lwps. */
1752
/* The arguments passed to iterate_over_lwps, bundled so they can
   travel through find_inferior's single void* parameter.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
1764
1765/* Callback for find_inferior used by iterate_over_lwps to filter
1766 calls to the callback supplied to that function. Returning a
1767 nonzero value causes find_inferiors to stop iterating and return
1768 the current inferior_list_entry. Returning zero indicates that
1769 find_inferiors should continue iterating. */
1770
1771static int
1772iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1773{
1774 struct iterate_over_lwps_args *args
1775 = (struct iterate_over_lwps_args *) args_p;
1776
1777 if (ptid_match (entry->id, args->filter))
1778 {
1779 struct thread_info *thr = (struct thread_info *) entry;
1780 struct lwp_info *lwp = get_thread_lwp (thr);
1781
1782 return (*args->callback) (lwp, args->data);
1783 }
1784
1785 return 0;
1786}
1787
1788/* See nat/linux-nat.h. */
1789
1790struct lwp_info *
1791iterate_over_lwps (ptid_t filter,
1792 iterate_over_lwps_ftype callback,
1793 void *data)
1794{
1795 struct iterate_over_lwps_args args = {filter, callback, data};
1796 struct inferior_list_entry *entry;
1797
1798 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1799 if (entry == NULL)
1800 return NULL;
1801
1802 return get_thread_lwp ((struct thread_info *) entry);
1803}
1804
fa96cb38
PA
1805/* Detect zombie thread group leaders, and "exit" them. We can't reap
1806 their exits until all other threads in the group have exited. */
c3adc08c 1807
fa96cb38
PA
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
c3adc08c 1870
fa96cb38
PA
1871/* Callback for `find_inferior'. Returns the first LWP that is not
1872 stopped. ARG is a PTID filter. */
d50171e4 1873
fa96cb38
PA
1874static int
1875not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1876{
1877 struct thread_info *thr = (struct thread_info *) entry;
1878 struct lwp_info *lwp;
1879 ptid_t filter = *(ptid_t *) arg;
47c0c975 1880
fa96cb38
PA
1881 if (!ptid_match (ptid_of (thr), filter))
1882 return 0;
bd99dc85 1883
fa96cb38
PA
1884 lwp = get_thread_lwp (thr);
1885 if (!lwp->stopped)
1886 return 1;
1887
1888 return 0;
0d62e5e8 1889}
611cb4a5 1890
863d01bd
PA
1891/* Increment LWP's suspend count. */
1892
1893static void
1894lwp_suspended_inc (struct lwp_info *lwp)
1895{
1896 lwp->suspended++;
1897
1898 if (debug_threads && lwp->suspended > 4)
1899 {
1900 struct thread_info *thread = get_lwp_thread (lwp);
1901
1902 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1903 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1904 }
1905}
1906
1907/* Decrement LWP's suspend count. */
1908
1909static void
1910lwp_suspended_decr (struct lwp_info *lwp)
1911{
1912 lwp->suspended--;
1913
1914 if (lwp->suspended < 0)
1915 {
1916 struct thread_info *thread = get_lwp_thread (lwp);
1917
1918 internal_error (__FILE__, __LINE__,
1919 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1920 lwp->suspended);
1921 }
1922}
1923
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* Callers must hand us an unsuspended LWP.  */
  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Undo the temporary suspension from above; the count must be back
     to zero afterwards.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1971
fa593d66
PA
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On success, *STATUS is filled in by
   fast_tracepoint_collecting.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Without target support for looking up the thread area we cannot
     identify the collecting thread, so report "not collecting".  */
  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1994
/* Check whether LWP stopped inside a fast-tracepoint jump pad and, if
   so, arrange for it to be moved out before its stop is reported.
   Returns 1 if the LWP must keep running (collection in progress), 0
   if its event may be reported now.  WSTAT, if non-NULL, points at
   the LWP's wait status and may be adjusted here.

   The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Temporarily switch the global current thread to LWP's; restored
     on every exit path below.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
2121
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  The signal number is taken from WSTAT and its
   siginfo is captured via PTRACE_GETSIGINFO.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Push a new entry onto the head of the prev-linked list.  */
  p_sig = XCNEW (struct pending_signals);
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);

  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
2180
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and stores a stop status for the
   signal in *WSTAT if one was pending, 0 otherwise.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest entry (the tail of the prev-linked list)
	 so signals are reported in the order they were deferred.  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      /* Re-inject the captured siginfo, if any was recorded.  */
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
2226
582511be
PA
2227/* Fetch the possibly triggered data watchpoint info and store it in
2228 CHILD.
d50171e4 2229
582511be
PA
2230 On some archs, like x86, that use debug registers to set
2231 watchpoints, it's possible that the way to know which watched
2232 address trapped, is to check the register that is used to select
2233 which address to watch. Problem is, between setting the watchpoint
2234 and reading back which data address trapped, the user may change
2235 the set of watchpoints, and, as a consequence, GDB changes the
2236 debug registers in the inferior. To avoid reading back a stale
2237 stopped-data-address when that happens, we cache in LP the fact
2238 that a watchpoint trapped, and the corresponding data address, as
2239 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2240 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2241
582511be
PA
2242static int
2243check_stopped_by_watchpoint (struct lwp_info *child)
2244{
2245 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2246 {
582511be 2247 struct thread_info *saved_thread;
d50171e4 2248
582511be
PA
2249 saved_thread = current_thread;
2250 current_thread = get_lwp_thread (child);
2251
2252 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2253 {
15c66dd6 2254 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2255
2256 if (the_low_target.stopped_data_address != NULL)
2257 child->stopped_data_address
2258 = the_low_target.stopped_data_address ();
2259 else
2260 child->stopped_data_address = 0;
d50171e4
PA
2261 }
2262
0bfdf32f 2263 current_thread = saved_thread;
d50171e4
PA
2264 }
2265
15c66dd6 2266 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2267}
2268
de0d863e
DB
2269/* Return the ptrace options that we want to try to enable. */
2270
2271static int
2272linux_low_ptrace_options (int attached)
2273{
2274 int options = 0;
2275
2276 if (!attached)
2277 options |= PTRACE_O_EXITKILL;
2278
2279 if (report_fork_events)
2280 options |= PTRACE_O_TRACEFORK;
2281
c269dbdb
DB
2282 if (report_vfork_events)
2283 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2284
94585166
DB
2285 if (report_exec_events)
2286 options |= PTRACE_O_TRACEEXEC;
2287
82075af2
JS
2288 options |= PTRACE_O_TRACESYSGOOD;
2289
de0d863e
DB
2290 return options;
2291}
2292
fa96cb38
PA
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  LWPID is the LWP the kernel reported the event
   for, and WSTAT the raw waitpid status.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_build (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  /* Only stop events remain past this point.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      linux_arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  /* Enable the requested ptrace event options on first stop.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops toggle between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2505
f79b145d
YQ
/* Return true if THREAD is doing hardware single step.  When only
   software single step is available, assert that the reinsert
   breakpoints required for it are in place.  */

static int
maybe_hw_step (struct thread_info *thread)
{
  if (!can_hardware_single_step ())
    {
      struct process_info *proc = get_thread_process (thread);

      /* GDBserver must insert reinsert breakpoint for software
	 single step.  */
      gdb_assert (has_reinsert_breakpoints (proc));
      return 0;
    }

  return 1;
}
2523
20ba1ce6
PA
2524/* Resume LWPs that are currently stopped without any pending status
2525 to report, but are resumed from the core's perspective. */
2526
2527static void
2528resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2529{
2530 struct thread_info *thread = (struct thread_info *) entry;
2531 struct lwp_info *lp = get_thread_lwp (thread);
2532
2533 if (lp->stopped
863d01bd 2534 && !lp->suspended
20ba1ce6 2535 && !lp->status_pending_p
20ba1ce6
PA
2536 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2537 {
2538 int step = thread->last_resume_kind == resume_step;
2539
2540 if (debug_threads)
2541 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2542 target_pid_to_str (ptid_of (thread)),
2543 paddress (lp->stop_pc),
2544 step);
2545
2546 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2547 }
2548}
2549
fa96cb38
PA
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Waiting on a specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  /* Don't report a stop while the LWP is inside a fast
	     tracepoint jump pad; defer the signal and let it run.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning the event.  */
  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2746
/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.

   Convenience wrapper around linux_wait_for_event_filtered, using
   PTID both as the set of children to wait on and as the filter.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2760
6bf5e0ba
PA
2761/* Count the LWP's that have had events. */
2762
2763static int
2764count_events_callback (struct inferior_list_entry *entry, void *data)
2765{
d86d4aaf 2766 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2767 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2768 int *count = (int *) data;
6bf5e0ba
PA
2769
2770 gdb_assert (count != NULL);
2771
582511be 2772 /* Count only resumed LWPs that have an event pending. */
8336d594 2773 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2774 && lp->status_pending_p)
6bf5e0ba
PA
2775 (*count)++;
2776
2777 return 0;
2778}
2779
2780/* Select the LWP (if any) that is currently being single-stepped. */
2781
2782static int
2783select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2784{
d86d4aaf
DE
2785 struct thread_info *thread = (struct thread_info *) entry;
2786 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2787
8336d594
PA
2788 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2789 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2790 && lp->status_pending_p)
2791 return 1;
2792 else
2793 return 0;
2794}
2795
b90fc188 2796/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2797
2798static int
2799select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2800{
d86d4aaf 2801 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2802 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2803 int *selector = (int *) data;
6bf5e0ba
PA
2804
2805 gdb_assert (selector != NULL);
2806
582511be 2807 /* Select only resumed LWPs that have an event pending. */
91baf43f 2808 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2809 && lp->status_pending_p)
6bf5e0ba
PA
2810 if ((*selector)-- == 0)
2811 return 1;
2812
2813 return 0;
2814}
2815
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP is switched to the chosen LWP (left untouched if no
   candidate was found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_singlestep_lwp_callback,
						NULL);
      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      /* First see how many events we have.  */
      find_inferior (&all_threads, count_events_callback, &num_events);
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
		      num_events, random_selector);

      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_event_lwp_callback,
						&random_selector);
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2878
7984d532
PA
/* find_inferior callback: decrement the suspend count of the LWP
   behind ENTRY, unless it is the LWP passed as EXCEPT.  Always
   returns 0 so the iteration visits every thread.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = get_thread_lwp ((struct thread_info *) entry);

  /* Skip the excepted LWP, if given.  */
  if (lwp != except)
    lwp_suspended_decr (lwp);

  return 0;
}
2894
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  Counterpart of the suspend done around step-over; see
   unsuspend_one_lwp for the per-LWP work.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_threads, unsuspend_one_lwp, except);
}
2903
fa593d66
PA
2904static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2905static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2906 void *data);
2907static int lwp_running (struct inferior_list_entry *entry, void *data);
2908static ptid_t linux_wait_1 (ptid_t ptid,
2909 struct target_waitstatus *ourstatus,
2910 int target_options);
2911
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread is already wedged in a jump pad (e.g., stopped on
     an unremovable breakpoint there), we cannot stabilize; bail out
     early rather than loop forever below.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait/event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bumping the suspend count keeps this LWP parked
	     until the matching unsuspend_all_lwps below.  */
	  lwp_suspended_inc (lwp);

	  /* Preserve any interesting signal so it can be re-reported
	     once the thread is out of the jump pad.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  /* Sanity check (debug builds only): verify no thread ended up stuck
     in a jump pad despite the loop above.  */
  if (debug_threads)
    {
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
3012
582511be
PA
3013/* Convenience function that is called when the kernel reports an
3014 event that is not passed out to GDB. */
3015
3016static ptid_t
3017ignore_event (struct target_waitstatus *ourstatus)
3018{
3019 /* If we got an event, there may still be others, as a single
3020 SIGCHLD can indicate more than one child stopped. This forces
3021 another target_wait call. */
3022 async_file_mark ();
3023
3024 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3025 return null_ptid;
3026}
3027
65706a29
PA
3028/* Convenience function that is called when the kernel reports an exit
3029 event. This decides whether to report the event to GDB as a
3030 process exit event, a thread exit event, or to suppress the
3031 event. */
3032
3033static ptid_t
3034filter_exit_event (struct lwp_info *event_child,
3035 struct target_waitstatus *ourstatus)
3036{
3037 struct thread_info *thread = get_lwp_thread (event_child);
3038 ptid_t ptid = ptid_of (thread);
3039
3040 if (!last_thread_of_process_p (pid_of (thread)))
3041 {
3042 if (report_thread_events)
3043 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3044 else
3045 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3046
3047 delete_lwp (event_child);
3048 }
3049 return ptid;
3050}
3051
82075af2
JS
3052/* Returns 1 if GDB is interested in any event_child syscalls. */
3053
3054static int
3055gdb_catching_syscalls_p (struct lwp_info *event_child)
3056{
3057 struct thread_info *thread = get_lwp_thread (event_child);
3058 struct process_info *proc = get_thread_process (thread);
3059
3060 return !VEC_empty (int, proc->syscalls_to_catch);
3061}
3062
3063/* Returns 1 if GDB is interested in the event_child syscall.
3064 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3065
3066static int
3067gdb_catch_this_syscall_p (struct lwp_info *event_child)
3068{
3069 int i, iter;
3070 int sysno, sysret;
3071 struct thread_info *thread = get_lwp_thread (event_child);
3072 struct process_info *proc = get_thread_process (thread);
3073
3074 if (VEC_empty (int, proc->syscalls_to_catch))
3075 return 0;
3076
3077 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3078 return 1;
3079
3080 get_syscall_trapinfo (event_child, &sysno, &sysret);
3081 for (i = 0;
3082 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3083 i++)
3084 if (iter == sysno)
3085 return 1;
3086
3087 return 0;
3088}
3089
/* Wait for process, returns status.

   Core of the Linux target's wait implementation: waits for an event
   from an LWP matching PTID (or from the thread being stepped over,
   if a step-over is in progress), fills in OURSTATUS, and returns the
   ptid of the thread the event is reported for, or null_ptid when no
   event is reported.  TARGET_OPTIONS may include TARGET_WNOHANG to
   make the wait non-blocking.  Events that GDB is not interested in
   (internal breakpoints, ignored signals, uninteresting syscalls,
   jump-pad traffic) are consumed here and the inferior is resumed
   without reporting.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* Find a resumed LWP, if any.  Either a pending status or a
     not-stopped thread counts as "something is resumed".  */
  if (find_inferior (&all_threads,
		     status_pending_p_callback,
		     &minus_one_ptid) != NULL)
    any_resumed = 1;
  else if ((find_inferior (&all_threads,
			   not_stopped_callback,
			   &minus_one_ptid) != NULL))
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in progress, only wait on the stepping
     thread, and do so blocking, so the step-over can't be left
     half-done.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  /* pid == 0: nothing ready under WNOHANG.  pid == -1 with nothing
     resumed: same thing, report "ignore".  */
  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("linux_wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      /* Resumed threads exist but none can report an event.  */
      if (debug_threads)
	{
	  debug_printf ("linux_wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* linux_wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("linux_wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("linux_wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      /* Thread exits may be suppressed or downgraded; see
	 filter_exit_event.  */
      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the reinsert_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (!ptid_equal (step_over_bkpt, null_ptid)
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind =
	the_target->breakpoint_kind_from_current_state (&stop_pc);
      the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);

	  /* If nothing is at the new PC, the stop no longer has a
	     breakpoint-related reason.  */
	  if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  linux_resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint != 1)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint == 0)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  /* While stabilizing (see linux_stabilize_threads),
		     report a plain stop so the stabilization loop can
		     park this thread.  */
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("linux_wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall_p (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      linux_resume_one_lwp (event_child, event_child->stepping,
			    0, NULL);
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Fetch the siginfo so it can be re-delivered intact on
	 resume; fall back to NULL if the kernel won't give it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  linux_resume_one_lwp (event_child, event_child->stepping,
				WSTOPSIG (w), info_p);
	}
      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'd be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	debug_printf ("proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  char *str;

	  str = target_waitstatus_to_string (&event_child->waitstatus);
	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str);
	  xfree (str);
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      /* NOTE(review): this inner debug_threads test is redundant --
	 we are already inside an `if (debug_threads)` block.  */
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */
      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !swbreak_feature)
    {
      int decr_pc = the_low_target.decr_pc_after_break;

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      int sysret;

      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number, &sysret);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  /* By this point any in-progress step-over must have completed.  */
  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (debug_threads)
    {
      debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3729
3730/* Get rid of any pending event in the pipe. */
3731static void
3732async_file_flush (void)
3733{
3734 int ret;
3735 char buf;
3736
3737 do
3738 ret = read (linux_event_pipe[0], &buf, 1);
3739 while (ret >= 0 || (ret == -1 && errno == EINTR));
3740}
3741
3742/* Put something in the pipe, so the event loop wakes up. */
3743static void
3744async_file_mark (void)
3745{
3746 int ret;
3747
3748 async_file_flush ();
3749
3750 do
3751 ret = write (linux_event_pipe[1], "+", 1);
3752 while (ret == 0 || (ret == -1 && errno == EINTR));
3753
3754 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3755 be awakened anyway. */
3756}
3757
95954743
PA
3758static ptid_t
3759linux_wait (ptid_t ptid,
3760 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3761{
95954743 3762 ptid_t event_ptid;
bd99dc85 3763
bd99dc85
PA
3764 /* Flush the async file first. */
3765 if (target_is_async_p ())
3766 async_file_flush ();
3767
582511be
PA
3768 do
3769 {
3770 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3771 }
3772 while ((target_options & TARGET_WNOHANG) == 0
3773 && ptid_equal (event_ptid, null_ptid)
3774 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3775
3776 /* If at least one stop was reported, there may be more. A single
3777 SIGCHLD can signal more than one child stop. */
3778 if (target_is_async_p ()
3779 && (target_options & TARGET_WNOHANG) != 0
95954743 3780 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3781 async_file_mark ();
3782
3783 return event_ptid;
da6d8c04
DJ
3784}
3785
/* Send signal SIGNO to the LWP identified by LWPID, using the
   thread-directed tkill syscall.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  int result;

  errno = 0;
  result = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* A missing tkill means a non-NPTL thread library, a
	 configuration we no longer support.  */
      perror_with_name (("tkill"));
    }

  return result;
}
3803
/* Exported entry point: ask LWP to stop by queueing a SIGSTOP for
   it.  Thin wrapper around send_sigstop.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3809
0d62e5e8 3810static void
02fc4de7 3811send_sigstop (struct lwp_info *lwp)
0d62e5e8 3812{
bd99dc85 3813 int pid;
0d62e5e8 3814
d86d4aaf 3815 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3816
0d62e5e8
DJ
3817 /* If we already have a pending stop signal for this process, don't
3818 send another. */
54a0b537 3819 if (lwp->stop_expected)
0d62e5e8 3820 {
ae13219e 3821 if (debug_threads)
87ce2a04 3822 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3823
0d62e5e8
DJ
3824 return;
3825 }
3826
3827 if (debug_threads)
87ce2a04 3828 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3829
d50171e4 3830 lwp->stop_expected = 1;
bd99dc85 3831 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3832}
3833
7984d532
PA
3834static int
3835send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3836{
d86d4aaf
DE
3837 struct thread_info *thread = (struct thread_info *) entry;
3838 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3839
7984d532
PA
3840 /* Ignore EXCEPT. */
3841 if (lwp == except)
3842 return 0;
3843
02fc4de7 3844 if (lwp->stopped)
7984d532 3845 return 0;
02fc4de7
PA
3846
3847 send_sigstop (lwp);
7984d532
PA
3848 return 0;
3849}
3850
3851/* Increment the suspend count of an LWP, and stop it, if not stopped
3852 yet. */
3853static int
3854suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3855 void *except)
3856{
d86d4aaf
DE
3857 struct thread_info *thread = (struct thread_info *) entry;
3858 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3859
3860 /* Ignore EXCEPT. */
3861 if (lwp == except)
3862 return 0;
3863
863d01bd 3864 lwp_suspended_inc (lwp);
7984d532
PA
3865
3866 return send_sigstop_callback (entry, except);
02fc4de7
PA
3867}
3868
95954743
PA
3869static void
3870mark_lwp_dead (struct lwp_info *lwp, int wstat)
3871{
95954743
PA
3872 /* Store the exit status for later. */
3873 lwp->status_pending_p = 1;
3874 lwp->status_pending = wstat;
3875
00db26fa
PA
3876 /* Store in waitstatus as well, as there's nothing else to process
3877 for this event. */
3878 if (WIFEXITED (wstat))
3879 {
3880 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3881 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3882 }
3883 else if (WIFSIGNALED (wstat))
3884 {
3885 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3886 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3887 }
3888
95954743
PA
3889 /* Prevent trying to stop it. */
3890 lwp->stopped = 1;
3891
3892 /* No further stops are expected from a dead lwp. */
3893 lwp->stop_expected = 0;
3894}
3895
00db26fa
PA
3896/* Return true if LWP has exited already, and has a pending exit event
3897 to report to GDB. */
3898
3899static int
3900lwp_is_marked_dead (struct lwp_info *lwp)
3901{
3902 return (lwp->status_pending_p
3903 && (WIFEXITED (lwp->status_pending)
3904 || WIFSIGNALED (lwp->status_pending)));
3905}
3906
/* Wait for all children to stop for the SIGSTOPs we just queued.  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember which thread was current, by id, so it can be restored
     after pulling events (the thread object may die meanwhile).  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->entry.id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  /* Restore the previously-current thread, but only if it is still
     alive.  */
  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      current_thread = NULL;
    }
}
3946
/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
   move it out, because we need to report the stop event to GDB.  For
   example, if the user puts a breakpoint in the jump pad, it's
   because she wants to debug it.  */

static int
stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Suspended LWPs are not expected here; this callback runs after
     stop_all_lwps (0, ...), which does not suspend.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && linux_fast_tracepoint_collecting (lwp, NULL));
}
3974
/* Callback run on each stopped LWP: if the LWP is stopped inside a
   fast tracepoint jump pad and the stop need not be reported to GDB,
   defer any pending signal and resume it so it can finish collecting
   and leave the pad; otherwise just bump its suspend count.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      /* Any pending stop becomes a deferred signal, re-delivered
	 once the LWP is out of the pad.  */
      if (wstat)
	{
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4025
4026static int
4027lwp_running (struct inferior_list_entry *entry, void *data)
4028{
d86d4aaf
DE
4029 struct thread_info *thread = (struct thread_info *) entry;
4030 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4031
00db26fa 4032 if (lwp_is_marked_dead (lwp))
fa593d66
PA
4033 return 0;
4034 if (lwp->stopped)
4035 return 0;
4036 return 1;
4037}
4038
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* Publish the in-progress state before sending any SIGSTOPs.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
  else
    find_inferior (&all_threads, send_sigstop_callback, except);
  /* Block until every LWP has actually reported its stop.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
4077
863d01bd
PA
4078/* Enqueue one signal in the chain of signals which need to be
4079 delivered to this process on next resume. */
4080
4081static void
4082enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4083{
8d749320 4084 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4085
863d01bd
PA
4086 p_sig->prev = lwp->pending_signals;
4087 p_sig->signal = signal;
4088 if (info == NULL)
4089 memset (&p_sig->info, 0, sizeof (siginfo_t));
4090 else
4091 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4092 lwp->pending_signals = p_sig;
4093}
4094
/* Install breakpoints for software single stepping: ask the
   low-target for all possible next PCs and plant a reinsert
   breakpoint at each one.  */

static void
install_software_single_step_breakpoints (struct lwp_info *lwp)
{
  int i;
  CORE_ADDR pc;
  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  VEC (CORE_ADDR) *next_pcs = NULL;
  /* Ensure the next_pcs vector is freed even on error paths.  */
  struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);

  next_pcs = (*the_low_target.get_next_pcs) (regcache);

  for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
    set_reinsert_breakpoint (pc);

  do_cleanups (old_chain);
}
4113
7fe5e27e
AT
4114/* Single step via hardware or software single step.
4115 Return 1 if hardware single stepping, 0 if software single stepping
4116 or can't single step. */
4117
4118static int
4119single_step (struct lwp_info* lwp)
4120{
4121 int step = 0;
4122
4123 if (can_hardware_single_step ())
4124 {
4125 step = 1;
4126 }
4127 else if (can_software_single_step ())
4128 {
4129 install_software_single_step_breakpoints (lwp);
4130 step = 0;
4131 }
4132 else
4133 {
4134 if (debug_threads)
4135 debug_printf ("stepping is not implemented on this target");
4136 }
4137
4138 return step;
4139}
4140
35ac8b3e 4141/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4142 finish a fast tracepoint collect. Since signal can be delivered in
4143 the step-over, the program may go to signal handler and trap again
4144 after return from the signal handler. We can live with the spurious
4145 double traps. */
35ac8b3e
YQ
4146
4147static int
4148lwp_signal_can_be_delivered (struct lwp_info *lwp)
4149{
484b3c32 4150 return !lwp->collecting_fast_tracepoint;
35ac8b3e
YQ
4151}
4152
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal.  May throw (via
   perror_with_name) if ptrace fails; see linux_resume_one_lwp for the
   non-throwing wrapper.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lwp,
			    int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int fast_tp_collecting;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Already running: nothing to do.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before resuming; bail out.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      /* Sanity checks: a pending reinsert implies we are
		 mid-step-over, so we should be stepping and not
		 suspended.  */
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }
  else
    {
      /* If the thread isn't doing step-over, there shouldn't be any
	 reinsert breakpoints.  */
      gdb_assert (!has_reinsert_breakpoints (proc));
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from (guarded: the tdesc may not
     exist yet, see the --wrapper note above).  */
  if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = (*the_low_target.get_pc) (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  The list is walked to its tail so signals are
     delivered oldest-first.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Pick the ptrace resume request: single-step, syscall-stop, or
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4365
4366/* Called when we try to resume a stopped LWP and that errors out. If
4367 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4368 or about to become), discard the error, clear any pending status
4369 the LWP may have, and return true (we'll collect the exit status
4370 soon enough). Otherwise, return false. */
4371
4372static int
4373check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4374{
4375 struct thread_info *thread = get_lwp_thread (lp);
4376
4377 /* If we get an error after resuming the LWP successfully, we'd
4378 confuse !T state for the LWP being gone. */
4379 gdb_assert (lp->stopped);
4380
4381 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4382 because even if ptrace failed with ESRCH, the tracee may be "not
4383 yet fully dead", but already refusing ptrace requests. In that
4384 case the tracee has 'R (Running)' state for a little bit
4385 (observed in Linux 3.18). See also the note on ESRCH in the
4386 ptrace(2) man page. Instead, check whether the LWP has any state
4387 other than ptrace-stopped. */
4388
4389 /* Don't assume anything if /proc/PID/status can't be read. */
4390 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4391 {
23f238d3
PA
4392 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4393 lp->status_pending_p = 0;
4394 return 1;
4395 }
4396 return 0;
4397}
4398
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow the error only if the LWP is verifiably no longer
	 ptrace-stopped (about to die); otherwise re-throw.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
4417
/* A set of resume requests, as passed around by linux_resume and its
   helpers: RESUME points at an array of N thread_resume elements.  */
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
64386c31 4423
/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = (struct thread_resume_array *) arg;

  /* First matching request wins.  */
  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  /* Already stopping at GDB's request; a second stop request
	     for the same thread is redundant -- keep scanning in case
	     a later element gives a different action.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
4499
20ad9378
DE
4500/* find_inferior callback for linux_resume.
4501 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4502
bd99dc85
PA
4503static int
4504resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4505{
d86d4aaf
DE
4506 struct thread_info *thread = (struct thread_info *) entry;
4507 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4508
bd99dc85
PA
4509 /* LWPs which will not be resumed are not interesting, because
4510 we might not wait for them next time through linux_wait. */
2bd7c093 4511 if (lwp->resume == NULL)
bd99dc85 4512 return 0;
64386c31 4513
582511be 4514 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4515 * (int *) flag_p = 1;
4516
4517 return 0;
4518}
4519
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return 0;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* GDB wants this thread to stay stopped; no step-over needed.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return 0;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return 0;
    }

  /* breakpoint_here / gdb_breakpoint_here consult the current
     thread's process, so switch temporarily.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
4656
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped by either hardware or software while the breakpoint
   is temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.

   Returns 1 (so a find_inferior caller stops iterating).  Sets
   STEP_OVER_BKPT to the stepping thread's id so that only events from
   that LWP are processed until the step-over is done.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Freeze every other thread so none can run past the breakpoint
     while it is removed.  LWP itself is excluded (second argument).  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint once the step is done,
     then pull it (and any fast tracepoint jump) out of the way.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  /* Arrange for a hardware or software single-step of LWP.  */
  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4714
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.

   Returns 1 if a step-over was indeed in progress (and is now
   finished), 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Breakpoint insertion acts on the current thread, so switch to
	 the stepped LWP's thread for the duration.  */
      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_reinsert_breakpoints (current_process ()));
	  delete_reinsert_breakpoints ();
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4756
863d01bd
PA
/* If there's a step over in progress, wait until all threads stop
   (that is, until the stepping thread finishes its step), and
   unsuspend all lwps.  The stepping thread ends with its status
   pending, which is processed later when we get back to processing
   events.  */

static void
complete_ongoing_step_over (void)
{
  if (!ptid_equal (step_over_bkpt, null_ptid))
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
					   &wstat, __WALL);
      gdb_assert (ret == -1);

      /* The stepping LWP may have exited meanwhile, in which case
	 find_lwp_pid returns NULL and we only clear the global
	 state.  */
      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4789
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ENTRY is the thread to act on; ARG points to the int
   "leave_all_stopped" flag computed by linux_resume.  Always returns
   0 so find_inferior visits every thread.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No resume request for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig = XCNEW (struct pending_signals);

	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4913
/* Resume the inferior per RESUME_INFO (N entries): record each
   thread's resume request, decide whether all threads must instead be
   left stopped (pending status to report, or a step-over needed),
   queue deferred signals, and kick off a step-over if required.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each resume request in RESUME_INFO to its thread.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4982
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.

   EXCEPT, if non-NULL, is an LWP to skip.  Always returns 0 so
   find_inferior visits every thread.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* The client asked this thread to stay stopped and we have already
     reported the stop; leave it alone.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));
      step = 1;
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      /* Step only if hardware single-step is available; software
	 single-step uses reinsert breakpoints instead.  */
      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
5081
/* Like proceed_one_lwp, but first drop one level of suspension from
   the LWP.  EXCEPT, if non-NULL, is an LWP to leave untouched.  */

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *cur = get_thread_lwp (thr);

  if (cur != except)
    {
      lwp_suspended_decr (cur);
      return proceed_one_lwp (entry, except);
    }

  return 0;
}
5095
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct thread_info *) find_inferior (&all_threads,
						need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  /* Only the stepping thread runs; everyone else stays
	     stopped until the step-over completes.  */
	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  find_inferior (&all_threads, proceed_one_lwp, NULL);
}
5133
5134/* Stopped LWPs that the client wanted to be running, that don't have
5135 pending statuses, are set to run again, except for EXCEPT, if not
5136 NULL. This undoes a stop_all_lwps call. */
5137
5138static void
7984d532 5139unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 5140{
5544ad89
DJ
5141 if (debug_threads)
5142 {
87ce2a04 5143 debug_enter ();
d50171e4 5144 if (except)
87ce2a04 5145 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5146 lwpid_of (get_lwp_thread (except)));
5544ad89 5147 else
87ce2a04 5148 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5149 }
5150
7984d532 5151 if (unsuspend)
d86d4aaf 5152 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 5153 else
d86d4aaf 5154 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
5155
5156 if (debug_threads)
5157 {
5158 debug_printf ("unstop_all_lwps done\n");
5159 debug_exit ();
5160 }
0d62e5e8
DJ
5161}
5162
58caa3dc
DJ
5163
5164#ifdef HAVE_LINUX_REGSETS
5165
1faeff08
MR
5166#define use_linux_regsets 1
5167
030031ee
PA
5168/* Returns true if REGSET has been disabled. */
5169
5170static int
5171regset_disabled (struct regsets_info *info, struct regset_info *regset)
5172{
5173 return (info->disabled_regsets != NULL
5174 && info->disabled_regsets[regset - info->regsets]);
5175}
5176
5177/* Disable REGSET. */
5178
5179static void
5180disable_regset (struct regsets_info *info, struct regset_info *regset)
5181{
5182 int dr_offset;
5183
5184 dr_offset = regset - info->regsets;
5185 if (info->disabled_regsets == NULL)
224c3ddb 5186 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5187 info->disabled_regsets[dr_offset] = 1;
5188}
5189
/* Fetch all registers covered by REGSETS_INFO from the current LWP
   into REGCACHE, one ptrace regset request at a time.  Returns 0 if a
   GENERAL_REGS regset was successfully read, 1 otherwise (so the
   caller falls back to fetching registers individually).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* NT_*-typed regsets go through PTRACE_GETREGSET with an iovec;
	 legacy requests take the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace swaps the 3rd and 4th argument roles.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5261
/* Write back all registers covered by REGSETS_INFO from REGCACHE to
   the current LWP, one ptrace regset request at a time.  Each regset
   is first read from the kernel, overlaid with the cached values, and
   then written back, so kernel-only fields are preserved.  Returns 0
   if a GENERAL_REGS regset was stored, 1 otherwise (so the caller
   falls back to storing registers individually).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function)
	 regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5349
1faeff08 5350#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5351
1faeff08 5352#define use_linux_regsets 0
3aee8918
PA
5353#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5354#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5355
58caa3dc 5356#endif
1faeff08
MR
5357
5358/* Return 1 if register REGNO is supported by one of the regset ptrace
5359 calls or 0 if it has to be transferred individually. */
5360
5361static int
3aee8918 5362linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5363{
5364 unsigned char mask = 1 << (regno % 8);
5365 size_t index = regno / 8;
5366
5367 return (use_linux_regsets
3aee8918
PA
5368 && (regs_info->regset_bitmap == NULL
5369 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5370}
5371
58caa3dc 5372#ifdef HAVE_LINUX_USRREGS
1faeff08 5373
5b3da067 5374static int
3aee8918 5375register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5376{
5377 int addr;
5378
3aee8918 5379 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5380 error ("Invalid register number %d.", regnum);
5381
3aee8918 5382 addr = usrregs->regmap[regnum];
1faeff08
MR
5383
5384 return addr;
5385}
5386
/* Fetch one register (REGNO) from the current LWP with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Silently returns for registers the low target cannot fetch or that
   have no user-area offset.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to (e.g.,
     for registers laid out differently in the user area).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5430
/* Store one register (REGNO) from REGCACHE into the current LWP with
   PTRACE_POKEUSER, one word at a time.  Silently returns for
   registers the low target cannot store or that have no user-area
   offset; ESRCH (process already gone) is ignored.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words and
     zero-fill the trailing pad bytes.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5485
5486/* Fetch all registers, or just one, from the child process.
5487 If REGNO is -1, do this for all registers, skipping any that are
5488 assumed to have been retrieved by regsets_fetch_inferior_registers,
5489 unless ALL is non-zero.
5490 Otherwise, REGNO specifies which register (so we can save time). */
5491static void
3aee8918
PA
5492usr_fetch_inferior_registers (const struct regs_info *regs_info,
5493 struct regcache *regcache, int regno, int all)
1faeff08 5494{
3aee8918
PA
5495 struct usrregs_info *usr = regs_info->usrregs;
5496
1faeff08
MR
5497 if (regno == -1)
5498 {
3aee8918
PA
5499 for (regno = 0; regno < usr->num_regs; regno++)
5500 if (all || !linux_register_in_regsets (regs_info, regno))
5501 fetch_register (usr, regcache, regno);
1faeff08
MR
5502 }
5503 else
3aee8918 5504 fetch_register (usr, regcache, regno);
1faeff08
MR
5505}
5506
5507/* Store our register values back into the inferior.
5508 If REGNO is -1, do this for all registers, skipping any that are
5509 assumed to have been saved by regsets_store_inferior_registers,
5510 unless ALL is non-zero.
5511 Otherwise, REGNO specifies which register (so we can save time). */
5512static void
3aee8918
PA
5513usr_store_inferior_registers (const struct regs_info *regs_info,
5514 struct regcache *regcache, int regno, int all)
1faeff08 5515{
3aee8918
PA
5516 struct usrregs_info *usr = regs_info->usrregs;
5517
1faeff08
MR
5518 if (regno == -1)
5519 {
3aee8918
PA
5520 for (regno = 0; regno < usr->num_regs; regno++)
5521 if (all || !linux_register_in_regsets (regs_info, regno))
5522 store_register (usr, regcache, regno);
1faeff08
MR
5523 }
5524 else
3aee8918 5525 store_register (usr, regcache, regno);
1faeff08
MR
5526}
5527
5528#else /* !HAVE_LINUX_USRREGS */
5529
3aee8918
PA
5530#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5531#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 5532
58caa3dc 5533#endif
1faeff08
MR
5534
5535
/* Target hook: fetch register REGNO (or all registers if REGNO is -1)
   into REGCACHE.  Tries, in order: the low target's own fetch hook,
   the regset path, and finally the PTRACE_PEEKUSER path for anything
   the regsets didn't cover.  */
static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the low target first shot at every register.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL becomes non-zero if the regsets missed the general
	 registers, forcing the usrregs fallback to fetch
	 everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A non-zero return from the hook means it handled REGNO.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5568
/* Target hook: store register REGNO (or all registers if REGNO is -1)
   from REGCACHE into the inferior, using the regset path first and
   the PTRACE_POKEUSER path for anything the regsets didn't cover.  */
static void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL becomes non-zero if the regsets missed the general
	 registers, forcing the usrregs fallback to store
	 everything.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5593
da6d8c04 5594
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or an
   errno value if the ptrace fallback failed partway through.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  On error, stop early; the partial data
     already in BUFFER is still copied out below.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  I is the number of
     whole words successfully read; trim the leading bytes that
     precede MEMADDR within the first word.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5682
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.
     Pre-reading the first and last words preserves the inferior's bytes
     that fall outside [MEMADDR, MEMADDR+LEN) within those words.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  A failure partway through leaves earlier
     words already written; the caller gets the errno of the failing
     word.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9
DJ
5773
/* Target hook invoked once symbol lookup is possible over the remote
   protocol: initialize libthread_db-based thread support, at most once
   per process.  A no-op when built without USE_THREAD_DB.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing more to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5786
/* Interrupt the inferior: deliver SIGINT to the process group of the
   main inferior process, just as if the user had typed ^C on its
   controlling terminal.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  The negative pid targets
     the whole process group.  */
  kill (-signal_pid, SIGINT);
}
5796
aa691b87
RM
5797/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5798 to debugger memory starting at MYADDR. */
5799
5800static int
f450004a 5801linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5802{
5803 char filename[PATH_MAX];
5804 int fd, n;
0bfdf32f 5805 int pid = lwpid_of (current_thread);
aa691b87 5806
6cebaf6e 5807 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5808
5809 fd = open (filename, O_RDONLY);
5810 if (fd < 0)
5811 return -1;
5812
5813 if (offset != (CORE_ADDR) 0
5814 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5815 n = -1;
5816 else
5817 n = read (fd, myaddr, len);
5818
5819 close (fd);
5820
5821 return n;
5822}
5823
d993e290
PA
5824/* These breakpoint and watchpoint related wrapper functions simply
5825 pass on the function call if the target has registered a
5826 corresponding function. */
e013ee27
OF
5827
5828static int
802e8e6d
PA
5829linux_supports_z_point_type (char z_type)
5830{
5831 return (the_low_target.supports_z_point_type != NULL
5832 && the_low_target.supports_z_point_type (z_type));
5833}
5834
5835static int
5836linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5837 int size, struct raw_breakpoint *bp)
e013ee27 5838{
c8f4bfdd
YQ
5839 if (type == raw_bkpt_type_sw)
5840 return insert_memory_breakpoint (bp);
5841 else if (the_low_target.insert_point != NULL)
802e8e6d 5842 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5843 else
5844 /* Unsupported (see target.h). */
5845 return 1;
5846}
5847
5848static int
802e8e6d
PA
5849linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5850 int size, struct raw_breakpoint *bp)
e013ee27 5851{
c8f4bfdd
YQ
5852 if (type == raw_bkpt_type_sw)
5853 return remove_memory_breakpoint (bp);
5854 else if (the_low_target.remove_point != NULL)
802e8e6d 5855 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5856 else
5857 /* Unsupported (see target.h). */
5858 return 1;
5859}
5860
3e572f71
PA
/* Implement the to_stopped_by_sw_breakpoint target_ops
   method.  Nonzero iff the current LWP's last stop was caused by a
   software breakpoint.  */

static int
linux_stopped_by_sw_breakpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}
5871
/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  True when siginfo-based SIGTRAP reason detection is
   available.  */

static int
linux_supports_stopped_by_sw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5880
/* Implement the to_stopped_by_hw_breakpoint target_ops
   method.  Nonzero iff the current LWP's last stop was caused by a
   hardware breakpoint.  */

static int
linux_stopped_by_hw_breakpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}
5891
/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  True when siginfo-based SIGTRAP reason detection is
   available.  */

static int
linux_supports_stopped_by_hw_breakpoint (void)
{
  return USE_SIGTRAP_SIGINFO;
}
5900
/* Implement the supports_hardware_single_step target_ops method.
   Delegates to the architecture-level capability check.  */

static int
linux_supports_hardware_single_step (void)
{
  return can_hardware_single_step ();
}
5908
7d00775e
AT
/* Return nonzero if software single-stepping (breakpoint-based
   stepping) is available for this target.  */

static int
linux_supports_software_single_step (void)
{
  return can_software_single_step ();
}
5914
e013ee27
OF
/* Return nonzero iff the current LWP's last stop was caused by a
   watchpoint hit.  */

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
5922
/* Return the data address that triggered the last watchpoint stop of
   the current LWP; only meaningful after linux_stopped_by_watchpoint
   reports true.  */

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}
5930
db0dfaa0
LM
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success (offsets stored in *TEXT_P
   and *DATA_P), 0 if any of the PTRACE_PEEKUSER reads failed.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  /* The three user-area slots are read back-to-back; a single errno
     check afterwards covers all of them.  */
  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif
5977
07e059b5
VP
/* Handle a qXfer:osdata read by delegating to the shared Linux osdata
   code.  WRITEBUF is accepted for interface compatibility but not
   passed on; only reads are forwarded.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5985
d0722149
DE
5986/* Convert a native/host siginfo object, into/from the siginfo in the
5987 layout of the inferiors' architecture. */
5988
5989static void
8adce034 5990siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
5991{
5992 int done = 0;
5993
5994 if (the_low_target.siginfo_fixup != NULL)
5995 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5996
5997 /* If there was no callback, or the callback didn't do anything,
5998 then just do a straight memcpy. */
5999 if (!done)
6000 {
6001 if (direction == 1)
a5362b9a 6002 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6003 else
a5362b9a 6004 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6005 }
6006}
6007
4aa995e1
PA
/* Read from or write to the pending siginfo of the current thread via
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Exactly one of READBUF and
   WRITEBUF is non-NULL.  Returns the number of bytes transferred, or
   -1 on error or when OFFSET is past the end of the structure.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Writes are read-modify-write: fetch the current siginfo first so a
     partial write only touches the requested bytes.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6056
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Must stay async-signal-safe throughout,
   and must preserve errno since it may interrupt code that is about
   to inspect it.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6083
/* Non-stop mode is always supported by the Linux target.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
6089
/* Enable (ENABLE nonzero) or disable async/non-stop event reporting
   by creating or tearing down the SIGCHLD event pipe.  Returns the
   previous async state so callers can restore it.  */

static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes so sigchld_handler cannot
	 observe a half-initialized (or half-torn-down) event pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6144
/* Switch the target into (NONSTOP nonzero) or out of non-stop mode.
   Returns 0 on success, -1 if the async state could not be changed.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  /* Verify the mode actually took effect.  */
  return target_is_async_p () != (nonstop != 0) ? -1 : 0;
}
6156
cf8fd78b
PA
/* Debugging multiple processes at once is always supported on
   Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
6162
89245bc0
DB
/* Check if fork events are supported.  They are available exactly
   when the ptrace fork-tracing capability was detected.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
6170
/* Check if vfork events are supported.  Uses the same kernel
   capability check as fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
6178
94585166
DB
/* Check if exec events are supported, i.e. whether the kernel's
   PTRACE exec-tracing capability was detected.  */

static int
linux_supports_exec_events (void)
{
  return linux_supports_traceexec ();
}
6186
de0d863e
DB
/* Callback for 'find_inferior'.  Set the (possibly changed) ptrace
   options for the specified lwp.  ARGS is unused.  Always returns 0 so
   that find_inferior keeps iterating over every thread.  */

static int
reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
				   void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (!lwp->stopped)
    {
      /* Stop the lwp so we can modify its ptrace options.  The flag
	 makes the stop handler apply the new options on our behalf.  */
      lwp->must_set_ptrace_flags = 1;
      linux_stop_lwp (lwp);
    }
  else
    {
      /* Already stopped; go ahead and set the ptrace options.  */
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (thread), options);
      lwp->must_set_ptrace_flags = 0;
    }

  return 0;
}
6215
6216/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6217 ptrace flags for all inferiors. This is in case the new GDB connection
6218 doesn't support the same set of events that the previous one did. */
6219
6220static void
6221linux_handle_new_gdb_connection (void)
6222{
6223 pid_t pid;
6224
6225 /* Request that all the lwps reset their ptrace options. */
6226 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
6227}
6228
03583c20
UW
/* Return nonzero if address-space randomization can be disabled for
   new inferiors; requires personality(2) support detected at build
   time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 6238
d1feda86
YQ
/* The in-process agent is always supported on the Linux target.  */

static int
linux_supports_agent (void)
{
  return 1;
}
6244
c2d6af84
PA
6245static int
6246linux_supports_range_stepping (void)
6247{
6248 if (*the_low_target.supports_range_stepping == NULL)
6249 return 0;
6250
6251 return (*the_low_target.supports_range_stepping) ();
6252}
6253
efcbbd14
UW
6254/* Enumerate spufs IDs for process PID. */
6255static int
6256spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6257{
6258 int pos = 0;
6259 int written = 0;
6260 char path[128];
6261 DIR *dir;
6262 struct dirent *entry;
6263
6264 sprintf (path, "/proc/%ld/fd", pid);
6265 dir = opendir (path);
6266 if (!dir)
6267 return -1;
6268
6269 rewinddir (dir);
6270 while ((entry = readdir (dir)) != NULL)
6271 {
6272 struct stat st;
6273 struct statfs stfs;
6274 int fd;
6275
6276 fd = atoi (entry->d_name);
6277 if (!fd)
6278 continue;
6279
6280 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6281 if (stat (path, &st) != 0)
6282 continue;
6283 if (!S_ISDIR (st.st_mode))
6284 continue;
6285
6286 if (statfs (path, &stfs) != 0)
6287 continue;
6288 if (stfs.f_type != SPUFS_MAGIC)
6289 continue;
6290
6291 if (pos >= offset && pos + 4 <= offset + len)
6292 {
6293 *(unsigned int *)(buf + pos - offset) = fd;
6294 written += 4;
6295 }
6296 pos += 4;
6297 }
6298
6299 closedir (dir);
6300 return written;
6301}
6302
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  An empty ANNEX enumerates
   the available spufs contexts (read only); otherwise ANNEX names a
   file under /proc/PID/fd to read or write at OFFSET.  Returns bytes
   transferred, 0 on a failed seek, or -1 on error.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_thread);
  char buf[128];
  int fd = 0;
  int ret = 0;

  /* Exactly one of the two buffers must be supplied.  */
  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
6346
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Loadmap support for FDPIC/DSBT (no-MMU) targets: the kernel exposes
   the runtime segment map through a ptrace request.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Read up to LEN bytes at OFFSET of the loadmap named by ANNEX
   ("exec" or "interp") into MYADDR.  Returns bytes copied or -1 on
   error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills DATA with a pointer into our address space.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET looks unsigned here, so the `offset < 0'
     test appears vacuous — confirm CORE_ADDR signedness.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6424
1570b33e 6425static void
06e03fff 6426linux_process_qsupported (char **features, int count)
1570b33e
L
6427{
6428 if (the_low_target.process_qsupported != NULL)
06e03fff 6429 the_low_target.process_qsupported (features, count);
1570b33e
L
6430}
6431
82075af2
JS
/* Syscall catchpoints need both a low-target hook to decode the
   trap and kernel PTRACE_O_TRACESYSGOOD support.  */

static int
linux_supports_catch_syscall (void)
{
  return (the_low_target.get_syscall_trapinfo != NULL
	  && linux_supports_tracesysgood ());
}
6438
ae91f625
MK
6439static int
6440linux_get_ipa_tdesc_idx (void)
6441{
6442 if (the_low_target.get_ipa_tdesc_idx == NULL)
6443 return 0;
6444
6445 return (*the_low_target.get_ipa_tdesc_idx) ();
6446}
6447
219f2f23
PA
6448static int
6449linux_supports_tracepoints (void)
6450{
6451 if (*the_low_target.supports_tracepoints == NULL)
6452 return 0;
6453
6454 return (*the_low_target.supports_tracepoints) ();
6455}
6456
/* Return the current thread's program counter via the low target's
   get_pc hook, or 0 when the hook is unset.  */

static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}
6465
/* Set the program counter in REGCACHE to PC.  The low target must
   provide a set_pc hook; this is asserted rather than ignored.  */

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
6473
8336d594
PA
/* Return nonzero if THREAD's LWP is currently stopped.  */

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6479
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
6487
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6496
90d74c30
PA
/* Pause all threads before a memory access in non-stop mode.  Returns
   0 (success).  */

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}
6506
/* Undo linux_prepare_to_access_memory: resume the threads that were
   paused for the memory access (non-stop mode only).  */

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
6515
fa593d66
PA
/* Delegate construction of a fast tracepoint jump pad to the low
   target, forwarding all arguments unchanged.  See target.h for the
   parameter contract; the low target hook is assumed non-NULL here.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
6537
6a271cae
PA
6538static struct emit_ops *
6539linux_emit_ops (void)
6540{
6541 if (the_low_target.emit_ops != NULL)
6542 return (*the_low_target.emit_ops) ();
6543 else
6544 return NULL;
6545}
6546
405f8e94
SS
/* Return the minimum instruction length required to install a fast
   tracepoint; delegates to the low target (hook assumed non-NULL).  */

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
6552
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   nonzero on failure (1: cannot open auxv, 2: AT_PHDR/AT_PHNUM
   missing).  IS_ELF64 selects the auxv entry layout to parse.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Scan entries until both values are found (or auxv is
     exhausted).  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6618
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Reads the program headers out of inferior memory, computes the load
   relocation from PT_PHDR (nonzero for PIE executables), and applies
   it to the PT_DYNAMIC header's p_vaddr.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6692
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.
   PID and IS_ELF64 are as for get_dynamic above.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section one entry at a time until DT_NULL or a
     failed read terminates the scan.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Scratch union used to read the pointer stored at the
	     DT_MIPS_RLD_MAP(_REL) location as raw bytes.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL stores an offset relative to the dynamic
	     entry's own address rather than an absolute address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG value but keep scanning in case a
	     MIPS-specific tag appears later.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Same scratch union, 32-bit flavor.  */
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6794
6795/* Read one pointer from MEMADDR in the inferior. */
6796
6797static int
6798read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6799{
485f1ee4
PA
6800 int ret;
6801
6802 /* Go through a union so this works on either big or little endian
6803 hosts, when the inferior's pointer size is smaller than the size
6804 of CORE_ADDR. It is assumed the inferior's endianness is the
6805 same of the superior's. */
6806 union
6807 {
6808 CORE_ADDR core_addr;
6809 unsigned int ui;
6810 unsigned char uc;
6811 } addr;
6812
6813 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6814 if (ret == 0)
6815 {
6816 if (ptr_size == sizeof (CORE_ADDR))
6817 *ptr = addr.core_addr;
6818 else if (ptr_size == sizeof (unsigned int))
6819 *ptr = addr.ui;
6820 else
6821 gdb_assert_not_reached ("unhandled pointer size");
6822 }
6823 return ret;
2268b414
JK
6824}
6825
/* Field offsets into the inferior's SVR4 `struct r_debug' and
   `struct link_map', for either a 32-bit or a 64-bit inferior.
   Instances are the static tables in linux_qxfer_libraries_svr4.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6849
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX optionally carries "start=ADDR;" and/or "prev=ADDR;" arguments
   selecting where in the inferior's link map to resume the walk.
   READBUF/OFFSET/LEN describe the slice of the generated XML document
   requested by GDB.  Writing (WRITEBUF != NULL) is not supported.
   Returns the number of bytes copied into READBUF, -1 on error, or -2
   for an unsupported write request.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset. */
      0,     /* l_addr offset in link_map. */
      4,     /* l_name offset in link_map. */
      8,     /* l_ld offset in link_map. */
      12,    /* l_next offset in link_map. */
      16     /* l_prev offset in link_map. */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset. */
      8,     /* r_debug.r_map offset. */
      0,     /* l_addr offset in link_map. */
      8,     /* l_name offset in link_map. */
      16,    /* l_ld offset in link_map. */
      24,    /* l_next offset in link_map. */
      32     /* l_prev offset in link_map. */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit layouts by inspecting the inferior's main
     executable via /proc/PID/exe.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=hexaddr;" pairs out of ANNEX, recognizing only
     "start" and "prev"; unknown names are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;	/* NOTE(review): shadows the LEN parameter; intentional
		   here but easy to misread.  */

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      /* No explicit start address: locate the head of the link map
	 through r_debug, caching &_r_debug per-process.  */
      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      /* NOTE(review): the cast binds to priv->r_debug only, so
		 the addition is done in `long'; numerically equivalent
		 but worth confirming on 32-bit hosts.  */
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  /* Build the XML reply incrementally, growing DOCUMENT as needed.  */
  document = (char *) xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link map; stop on the first entry whose fields cannot all
     be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Consistency check: the entry's back-pointer must match the
	 node we arrived from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      /* NOTE(review): another deliberate shadow of LEN.  */
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = (char *) xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Serve the requested [OFFSET, OFFSET+LEN) window of the document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
7062
9accd112
MM
7063#ifdef HAVE_LINUX_BTRACE
7064
969c39fb 7065/* See to_disable_btrace target method. */
9accd112 7066
969c39fb
MM
7067static int
7068linux_low_disable_btrace (struct btrace_target_info *tinfo)
7069{
7070 enum btrace_error err;
7071
7072 err = linux_disable_btrace (tinfo);
7073 return (err == BTRACE_ERR_NONE ? 0 : -1);
7074}
7075
bc504a31 7076/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7077
7078static void
7079linux_low_encode_pt_config (struct buffer *buffer,
7080 const struct btrace_data_pt_config *config)
7081{
7082 buffer_grow_str (buffer, "<pt-config>\n");
7083
7084 switch (config->cpu.vendor)
7085 {
7086 case CV_INTEL:
7087 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7088 "model=\"%u\" stepping=\"%u\"/>\n",
7089 config->cpu.family, config->cpu.model,
7090 config->cpu.stepping);
7091 break;
7092
7093 default:
7094 break;
7095 }
7096
7097 buffer_grow_str (buffer, "</pt-config>\n");
7098}
7099
7100/* Encode a raw buffer. */
7101
7102static void
7103linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7104 unsigned int size)
7105{
7106 if (size == 0)
7107 return;
7108
7109 /* We use hex encoding - see common/rsp-low.h. */
7110 buffer_grow_str (buffer, "<raw>\n");
7111
7112 while (size-- > 0)
7113 {
7114 char elem[2];
7115
7116 elem[0] = tohex ((*data >> 4) & 0xf);
7117 elem[1] = tohex (*data++ & 0xf);
7118
7119 buffer_grow (buffer, elem, 2);
7120 }
7121
7122 buffer_grow_str (buffer, "</raw>\n");
7123}
7124
/* See to_read_btrace target method.

   Read branch trace data for TINFO according to TYPE and render it as
   XML (or an "E.*" error string) into BUFFER.  Returns 0 on success,
   -1 on failure; the btrace data is always released before return.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       enum btrace_read_type type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure to GDB as an error string in BUFFER.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      goto err;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      goto err;

    case BTRACE_FORMAT_BTS:
      /* BTS data is a vector of (begin, end) block addresses.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* PT data is the CPU configuration plus an opaque raw byte
	 stream, hex-encoded.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      goto err;
    }

  btrace_data_fini (&btrace);
  return 0;

err:
  /* Error path: release the trace data before reporting failure.  */
  btrace_data_fini (&btrace);
  return -1;
}
f4abbc16
MM
7194
7195/* See to_btrace_conf target method. */
7196
7197static int
7198linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7199 struct buffer *buffer)
7200{
7201 const struct btrace_config *conf;
7202
7203 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7204 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7205
7206 conf = linux_btrace_conf (tinfo);
7207 if (conf != NULL)
7208 {
7209 switch (conf->format)
7210 {
7211 case BTRACE_FORMAT_NONE:
7212 break;
7213
7214 case BTRACE_FORMAT_BTS:
d33501a5
MM
7215 buffer_xml_printf (buffer, "<bts");
7216 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7217 buffer_xml_printf (buffer, " />\n");
f4abbc16 7218 break;
b20a6524
MM
7219
7220 case BTRACE_FORMAT_PT:
7221 buffer_xml_printf (buffer, "<pt");
7222 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7223 buffer_xml_printf (buffer, "/>\n");
7224 break;
f4abbc16
MM
7225 }
7226 }
7227
7228 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7229 return 0;
7230}
9accd112
MM
7231#endif /* HAVE_LINUX_BTRACE */
7232
7b669087
GB
7233/* See nat/linux-nat.h. */
7234
7235ptid_t
7236current_lwp_ptid (void)
7237{
7238 return ptid_of (current_thread);
7239}
7240
dd373349
AT
7241/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7242
7243static int
7244linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7245{
7246 if (the_low_target.breakpoint_kind_from_pc != NULL)
7247 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7248 else
1652a986 7249 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
7250}
7251
7252/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7253
7254static const gdb_byte *
7255linux_sw_breakpoint_from_kind (int kind, int *size)
7256{
7257 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7258
7259 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7260}
7261
769ef81f
AT
7262/* Implementation of the target_ops method
7263 "breakpoint_kind_from_current_state". */
7264
7265static int
7266linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7267{
7268 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7269 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7270 else
7271 return linux_breakpoint_kind_from_pc (pcptr);
7272}
7273
276d4552
YQ
7274/* Default implementation of linux_target_ops method "set_pc" for
7275 32-bit pc register which is literally named "pc". */
7276
7277void
7278linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7279{
7280 uint32_t newpc = pc;
7281
7282 supply_register_by_name (regcache, "pc", &newpc);
7283}
7284
7285/* Default implementation of linux_target_ops method "get_pc" for
7286 32-bit pc register which is literally named "pc". */
7287
7288CORE_ADDR
7289linux_get_pc_32bit (struct regcache *regcache)
7290{
7291 uint32_t pc;
7292
7293 collect_register_by_name (regcache, "pc", &pc);
7294 if (debug_threads)
7295 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7296 return pc;
7297}
7298
6f69e520
YQ
7299/* Default implementation of linux_target_ops method "set_pc" for
7300 64-bit pc register which is literally named "pc". */
7301
7302void
7303linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7304{
7305 uint64_t newpc = pc;
7306
7307 supply_register_by_name (regcache, "pc", &newpc);
7308}
7309
7310/* Default implementation of linux_target_ops method "get_pc" for
7311 64-bit pc register which is literally named "pc". */
7312
7313CORE_ADDR
7314linux_get_pc_64bit (struct regcache *regcache)
7315{
7316 uint64_t pc;
7317
7318 collect_register_by_name (regcache, "pc", &pc);
7319 if (debug_threads)
7320 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7321 return pc;
7322}
7323
7324
/* The Linux implementation of the target_ops vector.  This is a
   positional aggregate initializer: entry order must match the field
   order of struct target_ops exactly, so entries may not be reordered
   or removed.  Slots the Linux port does not implement are NULL.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_post_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on uClinux no-MMU targets.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	/* NOTE(review): unimplemented slot — check struct target_ops
	   in target.h for which method this corresponds to.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
  /* Branch-trace methods, only when built with btrace support.  */
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step,
  linux_supports_catch_syscall,
  linux_get_ipa_tdesc_idx,
};
7427
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table (terminated by a negative
   size) and store the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7438
/* One-time gdbserver initialization for the Linux target: installs
   the target vector, sets up SIGCHLD handling, and performs
   architecture- and ptrace-feature initialization.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  linux_ptrace_init_warnings ();

  /* SA_RESTART keeps interrupted syscalls transparent to the rest of
     gdbserver when SIGCHLD arrives.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}