]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-low.c
Create sub classes of 'struct breakpoint'
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
618f726f 2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
276d4552 49#include <inttypes.h>
957f3f49
DE
50#ifndef ELFMAG0
51/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55#include <elf.h>
56#endif
14d2069a 57#include "nat/linux-namespaces.h"
efcbbd14
UW
58
59#ifndef SPUFS_MAGIC
60#define SPUFS_MAGIC 0x23c9b64e
61#endif
da6d8c04 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
db0dfaa0
LM
74/* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77#if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80#if defined(__mcoldfire__)
81/* These are still undefined in 3.10 kernels. */
82#define PT_TEXT_ADDR 49*4
83#define PT_DATA_ADDR 50*4
84#define PT_TEXT_END_ADDR 51*4
85/* BFIN already defines these since at least 2.6.32 kernels. */
86#elif defined(BFIN)
87#define PT_TEXT_ADDR 220
88#define PT_TEXT_END_ADDR 224
89#define PT_DATA_ADDR 228
90/* These are still undefined in 3.10 kernels. */
91#elif defined(__TMS320C6X__)
92#define PT_TEXT_ADDR (0x10000*4)
93#define PT_DATA_ADDR (0x10004*4)
94#define PT_TEXT_END_ADDR (0x10008*4)
95#endif
96#endif
97
9accd112 98#ifdef HAVE_LINUX_BTRACE
125f8a3d 99# include "nat/linux-btrace.h"
734b0e4b 100# include "btrace-common.h"
9accd112
MM
101#endif
102
8365dcf5
TJB
103#ifndef HAVE_ELF32_AUXV_T
104/* Copied from glibc's elf.h. */
105typedef struct
106{
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We use to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115} Elf32_auxv_t;
116#endif
117
118#ifndef HAVE_ELF64_AUXV_T
119/* Copied from glibc's elf.h. */
120typedef struct
121{
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We use to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130} Elf64_auxv_t;
131#endif
132
ded48a5e
YQ
133/* Does the current host support PTRACE_GETREGSET? */
134int have_ptrace_getregset = -1;
135
cff068da
GB
136/* LWP accessors. */
137
138/* See nat/linux-nat.h. */
139
140ptid_t
141ptid_of_lwp (struct lwp_info *lwp)
142{
143 return ptid_of (get_lwp_thread (lwp));
144}
145
146/* See nat/linux-nat.h. */
147
4b134ca1
GB
148void
149lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151{
152 lwp->arch_private = info;
153}
154
155/* See nat/linux-nat.h. */
156
157struct arch_lwp_info *
158lwp_arch_private_info (struct lwp_info *lwp)
159{
160 return lwp->arch_private;
161}
162
163/* See nat/linux-nat.h. */
164
cff068da
GB
165int
166lwp_is_stopped (struct lwp_info *lwp)
167{
168 return lwp->stopped;
169}
170
171/* See nat/linux-nat.h. */
172
173enum target_stop_reason
174lwp_stop_reason (struct lwp_info *lwp)
175{
176 return lwp->stop_reason;
177}
178
05044653
PA
179/* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
24a09b5f 182
05044653
PA
183struct simple_pid_list
184{
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193};
194struct simple_pid_list *stopped_pids;
195
196/* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199static void
200add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201{
8d749320 202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208}
209
210static int
211pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212{
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226}
24a09b5f 227
bde24c0a
PA
228enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240/* This is set while stop_all_lwps is in effect. */
241enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
242
243/* FIXME make into a target method? */
24a09b5f 244int using_threads = 1;
24a09b5f 245
fa593d66
PA
246/* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248static int stabilizing_threads;
249
2acc282a 250static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 251 int step, int signal, siginfo_t *info);
2bd7c093 252static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
253static void stop_all_lwps (int suspend, struct lwp_info *except);
254static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
f50bf8e5 255static void unsuspend_all_lwps (struct lwp_info *except);
fa96cb38
PA
256static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
95954743 258static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 259static struct lwp_info *add_lwp (ptid_t ptid);
94585166 260static void linux_mourn (struct process_info *process);
c35fafde 261static int linux_stopped_by_watchpoint (void);
95954743 262static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 263static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 264static void proceed_all_lwps (void);
d50171e4 265static int finish_step_over (struct lwp_info *lwp);
d50171e4 266static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
267static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268static void complete_ongoing_step_over (void);
ece66d65 269static int linux_low_ptrace_options (int attached);
ced2dffb 270static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 271
582511be
PA
272/* When the event-loop is doing a step-over, this points at the thread
273 being stepped. */
274ptid_t step_over_bkpt;
275
7d00775e 276/* True if the low target can hardware single-step. */
d50171e4
PA
277
278static int
279can_hardware_single_step (void)
280{
7d00775e
AT
281 if (the_low_target.supports_hardware_single_step != NULL)
282 return the_low_target.supports_hardware_single_step ();
283 else
284 return 0;
285}
286
287/* True if the low target can software single-step. Such targets
fa5308bd 288 implement the GET_NEXT_PCS callback. */
7d00775e
AT
289
290static int
291can_software_single_step (void)
292{
fa5308bd 293 return (the_low_target.get_next_pcs != NULL);
d50171e4
PA
294}
295
296/* True if the low target supports memory breakpoints. If so, we'll
297 have a GET_PC implementation. */
298
299static int
300supports_breakpoints (void)
301{
302 return (the_low_target.get_pc != NULL);
303}
0d62e5e8 304
fa593d66
PA
305/* Returns true if this target can support fast tracepoints. This
306 does not mean that the in-process agent has been loaded in the
307 inferior. */
308
309static int
310supports_fast_tracepoints (void)
311{
312 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
313}
314
c2d6af84
PA
315/* True if LWP is stopped in its stepping range. */
316
317static int
318lwp_in_step_range (struct lwp_info *lwp)
319{
320 CORE_ADDR pc = lwp->stop_pc;
321
322 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
323}
324
0d62e5e8
DJ
325struct pending_signals
326{
327 int signal;
32ca6d61 328 siginfo_t info;
0d62e5e8
DJ
329 struct pending_signals *prev;
330};
611cb4a5 331
bd99dc85
PA
332/* The read/write ends of the pipe registered as waitable file in the
333 event loop. */
334static int linux_event_pipe[2] = { -1, -1 };
335
336/* True if we're currently in async mode. */
337#define target_is_async_p() (linux_event_pipe[0] != -1)
338
02fc4de7 339static void send_sigstop (struct lwp_info *lwp);
fa96cb38 340static void wait_for_sigstop (void);
bd99dc85 341
d0722149
DE
342/* Return non-zero if HEADER is a 64-bit ELF file. */
343
344static int
214d508e 345elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
d0722149 346{
214d508e
L
347 if (header->e_ident[EI_MAG0] == ELFMAG0
348 && header->e_ident[EI_MAG1] == ELFMAG1
349 && header->e_ident[EI_MAG2] == ELFMAG2
350 && header->e_ident[EI_MAG3] == ELFMAG3)
351 {
352 *machine = header->e_machine;
353 return header->e_ident[EI_CLASS] == ELFCLASS64;
354
355 }
356 *machine = EM_NONE;
357 return -1;
d0722149
DE
358}
359
360/* Return non-zero if FILE is a 64-bit ELF file,
361 zero if the file is not a 64-bit ELF file,
362 and -1 if the file is not accessible or doesn't exist. */
363
be07f1a2 364static int
214d508e 365elf_64_file_p (const char *file, unsigned int *machine)
d0722149 366{
957f3f49 367 Elf64_Ehdr header;
d0722149
DE
368 int fd;
369
370 fd = open (file, O_RDONLY);
371 if (fd < 0)
372 return -1;
373
374 if (read (fd, &header, sizeof (header)) != sizeof (header))
375 {
376 close (fd);
377 return 0;
378 }
379 close (fd);
380
214d508e 381 return elf_64_header_p (&header, machine);
d0722149
DE
382}
383
be07f1a2
PA
384/* Accepts an integer PID; Returns true if the executable PID is
385 running is a 64-bit ELF file.. */
386
387int
214d508e 388linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 389{
d8d2a3ee 390 char file[PATH_MAX];
be07f1a2
PA
391
392 sprintf (file, "/proc/%d/exe", pid);
214d508e 393 return elf_64_file_p (file, machine);
be07f1a2
PA
394}
395
bd99dc85
PA
396static void
397delete_lwp (struct lwp_info *lwp)
398{
fa96cb38
PA
399 struct thread_info *thr = get_lwp_thread (lwp);
400
401 if (debug_threads)
402 debug_printf ("deleting %ld\n", lwpid_of (thr));
403
404 remove_thread (thr);
aa5ca48f 405 free (lwp->arch_private);
bd99dc85
PA
406 free (lwp);
407}
408
95954743
PA
409/* Add a process to the common process list, and set its private
410 data. */
411
412static struct process_info *
413linux_add_process (int pid, int attached)
414{
415 struct process_info *proc;
416
95954743 417 proc = add_process (pid, attached);
8d749320 418 proc->priv = XCNEW (struct process_info_private);
95954743 419
aa5ca48f 420 if (the_low_target.new_process != NULL)
fe978cb0 421 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 422
95954743
PA
423 return proc;
424}
425
582511be
PA
426static CORE_ADDR get_pc (struct lwp_info *lwp);
427
ece66d65 428/* Call the target arch_setup function on the current thread. */
94585166
DB
429
430static void
431linux_arch_setup (void)
432{
433 the_low_target.arch_setup ();
434}
435
436/* Call the target arch_setup function on THREAD. */
437
438static void
439linux_arch_setup_thread (struct thread_info *thread)
440{
441 struct thread_info *saved_thread;
442
443 saved_thread = current_thread;
444 current_thread = thread;
445
446 linux_arch_setup ();
447
448 current_thread = saved_thread;
449}
450
451/* Handle a GNU/Linux extended wait response. If we see a clone,
452 fork, or vfork event, we need to add the new LWP to our list
453 (and return 0 so as not to report the trap to higher layers).
454 If we see an exec event, we will modify ORIG_EVENT_LWP to point
455 to a new LWP representing the new program. */
0d62e5e8 456
de0d863e 457static int
94585166 458handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
24a09b5f 459{
94585166 460 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 461 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 462 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 463 struct lwp_info *new_lwp;
24a09b5f 464
65706a29
PA
465 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
466
82075af2
JS
467 /* All extended events we currently use are mid-syscall. Only
468 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
469 you have to be using PTRACE_SEIZE to get that. */
470 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
471
c269dbdb
DB
472 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
473 || (event == PTRACE_EVENT_CLONE))
24a09b5f 474 {
95954743 475 ptid_t ptid;
24a09b5f 476 unsigned long new_pid;
05044653 477 int ret, status;
24a09b5f 478
de0d863e 479 /* Get the pid of the new lwp. */
d86d4aaf 480 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 481 &new_pid);
24a09b5f
DJ
482
483 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 484 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
485 {
486 /* The new child has a pending SIGSTOP. We can't affect it until it
487 hits the SIGSTOP, but we're already attached. */
488
97438e3f 489 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
490
491 if (ret == -1)
492 perror_with_name ("waiting for new child");
493 else if (ret != new_pid)
494 warning ("wait returned unexpected PID %d", ret);
da5898ce 495 else if (!WIFSTOPPED (status))
24a09b5f
DJ
496 warning ("wait returned unexpected status 0x%x", status);
497 }
498
c269dbdb 499 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
500 {
501 struct process_info *parent_proc;
502 struct process_info *child_proc;
503 struct lwp_info *child_lwp;
bfacd19d 504 struct thread_info *child_thr;
de0d863e
DB
505 struct target_desc *tdesc;
506
507 ptid = ptid_build (new_pid, new_pid, 0);
508
509 if (debug_threads)
510 {
511 debug_printf ("HEW: Got fork event from LWP %ld, "
512 "new child is %d\n",
513 ptid_get_lwp (ptid_of (event_thr)),
514 ptid_get_pid (ptid));
515 }
516
517 /* Add the new process to the tables and clone the breakpoint
518 lists of the parent. We need to do this even if the new process
519 will be detached, since we will need the process object and the
520 breakpoints to remove any breakpoints from memory when we
521 detach, and the client side will access registers. */
522 child_proc = linux_add_process (new_pid, 0);
523 gdb_assert (child_proc != NULL);
524 child_lwp = add_lwp (ptid);
525 gdb_assert (child_lwp != NULL);
526 child_lwp->stopped = 1;
bfacd19d
DB
527 child_lwp->must_set_ptrace_flags = 1;
528 child_lwp->status_pending_p = 0;
529 child_thr = get_lwp_thread (child_lwp);
530 child_thr->last_resume_kind = resume_stop;
998d452a
PA
531 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
532
863d01bd 533 /* If we're suspending all threads, leave this one suspended
0f8288ae
YQ
534 too. If the fork/clone parent is stepping over a breakpoint,
535 all other threads have been suspended already. Leave the
536 child suspended too. */
537 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
538 || event_lwp->bp_reinsert != 0)
863d01bd
PA
539 {
540 if (debug_threads)
541 debug_printf ("HEW: leaving child suspended\n");
542 child_lwp->suspended = 1;
543 }
544
de0d863e
DB
545 parent_proc = get_thread_process (event_thr);
546 child_proc->attached = parent_proc->attached;
2e7b624b
YQ
547
548 if (event_lwp->bp_reinsert != 0
549 && can_software_single_step ()
550 && event == PTRACE_EVENT_VFORK)
551 {
552 struct thread_info *saved_thread = current_thread;
553
554 current_thread = event_thr;
555 /* If we leave reinsert breakpoints there, child will
556 hit it, so uninsert reinsert breakpoints from parent
557 (and child). Once vfork child is done, reinsert
558 them back to parent. */
559 uninsert_reinsert_breakpoints ();
560 current_thread = saved_thread;
561 }
562
de0d863e
DB
563 clone_all_breakpoints (&child_proc->breakpoints,
564 &child_proc->raw_breakpoints,
565 parent_proc->breakpoints);
566
8d749320 567 tdesc = XNEW (struct target_desc);
de0d863e
DB
568 copy_target_description (tdesc, parent_proc->tdesc);
569 child_proc->tdesc = tdesc;
de0d863e 570
3a8a0396
DB
571 /* Clone arch-specific process data. */
572 if (the_low_target.new_fork != NULL)
573 the_low_target.new_fork (parent_proc, child_proc);
574
de0d863e 575 /* Save fork info in the parent thread. */
c269dbdb
DB
576 if (event == PTRACE_EVENT_FORK)
577 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
578 else if (event == PTRACE_EVENT_VFORK)
579 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
580
de0d863e 581 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 582
de0d863e
DB
583 /* The status_pending field contains bits denoting the
584 extended event, so when the pending event is handled,
585 the handler will look at lwp->waitstatus. */
586 event_lwp->status_pending_p = 1;
587 event_lwp->status_pending = wstat;
588
8a81c5d7 589 /* If the parent thread is doing step-over with reinsert
2e7b624b
YQ
590 breakpoints, the list of reinsert breakpoints are cloned
591 from the parent's. Remove them from the child process.
592 In case of vfork, we'll reinsert them back once vforked
593 child is done. */
8a81c5d7 594 if (event_lwp->bp_reinsert != 0
2e7b624b 595 && can_software_single_step ())
8a81c5d7
YQ
596 {
597 struct thread_info *saved_thread = current_thread;
598
599 /* The child process is forked and stopped, so it is safe
600 to access its memory without stopping all other threads
601 from other processes. */
602 current_thread = child_thr;
603 delete_reinsert_breakpoints ();
604 current_thread = saved_thread;
605
606 gdb_assert (has_reinsert_breakpoints (parent_proc));
607 gdb_assert (!has_reinsert_breakpoints (child_proc));
608 }
609
de0d863e
DB
610 /* Report the event. */
611 return 0;
612 }
613
fa96cb38
PA
614 if (debug_threads)
615 debug_printf ("HEW: Got clone event "
616 "from LWP %ld, new child is LWP %ld\n",
617 lwpid_of (event_thr), new_pid);
618
d86d4aaf 619 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
b3312d80 620 new_lwp = add_lwp (ptid);
24a09b5f 621
e27d73f6
DE
622 /* Either we're going to immediately resume the new thread
623 or leave it stopped. linux_resume_one_lwp is a nop if it
624 thinks the thread is currently running, so set this first
625 before calling linux_resume_one_lwp. */
626 new_lwp->stopped = 1;
627
0f8288ae
YQ
628 /* If we're suspending all threads, leave this one suspended
629 too. If the fork/clone parent is stepping over a breakpoint,
630 all other threads have been suspended already. Leave the
631 child suspended too. */
632 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
633 || event_lwp->bp_reinsert != 0)
bde24c0a
PA
634 new_lwp->suspended = 1;
635
da5898ce
DJ
636 /* Normally we will get the pending SIGSTOP. But in some cases
637 we might get another signal delivered to the group first.
f21cc1a2 638 If we do get another signal, be sure not to lose it. */
20ba1ce6 639 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 640 {
54a0b537 641 new_lwp->stop_expected = 1;
20ba1ce6
PA
642 new_lwp->status_pending_p = 1;
643 new_lwp->status_pending = status;
da5898ce 644 }
65706a29
PA
645 else if (report_thread_events)
646 {
647 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
648 new_lwp->status_pending_p = 1;
649 new_lwp->status_pending = status;
650 }
de0d863e
DB
651
652 /* Don't report the event. */
653 return 1;
24a09b5f 654 }
c269dbdb
DB
655 else if (event == PTRACE_EVENT_VFORK_DONE)
656 {
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
658
2e7b624b
YQ
659 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
660 {
661 struct thread_info *saved_thread = current_thread;
662 struct process_info *proc = get_thread_process (event_thr);
663
664 current_thread = event_thr;
665 reinsert_reinsert_breakpoints ();
666 current_thread = saved_thread;
667
668 gdb_assert (has_reinsert_breakpoints (proc));
669 }
670
c269dbdb
DB
671 /* Report the event. */
672 return 0;
673 }
94585166
DB
674 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
675 {
676 struct process_info *proc;
82075af2 677 VEC (int) *syscalls_to_catch;
94585166
DB
678 ptid_t event_ptid;
679 pid_t event_pid;
680
681 if (debug_threads)
682 {
683 debug_printf ("HEW: Got exec event from LWP %ld\n",
684 lwpid_of (event_thr));
685 }
686
687 /* Get the event ptid. */
688 event_ptid = ptid_of (event_thr);
689 event_pid = ptid_get_pid (event_ptid);
690
82075af2 691 /* Save the syscall list from the execing process. */
94585166 692 proc = get_thread_process (event_thr);
82075af2
JS
693 syscalls_to_catch = proc->syscalls_to_catch;
694 proc->syscalls_to_catch = NULL;
695
696 /* Delete the execing process and all its threads. */
94585166
DB
697 linux_mourn (proc);
698 current_thread = NULL;
699
700 /* Create a new process/lwp/thread. */
701 proc = linux_add_process (event_pid, 0);
702 event_lwp = add_lwp (event_ptid);
703 event_thr = get_lwp_thread (event_lwp);
704 gdb_assert (current_thread == event_thr);
705 linux_arch_setup_thread (event_thr);
706
707 /* Set the event status. */
708 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
709 event_lwp->waitstatus.value.execd_pathname
710 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
711
712 /* Mark the exec status as pending. */
713 event_lwp->stopped = 1;
714 event_lwp->status_pending_p = 1;
715 event_lwp->status_pending = wstat;
716 event_thr->last_resume_kind = resume_continue;
717 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
718
82075af2
JS
719 /* Update syscall state in the new lwp, effectively mid-syscall too. */
720 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
721
722 /* Restore the list to catch. Don't rely on the client, which is free
723 to avoid sending a new list when the architecture doesn't change.
724 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
725 proc->syscalls_to_catch = syscalls_to_catch;
726
94585166
DB
727 /* Report the event. */
728 *orig_event_lwp = event_lwp;
729 return 0;
730 }
de0d863e
DB
731
732 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
733}
734
d50171e4
PA
735/* Return the PC as read from the regcache of LWP, without any
736 adjustment. */
737
738static CORE_ADDR
739get_pc (struct lwp_info *lwp)
740{
0bfdf32f 741 struct thread_info *saved_thread;
d50171e4
PA
742 struct regcache *regcache;
743 CORE_ADDR pc;
744
745 if (the_low_target.get_pc == NULL)
746 return 0;
747
0bfdf32f
GB
748 saved_thread = current_thread;
749 current_thread = get_lwp_thread (lwp);
d50171e4 750
0bfdf32f 751 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
752 pc = (*the_low_target.get_pc) (regcache);
753
754 if (debug_threads)
87ce2a04 755 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 756
0bfdf32f 757 current_thread = saved_thread;
d50171e4
PA
758 return pc;
759}
760
82075af2 761/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 762 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
763
764static void
4cc32bec 765get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
82075af2
JS
766{
767 struct thread_info *saved_thread;
768 struct regcache *regcache;
769
770 if (the_low_target.get_syscall_trapinfo == NULL)
771 {
772 /* If we cannot get the syscall trapinfo, report an unknown
4cc32bec 773 system call number. */
82075af2 774 *sysno = UNKNOWN_SYSCALL;
82075af2
JS
775 return;
776 }
777
778 saved_thread = current_thread;
779 current_thread = get_lwp_thread (lwp);
780
781 regcache = get_thread_regcache (current_thread, 1);
4cc32bec 782 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
82075af2
JS
783
784 if (debug_threads)
4cc32bec 785 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
786
787 current_thread = saved_thread;
788}
789
e7ad2f14 790static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 791
e7ad2f14
PA
792/* Called when the LWP stopped for a signal/trap. If it stopped for a
793 trap check what caused it (breakpoint, watchpoint, trace, etc.),
794 and save the result in the LWP's stop_reason field. If it stopped
795 for a breakpoint, decrement the PC if necessary on the lwp's
796 architecture. Returns true if we now have the LWP's stop PC. */
0d62e5e8 797
582511be 798static int
e7ad2f14 799save_stop_reason (struct lwp_info *lwp)
0d62e5e8 800{
582511be
PA
801 CORE_ADDR pc;
802 CORE_ADDR sw_breakpoint_pc;
803 struct thread_info *saved_thread;
3e572f71
PA
804#if USE_SIGTRAP_SIGINFO
805 siginfo_t siginfo;
806#endif
d50171e4
PA
807
808 if (the_low_target.get_pc == NULL)
809 return 0;
0d62e5e8 810
582511be
PA
811 pc = get_pc (lwp);
812 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
d50171e4 813
582511be
PA
814 /* breakpoint_at reads from the current thread. */
815 saved_thread = current_thread;
816 current_thread = get_lwp_thread (lwp);
47c0c975 817
3e572f71
PA
818#if USE_SIGTRAP_SIGINFO
819 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
820 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
821 {
822 if (siginfo.si_signo == SIGTRAP)
823 {
e7ad2f14
PA
824 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
825 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 826 {
e7ad2f14
PA
827 /* The si_code is ambiguous on this arch -- check debug
828 registers. */
829 if (!check_stopped_by_watchpoint (lwp))
830 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
831 }
832 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
833 {
834 /* If we determine the LWP stopped for a SW breakpoint,
835 trust it. Particularly don't check watchpoint
836 registers, because at least on s390, we'd find
837 stopped-by-watchpoint as long as there's a watchpoint
838 set. */
3e572f71 839 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 840 }
e7ad2f14 841 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 842 {
e7ad2f14
PA
843 /* This can indicate either a hardware breakpoint or
844 hardware watchpoint. Check debug registers. */
845 if (!check_stopped_by_watchpoint (lwp))
846 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 847 }
2bf6fb9d
PA
848 else if (siginfo.si_code == TRAP_TRACE)
849 {
e7ad2f14
PA
850 /* We may have single stepped an instruction that
851 triggered a watchpoint. In that case, on some
852 architectures (such as x86), instead of TRAP_HWBKPT,
853 si_code indicates TRAP_TRACE, and we need to check
854 the debug registers separately. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 857 }
3e572f71
PA
858 }
859 }
860#else
582511be
PA
861 /* We may have just stepped a breakpoint instruction. E.g., in
862 non-stop mode, GDB first tells the thread A to step a range, and
863 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
864 case we need to report the breakpoint PC. */
865 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
582511be 866 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
e7ad2f14
PA
867 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
868
869 if (hardware_breakpoint_inserted_here (pc))
870 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
871
872 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
873 check_stopped_by_watchpoint (lwp);
874#endif
875
876 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be
PA
877 {
878 if (debug_threads)
879 {
880 struct thread_info *thr = get_lwp_thread (lwp);
881
882 debug_printf ("CSBB: %s stopped by software breakpoint\n",
883 target_pid_to_str (ptid_of (thr)));
884 }
885
886 /* Back up the PC if necessary. */
887 if (pc != sw_breakpoint_pc)
e7ad2f14 888 {
582511be
PA
889 struct regcache *regcache
890 = get_thread_regcache (current_thread, 1);
891 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
892 }
893
e7ad2f14
PA
894 /* Update this so we record the correct stop PC below. */
895 pc = sw_breakpoint_pc;
582511be 896 }
e7ad2f14 897 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
582511be
PA
898 {
899 if (debug_threads)
900 {
901 struct thread_info *thr = get_lwp_thread (lwp);
902
903 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
904 target_pid_to_str (ptid_of (thr)));
905 }
e7ad2f14
PA
906 }
907 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
908 {
909 if (debug_threads)
910 {
911 struct thread_info *thr = get_lwp_thread (lwp);
47c0c975 912
e7ad2f14
PA
913 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
914 target_pid_to_str (ptid_of (thr)));
915 }
582511be 916 }
e7ad2f14
PA
917 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
918 {
919 if (debug_threads)
920 {
921 struct thread_info *thr = get_lwp_thread (lwp);
582511be 922
e7ad2f14
PA
923 debug_printf ("CSBB: %s stopped by trace\n",
924 target_pid_to_str (ptid_of (thr)));
925 }
926 }
927
928 lwp->stop_pc = pc;
582511be 929 current_thread = saved_thread;
e7ad2f14 930 return 1;
0d62e5e8 931}
ce3a066d 932
b3312d80 933static struct lwp_info *
95954743 934add_lwp (ptid_t ptid)
611cb4a5 935{
54a0b537 936 struct lwp_info *lwp;
0d62e5e8 937
8d749320 938 lwp = XCNEW (struct lwp_info);
00db26fa
PA
939
940 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 941
aa5ca48f 942 if (the_low_target.new_thread != NULL)
34c703da 943 the_low_target.new_thread (lwp);
aa5ca48f 944
f7667f0d 945 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 946
54a0b537 947 return lwp;
0d62e5e8 948}
611cb4a5 949
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Disable ASLR for the child if requested; restored below via
     do_cleanups after the fork.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; vfork shares the address space until
     the exec below.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: become a tracee and exec the program.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      /* Put the inferior in its own process group so terminal signals
	 aimed at it don't hit gdbserver.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try the literal path first; fall back to a PATH search only
	 when the file itself was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      /* 0177 mimics the shell's "command not runnable" exit code.  */
      _exit (0177);
    }

  /* Parent: restore our own personality/ASLR setting.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* The ptrace event-reporting options are set later, once the child
     has stopped (see linux_post_create_inferior).  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
1012
/* Implement the post_create_inferior target_ops method.  Runs after
   the new inferior has reported its first stop: finishes
   architecture setup and turns on the ptrace event-reporting options
   deferred by linux_create_inferior.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1031
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1116
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  /* Attach failed for some other reason; warn but keep
	     scanning for further threads.  */
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}
1160
500c1d85
PA
1161static void async_file_mark (void);
1162
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0; a failure to attach to the leader
   raises an error instead.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      /* Consume the initial stop of (some thread of) the process.  */
      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      /* If it stopped for something other than the expected SIGSTOP,
	 remember the event so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1232
/* State for second_thread_of_pid_p: counts threads seen that belong
   to PID.  */

struct counter
{
  int pid;	/* The thread-group id being counted.  */
  int count;	/* Number of matching threads seen so far.  */
};
1238
1239static int
1240second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1241{
9a3c8263 1242 struct counter *counter = (struct counter *) args;
95954743
PA
1243
1244 if (ptid_get_pid (entry->id) == counter->pid)
1245 {
1246 if (++counter->count > 1)
1247 return 1;
1248 }
d61ddec4 1249
da6d8c04
DJ
1250 return 0;
1251}
1252
/* Return nonzero if the thread-group PID has exactly one known
   thread left (i.e., the iteration never finds a second one).  */

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
1261
/* Kill LWP.  Sends SIGKILL first and PTRACE_KILL second; does not
   wait for the lwp to die (see kill_wait_lwp for that).  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1305
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1348
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  ARGS points to the pid of the process whose
   lwps should be killed.  Always returns 0 so iteration continues.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
1378
/* Kill process PID and all of its lwps.  Returns 0 on success, -1 if
   the process is unknown.  Non-leader lwps are killed first to work
   around the zombie-leader kernel bug (see kill_one_lwp_callback).  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1415
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver, or 0 if nothing should be
   passed on.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Respect GDB's pass/nopass signal table when we have one.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1495
/* Detach from LWP.  Cancels any pending SIGSTOP, re-delivers the
   lwp's pending signal (if passable), flushes register state, and
   issues PTRACE_DETACH.  A zombie lwp (ESRCH) is reaped instead.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1581
/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  ARGS points to the pid whose threads to detach;
   always returns 0 so the whole list is traversed.  */

static int
linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (ptid_get_pid (entry->id) == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}
1606
/* Detach from process PID and all of its lwps.  Returns 0 on
   success, -1 if the process is unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1650
/* Remove all LWPs that belong to process PROC from the lwp list.
   Callback for find_inferior; always returns 0 so every entry is
   visited.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct process_info *owner = (struct process_info *) proc;

  if (pid_of (thr) != pid_of (owner))
    return 0;

  delete_lwp (get_thread_lwp (thr));
  return 0;
}
1665
/* Forget everything about PROCESS: its thread_db state, its lwps,
   and its private data, then remove it from the process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1685
/* Wait until process PID has exited (or been killed by a signal), or
   until waitpid reports there is no such child (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually filled it in;
       testing it after a failed waitpid would read an uninitialized
       value.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1697
6ad8ae5c 1698/* Return nonzero if the given thread is still alive. */
0d62e5e8 1699static int
95954743 1700linux_thread_alive (ptid_t ptid)
0d62e5e8 1701{
95954743
PA
1702 struct lwp_info *lwp = find_lwp_pid (ptid);
1703
1704 /* We assume we always know if a thread exits. If a whole process
1705 exited but we still haven't been able to report it to GDB, we'll
1706 hold on to the last lwp of the dead process. */
1707 if (lwp != NULL)
00db26fa 1708 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1709 else
1710 return 0;
1711}
1712
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  A stale breakpoint stop is discarded and the lwp is
   re-resumed by the caller (status_pending_p_callback).  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* Only re-check breakpoint stops; other stop reasons can't go
     stale.  Threads the client asked to stop report regardless.  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the current thread; the checks below read
	 state of THREAD.  Restored before returning.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, also verify the
	 breakpoint itself is still inserted at PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1780
a681f9c9
PA
1781/* Returns true if LWP is resumed from the client's perspective. */
1782
1783static int
1784lwp_resumed (struct lwp_info *lwp)
1785{
1786 struct thread_info *thread = get_lwp_thread (lwp);
1787
1788 if (thread->last_resume_kind != resume_stop)
1789 return 1;
1790
1791 /* Did gdb send us a `vCont;t', but we haven't reported the
1792 corresponding stop to gdb yet? If so, the thread is still
1793 resumed/running from gdb's perspective. */
1794 if (thread->last_resume_kind == resume_stop
1795 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1796 return 1;
1797
1798 return 0;
1799}
1800
/* Return 1 if this lwp has an interesting status pending.  Callback
   for find_inferior; ARG is the ptid filter.  As a side effect, an
   lwp whose pending breakpoint stop went stale is re-resumed.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  /* Only threads resumed from the client's point of view can report
     events.  */
  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The pending stop went stale (e.g. its breakpoint was
	 removed); quietly resume the lwp as it was.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1826
95954743
PA
1827static int
1828same_lwp (struct inferior_list_entry *entry, void *data)
1829{
1830 ptid_t ptid = *(ptid_t *) data;
1831 int lwp;
1832
1833 if (ptid_get_lwp (ptid) != 0)
1834 lwp = ptid_get_lwp (ptid);
1835 else
1836 lwp = ptid_get_pid (ptid);
1837
1838 if (ptid_get_lwp (entry->id) == lwp)
1839 return 1;
1840
1841 return 0;
1842}
1843
1844struct lwp_info *
1845find_lwp_pid (ptid_t ptid)
1846{
d86d4aaf
DE
1847 struct inferior_list_entry *thread
1848 = find_inferior (&all_threads, same_lwp, &ptid);
1849
1850 if (thread == NULL)
1851 return NULL;
1852
1853 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1854}
1855
fa96cb38 1856/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1857
fa96cb38
PA
1858static int
1859num_lwps (int pid)
1860{
1861 struct inferior_list_entry *inf, *tmp;
1862 int count = 0;
0d62e5e8 1863
fa96cb38 1864 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1865 {
fa96cb38
PA
1866 if (ptid_get_pid (inf->id) == pid)
1867 count++;
24a09b5f 1868 }
3aee8918 1869
fa96cb38
PA
1870 return count;
1871}
d61ddec4 1872
/* The arguments passed to iterate_over_lwps, bundled so they can be
   forwarded through find_inferior's single void* parameter.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}
/* See nat/linux-nat.h.  Runs CALLBACK over every lwp matching
   FILTER; returns the lwp at which the callback returned nonzero, or
   NULL if it never did.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
1926
fa96cb38
PA
1927/* Detect zombie thread group leaders, and "exit" them. We can't reap
1928 their exits until all other threads in the group have exited. */
c3adc08c 1929
fa96cb38
PA
1930static void
1931check_zombie_leaders (void)
1932{
1933 struct process_info *proc, *tmp;
c3adc08c 1934
fa96cb38 1935 ALL_PROCESSES (proc, tmp)
c3adc08c 1936 {
fa96cb38
PA
1937 pid_t leader_pid = pid_of (proc);
1938 struct lwp_info *leader_lp;
c3adc08c 1939
fa96cb38 1940 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1941
fa96cb38
PA
1942 if (debug_threads)
1943 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1944 "num_lwps=%d, zombie=%d\n",
1945 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1946 linux_proc_pid_is_zombie (leader_pid));
1947
94585166 1948 if (leader_lp != NULL && !leader_lp->stopped
fa96cb38
PA
1949 /* Check if there are other threads in the group, as we may
1950 have raced with the inferior simply exiting. */
1951 && !last_thread_of_process_p (leader_pid)
1952 && linux_proc_pid_is_zombie (leader_pid))
1953 {
1954 /* A leader zombie can mean one of two things:
1955
1956 - It exited, and there's an exit status pending
1957 available, or only the leader exited (not the whole
1958 program). In the latter case, we can't waitpid the
1959 leader's exit status until all other threads are gone.
1960
1961 - There are 3 or more threads in the group, and a thread
1962 other than the leader exec'd. On an exec, the Linux
1963 kernel destroys all other threads (except the execing
1964 one) in the thread group, and resets the execing thread's
1965 tid to the tgid. No exit notification is sent for the
1966 execing thread -- from the ptracer's perspective, it
1967 appears as though the execing thread just vanishes.
1968 Until we reap all other threads except the leader and the
1969 execing thread, the leader will be zombie, and the
1970 execing thread will be in `D (disc sleep)'. As soon as
1971 all other threads are reaped, the execing thread changes
1972 it's tid to the tgid, and the previous (zombie) leader
1973 vanishes, giving place to the "new" leader. We could try
1974 distinguishing the exit and exec cases, by waiting once
1975 more, and seeing if something comes out, but it doesn't
1976 sound useful. The previous leader _does_ go away, and
1977 we'll re-add the new one once we see the exec event
1978 (which is just the same as what would happen if the
1979 previous leader did exit voluntarily before some other
1980 thread execs). */
c3adc08c 1981
fa96cb38
PA
1982 if (debug_threads)
1983 fprintf (stderr,
1984 "CZL: Thread group leader %d zombie "
1985 "(it exited, or another thread execd).\n",
1986 leader_pid);
c3adc08c 1987
fa96cb38 1988 delete_lwp (leader_lp);
c3adc08c
PA
1989 }
1990 }
fa96cb38 1991}
c3adc08c 1992
fa96cb38
PA
1993/* Callback for `find_inferior'. Returns the first LWP that is not
1994 stopped. ARG is a PTID filter. */
d50171e4 1995
fa96cb38
PA
1996static int
1997not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1998{
1999 struct thread_info *thr = (struct thread_info *) entry;
2000 struct lwp_info *lwp;
2001 ptid_t filter = *(ptid_t *) arg;
47c0c975 2002
fa96cb38
PA
2003 if (!ptid_match (ptid_of (thr), filter))
2004 return 0;
bd99dc85 2005
fa96cb38
PA
2006 lwp = get_thread_lwp (thr);
2007 if (!lwp->stopped)
2008 return 1;
2009
2010 return 0;
0d62e5e8 2011}
611cb4a5 2012
863d01bd
PA
2013/* Increment LWP's suspend count. */
2014
2015static void
2016lwp_suspended_inc (struct lwp_info *lwp)
2017{
2018 lwp->suspended++;
2019
2020 if (debug_threads && lwp->suspended > 4)
2021 {
2022 struct thread_info *thread = get_lwp_thread (lwp);
2023
2024 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2025 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2026 }
2027}
2028
2029/* Decrement LWP's suspend count. */
2030
2031static void
2032lwp_suspended_decr (struct lwp_info *lwp)
2033{
2034 lwp->suspended--;
2035
2036 if (lwp->suspended < 0)
2037 {
2038 struct thread_info *thread = get_lwp_thread (lwp);
2039
2040 internal_error (__FILE__, __LINE__,
2041 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2042 lwp->suspended);
2043 }
2044}
2045
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* Callers must hand us an LWP that is not otherwise suspended; the
     matching assert below checks the inc/decr pair balanced out.  */
  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* Handle tracepoint bookkeeping breakpoints at the current PC.  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
2093
fa593d66
PA
2094/* Convenience wrapper. Returns true if LWP is presently collecting a
2095 fast tracepoint. */
2096
2097static int
2098linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2099 struct fast_tpoint_collect_status *status)
2100{
2101 CORE_ADDR thread_area;
d86d4aaf 2102 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2103
2104 if (the_low_target.get_thread_area == NULL)
2105 return 0;
2106
2107 /* Get the thread area address. This is used to recognize which
2108 thread is which when tracing with the in-process agent library.
2109 We don't read anything from the address, and treat it as opaque;
2110 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2111 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
2112 return 0;
2113
2114 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2115}
2116
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.

   Returns 1 if LWP is inside a fast-tracepoint jump pad and the
   caller should keep it there (possibly single-stepping it out), 0
   otherwise.  WSTAT may be NULL, or point at LWP's wait status.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Temporarily switch the current thread, since the low target
     callbacks below operate on current_thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      /* All threads must be stopped before we can safely
		 remove the exit-jump-pad breakpoint.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
2243
2244/* Enqueue one signal in the "signals to report later when out of the
2245 jump pad" list. */
2246
2247static void
2248enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2249{
2250 struct pending_signals *p_sig;
d86d4aaf 2251 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2252
2253 if (debug_threads)
87ce2a04 2254 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2255 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2256
2257 if (debug_threads)
2258 {
2259 struct pending_signals *sig;
2260
2261 for (sig = lwp->pending_signals_to_report;
2262 sig != NULL;
2263 sig = sig->prev)
87ce2a04
DE
2264 debug_printf (" Already queued %d\n",
2265 sig->signal);
fa593d66 2266
87ce2a04 2267 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2268 }
2269
1a981360
PA
2270 /* Don't enqueue non-RT signals if they are already in the deferred
2271 queue. (SIGSTOP being the easiest signal to see ending up here
2272 twice) */
2273 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2274 {
2275 struct pending_signals *sig;
2276
2277 for (sig = lwp->pending_signals_to_report;
2278 sig != NULL;
2279 sig = sig->prev)
2280 {
2281 if (sig->signal == WSTOPSIG (*wstat))
2282 {
2283 if (debug_threads)
87ce2a04
DE
2284 debug_printf ("Not requeuing already queued non-RT signal %d"
2285 " for LWP %ld\n",
2286 sig->signal,
d86d4aaf 2287 lwpid_of (thread));
1a981360
PA
2288 return;
2289 }
2290 }
2291 }
2292
8d749320 2293 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2294 p_sig->prev = lwp->pending_signals_to_report;
2295 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2296
d86d4aaf 2297 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2298 &p_sig->info);
fa593d66
PA
2299
2300 lwp->pending_signals_to_report = p_sig;
2301}
2302
2303/* Dequeue one signal from the "signals to report later when out of
2304 the jump pad" list. */
2305
2306static int
2307dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2308{
d86d4aaf
DE
2309 struct thread_info *thread = get_lwp_thread (lwp);
2310
fa593d66
PA
2311 if (lwp->pending_signals_to_report != NULL)
2312 {
2313 struct pending_signals **p_sig;
2314
2315 p_sig = &lwp->pending_signals_to_report;
2316 while ((*p_sig)->prev != NULL)
2317 p_sig = &(*p_sig)->prev;
2318
2319 *wstat = W_STOPCODE ((*p_sig)->signal);
2320 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2321 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2322 &(*p_sig)->info);
fa593d66
PA
2323 free (*p_sig);
2324 *p_sig = NULL;
2325
2326 if (debug_threads)
87ce2a04 2327 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2328 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2329
2330 if (debug_threads)
2331 {
2332 struct pending_signals *sig;
2333
2334 for (sig = lwp->pending_signals_to_report;
2335 sig != NULL;
2336 sig = sig->prev)
87ce2a04
DE
2337 debug_printf (" Still queued %d\n",
2338 sig->signal);
fa593d66 2339
87ce2a04 2340 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2341 }
2342
2343 return 1;
2344 }
2345
2346 return 0;
2347}
2348
582511be
PA
2349/* Fetch the possibly triggered data watchpoint info and store it in
2350 CHILD.
d50171e4 2351
582511be
PA
2352 On some archs, like x86, that use debug registers to set
2353 watchpoints, it's possible that the way to know which watched
2354 address trapped, is to check the register that is used to select
2355 which address to watch. Problem is, between setting the watchpoint
2356 and reading back which data address trapped, the user may change
2357 the set of watchpoints, and, as a consequence, GDB changes the
2358 debug registers in the inferior. To avoid reading back a stale
2359 stopped-data-address when that happens, we cache in LP the fact
2360 that a watchpoint trapped, and the corresponding data address, as
2361 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2362 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2363
582511be
PA
2364static int
2365check_stopped_by_watchpoint (struct lwp_info *child)
2366{
2367 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2368 {
582511be 2369 struct thread_info *saved_thread;
d50171e4 2370
582511be
PA
2371 saved_thread = current_thread;
2372 current_thread = get_lwp_thread (child);
2373
2374 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2375 {
15c66dd6 2376 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2377
2378 if (the_low_target.stopped_data_address != NULL)
2379 child->stopped_data_address
2380 = the_low_target.stopped_data_address ();
2381 else
2382 child->stopped_data_address = 0;
d50171e4
PA
2383 }
2384
0bfdf32f 2385 current_thread = saved_thread;
d50171e4
PA
2386 }
2387
15c66dd6 2388 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2389}
2390
de0d863e
DB
2391/* Return the ptrace options that we want to try to enable. */
2392
2393static int
2394linux_low_ptrace_options (int attached)
2395{
2396 int options = 0;
2397
2398 if (!attached)
2399 options |= PTRACE_O_EXITKILL;
2400
2401 if (report_fork_events)
2402 options |= PTRACE_O_TRACEFORK;
2403
c269dbdb
DB
2404 if (report_vfork_events)
2405 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2406
94585166
DB
2407 if (report_exec_events)
2408 options |= PTRACE_O_TRACEEXEC;
2409
82075af2
JS
2410 options |= PTRACE_O_TRACESYSGOOD;
2411
de0d863e
DB
2412 return options;
2413}
2414
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.

   LWPID is the LWP waitpid reported the event for; WSTAT is the raw
   wait status.  On return non-NULL, the event is left pending on the
   LWP (status_pending_p set) for the caller to report.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_build (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      linux_arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  /* Enable the extended ptrace event-reporting options the first time
     we see this LWP stop, now that we know its flags can be set.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops come in entry/return pairs; toggle between the
	 two states on each SYSCALL_SIGTRAP.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2627
/* Return true if THREAD is doing hardware single step.  */

static int
maybe_hw_step (struct thread_info *thread)
{
  if (can_hardware_single_step ())
    return 1;

  /* No hardware support: GDBserver must have planted a reinsert
     breakpoint to implement software single step.  */
  gdb_assert (has_reinsert_breakpoints (get_thread_process (thread)));
  return 0;
}
2645
20ba1ce6
PA
2646/* Resume LWPs that are currently stopped without any pending status
2647 to report, but are resumed from the core's perspective. */
2648
2649static void
2650resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2651{
2652 struct thread_info *thread = (struct thread_info *) entry;
2653 struct lwp_info *lp = get_thread_lwp (thread);
2654
2655 if (lp->stopped
863d01bd 2656 && !lp->suspended
20ba1ce6 2657 && !lp->status_pending_p
20ba1ce6
PA
2658 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2659 {
2660 int step = thread->last_resume_kind == resume_step;
2661
2662 if (debug_threads)
2663 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2664 target_pid_to_str (ptid_of (thread)),
2665 paddress (lp->stop_pc),
2666 step);
2667
2668 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2669 }
2670}
2671
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      /* Waiting for any LWP (or any LWP of a process): pick any
	 thread with a pending status matching the filter.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  /* The LWP is mid fast-tracepoint collection; defer its
	     pending signal and let the collection finish first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* Atomically unblock signals and wait; SIGCHLD delivery wakes
	 us, then we restore the mask and start over from the top.  */
      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2868
/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  /* With no separate filter, the set of LWPs we wait on and the set
     whose events we may report are the same.  */
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2882
6bf5e0ba
PA
2883/* Count the LWP's that have had events. */
2884
2885static int
2886count_events_callback (struct inferior_list_entry *entry, void *data)
2887{
d86d4aaf 2888 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2889 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2890 int *count = (int *) data;
6bf5e0ba
PA
2891
2892 gdb_assert (count != NULL);
2893
582511be 2894 /* Count only resumed LWPs that have an event pending. */
8336d594 2895 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2896 && lp->status_pending_p)
6bf5e0ba
PA
2897 (*count)++;
2898
2899 return 0;
2900}
2901
2902/* Select the LWP (if any) that is currently being single-stepped. */
2903
2904static int
2905select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2906{
d86d4aaf
DE
2907 struct thread_info *thread = (struct thread_info *) entry;
2908 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2909
8336d594
PA
2910 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2911 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2912 && lp->status_pending_p)
2913 return 1;
2914 else
2915 return 0;
2916}
2917
b90fc188 2918/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2919
2920static int
2921select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2922{
d86d4aaf 2923 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2924 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2925 int *selector = (int *) data;
6bf5e0ba
PA
2926
2927 gdb_assert (selector != NULL);
2928
582511be 2929 /* Select only resumed LWPs that have an event pending. */
91baf43f 2930 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2931 && lp->status_pending_p)
6bf5e0ba
PA
2932 if ((*selector)-- == 0)
2933 return 1;
2934
2935 return 0;
2936}
2937
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP points at the chosen LWP (it is left untouched if no
   eventing LWP was found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_singlestep_lwp_callback,
						NULL);
      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  Random selection prevents starvation
	 of any particular eventing thread.  */

      /* First see how many events we have.  */
      find_inferior (&all_threads, count_events_callback, &num_events);
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
		      num_events, random_selector);

      event_thread
	= (struct thread_info *) find_inferior (&all_threads,
						select_event_lwp_callback,
						&random_selector);
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
3000
/* Callback for find_inferior: decrement the suspend count of an LWP.
   EXCEPT (if non-NULL) identifies the one LWP to leave alone.
   Always returns 0 so that every thread is visited.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Skip the excepted LWP, if any.  */
  if (lwp != except)
    lwp_suspended_decr (lwp);

  return 0;
}
3016
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  /* unsuspend_one_lwp skips EXCEPT itself.  */
  find_inferior (&all_threads, unsuspend_one_lwp, except);
}
3025
fa593d66
PA
3026static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3027static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3028 void *data);
3029static int lwp_running (struct inferior_list_entry *entry, void *data);
3030static ptid_t linux_wait_1 (ptid_t ptid,
3031 struct target_waitstatus *ourstatus,
3032 int target_options);
3033
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *saved_thread;
  struct thread_info *thread_stuck;

  /* If any thread can't be moved out of its jump pad at all, give
     up: stabilization is best-effort.  */
  thread_stuck
    = (struct thread_info *) find_inferior (&all_threads,
					    stuck_in_jump_pad_callback,
					    NULL);
  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_threads, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer signals that would otherwise be reported while the
	     thread is inside the jump pad; they are re-delivered
	     later (see enqueue_one_deferred_signal callers).  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the suspends taken in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck
	= (struct thread_info *) find_inferior (&all_threads,
						stuck_in_jump_pad_callback,
						NULL);
      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
3134
582511be
PA
3135/* Convenience function that is called when the kernel reports an
3136 event that is not passed out to GDB. */
3137
3138static ptid_t
3139ignore_event (struct target_waitstatus *ourstatus)
3140{
3141 /* If we got an event, there may still be others, as a single
3142 SIGCHLD can indicate more than one child stopped. This forces
3143 another target_wait call. */
3144 async_file_mark ();
3145
3146 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3147 return null_ptid;
3148}
3149
65706a29
PA
3150/* Convenience function that is called when the kernel reports an exit
3151 event. This decides whether to report the event to GDB as a
3152 process exit event, a thread exit event, or to suppress the
3153 event. */
3154
3155static ptid_t
3156filter_exit_event (struct lwp_info *event_child,
3157 struct target_waitstatus *ourstatus)
3158{
3159 struct thread_info *thread = get_lwp_thread (event_child);
3160 ptid_t ptid = ptid_of (thread);
3161
3162 if (!last_thread_of_process_p (pid_of (thread)))
3163 {
3164 if (report_thread_events)
3165 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3166 else
3167 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3168
3169 delete_lwp (event_child);
3170 }
3171 return ptid;
3172}
3173
82075af2
JS
3174/* Returns 1 if GDB is interested in any event_child syscalls. */
3175
3176static int
3177gdb_catching_syscalls_p (struct lwp_info *event_child)
3178{
3179 struct thread_info *thread = get_lwp_thread (event_child);
3180 struct process_info *proc = get_thread_process (thread);
3181
3182 return !VEC_empty (int, proc->syscalls_to_catch);
3183}
3184
3185/* Returns 1 if GDB is interested in the event_child syscall.
3186 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3187
3188static int
3189gdb_catch_this_syscall_p (struct lwp_info *event_child)
3190{
3191 int i, iter;
4cc32bec 3192 int sysno;
82075af2
JS
3193 struct thread_info *thread = get_lwp_thread (event_child);
3194 struct process_info *proc = get_thread_process (thread);
3195
3196 if (VEC_empty (int, proc->syscalls_to_catch))
3197 return 0;
3198
3199 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3200 return 1;
3201
4cc32bec 3202 get_syscall_trapinfo (event_child, &sysno);
82075af2
JS
3203 for (i = 0;
3204 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3205 i++)
3206 if (iter == sysno)
3207 return 1;
3208
3209 return 0;
3210}
3211
0d62e5e8 3212/* Wait for process, returns status. */
da6d8c04 3213
95954743
PA
3214static ptid_t
3215linux_wait_1 (ptid_t ptid,
3216 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 3217{
e5f1222d 3218 int w;
fc7238bb 3219 struct lwp_info *event_child;
bd99dc85 3220 int options;
bd99dc85 3221 int pid;
6bf5e0ba
PA
3222 int step_over_finished;
3223 int bp_explains_trap;
3224 int maybe_internal_trap;
3225 int report_to_gdb;
219f2f23 3226 int trace_event;
c2d6af84 3227 int in_step_range;
f2faf941 3228 int any_resumed;
bd99dc85 3229
87ce2a04
DE
3230 if (debug_threads)
3231 {
3232 debug_enter ();
3233 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3234 }
3235
bd99dc85
PA
3236 /* Translate generic target options into linux options. */
3237 options = __WALL;
3238 if (target_options & TARGET_WNOHANG)
3239 options |= WNOHANG;
0d62e5e8 3240
fa593d66
PA
3241 bp_explains_trap = 0;
3242 trace_event = 0;
c2d6af84 3243 in_step_range = 0;
bd99dc85
PA
3244 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3245
f2faf941
PA
3246 /* Find a resumed LWP, if any. */
3247 if (find_inferior (&all_threads,
3248 status_pending_p_callback,
3249 &minus_one_ptid) != NULL)
3250 any_resumed = 1;
3251 else if ((find_inferior (&all_threads,
3252 not_stopped_callback,
3253 &minus_one_ptid) != NULL))
3254 any_resumed = 1;
3255 else
3256 any_resumed = 0;
3257
6bf5e0ba
PA
3258 if (ptid_equal (step_over_bkpt, null_ptid))
3259 pid = linux_wait_for_event (ptid, &w, options);
3260 else
3261 {
3262 if (debug_threads)
87ce2a04
DE
3263 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3264 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
3265 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3266 }
3267
f2faf941 3268 if (pid == 0 || (pid == -1 && !any_resumed))
87ce2a04 3269 {
fa96cb38
PA
3270 gdb_assert (target_options & TARGET_WNOHANG);
3271
87ce2a04
DE
3272 if (debug_threads)
3273 {
fa96cb38
PA
3274 debug_printf ("linux_wait_1 ret = null_ptid, "
3275 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
3276 debug_exit ();
3277 }
fa96cb38
PA
3278
3279 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
3280 return null_ptid;
3281 }
fa96cb38
PA
3282 else if (pid == -1)
3283 {
3284 if (debug_threads)
3285 {
3286 debug_printf ("linux_wait_1 ret = null_ptid, "
3287 "TARGET_WAITKIND_NO_RESUMED\n");
3288 debug_exit ();
3289 }
bd99dc85 3290
fa96cb38
PA
3291 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3292 return null_ptid;
3293 }
0d62e5e8 3294
0bfdf32f 3295 event_child = get_thread_lwp (current_thread);
0d62e5e8 3296
fa96cb38
PA
3297 /* linux_wait_for_event only returns an exit status for the last
3298 child of a process. Report it. */
3299 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3300 {
fa96cb38 3301 if (WIFEXITED (w))
0d62e5e8 3302 {
fa96cb38
PA
3303 ourstatus->kind = TARGET_WAITKIND_EXITED;
3304 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 3305
fa96cb38 3306 if (debug_threads)
bd99dc85 3307 {
fa96cb38
PA
3308 debug_printf ("linux_wait_1 ret = %s, exited with "
3309 "retcode %d\n",
0bfdf32f 3310 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3311 WEXITSTATUS (w));
3312 debug_exit ();
bd99dc85 3313 }
fa96cb38
PA
3314 }
3315 else
3316 {
3317 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3318 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 3319
fa96cb38
PA
3320 if (debug_threads)
3321 {
3322 debug_printf ("linux_wait_1 ret = %s, terminated with "
3323 "signal %d\n",
0bfdf32f 3324 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3325 WTERMSIG (w));
3326 debug_exit ();
3327 }
0d62e5e8 3328 }
fa96cb38 3329
65706a29
PA
3330 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3331 return filter_exit_event (event_child, ourstatus);
3332
0bfdf32f 3333 return ptid_of (current_thread);
da6d8c04
DJ
3334 }
3335
2d97cd35
AT
3336 /* If step-over executes a breakpoint instruction, in the case of a
3337 hardware single step it means a gdb/gdbserver breakpoint had been
3338 planted on top of a permanent breakpoint, in the case of a software
3339 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3340 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3341 the breakpoint address.
3342 So in the case of the hardware single step advance the PC manually
3343 past the breakpoint and in the case of software single step advance only
3344 if it's not the reinsert_breakpoint we are hitting.
3345 This avoids that a program would keep trapping a permanent breakpoint
3346 forever. */
8090aef2 3347 if (!ptid_equal (step_over_bkpt, null_ptid)
2d97cd35
AT
3348 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3349 && (event_child->stepping
3350 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3351 {
dd373349
AT
3352 int increment_pc = 0;
3353 int breakpoint_kind = 0;
3354 CORE_ADDR stop_pc = event_child->stop_pc;
3355
769ef81f
AT
3356 breakpoint_kind =
3357 the_target->breakpoint_kind_from_current_state (&stop_pc);
dd373349 3358 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3359
3360 if (debug_threads)
3361 {
3362 debug_printf ("step-over for %s executed software breakpoint\n",
3363 target_pid_to_str (ptid_of (current_thread)));
3364 }
3365
3366 if (increment_pc != 0)
3367 {
3368 struct regcache *regcache
3369 = get_thread_regcache (current_thread, 1);
3370
3371 event_child->stop_pc += increment_pc;
3372 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3373
3374 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3375 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3376 }
3377 }
3378
6bf5e0ba
PA
3379 /* If this event was not handled before, and is not a SIGTRAP, we
3380 report it. SIGILL and SIGSEGV are also treated as traps in case
3381 a breakpoint is inserted at the current PC. If this target does
3382 not support internal breakpoints at all, we also report the
3383 SIGTRAP without further processing; it's of no concern to us. */
3384 maybe_internal_trap
3385 = (supports_breakpoints ()
3386 && (WSTOPSIG (w) == SIGTRAP
3387 || ((WSTOPSIG (w) == SIGILL
3388 || WSTOPSIG (w) == SIGSEGV)
3389 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3390
3391 if (maybe_internal_trap)
3392 {
3393 /* Handle anything that requires bookkeeping before deciding to
3394 report the event or continue waiting. */
3395
3396 /* First check if we can explain the SIGTRAP with an internal
3397 breakpoint, or if we should possibly report the event to GDB.
3398 Do this before anything that may remove or insert a
3399 breakpoint. */
3400 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3401
3402 /* We have a SIGTRAP, possibly a step-over dance has just
3403 finished. If so, tweak the state machine accordingly,
3404 reinsert breakpoints and delete any reinsert (software
3405 single-step) breakpoints. */
3406 step_over_finished = finish_step_over (event_child);
3407
3408 /* Now invoke the callbacks of any internal breakpoints there. */
3409 check_breakpoints (event_child->stop_pc);
3410
219f2f23
PA
3411 /* Handle tracepoint data collecting. This may overflow the
3412 trace buffer, and cause a tracing stop, removing
3413 breakpoints. */
3414 trace_event = handle_tracepoints (event_child);
3415
6bf5e0ba
PA
3416 if (bp_explains_trap)
3417 {
6bf5e0ba 3418 if (debug_threads)
87ce2a04 3419 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba
PA
3420 }
3421 }
3422 else
3423 {
3424 /* We have some other signal, possibly a step-over dance was in
3425 progress, and it should be cancelled too. */
3426 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3427 }
3428
3429 /* We have all the data we need. Either report the event to GDB, or
3430 resume threads and keep waiting for more. */
3431
3432 /* If we're collecting a fast tracepoint, finish the collection and
3433 move out of the jump pad before delivering a signal. See
3434 linux_stabilize_threads. */
3435
3436 if (WIFSTOPPED (w)
3437 && WSTOPSIG (w) != SIGTRAP
3438 && supports_fast_tracepoints ()
58b4daa5 3439 && agent_loaded_p ())
fa593d66
PA
3440 {
3441 if (debug_threads)
87ce2a04
DE
3442 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3443 "to defer or adjust it.\n",
0bfdf32f 3444 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3445
3446 /* Allow debugging the jump pad itself. */
0bfdf32f 3447 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3448 && maybe_move_out_of_jump_pad (event_child, &w))
3449 {
3450 enqueue_one_deferred_signal (event_child, &w);
3451
3452 if (debug_threads)
87ce2a04 3453 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3454 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3455
3456 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3457
3458 return ignore_event (ourstatus);
fa593d66
PA
3459 }
3460 }
219f2f23 3461
fa593d66
PA
3462 if (event_child->collecting_fast_tracepoint)
3463 {
3464 if (debug_threads)
87ce2a04
DE
3465 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3466 "Check if we're already there.\n",
0bfdf32f 3467 lwpid_of (current_thread),
87ce2a04 3468 event_child->collecting_fast_tracepoint);
fa593d66
PA
3469
3470 trace_event = 1;
3471
3472 event_child->collecting_fast_tracepoint
3473 = linux_fast_tracepoint_collecting (event_child, NULL);
3474
3475 if (event_child->collecting_fast_tracepoint != 1)
3476 {
3477 /* No longer need this breakpoint. */
3478 if (event_child->exit_jump_pad_bkpt != NULL)
3479 {
3480 if (debug_threads)
87ce2a04
DE
3481 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3482 "stopping all threads momentarily.\n");
fa593d66
PA
3483
3484 /* Other running threads could hit this breakpoint.
3485 We don't handle moribund locations like GDB does,
3486 instead we always pause all threads when removing
3487 breakpoints, so that any step-over or
3488 decr_pc_after_break adjustment is always taken
3489 care of while the breakpoint is still
3490 inserted. */
3491 stop_all_lwps (1, event_child);
fa593d66
PA
3492
3493 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3494 event_child->exit_jump_pad_bkpt = NULL;
3495
3496 unstop_all_lwps (1, event_child);
3497
3498 gdb_assert (event_child->suspended >= 0);
3499 }
3500 }
3501
3502 if (event_child->collecting_fast_tracepoint == 0)
3503 {
3504 if (debug_threads)
87ce2a04
DE
3505 debug_printf ("fast tracepoint finished "
3506 "collecting successfully.\n");
fa593d66
PA
3507
3508 /* We may have a deferred signal to report. */
3509 if (dequeue_one_deferred_signal (event_child, &w))
3510 {
3511 if (debug_threads)
87ce2a04 3512 debug_printf ("dequeued one signal.\n");
fa593d66 3513 }
3c11dd79 3514 else
fa593d66 3515 {
3c11dd79 3516 if (debug_threads)
87ce2a04 3517 debug_printf ("no deferred signals.\n");
fa593d66
PA
3518
3519 if (stabilizing_threads)
3520 {
3521 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3522 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3523
3524 if (debug_threads)
3525 {
3526 debug_printf ("linux_wait_1 ret = %s, stopped "
3527 "while stabilizing threads\n",
0bfdf32f 3528 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3529 debug_exit ();
3530 }
3531
0bfdf32f 3532 return ptid_of (current_thread);
fa593d66
PA
3533 }
3534 }
3535 }
6bf5e0ba
PA
3536 }
3537
e471f25b
PA
3538 /* Check whether GDB would be interested in this event. */
3539
82075af2
JS
3540 /* Check if GDB is interested in this syscall. */
3541 if (WIFSTOPPED (w)
3542 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3543 && !gdb_catch_this_syscall_p (event_child))
3544 {
3545 if (debug_threads)
3546 {
3547 debug_printf ("Ignored syscall for LWP %ld.\n",
3548 lwpid_of (current_thread));
3549 }
3550
3551 linux_resume_one_lwp (event_child, event_child->stepping,
3552 0, NULL);
3553 return ignore_event (ourstatus);
3554 }
3555
e471f25b
PA
3556 /* If GDB is not interested in this signal, don't stop other
3557 threads, and don't report it to GDB. Just resume the inferior
3558 right away. We do this for threading-related signals as well as
3559 any that GDB specifically requested we ignore. But never ignore
3560 SIGSTOP if we sent it ourselves, and do not ignore signals when
3561 stepping - they may require special handling to skip the signal
c9587f88
AT
3562 handler. Also never ignore signals that could be caused by a
3563 breakpoint. */
e471f25b 3564 if (WIFSTOPPED (w)
0bfdf32f 3565 && current_thread->last_resume_kind != resume_step
e471f25b 3566 && (
1a981360 3567#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3568 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3569 && (WSTOPSIG (w) == __SIGRTMIN
3570 || WSTOPSIG (w) == __SIGRTMIN + 1))
3571 ||
3572#endif
2ea28649 3573 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3574 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3575 && current_thread->last_resume_kind == resume_stop)
3576 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3577 {
3578 siginfo_t info, *info_p;
3579
3580 if (debug_threads)
87ce2a04 3581 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3582 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3583
0bfdf32f 3584 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3585 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3586 info_p = &info;
3587 else
3588 info_p = NULL;
863d01bd
PA
3589
3590 if (step_over_finished)
3591 {
3592 /* We cancelled this thread's step-over above. We still
3593 need to unsuspend all other LWPs, and set them back
3594 running again while the signal handler runs. */
3595 unsuspend_all_lwps (event_child);
3596
3597 /* Enqueue the pending signal info so that proceed_all_lwps
3598 doesn't lose it. */
3599 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3600
3601 proceed_all_lwps ();
3602 }
3603 else
3604 {
3605 linux_resume_one_lwp (event_child, event_child->stepping,
3606 WSTOPSIG (w), info_p);
3607 }
582511be 3608 return ignore_event (ourstatus);
e471f25b
PA
3609 }
3610
c2d6af84
PA
3611 /* Note that all addresses are always "out of the step range" when
3612 there's no range to begin with. */
3613 in_step_range = lwp_in_step_range (event_child);
3614
3615 /* If GDB wanted this thread to single step, and the thread is out
3616 of the step range, we always want to report the SIGTRAP, and let
3617 GDB handle it. Watchpoints should always be reported. So should
3618 signals we can't explain. A SIGTRAP we can't explain could be a
3619 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3620 do, we're be able to handle GDB breakpoints on top of internal
3621 breakpoints, by handling the internal breakpoint and still
3622 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3623 won't see the breakpoint hit. If we see a single-step event but
3624 the thread should be continuing, don't pass the trap to gdb.
3625 That indicates that we had previously finished a single-step but
3626 left the single-step pending -- see
3627 complete_ongoing_step_over. */
6bf5e0ba 3628 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3629 || (current_thread->last_resume_kind == resume_step
c2d6af84 3630 && !in_step_range)
15c66dd6 3631 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3632 || (!in_step_range
3633 && !bp_explains_trap
3634 && !trace_event
3635 && !step_over_finished
3636 && !(current_thread->last_resume_kind == resume_continue
3637 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3638 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3639 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3640 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3641 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3642
3643 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3644
3645 /* We found no reason GDB would want us to stop. We either hit one
3646 of our own breakpoints, or finished an internal step GDB
3647 shouldn't know about. */
3648 if (!report_to_gdb)
3649 {
3650 if (debug_threads)
3651 {
3652 if (bp_explains_trap)
87ce2a04 3653 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3654 if (step_over_finished)
87ce2a04 3655 debug_printf ("Step-over finished.\n");
219f2f23 3656 if (trace_event)
87ce2a04 3657 debug_printf ("Tracepoint event.\n");
c2d6af84 3658 if (lwp_in_step_range (event_child))
87ce2a04
DE
3659 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3660 paddress (event_child->stop_pc),
3661 paddress (event_child->step_range_start),
3662 paddress (event_child->step_range_end));
6bf5e0ba
PA
3663 }
3664
3665 /* We're not reporting this breakpoint to GDB, so apply the
3666 decr_pc_after_break adjustment to the inferior's regcache
3667 ourselves. */
3668
3669 if (the_low_target.set_pc != NULL)
3670 {
3671 struct regcache *regcache
0bfdf32f 3672 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3673 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3674 }
3675
7984d532
PA
3676 /* We may have finished stepping over a breakpoint. If so,
3677 we've stopped and suspended all LWPs momentarily except the
3678 stepping one. This is where we resume them all again. We're
3679 going to keep waiting, so use proceed, which handles stepping
3680 over the next breakpoint. */
6bf5e0ba 3681 if (debug_threads)
87ce2a04 3682 debug_printf ("proceeding all threads.\n");
7984d532
PA
3683
3684 if (step_over_finished)
3685 unsuspend_all_lwps (event_child);
3686
6bf5e0ba 3687 proceed_all_lwps ();
582511be 3688 return ignore_event (ourstatus);
6bf5e0ba
PA
3689 }
3690
3691 if (debug_threads)
3692 {
00db26fa 3693 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30
PA
3694 {
3695 char *str;
3696
3697 str = target_waitstatus_to_string (&event_child->waitstatus);
3698 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3699 lwpid_of (get_lwp_thread (event_child)), str);
3700 xfree (str);
3701 }
0bfdf32f 3702 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3703 {
3704 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3705 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3706 else if (!lwp_in_step_range (event_child))
87ce2a04 3707 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3708 }
15c66dd6 3709 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3710 debug_printf ("Stopped by watchpoint.\n");
582511be 3711 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3712 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3713 if (debug_threads)
87ce2a04 3714 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3715 }
3716
3717 /* Alright, we're going to report a stop. */
3718
582511be 3719 if (!stabilizing_threads)
6bf5e0ba
PA
3720 {
3721 /* In all-stop, stop all threads. */
582511be
PA
3722 if (!non_stop)
3723 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3724
3725 /* If we're not waiting for a specific LWP, choose an event LWP
3726 from among those that have had events. Giving equal priority
3727 to all LWPs that have had events helps prevent
3728 starvation. */
3729 if (ptid_equal (ptid, minus_one_ptid))
3730 {
3731 event_child->status_pending_p = 1;
3732 event_child->status_pending = w;
3733
3734 select_event_lwp (&event_child);
3735
0bfdf32f
GB
3736 /* current_thread and event_child must stay in sync. */
3737 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3738
6bf5e0ba
PA
3739 event_child->status_pending_p = 0;
3740 w = event_child->status_pending;
3741 }
3742
c03e6ccc 3743 if (step_over_finished)
582511be
PA
3744 {
3745 if (!non_stop)
3746 {
3747 /* If we were doing a step-over, all other threads but
3748 the stepping one had been paused in start_step_over,
3749 with their suspend counts incremented. We don't want
3750 to do a full unstop/unpause, because we're in
3751 all-stop mode (so we want threads stopped), but we
3752 still need to unsuspend the other threads, to
3753 decrement their `suspended' count back. */
3754 unsuspend_all_lwps (event_child);
3755 }
3756 else
3757 {
3758 /* If we just finished a step-over, then all threads had
3759 been momentarily paused. In all-stop, that's fine,
3760 we want threads stopped by now anyway. In non-stop,
3761 we need to re-resume threads that GDB wanted to be
3762 running. */
3763 unstop_all_lwps (1, event_child);
3764 }
3765 }
c03e6ccc 3766
fa593d66 3767 /* Stabilize threads (move out of jump pads). */
582511be
PA
3768 if (!non_stop)
3769 stabilize_threads ();
6bf5e0ba
PA
3770 }
3771 else
3772 {
3773 /* If we just finished a step-over, then all threads had been
3774 momentarily paused. In all-stop, that's fine, we want
3775 threads stopped by now anyway. In non-stop, we need to
3776 re-resume threads that GDB wanted to be running. */
3777 if (step_over_finished)
7984d532 3778 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3779 }
3780
00db26fa 3781 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3782 {
00db26fa
PA
3783 /* If the reported event is an exit, fork, vfork or exec, let
3784 GDB know. */
3785 *ourstatus = event_child->waitstatus;
de0d863e
DB
3786 /* Clear the event lwp's waitstatus since we handled it already. */
3787 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3788 }
3789 else
3790 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3791
582511be 3792 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3793 it was a software breakpoint, and the client doesn't know we can
3794 adjust the breakpoint ourselves. */
3795 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3796 && !swbreak_feature)
582511be
PA
3797 {
3798 int decr_pc = the_low_target.decr_pc_after_break;
3799
3800 if (decr_pc != 0)
3801 {
3802 struct regcache *regcache
3803 = get_thread_regcache (current_thread, 1);
3804 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3805 }
3806 }
3807
82075af2
JS
3808 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3809 {
82075af2 3810 get_syscall_trapinfo (event_child,
4cc32bec 3811 &ourstatus->value.syscall_number);
82075af2
JS
3812 ourstatus->kind = event_child->syscall_state;
3813 }
3814 else if (current_thread->last_resume_kind == resume_stop
3815 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3816 {
3817 /* A thread that has been requested to stop by GDB with vCont;t,
3818 and it stopped cleanly, so report as SIG0. The use of
3819 SIGSTOP is an implementation detail. */
a493e3e2 3820 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3821 }
0bfdf32f 3822 else if (current_thread->last_resume_kind == resume_stop
8336d594 3823 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3824 {
3825 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3826 but, it stopped for other reasons. */
2ea28649 3827 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3828 }
de0d863e 3829 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3830 {
2ea28649 3831 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3832 }
3833
d50171e4
PA
3834 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3835
bd99dc85 3836 if (debug_threads)
87ce2a04
DE
3837 {
3838 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3839 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3840 ourstatus->kind, ourstatus->value.sig);
3841 debug_exit ();
3842 }
bd99dc85 3843
65706a29
PA
3844 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3845 return filter_exit_event (event_child, ourstatus);
3846
0bfdf32f 3847 return ptid_of (current_thread);
bd99dc85
PA
3848}
3849
3850/* Get rid of any pending event in the pipe. */
3851static void
3852async_file_flush (void)
3853{
3854 int ret;
3855 char buf;
3856
3857 do
3858 ret = read (linux_event_pipe[0], &buf, 1);
3859 while (ret >= 0 || (ret == -1 && errno == EINTR));
3860}
3861
3862/* Put something in the pipe, so the event loop wakes up. */
3863static void
3864async_file_mark (void)
3865{
3866 int ret;
3867
3868 async_file_flush ();
3869
3870 do
3871 ret = write (linux_event_pipe[1], "+", 1);
3872 while (ret == 0 || (ret == -1 && errno == EINTR));
3873
3874 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3875 be awakened anyway. */
3876}
3877
95954743
PA
3878static ptid_t
3879linux_wait (ptid_t ptid,
3880 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3881{
95954743 3882 ptid_t event_ptid;
bd99dc85 3883
bd99dc85
PA
3884 /* Flush the async file first. */
3885 if (target_is_async_p ())
3886 async_file_flush ();
3887
582511be
PA
3888 do
3889 {
3890 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3891 }
3892 while ((target_options & TARGET_WNOHANG) == 0
3893 && ptid_equal (event_ptid, null_ptid)
3894 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3895
3896 /* If at least one stop was reported, there may be more. A single
3897 SIGCHLD can signal more than one child stop. */
3898 if (target_is_async_p ()
3899 && (target_options & TARGET_WNOHANG) != 0
95954743 3900 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3901 async_file_mark ();
3902
3903 return event_ptid;
da6d8c04
DJ
3904}
3905
/* Send signal SIGNO to the single LWP LWPID (not the whole thread
   group) using the tkill syscall.  Returns the syscall result.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  int result;

  errno = 0;
  result = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* If tkill fails, then we are not using nptl threads, a
	 configuration we no longer support.  */
      perror_with_name (("tkill"));
    }
  return result;
}
3923
/* Exported entry point to request that LWP stop; implemented by
   queueing a SIGSTOP for it.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3929
0d62e5e8 3930static void
02fc4de7 3931send_sigstop (struct lwp_info *lwp)
0d62e5e8 3932{
bd99dc85 3933 int pid;
0d62e5e8 3934
d86d4aaf 3935 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3936
0d62e5e8
DJ
3937 /* If we already have a pending stop signal for this process, don't
3938 send another. */
54a0b537 3939 if (lwp->stop_expected)
0d62e5e8 3940 {
ae13219e 3941 if (debug_threads)
87ce2a04 3942 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3943
0d62e5e8
DJ
3944 return;
3945 }
3946
3947 if (debug_threads)
87ce2a04 3948 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3949
d50171e4 3950 lwp->stop_expected = 1;
bd99dc85 3951 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3952}
3953
7984d532
PA
3954static int
3955send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3956{
d86d4aaf
DE
3957 struct thread_info *thread = (struct thread_info *) entry;
3958 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3959
7984d532
PA
3960 /* Ignore EXCEPT. */
3961 if (lwp == except)
3962 return 0;
3963
02fc4de7 3964 if (lwp->stopped)
7984d532 3965 return 0;
02fc4de7
PA
3966
3967 send_sigstop (lwp);
7984d532
PA
3968 return 0;
3969}
3970
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
				   void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* EXCEPT is left entirely alone: neither suspended nor stopped.  */
  if (lwp != except)
    {
      lwp_suspended_inc (lwp);
      return send_sigstop_callback (entry, except);
    }

  return 0;
}
3988
95954743
PA
3989static void
3990mark_lwp_dead (struct lwp_info *lwp, int wstat)
3991{
95954743
PA
3992 /* Store the exit status for later. */
3993 lwp->status_pending_p = 1;
3994 lwp->status_pending = wstat;
3995
00db26fa
PA
3996 /* Store in waitstatus as well, as there's nothing else to process
3997 for this event. */
3998 if (WIFEXITED (wstat))
3999 {
4000 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4001 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4002 }
4003 else if (WIFSIGNALED (wstat))
4004 {
4005 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4006 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4007 }
4008
95954743
PA
4009 /* Prevent trying to stop it. */
4010 lwp->stopped = 1;
4011
4012 /* No further stops are expected from a dead lwp. */
4013 lwp->stop_expected = 0;
4014}
4015
00db26fa
PA
4016/* Return true if LWP has exited already, and has a pending exit event
4017 to report to GDB. */
4018
4019static int
4020lwp_is_marked_dead (struct lwp_info *lwp)
4021{
4022 return (lwp->status_pending_p
4023 && (WIFEXITED (lwp->status_pending)
4024 || WIFSIGNALED (lwp->status_pending)));
4025}
4026
fa96cb38
PA
4027/* Wait for all children to stop for the SIGSTOPs we just queued. */
4028
0d62e5e8 4029static void
fa96cb38 4030wait_for_sigstop (void)
0d62e5e8 4031{
0bfdf32f 4032 struct thread_info *saved_thread;
95954743 4033 ptid_t saved_tid;
fa96cb38
PA
4034 int wstat;
4035 int ret;
0d62e5e8 4036
0bfdf32f
GB
4037 saved_thread = current_thread;
4038 if (saved_thread != NULL)
4039 saved_tid = saved_thread->entry.id;
bd99dc85 4040 else
95954743 4041 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 4042
d50171e4 4043 if (debug_threads)
fa96cb38 4044 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 4045
fa96cb38
PA
4046 /* Passing NULL_PTID as filter indicates we want all events to be
4047 left pending. Eventually this returns when there are no
4048 unwaited-for children left. */
4049 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4050 &wstat, __WALL);
4051 gdb_assert (ret == -1);
0d62e5e8 4052
0bfdf32f
GB
4053 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4054 current_thread = saved_thread;
0d62e5e8
DJ
4055 else
4056 {
4057 if (debug_threads)
87ce2a04 4058 debug_printf ("Previously current thread died.\n");
0d62e5e8 4059
f0db101d
PA
4060 /* We can't change the current inferior behind GDB's back,
4061 otherwise, a subsequent command may apply to the wrong
4062 process. */
4063 current_thread = NULL;
0d62e5e8
DJ
4064 }
4065}
4066
fa593d66
PA
4067/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4068 move it out, because we need to report the stop event to GDB. For
4069 example, if the user puts a breakpoint in the jump pad, it's
4070 because she wants to debug it. */
4071
4072static int
4073stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4074{
d86d4aaf
DE
4075 struct thread_info *thread = (struct thread_info *) entry;
4076 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4077
863d01bd
PA
4078 if (lwp->suspended != 0)
4079 {
4080 internal_error (__FILE__, __LINE__,
4081 "LWP %ld is suspended, suspended=%d\n",
4082 lwpid_of (thread), lwp->suspended);
4083 }
fa593d66
PA
4084 gdb_assert (lwp->stopped);
4085
4086 /* Allow debugging the jump pad, gdb_collect, etc.. */
4087 return (supports_fast_tracepoints ()
58b4daa5 4088 && agent_loaded_p ()
fa593d66 4089 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4090 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
4091 || thread->last_resume_kind == resume_step)
4092 && linux_fast_tracepoint_collecting (lwp, NULL));
4093}
4094
/* for_each_inferior callback: if ENTRY's LWP is stopped inside a fast
   tracepoint jump pad for an uninteresting reason, resume it so it can
   finish collecting and exit the pad; otherwise keep it suspended so
   stabilize_threads leaves it alone.  Any pending signal is deferred
   (re-queued) before resuming, so it is not lost.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  /* Callers guarantee no LWP here carries a suspend count yet.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Pass the pending status (if any) so it can be re-queued as a
     deferred signal below.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Clear the pending status and stash the signal; it will
	     be re-reported once the LWP is out of the pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4145
4146static int
4147lwp_running (struct inferior_list_entry *entry, void *data)
4148{
d86d4aaf
DE
4149 struct thread_info *thread = (struct thread_info *) entry;
4150 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4151
00db26fa 4152 if (lwp_is_marked_dead (lwp))
fa593d66
PA
4153 return 0;
4154 if (lwp->stopped)
4155 return 0;
4156 return 1;
4157}
4158
7984d532
PA
4159/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4160 If SUSPEND, then also increase the suspend count of every LWP,
4161 except EXCEPT. */
4162
0d62e5e8 4163static void
7984d532 4164stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 4165{
bde24c0a
PA
4166 /* Should not be called recursively. */
4167 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4168
87ce2a04
DE
4169 if (debug_threads)
4170 {
4171 debug_enter ();
4172 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4173 suspend ? "stop-and-suspend" : "stop",
4174 except != NULL
d86d4aaf 4175 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4176 : "none");
4177 }
4178
bde24c0a
PA
4179 stopping_threads = (suspend
4180 ? STOPPING_AND_SUSPENDING_THREADS
4181 : STOPPING_THREADS);
7984d532
PA
4182
4183 if (suspend)
d86d4aaf 4184 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 4185 else
d86d4aaf 4186 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 4187 wait_for_sigstop ();
bde24c0a 4188 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4189
4190 if (debug_threads)
4191 {
4192 debug_printf ("stop_all_lwps done, setting stopping_threads "
4193 "back to !stopping\n");
4194 debug_exit ();
4195 }
0d62e5e8
DJ
4196}
4197
863d01bd
PA
4198/* Enqueue one signal in the chain of signals which need to be
4199 delivered to this process on next resume. */
4200
4201static void
4202enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4203{
8d749320 4204 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4205
863d01bd
PA
4206 p_sig->prev = lwp->pending_signals;
4207 p_sig->signal = signal;
4208 if (info == NULL)
4209 memset (&p_sig->info, 0, sizeof (siginfo_t));
4210 else
4211 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4212 lwp->pending_signals = p_sig;
4213}
4214
fa5308bd
AT
4215/* Install breakpoints for software single stepping. */
4216
4217static void
4218install_software_single_step_breakpoints (struct lwp_info *lwp)
4219{
4220 int i;
4221 CORE_ADDR pc;
4222 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4223 VEC (CORE_ADDR) *next_pcs = NULL;
4224 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4225
4d18591b 4226 next_pcs = (*the_low_target.get_next_pcs) (regcache);
fa5308bd
AT
4227
4228 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4229 set_reinsert_breakpoint (pc);
4230
4231 do_cleanups (old_chain);
4232}
4233
7fe5e27e
AT
4234/* Single step via hardware or software single step.
4235 Return 1 if hardware single stepping, 0 if software single stepping
4236 or can't single step. */
4237
4238static int
4239single_step (struct lwp_info* lwp)
4240{
4241 int step = 0;
4242
4243 if (can_hardware_single_step ())
4244 {
4245 step = 1;
4246 }
4247 else if (can_software_single_step ())
4248 {
4249 install_software_single_step_breakpoints (lwp);
4250 step = 0;
4251 }
4252 else
4253 {
4254 if (debug_threads)
4255 debug_printf ("stepping is not implemented on this target");
4256 }
4257
4258 return step;
4259}
4260
35ac8b3e 4261/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4262 finish a fast tracepoint collect. Since signal can be delivered in
4263 the step-over, the program may go to signal handler and trap again
4264 after return from the signal handler. We can live with the spurious
4265 double traps. */
35ac8b3e
YQ
4266
4267static int
4268lwp_signal_can_be_delivered (struct lwp_info *lwp)
4269{
484b3c32 4270 return !lwp->collecting_fast_tracepoint;
35ac8b3e
YQ
4271}
4272
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal.  May throw (via
   perror_with_name) if the ptrace resume fails; see
   linux_resume_one_lwp for the non-throwing wrapper.  The statement
   order here is load-bearing: state is only cleared after the ptrace
   call succeeds.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lwp,
			    int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int fast_tp_collecting;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* 0 = not collecting; 1 = heading for the exit-jump-pad breakpoint;
     2 = single-stepping out of the jump pad (see uses below).  */
  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before the LWP can run again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  /* Sanity-check the step-over invariants; these are
	     diagnostics only, not fatal.  */
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }
  else
    {
      /* If the thread isn't doing step-over, there shouldn't be any
	 reinsert breakpoints.  */
      gdb_assert (!has_reinsert_breakpoints (proc));
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from (guarded: the tdesc may not be
     initialised yet, see the note at the top).  */
  if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = (*the_low_target.get_pc) (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest entry (tail of the prev-linked list) so
	 signals are delivered in arrival order.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Pick the resume flavor: single-step, syscall-stop tracing, or
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4485
4486/* Called when we try to resume a stopped LWP and that errors out. If
4487 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4488 or about to become), discard the error, clear any pending status
4489 the LWP may have, and return true (we'll collect the exit status
4490 soon enough). Otherwise, return false. */
4491
4492static int
4493check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4494{
4495 struct thread_info *thread = get_lwp_thread (lp);
4496
4497 /* If we get an error after resuming the LWP successfully, we'd
4498 confuse !T state for the LWP being gone. */
4499 gdb_assert (lp->stopped);
4500
4501 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4502 because even if ptrace failed with ESRCH, the tracee may be "not
4503 yet fully dead", but already refusing ptrace requests. In that
4504 case the tracee has 'R (Running)' state for a little bit
4505 (observed in Linux 3.18). See also the note on ESRCH in the
4506 ptrace(2) man page. Instead, check whether the LWP has any state
4507 other than ptrace-stopped. */
4508
4509 /* Don't assume anything if /proc/PID/status can't be read. */
4510 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4511 {
23f238d3
PA
4512 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4513 lp->status_pending_p = 0;
4514 return 1;
4515 }
4516 return 0;
4517}
4518
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  Any other error is
   re-thrown to the caller.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow the error only if the LWP turned out to be gone
	 (zombie/exiting); see check_ptrace_stopped_lwp_gone.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
4537
2bd7c093
PA
/* Bundle passed through find_inferior to linux_set_resume_request:
   the resume requests received from GDB, plus their count.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 4543
/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = (struct thread_resume_array *) arg;

  /* Find the first request in R that applies to this thread.  */
  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid_get_pid (ptid) == pid_of (thread)
	      && (ptid_is_pid (ptid)
		  || ptid_get_lwp (ptid) == -1)))
	{
	  /* A redundant stop request: keep scanning in case a later
	     entry carries a different action for this thread.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  /* Range stepping bounds (meaningful for resume_step).  */
	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
4619
20ad9378
DE
4620/* find_inferior callback for linux_resume.
4621 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4622
bd99dc85
PA
4623static int
4624resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4625{
d86d4aaf
DE
4626 struct thread_info *thread = (struct thread_info *) entry;
4627 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4628
bd99dc85
PA
4629 /* LWPs which will not be resumed are not interesting, because
4630 we might not wait for them next time through linux_wait. */
2bd7c093 4631 if (lwp->resume == NULL)
bd99dc85 4632 return 0;
64386c31 4633
582511be 4634 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4635 * (int *) flag_p = 1;
4636
4637 return 0;
4638}
4639
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Used as a find_inferior callback, so
   returning 1 stops the scan at the first such thread.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return 0;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* A thread the client wants kept stopped will not run, so nothing
     to step over.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  /* Suspended means some other thread's step-over is already holding
     this one; leave it alone.  */
  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  /* A pending status will be reported instead of resuming, so no
     step-over is started for this thread now.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return 0;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return 0;
    }

  /* breakpoint_here / gdb_breakpoint_here operate on the current
     thread, so temporarily switch to this one.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
4776
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped by either hardware or software while the breakpoint
   is temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.  Returns 1 (a step-over was
   started); finish_step_over undoes the bookkeeping later.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Second argument: suspend all LWPs except LWP itself.  */
  stop_all_lwps (1, lwp);

  /* LWP itself must not have been suspended, or we could never resume
     it for the step.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint bookkeeping below acts on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to re-insert the breakpoint once the step is
     done, then pull it (and any fast tracepoint jump) out of the
     inferior.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  /* Hardware single-step if available; otherwise single_step plants
     software reinsert breakpoints and returns 0.  */
  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4834
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was in fact pending for LWP and was
   finished, 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  /* bp_reinsert != 0 is the marker start_step_over left behind.  */
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Breakpoint insertion/deletion acts on the current thread.  */
      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
	 longer needed.  We don't have to worry about other threads
	 hitting this trap, and later not being able to explain it,
	 because we were stepping over a breakpoint, and we hold all
	 threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_reinsert_breakpoints (current_process ()));
	  delete_reinsert_breakpoints ();
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4876
863d01bd
PA
/* If there's a step over in progress, wait until all threads stop
   (that is, until the stepping thread finishes its step), and
   unsuspend all lwps.  The stepping thread ends with its status
   pending, which is processed later when we get back to processing
   events.  */

static void
complete_ongoing_step_over (void)
{
  /* step_over_bkpt != null_ptid means start_step_over is pending
     completion.  */
  if (!ptid_equal (step_over_bkpt, null_ptid))
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
					   &wstat, __WALL);
      gdb_assert (ret == -1);

      /* The stepping LWP may have exited meanwhile, in which case
	 find_lwp_pid returns NULL and we only clear the global
	 state.  */
      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4909
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ARG points at the leave_all_stopped flag computed by linux_resume.
   Always returns 0 so find_inferior visits every thread.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  /* No resume action recorded for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig = XCNEW (struct pending_signals);

	  /* Push onto the head of the pending-signals list.  */
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
5033
/* Target hook: resume the inferior per the N resume requests in
   RESUME_INFO.  Decides whether any thread may actually run, or
   whether everything must stay stopped because a status is already
   pending or a step-over is needed first.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each thread's matching resume request to its lwp
     (lwp->resume).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
5102
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.

   EXCEPT, if non-NULL, is an lwp to skip (typically the one a
   step-over was just performed on).  Always returns 0 so
   find_inferior visits all threads.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* Already reported stopped to the client; keep it that way.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  /* Decide whether to resume with a single-step or a plain continue.  */
  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));
      step = 1;
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      /* Hardware-step if possible; software-step targets rely on the
	 already-planted reinsert breakpoints.  */
      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
5201
/* find_inferior callback: drop one level of suspension from the LWP
   behind ENTRY, then hand off to proceed_one_lwp.  EXCEPT, if
   non-NULL, is left completely untouched.  */

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lp = get_thread_lwp ((struct thread_info *) entry);

  if (lp != except)
    {
      lwp_suspended_decr (lp);
      return proceed_one_lwp (entry, except);
    }

  return 0;
}
5215
5216/* When we finish a step-over, set threads running again. If there's
5217 another thread that may need a step-over, now's the time to start
5218 it. Eventually, we'll move all threads past their breakpoints. */
5219
5220static void
5221proceed_all_lwps (void)
5222{
d86d4aaf 5223 struct thread_info *need_step_over;
d50171e4
PA
5224
5225 /* If there is a thread which would otherwise be resumed, which is
5226 stopped at a breakpoint that needs stepping over, then don't
5227 resume any threads - have it step over the breakpoint with all
5228 other threads stopped, then resume all threads again. */
5229
5230 if (supports_breakpoints ())
5231 {
5232 need_step_over
d86d4aaf
DE
5233 = (struct thread_info *) find_inferior (&all_threads,
5234 need_step_over_p, NULL);
d50171e4
PA
5235
5236 if (need_step_over != NULL)
5237 {
5238 if (debug_threads)
87ce2a04
DE
5239 debug_printf ("proceed_all_lwps: found "
5240 "thread %ld needing a step-over\n",
5241 lwpid_of (need_step_over));
d50171e4 5242
d86d4aaf 5243 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
5244 return;
5245 }
5246 }
5544ad89 5247
d50171e4 5248 if (debug_threads)
87ce2a04 5249 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 5250
d86d4aaf 5251 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
5252}
5253
5254/* Stopped LWPs that the client wanted to be running, that don't have
5255 pending statuses, are set to run again, except for EXCEPT, if not
5256 NULL. This undoes a stop_all_lwps call. */
5257
5258static void
7984d532 5259unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 5260{
5544ad89
DJ
5261 if (debug_threads)
5262 {
87ce2a04 5263 debug_enter ();
d50171e4 5264 if (except)
87ce2a04 5265 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5266 lwpid_of (get_lwp_thread (except)));
5544ad89 5267 else
87ce2a04 5268 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5269 }
5270
7984d532 5271 if (unsuspend)
d86d4aaf 5272 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 5273 else
d86d4aaf 5274 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
5275
5276 if (debug_threads)
5277 {
5278 debug_printf ("unstop_all_lwps done\n");
5279 debug_exit ();
5280 }
0d62e5e8
DJ
5281}
5282
58caa3dc
DJ
5283
5284#ifdef HAVE_LINUX_REGSETS
5285
1faeff08
MR
5286#define use_linux_regsets 1
5287
030031ee
PA
5288/* Returns true if REGSET has been disabled. */
5289
5290static int
5291regset_disabled (struct regsets_info *info, struct regset_info *regset)
5292{
5293 return (info->disabled_regsets != NULL
5294 && info->disabled_regsets[regset - info->regsets]);
5295}
5296
5297/* Disable REGSET. */
5298
5299static void
5300disable_regset (struct regsets_info *info, struct regset_info *regset)
5301{
5302 int dr_offset;
5303
5304 dr_offset = regset - info->regsets;
5305 if (info->disabled_regsets == NULL)
224c3ddb 5306 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5307 info->disabled_regsets[dr_offset] = 1;
5308}
5309
58caa3dc 5310static int
3aee8918
PA
5311regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5312 struct regcache *regcache)
58caa3dc
DJ
5313{
5314 struct regset_info *regset;
e9d25b98 5315 int saw_general_regs = 0;
95954743 5316 int pid;
1570b33e 5317 struct iovec iov;
58caa3dc 5318
0bfdf32f 5319 pid = lwpid_of (current_thread);
28eef672 5320 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5321 {
1570b33e
L
5322 void *buf, *data;
5323 int nt_type, res;
58caa3dc 5324
030031ee 5325 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5326 continue;
58caa3dc 5327
bca929d3 5328 buf = xmalloc (regset->size);
1570b33e
L
5329
5330 nt_type = regset->nt_type;
5331 if (nt_type)
5332 {
5333 iov.iov_base = buf;
5334 iov.iov_len = regset->size;
5335 data = (void *) &iov;
5336 }
5337 else
5338 data = buf;
5339
dfb64f85 5340#ifndef __sparc__
f15f9948 5341 res = ptrace (regset->get_request, pid,
b8e1b30e 5342 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5343#else
1570b33e 5344 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5345#endif
58caa3dc
DJ
5346 if (res < 0)
5347 {
5348 if (errno == EIO)
5349 {
52fa2412 5350 /* If we get EIO on a regset, do not try it again for
3aee8918 5351 this process mode. */
030031ee 5352 disable_regset (regsets_info, regset);
58caa3dc 5353 }
e5a9158d
AA
5354 else if (errno == ENODATA)
5355 {
5356 /* ENODATA may be returned if the regset is currently
5357 not "active". This can happen in normal operation,
5358 so suppress the warning in this case. */
5359 }
58caa3dc
DJ
5360 else
5361 {
0d62e5e8 5362 char s[256];
95954743
PA
5363 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5364 pid);
0d62e5e8 5365 perror (s);
58caa3dc
DJ
5366 }
5367 }
098dbe61
AA
5368 else
5369 {
5370 if (regset->type == GENERAL_REGS)
5371 saw_general_regs = 1;
5372 regset->store_function (regcache, buf);
5373 }
fdeb2a12 5374 free (buf);
58caa3dc 5375 }
e9d25b98
DJ
5376 if (saw_general_regs)
5377 return 0;
5378 else
5379 return 1;
58caa3dc
DJ
5380}
5381
/* Write the regcache contents back to the current thread via every
   known regset.  Returns 0 if the general registers were among the
   regsets successfully written, 1 otherwise (caller must fall back
   to storing them individually).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with size < 0.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Read-only regsets have no fill_function; skip those too.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      /* Nonzero nt_type selects the PTRACE_GETREGSET/SETREGSET style
	 calls, which take an iovec.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC ptrace swaps the address and data arguments.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5468}
5469
1faeff08 5470#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5471
1faeff08 5472#define use_linux_regsets 0
3aee8918
PA
5473#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5474#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5475
58caa3dc 5476#endif
1faeff08
MR
5477
5478/* Return 1 if register REGNO is supported by one of the regset ptrace
5479 calls or 0 if it has to be transferred individually. */
5480
5481static int
3aee8918 5482linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5483{
5484 unsigned char mask = 1 << (regno % 8);
5485 size_t index = regno / 8;
5486
5487 return (use_linux_regsets
3aee8918
PA
5488 && (regs_info->regset_bitmap == NULL
5489 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5490}
5491
58caa3dc 5492#ifdef HAVE_LINUX_USRREGS
1faeff08 5493
5b3da067 5494static int
3aee8918 5495register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5496{
5497 int addr;
5498
3aee8918 5499 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5500 error ("Invalid register number %d.", regnum);
5501
3aee8918 5502 addr = usrregs->regmap[regnum];
1faeff08
MR
5503
5504 return addr;
5505}
5506
/* Fetch one register, REGNO, from the current thread's ptrace user
   area into REGCACHE.  Silently does nothing for registers that are
   out of range, unfetchable, or have no user-area offset.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  /* -1 in the regmap marks a register with no user-area slot.  */
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  /* Read the register one transfer word at a time.  PTRACE_PEEKUSER
     returns the data in the ptrace return value, so errors can only
     be detected through errno.  */
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the backend post-process the raw bytes if it needs to
     (e.g. for registers stored in a non-standard layout).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5550
/* Store one register, REGNO, from REGCACHE into the current thread's
   ptrace user area.  Silently does nothing for registers that are out
   of range, unstorable, or have no user-area offset.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  /* -1 in the regmap marks a register with no user-area slot.  */
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; the tail padding is zeroed below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  /* Let the backend pre-process the bytes if it needs to
     (e.g. for registers stored in a non-standard layout).  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  /* Write the register one transfer word at a time.  */
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5605
5606/* Fetch all registers, or just one, from the child process.
5607 If REGNO is -1, do this for all registers, skipping any that are
5608 assumed to have been retrieved by regsets_fetch_inferior_registers,
5609 unless ALL is non-zero.
5610 Otherwise, REGNO specifies which register (so we can save time). */
5611static void
3aee8918
PA
5612usr_fetch_inferior_registers (const struct regs_info *regs_info,
5613 struct regcache *regcache, int regno, int all)
1faeff08 5614{
3aee8918
PA
5615 struct usrregs_info *usr = regs_info->usrregs;
5616
1faeff08
MR
5617 if (regno == -1)
5618 {
3aee8918
PA
5619 for (regno = 0; regno < usr->num_regs; regno++)
5620 if (all || !linux_register_in_regsets (regs_info, regno))
5621 fetch_register (usr, regcache, regno);
1faeff08
MR
5622 }
5623 else
3aee8918 5624 fetch_register (usr, regcache, regno);
1faeff08
MR
5625}
5626
5627/* Store our register values back into the inferior.
5628 If REGNO is -1, do this for all registers, skipping any that are
5629 assumed to have been saved by regsets_store_inferior_registers,
5630 unless ALL is non-zero.
5631 Otherwise, REGNO specifies which register (so we can save time). */
5632static void
3aee8918
PA
5633usr_store_inferior_registers (const struct regs_info *regs_info,
5634 struct regcache *regcache, int regno, int all)
1faeff08 5635{
3aee8918
PA
5636 struct usrregs_info *usr = regs_info->usrregs;
5637
1faeff08
MR
5638 if (regno == -1)
5639 {
3aee8918
PA
5640 for (regno = 0; regno < usr->num_regs; regno++)
5641 if (all || !linux_register_in_regsets (regs_info, regno))
5642 store_register (usr, regcache, regno);
1faeff08
MR
5643 }
5644 else
3aee8918 5645 store_register (usr, regcache, regno);
1faeff08
MR
5646}
5647
5648#else /* !HAVE_LINUX_USRREGS */
5649
3aee8918
PA
5650#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5651#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 5652
58caa3dc 5653#endif
1faeff08
MR
5654
5655
5b3da067 5656static void
1faeff08
MR
5657linux_fetch_registers (struct regcache *regcache, int regno)
5658{
5659 int use_regsets;
5660 int all = 0;
3aee8918 5661 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5662
5663 if (regno == -1)
5664 {
3aee8918
PA
5665 if (the_low_target.fetch_register != NULL
5666 && regs_info->usrregs != NULL)
5667 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
c14dfd32
PA
5668 (*the_low_target.fetch_register) (regcache, regno);
5669
3aee8918
PA
5670 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5671 if (regs_info->usrregs != NULL)
5672 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5673 }
5674 else
5675 {
c14dfd32
PA
5676 if (the_low_target.fetch_register != NULL
5677 && (*the_low_target.fetch_register) (regcache, regno))
5678 return;
5679
3aee8918 5680 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5681 if (use_regsets)
3aee8918
PA
5682 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5683 regcache);
5684 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5685 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5686 }
58caa3dc
DJ
5687}
5688
5b3da067 5689static void
442ea881 5690linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 5691{
1faeff08
MR
5692 int use_regsets;
5693 int all = 0;
3aee8918 5694 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5695
5696 if (regno == -1)
5697 {
3aee8918
PA
5698 all = regsets_store_inferior_registers (regs_info->regsets_info,
5699 regcache);
5700 if (regs_info->usrregs != NULL)
5701 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5702 }
5703 else
5704 {
3aee8918 5705 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5706 if (use_regsets)
3aee8918
PA
5707 all = regsets_store_inferior_registers (regs_info->regsets_info,
5708 regcache);
5709 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5710 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5711 }
58caa3dc
DJ
5712}
5713
da6d8c04 5714
da6d8c04
DJ
5715/* Copy LEN bytes from inferior's memory starting at MEMADDR
5716 to debugger memory starting at MYADDR. */
5717
c3e735a6 5718static int
f450004a 5719linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04 5720{
0bfdf32f 5721 int pid = lwpid_of (current_thread);
4934b29e
MR
5722 register PTRACE_XFER_TYPE *buffer;
5723 register CORE_ADDR addr;
5724 register int count;
5725 char filename[64];
da6d8c04 5726 register int i;
4934b29e 5727 int ret;
fd462a61 5728 int fd;
fd462a61
DJ
5729
5730 /* Try using /proc. Don't bother for one word. */
5731 if (len >= 3 * sizeof (long))
5732 {
4934b29e
MR
5733 int bytes;
5734
fd462a61
DJ
5735 /* We could keep this file open and cache it - possibly one per
5736 thread. That requires some juggling, but is even faster. */
95954743 5737 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
5738 fd = open (filename, O_RDONLY | O_LARGEFILE);
5739 if (fd == -1)
5740 goto no_proc;
5741
5742 /* If pread64 is available, use it. It's faster if the kernel
5743 supports it (only one syscall), and it's 64-bit safe even on
5744 32-bit platforms (for instance, SPARC debugging a SPARC64
5745 application). */
5746#ifdef HAVE_PREAD64
4934b29e 5747 bytes = pread64 (fd, myaddr, len, memaddr);
fd462a61 5748#else
4934b29e
MR
5749 bytes = -1;
5750 if (lseek (fd, memaddr, SEEK_SET) != -1)
5751 bytes = read (fd, myaddr, len);
fd462a61 5752#endif
fd462a61
DJ
5753
5754 close (fd);
4934b29e
MR
5755 if (bytes == len)
5756 return 0;
5757
5758 /* Some data was read, we'll try to get the rest with ptrace. */
5759 if (bytes > 0)
5760 {
5761 memaddr += bytes;
5762 myaddr += bytes;
5763 len -= bytes;
5764 }
fd462a61 5765 }
da6d8c04 5766
fd462a61 5767 no_proc:
4934b29e
MR
5768 /* Round starting address down to longword boundary. */
5769 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5770 /* Round ending address up; get number of longwords that makes. */
5771 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5772 / sizeof (PTRACE_XFER_TYPE));
5773 /* Allocate buffer of that many longwords. */
8d749320 5774 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
4934b29e 5775
da6d8c04 5776 /* Read all the longwords */
4934b29e 5777 errno = 0;
da6d8c04
DJ
5778 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5779 {
14ce3065
DE
5780 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5781 about coercing an 8 byte integer to a 4 byte pointer. */
5782 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5783 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5784 (PTRACE_TYPE_ARG4) 0);
c3e735a6 5785 if (errno)
4934b29e 5786 break;
da6d8c04 5787 }
4934b29e 5788 ret = errno;
da6d8c04
DJ
5789
5790 /* Copy appropriate bytes out of the buffer. */
8d409d16
MR
5791 if (i > 0)
5792 {
5793 i *= sizeof (PTRACE_XFER_TYPE);
5794 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5795 memcpy (myaddr,
5796 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5797 i < len ? i : len);
5798 }
c3e735a6 5799
4934b29e 5800 return ret;
da6d8c04
DJ
5801}
5802
93ae6fdc
PA
5803/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5804 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5805 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5806
ce3a066d 5807static int
f450004a 5808linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5809{
5810 register int i;
5811 /* Round starting address down to longword boundary. */
5812 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5813 /* Round ending address up; get number of longwords that makes. */
5814 register int count
493e2a69
MS
5815 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5816 / sizeof (PTRACE_XFER_TYPE);
5817
da6d8c04 5818 /* Allocate buffer of that many longwords. */
8d749320 5819 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5820
0bfdf32f 5821 int pid = lwpid_of (current_thread);
da6d8c04 5822
f0ae6fc3
PA
5823 if (len == 0)
5824 {
5825 /* Zero length write always succeeds. */
5826 return 0;
5827 }
5828
0d62e5e8
DJ
5829 if (debug_threads)
5830 {
58d6951d 5831 /* Dump up to four bytes. */
bf47e248
PA
5832 char str[4 * 2 + 1];
5833 char *p = str;
5834 int dump = len < 4 ? len : 4;
5835
5836 for (i = 0; i < dump; i++)
5837 {
5838 sprintf (p, "%02x", myaddr[i]);
5839 p += 2;
5840 }
5841 *p = '\0';
5842
5843 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5844 str, (long) memaddr, pid);
0d62e5e8
DJ
5845 }
5846
da6d8c04
DJ
5847 /* Fill start and end extra bytes of buffer with existing memory data. */
5848
93ae6fdc 5849 errno = 0;
14ce3065
DE
5850 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5851 about coercing an 8 byte integer to a 4 byte pointer. */
5852 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5853 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5854 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5855 if (errno)
5856 return errno;
da6d8c04
DJ
5857
5858 if (count > 1)
5859 {
93ae6fdc 5860 errno = 0;
da6d8c04 5861 buffer[count - 1]
95954743 5862 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5863 /* Coerce to a uintptr_t first to avoid potential gcc warning
5864 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5865 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5866 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5867 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5868 if (errno)
5869 return errno;
da6d8c04
DJ
5870 }
5871
93ae6fdc 5872 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5873
493e2a69
MS
5874 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5875 myaddr, len);
da6d8c04
DJ
5876
5877 /* Write the entire buffer. */
5878
5879 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5880 {
5881 errno = 0;
14ce3065
DE
5882 ptrace (PTRACE_POKETEXT, pid,
5883 /* Coerce to a uintptr_t first to avoid potential gcc warning
5884 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5885 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5886 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5887 if (errno)
5888 return errno;
5889 }
5890
5891 return 0;
5892}
2f2893d9
DJ
5893
/* Hook libthread_db into the current process so thread-related
   symbols can be resolved.  A no-op when thread_db support is
   compiled out, or when it is already initialized for this
   process.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already set up for this process?  Nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5906
/* Interrupt the inferior, as if the user had typed ^C on its
   controlling terminal: deliver SIGINT to the whole process group.  */
static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5916
aa691b87
RM
5917/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5918 to debugger memory starting at MYADDR. */
5919
5920static int
f450004a 5921linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5922{
5923 char filename[PATH_MAX];
5924 int fd, n;
0bfdf32f 5925 int pid = lwpid_of (current_thread);
aa691b87 5926
6cebaf6e 5927 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5928
5929 fd = open (filename, O_RDONLY);
5930 if (fd < 0)
5931 return -1;
5932
5933 if (offset != (CORE_ADDR) 0
5934 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5935 n = -1;
5936 else
5937 n = read (fd, myaddr, len);
5938
5939 close (fd);
5940
5941 return n;
5942}
5943
d993e290
PA
5944/* These breakpoint and watchpoint related wrapper functions simply
5945 pass on the function call if the target has registered a
5946 corresponding function. */
e013ee27
OF
5947
5948static int
802e8e6d
PA
5949linux_supports_z_point_type (char z_type)
5950{
5951 return (the_low_target.supports_z_point_type != NULL
5952 && the_low_target.supports_z_point_type (z_type));
5953}
5954
5955static int
5956linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5957 int size, struct raw_breakpoint *bp)
e013ee27 5958{
c8f4bfdd
YQ
5959 if (type == raw_bkpt_type_sw)
5960 return insert_memory_breakpoint (bp);
5961 else if (the_low_target.insert_point != NULL)
802e8e6d 5962 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5963 else
5964 /* Unsupported (see target.h). */
5965 return 1;
5966}
5967
5968static int
802e8e6d
PA
5969linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5970 int size, struct raw_breakpoint *bp)
e013ee27 5971{
c8f4bfdd
YQ
5972 if (type == raw_bkpt_type_sw)
5973 return remove_memory_breakpoint (bp);
5974 else if (the_low_target.remove_point != NULL)
802e8e6d 5975 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5976 else
5977 /* Unsupported (see target.h). */
5978 return 1;
5979}
5980
3e572f71
PA
5981/* Implement the to_stopped_by_sw_breakpoint target_ops
5982 method. */
5983
5984static int
5985linux_stopped_by_sw_breakpoint (void)
5986{
5987 struct lwp_info *lwp = get_thread_lwp (current_thread);
5988
5989 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5990}
5991
5992/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5993 method. */
5994
5995static int
5996linux_supports_stopped_by_sw_breakpoint (void)
5997{
5998 return USE_SIGTRAP_SIGINFO;
5999}
6000
6001/* Implement the to_stopped_by_hw_breakpoint target_ops
6002 method. */
6003
6004static int
6005linux_stopped_by_hw_breakpoint (void)
6006{
6007 struct lwp_info *lwp = get_thread_lwp (current_thread);
6008
6009 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6010}
6011
6012/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6013 method. */
6014
6015static int
6016linux_supports_stopped_by_hw_breakpoint (void)
6017{
6018 return USE_SIGTRAP_SIGINFO;
6019}
6020
/* Implement the supports_hardware_single_step target_ops method.
   Simply forwards to the architecture-level capability check.  */

static int
linux_supports_hardware_single_step (void)
{
  return can_hardware_single_step ();
}
6028
7d00775e
AT
/* Report whether this target can single-step in software (by planting
   breakpoints at the next instruction(s)).  */
static int
linux_supports_software_single_step (void)
{
  return can_software_single_step ();
}
6034
e013ee27
OF
6035static int
6036linux_stopped_by_watchpoint (void)
6037{
0bfdf32f 6038 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 6039
15c66dd6 6040 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
6041}
6042
6043static CORE_ADDR
6044linux_stopped_data_address (void)
6045{
0bfdf32f 6046 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
6047
6048 return lwp->stopped_data_address;
e013ee27
OF
6049}
6050
db0dfaa0
LM
6051#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6052 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6053 && defined(PT_TEXT_END_ADDR)
6054
6055/* This is only used for targets that define PT_TEXT_ADDR,
6056 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6057 the target has different ways of acquiring this information, like
6058 loadmaps. */
52fb6437
NS
6059
6060/* Under uClinux, programs are loaded at non-zero offsets, which we need
6061 to tell gdb about. */
6062
6063static int
6064linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6065{
52fb6437 6066 unsigned long text, text_end, data;
62828379 6067 int pid = lwpid_of (current_thread);
52fb6437
NS
6068
6069 errno = 0;
6070
b8e1b30e
LM
6071 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6072 (PTRACE_TYPE_ARG4) 0);
6073 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6074 (PTRACE_TYPE_ARG4) 0);
6075 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6076 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
6077
6078 if (errno == 0)
6079 {
6080 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
6081 used by gdb) are relative to the beginning of the program,
6082 with the data segment immediately following the text segment.
6083 However, the actual runtime layout in memory may put the data
6084 somewhere else, so when we send gdb a data base-address, we
6085 use the real data base address and subtract the compile-time
6086 data base-address from it (which is just the length of the
6087 text segment). BSS immediately follows data in both
6088 cases. */
52fb6437
NS
6089 *text_p = text;
6090 *data_p = data - (text_end - text);
1b3f6016 6091
52fb6437
NS
6092 return 1;
6093 }
52fb6437
NS
6094 return 0;
6095}
6096#endif
6097
07e059b5
VP
6098static int
6099linux_qxfer_osdata (const char *annex,
1b3f6016
PA
6100 unsigned char *readbuf, unsigned const char *writebuf,
6101 CORE_ADDR offset, int len)
07e059b5 6102{
d26e3629 6103 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
6104}
6105
d0722149
DE
6106/* Convert a native/host siginfo object, into/from the siginfo in the
6107 layout of the inferiors' architecture. */
6108
6109static void
8adce034 6110siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
6111{
6112 int done = 0;
6113
6114 if (the_low_target.siginfo_fixup != NULL)
6115 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6116
6117 /* If there was no callback, or the callback didn't do anything,
6118 then just do a straight memcpy. */
6119 if (!done)
6120 {
6121 if (direction == 1)
a5362b9a 6122 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6123 else
a5362b9a 6124 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6125 }
6126}
6127
4aa995e1
PA
6128static int
6129linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6130 unsigned const char *writebuf, CORE_ADDR offset, int len)
6131{
d0722149 6132 int pid;
a5362b9a 6133 siginfo_t siginfo;
8adce034 6134 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 6135
0bfdf32f 6136 if (current_thread == NULL)
4aa995e1
PA
6137 return -1;
6138
0bfdf32f 6139 pid = lwpid_of (current_thread);
4aa995e1
PA
6140
6141 if (debug_threads)
87ce2a04
DE
6142 debug_printf ("%s siginfo for lwp %d.\n",
6143 readbuf != NULL ? "Reading" : "Writing",
6144 pid);
4aa995e1 6145
0adea5f7 6146 if (offset >= sizeof (siginfo))
4aa995e1
PA
6147 return -1;
6148
b8e1b30e 6149 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6150 return -1;
6151
d0722149
DE
6152 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6153 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6154 inferior with a 64-bit GDBSERVER should look the same as debugging it
6155 with a 32-bit GDBSERVER, we need to convert it. */
6156 siginfo_fixup (&siginfo, inf_siginfo, 0);
6157
4aa995e1
PA
6158 if (offset + len > sizeof (siginfo))
6159 len = sizeof (siginfo) - offset;
6160
6161 if (readbuf != NULL)
d0722149 6162 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
6163 else
6164 {
d0722149
DE
6165 memcpy (inf_siginfo + offset, writebuf, len);
6166
6167 /* Convert back to ptrace layout before flushing it out. */
6168 siginfo_fixup (&siginfo, inf_siginfo, 1);
6169
b8e1b30e 6170 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6171 return -1;
6172 }
6173
6174 return len;
6175}
6176
bd99dc85
PA
6177/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6178 so we notice when children change state; as the handler for the
6179 sigsuspend in my_waitpid. */
6180
6181static void
6182sigchld_handler (int signo)
6183{
6184 int old_errno = errno;
6185
6186 if (debug_threads)
e581f2b4
PA
6187 {
6188 do
6189 {
6190 /* fprintf is not async-signal-safe, so call write
6191 directly. */
6192 if (write (2, "sigchld_handler\n",
6193 sizeof ("sigchld_handler\n") - 1) < 0)
6194 break; /* just ignore */
6195 } while (0);
6196 }
bd99dc85
PA
6197
6198 if (target_is_async_p ())
6199 async_file_mark (); /* trigger a linux_wait */
6200
6201 errno = old_errno;
6202}
6203
/* Non-stop mode is always available on Linux.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
6209
6210static int
6211linux_async (int enable)
6212{
7089dca4 6213 int previous = target_is_async_p ();
bd99dc85 6214
8336d594 6215 if (debug_threads)
87ce2a04
DE
6216 debug_printf ("linux_async (%d), previous=%d\n",
6217 enable, previous);
8336d594 6218
bd99dc85
PA
6219 if (previous != enable)
6220 {
6221 sigset_t mask;
6222 sigemptyset (&mask);
6223 sigaddset (&mask, SIGCHLD);
6224
6225 sigprocmask (SIG_BLOCK, &mask, NULL);
6226
6227 if (enable)
6228 {
6229 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
6230 {
6231 linux_event_pipe[0] = -1;
6232 linux_event_pipe[1] = -1;
6233 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6234
6235 warning ("creating event pipe failed.");
6236 return previous;
6237 }
bd99dc85
PA
6238
6239 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6240 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6241
6242 /* Register the event loop handler. */
6243 add_file_handler (linux_event_pipe[0],
6244 handle_target_event, NULL);
6245
6246 /* Always trigger a linux_wait. */
6247 async_file_mark ();
6248 }
6249 else
6250 {
6251 delete_file_handler (linux_event_pipe[0]);
6252
6253 close (linux_event_pipe[0]);
6254 close (linux_event_pipe[1]);
6255 linux_event_pipe[0] = -1;
6256 linux_event_pipe[1] = -1;
6257 }
6258
6259 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6260 }
6261
6262 return previous;
6263}
6264
/* Enter or leave non-stop mode by (un)registering with the event
   loop.  Returns 0 on success, -1 if the async transition did not
   take effect.  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
6276
cf8fd78b
PA
/* Multi-process debugging is always available on Linux.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
6282
89245bc0
DB
/* Check if fork events are supported.  They are exactly when the
   kernel's PTRACE_O_TRACEFORK support is usable.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
6290
/* Check if vfork events are supported.  Same kernel capability as
   fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
6298
94585166
DB
/* Check if exec events are supported.  Requires usable
   PTRACE_O_TRACEEXEC support in the kernel.  */

static int
linux_supports_exec_events (void)
{
  return linux_supports_traceexec ();
}
6306
de0d863e
DB
6307/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6308 options for the specified lwp. */
6309
6310static int
6311reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6312 void *args)
6313{
6314 struct thread_info *thread = (struct thread_info *) entry;
6315 struct lwp_info *lwp = get_thread_lwp (thread);
6316
6317 if (!lwp->stopped)
6318 {
6319 /* Stop the lwp so we can modify its ptrace options. */
6320 lwp->must_set_ptrace_flags = 1;
6321 linux_stop_lwp (lwp);
6322 }
6323 else
6324 {
6325 /* Already stopped; go ahead and set the ptrace options. */
6326 struct process_info *proc = find_process_pid (pid_of (thread));
6327 int options = linux_low_ptrace_options (proc->attached);
6328
6329 linux_enable_event_reporting (lwpid_of (thread), options);
6330 lwp->must_set_ptrace_flags = 0;
6331 }
6332
6333 return 0;
6334}
6335
6336/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6337 ptrace flags for all inferiors. This is in case the new GDB connection
6338 doesn't support the same set of events that the previous one did. */
6339
6340static void
6341linux_handle_new_gdb_connection (void)
6342{
6343 pid_t pid;
6344
6345 /* Request that all the lwps reset their ptrace options. */
6346 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
6347}
6348
03583c20
UW
/* Address-space randomization can be disabled only when personality(2)
   support was detected at configure time.  */
static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 6358
d1feda86
YQ
/* The in-process agent is always supported on Linux.  */
static int
linux_supports_agent (void)
{
  return 1;
}
6364
c2d6af84
PA
6365static int
6366linux_supports_range_stepping (void)
6367{
6368 if (*the_low_target.supports_range_stepping == NULL)
6369 return 0;
6370
6371 return (*the_low_target.supports_range_stepping) ();
6372}
6373
efcbbd14
UW
6374/* Enumerate spufs IDs for process PID. */
6375static int
6376spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6377{
6378 int pos = 0;
6379 int written = 0;
6380 char path[128];
6381 DIR *dir;
6382 struct dirent *entry;
6383
6384 sprintf (path, "/proc/%ld/fd", pid);
6385 dir = opendir (path);
6386 if (!dir)
6387 return -1;
6388
6389 rewinddir (dir);
6390 while ((entry = readdir (dir)) != NULL)
6391 {
6392 struct stat st;
6393 struct statfs stfs;
6394 int fd;
6395
6396 fd = atoi (entry->d_name);
6397 if (!fd)
6398 continue;
6399
6400 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6401 if (stat (path, &st) != 0)
6402 continue;
6403 if (!S_ISDIR (st.st_mode))
6404 continue;
6405
6406 if (statfs (path, &stfs) != 0)
6407 continue;
6408 if (stfs.f_type != SPUFS_MAGIC)
6409 continue;
6410
6411 if (pos >= offset && pos + 4 <= offset + len)
6412 {
6413 *(unsigned int *)(buf + pos - offset) = fd;
6414 written += 4;
6415 }
6416 pos += 4;
6417 }
6418
6419 closedir (dir);
6420 return written;
6421}
6422
6423/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6424 object type, using the /proc file system. */
6425static int
6426linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6427 unsigned const char *writebuf,
6428 CORE_ADDR offset, int len)
6429{
0bfdf32f 6430 long pid = lwpid_of (current_thread);
efcbbd14
UW
6431 char buf[128];
6432 int fd = 0;
6433 int ret = 0;
6434
6435 if (!writebuf && !readbuf)
6436 return -1;
6437
6438 if (!*annex)
6439 {
6440 if (!readbuf)
6441 return -1;
6442 else
6443 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6444 }
6445
6446 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6447 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6448 if (fd <= 0)
6449 return -1;
6450
6451 if (offset != 0
6452 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6453 {
6454 close (fd);
6455 return 0;
6456 }
6457
6458 if (writebuf)
6459 ret = write (fd, writebuf, (size_t) len);
6460 else
6461 ret = read (fd, readbuf, (size_t) len);
6462
6463 close (fd);
6464 return ret;
6465}
6466
723b724b 6467#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6468struct target_loadseg
6469{
6470 /* Core address to which the segment is mapped. */
6471 Elf32_Addr addr;
6472 /* VMA recorded in the program header. */
6473 Elf32_Addr p_vaddr;
6474 /* Size of this segment in memory. */
6475 Elf32_Word p_memsz;
6476};
6477
723b724b 6478# if defined PT_GETDSBT
78d85199
YQ
6479struct target_loadmap
6480{
6481 /* Protocol version number, must be zero. */
6482 Elf32_Word version;
6483 /* Pointer to the DSBT table, its size, and the DSBT index. */
6484 unsigned *dsbt_table;
6485 unsigned dsbt_size, dsbt_index;
6486 /* Number of segments in this map. */
6487 Elf32_Word nsegs;
6488 /* The actual memory map. */
6489 struct target_loadseg segs[/*nsegs*/];
6490};
723b724b
MF
6491# define LINUX_LOADMAP PT_GETDSBT
6492# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6493# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6494# else
6495struct target_loadmap
6496{
6497 /* Protocol version number, must be zero. */
6498 Elf32_Half version;
6499 /* Number of segments in this map. */
6500 Elf32_Half nsegs;
6501 /* The actual memory map. */
6502 struct target_loadseg segs[/*nsegs*/];
6503};
6504# define LINUX_LOADMAP PTRACE_GETFDPIC
6505# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6506# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6507# endif
78d85199 6508
78d85199
YQ
6509static int
6510linux_read_loadmap (const char *annex, CORE_ADDR offset,
6511 unsigned char *myaddr, unsigned int len)
6512{
0bfdf32f 6513 int pid = lwpid_of (current_thread);
78d85199
YQ
6514 int addr = -1;
6515 struct target_loadmap *data = NULL;
6516 unsigned int actual_length, copy_length;
6517
6518 if (strcmp (annex, "exec") == 0)
723b724b 6519 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6520 else if (strcmp (annex, "interp") == 0)
723b724b 6521 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6522 else
6523 return -1;
6524
723b724b 6525 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6526 return -1;
6527
6528 if (data == NULL)
6529 return -1;
6530
6531 actual_length = sizeof (struct target_loadmap)
6532 + sizeof (struct target_loadseg) * data->nsegs;
6533
6534 if (offset < 0 || offset > actual_length)
6535 return -1;
6536
6537 copy_length = actual_length - offset < len ? actual_length - offset : len;
6538 memcpy (myaddr, (char *) data + offset, copy_length);
6539 return copy_length;
6540}
723b724b
MF
6541#else
6542# define linux_read_loadmap NULL
6543#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6544
1570b33e 6545static void
06e03fff 6546linux_process_qsupported (char **features, int count)
1570b33e
L
6547{
6548 if (the_low_target.process_qsupported != NULL)
06e03fff 6549 the_low_target.process_qsupported (features, count);
1570b33e
L
6550}
6551
82075af2
JS
6552static int
6553linux_supports_catch_syscall (void)
6554{
6555 return (the_low_target.get_syscall_trapinfo != NULL
6556 && linux_supports_tracesysgood ());
6557}
6558
ae91f625
MK
6559static int
6560linux_get_ipa_tdesc_idx (void)
6561{
6562 if (the_low_target.get_ipa_tdesc_idx == NULL)
6563 return 0;
6564
6565 return (*the_low_target.get_ipa_tdesc_idx) ();
6566}
6567
219f2f23
PA
6568static int
6569linux_supports_tracepoints (void)
6570{
6571 if (*the_low_target.supports_tracepoints == NULL)
6572 return 0;
6573
6574 return (*the_low_target.supports_tracepoints) ();
6575}
6576
6577static CORE_ADDR
6578linux_read_pc (struct regcache *regcache)
6579{
6580 if (the_low_target.get_pc == NULL)
6581 return 0;
6582
6583 return (*the_low_target.get_pc) (regcache);
6584}
6585
6586static void
6587linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6588{
6589 gdb_assert (the_low_target.set_pc != NULL);
6590
6591 (*the_low_target.set_pc) (regcache, pc);
6592}
6593
8336d594
PA
6594static int
6595linux_thread_stopped (struct thread_info *thread)
6596{
6597 return get_thread_lwp (thread)->stopped;
6598}
6599
6600/* This exposes stop-all-threads functionality to other modules. */
6601
6602static void
7984d532 6603linux_pause_all (int freeze)
8336d594 6604{
7984d532
PA
6605 stop_all_lwps (freeze, NULL);
6606}
6607
6608/* This exposes unstop-all-threads functionality to other gdbserver
6609 modules. */
6610
6611static void
6612linux_unpause_all (int unfreeze)
6613{
6614 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6615}
6616
90d74c30
PA
6617static int
6618linux_prepare_to_access_memory (void)
6619{
6620 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6621 running LWP. */
6622 if (non_stop)
6623 linux_pause_all (1);
6624 return 0;
6625}
6626
6627static void
0146f85b 6628linux_done_accessing_memory (void)
90d74c30
PA
6629{
6630 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6631 running LWP. */
6632 if (non_stop)
6633 linux_unpause_all (1);
6634}
6635
fa593d66
PA
6636static int
6637linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6638 CORE_ADDR collector,
6639 CORE_ADDR lockaddr,
6640 ULONGEST orig_size,
6641 CORE_ADDR *jump_entry,
405f8e94
SS
6642 CORE_ADDR *trampoline,
6643 ULONGEST *trampoline_size,
fa593d66
PA
6644 unsigned char *jjump_pad_insn,
6645 ULONGEST *jjump_pad_insn_size,
6646 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
6647 CORE_ADDR *adjusted_insn_addr_end,
6648 char *err)
fa593d66
PA
6649{
6650 return (*the_low_target.install_fast_tracepoint_jump_pad)
6651 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
6652 jump_entry, trampoline, trampoline_size,
6653 jjump_pad_insn, jjump_pad_insn_size,
6654 adjusted_insn_addr, adjusted_insn_addr_end,
6655 err);
fa593d66
PA
6656}
6657
6a271cae
PA
6658static struct emit_ops *
6659linux_emit_ops (void)
6660{
6661 if (the_low_target.emit_ops != NULL)
6662 return (*the_low_target.emit_ops) ();
6663 else
6664 return NULL;
6665}
6666
405f8e94
SS
6667static int
6668linux_get_min_fast_tracepoint_insn_len (void)
6669{
6670 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6671}
6672
2268b414
JK
6673/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6674
6675static int
6676get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6677 CORE_ADDR *phdr_memaddr, int *num_phdr)
6678{
6679 char filename[PATH_MAX];
6680 int fd;
6681 const int auxv_size = is_elf64
6682 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6683 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6684
6685 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6686
6687 fd = open (filename, O_RDONLY);
6688 if (fd < 0)
6689 return 1;
6690
6691 *phdr_memaddr = 0;
6692 *num_phdr = 0;
6693 while (read (fd, buf, auxv_size) == auxv_size
6694 && (*phdr_memaddr == 0 || *num_phdr == 0))
6695 {
6696 if (is_elf64)
6697 {
6698 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6699
6700 switch (aux->a_type)
6701 {
6702 case AT_PHDR:
6703 *phdr_memaddr = aux->a_un.a_val;
6704 break;
6705 case AT_PHNUM:
6706 *num_phdr = aux->a_un.a_val;
6707 break;
6708 }
6709 }
6710 else
6711 {
6712 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6713
6714 switch (aux->a_type)
6715 {
6716 case AT_PHDR:
6717 *phdr_memaddr = aux->a_un.a_val;
6718 break;
6719 case AT_PHNUM:
6720 *num_phdr = aux->a_un.a_val;
6721 break;
6722 }
6723 }
6724 }
6725
6726 close (fd);
6727
6728 if (*phdr_memaddr == 0 || *num_phdr == 0)
6729 {
6730 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6731 "phdr_memaddr = %ld, phdr_num = %d",
6732 (long) *phdr_memaddr, *num_phdr);
6733 return 2;
6734 }
6735
6736 return 0;
6737}
6738
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.

   PID is the inferior's pid; IS_ELF64 selects between the Elf64 and
   Elf32 program-header layouts.  The program headers are located via
   AT_PHDR/AT_PHNUM from /proc/PID/auxv and read out of inferior
   memory; the load bias (non-zero for PIE) is recovered from the
   PT_PHDR entry before the PT_DYNAMIC virtual address is relocated.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  /* Basic sanity check; also bounds the alloca below.  */
  gdb_assert (num_phdr < 100);
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived as the difference between where the
     program headers actually sit in memory (AT_PHDR) and the virtual
     address PT_PHDR says they should be at.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  /* No PT_DYNAMIC segment found.  */
  return 0;
}
6812
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.

   The dynamic section is scanned entry by entry out of inferior memory.
   The two branches below are structurally identical; they differ only in
   the Elf64/Elf32 types used to decode each Elf*_Dyn entry.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Buffer for the pointer DT_MIPS_RLD_MAP(_REL) points at;
	     read as raw bytes, interpreted as an Elf64_Xword.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* d_val holds the absolute address of the rld_map pointer.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* d_val is relative to the dynamic section's own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG value but keep scanning in case
	     a MIPS rld-map entry appears later.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* 32-bit counterpart of the union above.  */
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6914
6915/* Read one pointer from MEMADDR in the inferior. */
6916
6917static int
6918read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6919{
485f1ee4
PA
6920 int ret;
6921
6922 /* Go through a union so this works on either big or little endian
6923 hosts, when the inferior's pointer size is smaller than the size
6924 of CORE_ADDR. It is assumed the inferior's endianness is the
6925 same of the superior's. */
6926 union
6927 {
6928 CORE_ADDR core_addr;
6929 unsigned int ui;
6930 unsigned char uc;
6931 } addr;
6932
6933 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6934 if (ret == 0)
6935 {
6936 if (ptr_size == sizeof (CORE_ADDR))
6937 *ptr = addr.core_addr;
6938 else if (ptr_size == sizeof (unsigned int))
6939 *ptr = addr.ui;
6940 else
6941 gdb_assert_not_reached ("unhandled pointer size");
6942 }
6943 return ret;
2268b414
JK
6944}
6945
/* Byte offsets of the fields of the inferior's "struct r_debug" and
   "struct link_map" (see glibc's <link.h>).  Described as offsets so a
   single code path can decode both the 32-bit and 64-bit layouts; see
   lmo_32bit_offsets/lmo_64bit_offsets in linux_qxfer_libraries_svr4.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6969
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX optionally carries "start=ADDR;prev=ADDR;" pairs letting GDB
   resume the link-map walk mid-list.  The reply is an XML
   <library-list-svr4> document built into a heap buffer, of which the
   [OFFSET, OFFSET+LEN) window is copied into READBUF.  Returns the
   number of bytes copied, -1 on error, -2 if a write was requested
   (writing is not supported).  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Inferior-side layouts of r_debug and link_map; see
     struct link_map_offsets above.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;		/* Current size of DOCUMENT.  */
  char *p;			/* Write cursor into DOCUMENT.  */
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;		/* Has `<library-list-svr4' been closed?  */

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit offsets from the inferior executable's ELF class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=hexaddr;" pairs from ANNEX; only "start" and "prev"
     are recognized, anything else is skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No explicit start: find the head of the link map via r_debug.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = (char *) xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link map, emitting one <library> element per entry.  The
     loop stops on a NULL l_next or on any failed inferior read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Consistency check: the entry's back-pointer must match the
	 node we came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = (char *) xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Clamp [OFFSET, OFFSET+LEN) to the document and copy it out.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
7182
9accd112
MM
7183#ifdef HAVE_LINUX_BTRACE
7184
969c39fb 7185/* See to_disable_btrace target method. */
9accd112 7186
969c39fb
MM
7187static int
7188linux_low_disable_btrace (struct btrace_target_info *tinfo)
7189{
7190 enum btrace_error err;
7191
7192 err = linux_disable_btrace (tinfo);
7193 return (err == BTRACE_ERR_NONE ? 0 : -1);
7194}
7195
bc504a31 7196/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7197
7198static void
7199linux_low_encode_pt_config (struct buffer *buffer,
7200 const struct btrace_data_pt_config *config)
7201{
7202 buffer_grow_str (buffer, "<pt-config>\n");
7203
7204 switch (config->cpu.vendor)
7205 {
7206 case CV_INTEL:
7207 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7208 "model=\"%u\" stepping=\"%u\"/>\n",
7209 config->cpu.family, config->cpu.model,
7210 config->cpu.stepping);
7211 break;
7212
7213 default:
7214 break;
7215 }
7216
7217 buffer_grow_str (buffer, "</pt-config>\n");
7218}
7219
7220/* Encode a raw buffer. */
7221
7222static void
7223linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7224 unsigned int size)
7225{
7226 if (size == 0)
7227 return;
7228
7229 /* We use hex encoding - see common/rsp-low.h. */
7230 buffer_grow_str (buffer, "<raw>\n");
7231
7232 while (size-- > 0)
7233 {
7234 char elem[2];
7235
7236 elem[0] = tohex ((*data >> 4) & 0xf);
7237 elem[1] = tohex (*data++ & 0xf);
7238
7239 buffer_grow (buffer, elem, 2);
7240 }
7241
7242 buffer_grow_str (buffer, "</raw>\n");
7243}
7244
969c39fb
MM
/* See to_read_btrace target method.

   Read branch-trace data of kind TYPE for the thread described by
   TINFO and render it as an XML <btrace> document into BUFFER.
   Returns 0 on success; on failure an "E.<reason>" string is placed
   in BUFFER and -1 is returned.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       enum btrace_read_type type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  /* BTRACE owns allocated trace data; released via btrace_data_fini on
     every exit path (goto-cleanup below).  */
  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      goto err;
    }

  /* Encode according to the trace format actually delivered.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      goto err;

    case BTRACE_FORMAT_BTS:
      /* Branch Trace Store: a list of begin/end address blocks.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* Intel Processor Trace: CPU config plus the raw trace bytes.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      goto err;
    }

  btrace_data_fini (&btrace);
  return 0;

err:
  btrace_data_fini (&btrace);
  return -1;
}
f4abbc16
MM
7314
7315/* See to_btrace_conf target method. */
7316
7317static int
7318linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7319 struct buffer *buffer)
7320{
7321 const struct btrace_config *conf;
7322
7323 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7324 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7325
7326 conf = linux_btrace_conf (tinfo);
7327 if (conf != NULL)
7328 {
7329 switch (conf->format)
7330 {
7331 case BTRACE_FORMAT_NONE:
7332 break;
7333
7334 case BTRACE_FORMAT_BTS:
d33501a5
MM
7335 buffer_xml_printf (buffer, "<bts");
7336 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7337 buffer_xml_printf (buffer, " />\n");
f4abbc16 7338 break;
b20a6524
MM
7339
7340 case BTRACE_FORMAT_PT:
7341 buffer_xml_printf (buffer, "<pt");
7342 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7343 buffer_xml_printf (buffer, "/>\n");
7344 break;
f4abbc16
MM
7345 }
7346 }
7347
7348 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7349 return 0;
7350}
9accd112
MM
7351#endif /* HAVE_LINUX_BTRACE */
7352
7b669087
GB
7353/* See nat/linux-nat.h. */
7354
7355ptid_t
7356current_lwp_ptid (void)
7357{
7358 return ptid_of (current_thread);
7359}
7360
dd373349
AT
7361/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7362
7363static int
7364linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7365{
7366 if (the_low_target.breakpoint_kind_from_pc != NULL)
7367 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7368 else
1652a986 7369 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
7370}
7371
7372/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7373
7374static const gdb_byte *
7375linux_sw_breakpoint_from_kind (int kind, int *size)
7376{
7377 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7378
7379 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7380}
7381
769ef81f
AT
7382/* Implementation of the target_ops method
7383 "breakpoint_kind_from_current_state". */
7384
7385static int
7386linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7387{
7388 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7389 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7390 else
7391 return linux_breakpoint_kind_from_pc (pcptr);
7392}
7393
276d4552
YQ
7394/* Default implementation of linux_target_ops method "set_pc" for
7395 32-bit pc register which is literally named "pc". */
7396
7397void
7398linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7399{
7400 uint32_t newpc = pc;
7401
7402 supply_register_by_name (regcache, "pc", &newpc);
7403}
7404
7405/* Default implementation of linux_target_ops method "get_pc" for
7406 32-bit pc register which is literally named "pc". */
7407
7408CORE_ADDR
7409linux_get_pc_32bit (struct regcache *regcache)
7410{
7411 uint32_t pc;
7412
7413 collect_register_by_name (regcache, "pc", &pc);
7414 if (debug_threads)
7415 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7416 return pc;
7417}
7418
6f69e520
YQ
7419/* Default implementation of linux_target_ops method "set_pc" for
7420 64-bit pc register which is literally named "pc". */
7421
7422void
7423linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7424{
7425 uint64_t newpc = pc;
7426
7427 supply_register_by_name (regcache, "pc", &newpc);
7428}
7429
7430/* Default implementation of linux_target_ops method "get_pc" for
7431 64-bit pc register which is literally named "pc". */
7432
7433CORE_ADDR
7434linux_get_pc_64bit (struct regcache *regcache)
7435{
7436 uint64_t pc;
7437
7438 collect_register_by_name (regcache, "pc", &pc);
7439 if (debug_threads)
7440 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7441 return pc;
7442}
7443
7444
ce3a066d
DJ
/* The Linux target_ops vector.  NOTE: this is a positional initializer
   — entry order must exactly match the member order of struct
   target_ops in target.h; do not reorder or insert entries without
   updating that struct.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_post_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets: only meaningful on uClinux/no-MMU configurations
     that expose the text/data load addresses via ptrace.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NULL slot: unimplemented hook in this position (per target.h
     ordering this appears to be get_tib_address, a Windows-only
     method — verify against target.h).  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
  /* Branch-trace methods: five slots, all NULL when btrace support
     was not compiled in.  */
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step,
  linux_supports_catch_syscall,
  linux_get_ipa_tdesc_idx,
};
7547
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO->regsets — the array is terminated by an
   entry with a negative size — and record the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7558
da6d8c04
DJ
/* One-time initialization of the Linux low target: register the
   target vector, install the SIGCHLD handler, and run arch- and
   ptrace-specific setup.  Called once at gdbserver startup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  /* Emit any warnings about ptrace limitations (e.g. yama hardening)
     up front.  */
  linux_ptrace_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed rather than
     failing with EINTR when a child changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}