/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/dir.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

/* ``all_threads'' is keyed by the LWP ID - it should be the thread ID instead,
   however.  This requires changing the ID in place when we go from !using_threads
   to using_threads, immediately.

   ``all_processes'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.  */

struct inferior_list all_processes;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads;

static void linux_resume_one_process (struct inferior_list_entry *entry,
                                      int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info);
static void stop_all_processes (void);
static int linux_wait_for_event (struct thread_info *child);

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE long
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static int use_regsets_p = 1;
#endif

int debug_threads = 0;

#define pid_of(proc) ((proc)->head.id)

/* FIXME: Delete eventually.  */
#define inferior_pid (pid_of (get_thread_process (current_inferior)))

/* This function should only be called if the process got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (void)
{
  CORE_ADDR stop_pc = (*the_low_target.get_pc) ();

  if (get_thread_process (current_inferior)->stepping)
    return stop_pc;
  else
    return stop_pc - the_low_target.decr_pc_after_break;
}

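/* Allocate and zero a new process_info for PID, defaulting its thread
   ID and LWP ID to PID, add it to ALL_PROCESSES, and return it.  */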
static void *
add_process (unsigned long pid)
{
  struct process_info *process;

  process = (struct process_info *) malloc (sizeof (*process));
  memset (process, 0, sizeof (*process));

  process->head.id = pid;

  /* Default to tid == lwpid == pid.  */
  process->tid = pid;
  process->lwpid = pid;

  add_inferior_to_list (&all_processes, &process->head);

  return process;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  void *new_process;
  int pid;

#if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

      signal (__SIGRTMIN + 1, SIG_DFL);

      setpgid (0, 0);

      execv (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  new_process = add_process (pid);
  add_thread (pid, new_process, pid);

  return pid;
}

/* Attach to an inferior process.  */

void
linux_attach_lwp (unsigned long pid, unsigned long tid)
{
  struct process_info *new_process;

  if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
    {
      fprintf (stderr, "Cannot attach to process %ld: %s (%d)\n", pid,
               strerror (errno), errno);
      fflush (stderr);

      /* If we fail to attach to an LWP, just return.  */
      if (!using_threads)
        _exit (0177);
      return;
    }

  new_process = (struct process_info *) add_process (pid);
  add_thread (tid, new_process, pid);

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.  We should ignore that SIGSTOP and resume the process
     (unless this is the first process, in which case the flag will be cleared
     in linux_attach).

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that add_process added us to the end of the
     list, and so the new thread has not yet reached wait_for_sigstop (but
     will).  */
  if (! stopping_threads)
    new_process->stop_expected = 1;
}

int
linux_attach (unsigned long pid)
{
  struct process_info *process;

  linux_attach_lwp (pid, pid);

  /* Don't ignore the initial SIGSTOP if we just attached to this process.  */
  process = (struct process_info *) find_inferior_id (&all_processes, pid);
  process->stop_expected = 0;

  return 0;
}

/* Kill the inferior process.  Make us have no inferior.  */

static void
linux_kill_one_process (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct process_info *process = get_thread_process (thread);
  int wstat;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */
  if (entry == all_threads.head)
    return;

  do
    {
      ptrace (PTRACE_KILL, pid_of (process), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      wstat = linux_wait_for_event (thread);
    } while (WIFSTOPPED (wstat));
}

static void
linux_kill (void)
{
  struct thread_info *thread = (struct thread_info *) all_threads.head;
  struct process_info *process;
  int wstat;

  if (thread == NULL)
    return;

  for_each_inferior (&all_threads, linux_kill_one_process);

  /* See the comment in linux_kill_one_process.  We did not kill the first
     thread in the list, so do so now.  */
  process = get_thread_process (thread);
  do
    {
      ptrace (PTRACE_KILL, pid_of (process), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      wstat = linux_wait_for_event (thread);
    } while (WIFSTOPPED (wstat));
}

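/* Detach from a single traced LWP with PTRACE_DETACH.  */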
static void
linux_detach_one_process (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct process_info *process = get_thread_process (thread);

  ptrace (PTRACE_DETACH, pid_of (process), 0, 0);
}

static void
linux_detach (void)
{
  for_each_inferior (&all_threads, linux_detach_one_process);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (unsigned long tid)
{
  if (find_inferior_id (&all_threads, tid) != NULL)
    return 1;
  else
    return 0;
}

/* Return nonzero if this process stopped at a breakpoint which
   no longer appears to be inserted.  Also adjust the PC
   appropriately to resume where the breakpoint used to be.  */
static int
check_removed_breakpoint (struct process_info *event_child)
{
  CORE_ADDR stop_pc;
  struct thread_info *saved_inferior;

  if (event_child->pending_is_breakpoint == 0)
    return 0;

  if (debug_threads)
    fprintf (stderr, "Checking for breakpoint.\n");

  saved_inferior = current_inferior;
  current_inferior = get_process_thread (event_child);

  stop_pc = get_stop_pc ();

  /* If the PC has changed since we stopped, then we shouldn't do
     anything.  This happens if, for instance, GDB handled the
     decr_pc_after_break subtraction itself.  */
  if (stop_pc != event_child->pending_stop_pc)
    {
      if (debug_threads)
        fprintf (stderr, "Ignoring, PC was changed.\n");

      event_child->pending_is_breakpoint = 0;
      current_inferior = saved_inferior;
      return 0;
    }

  /* If the breakpoint is still there, we will report hitting it.  */
  if ((*the_low_target.breakpoint_at) (stop_pc))
    {
      if (debug_threads)
        fprintf (stderr, "Ignoring, breakpoint is still present.\n");
      current_inferior = saved_inferior;
      return 0;
    }

  if (debug_threads)
    fprintf (stderr, "Removed breakpoint.\n");

  /* For decr_pc_after_break targets, here is where we perform the
     decrement.  We go immediately from this function to resuming,
     and can not safely call get_stop_pc () again.  */
  if (the_low_target.set_pc != NULL)
    (*the_low_target.set_pc) (stop_pc);

  /* We consumed the pending SIGTRAP.  */
  event_child->pending_is_breakpoint = 0;
  event_child->status_pending_p = 0;
  event_child->status_pending = 0;

  current_inferior = saved_inferior;
  return 1;
}

/* Return 1 if this process has an interesting status pending.  This function
   may silently resume an inferior process.  */
static int
status_pending_p (struct inferior_list_entry *entry, void *dummy)
{
  struct process_info *process = (struct process_info *) entry;

  if (process->status_pending_p)
    if (check_removed_breakpoint (process))
      {
        /* This thread was stopped at a breakpoint, and the breakpoint
           is now gone.  We were told to continue (or step...) all threads,
           so GDB isn't trying to single-step past this breakpoint.
           So instead of reporting the old SIGTRAP, pretend we got to
           the breakpoint just after it was removed instead of just
           before; resume the process.  */
        linux_resume_one_process (&process->head, 0, 0, NULL);
        return 0;
      }

  return process->status_pending_p;
}

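/* Wait for the next event from the inferior.  If *CHILDP is non-NULL,
   wait only for that LWP; otherwise poll for any child, alternating
   waitpid calls with and without __WCLONE so that both the initial
   process and cloned LWPs are reaped.  Record the stopped process in
   *CHILDP and its wait status in *WSTATP.  */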
static void
linux_wait_for_process (struct process_info **childp, int *wstatp)
{
  int ret;
  int to_wait_for = -1;

  if (*childp != NULL)
    to_wait_for = (*childp)->lwpid;

  while (1)
    {
      ret = waitpid (to_wait_for, wstatp, WNOHANG);

      if (ret == -1)
        {
          if (errno != ECHILD)
            perror_with_name ("waitpid");
        }
      else if (ret > 0)
        break;

      ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);

      if (ret == -1)
        {
          if (errno != ECHILD)
            perror_with_name ("waitpid (WCLONE)");
        }
      else if (ret > 0)
        break;

      usleep (1000);
    }

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  if (to_wait_for == -1)
    *childp = (struct process_info *) find_inferior_id (&all_processes, ret);

  (*childp)->stopped = 1;
  (*childp)->pending_is_breakpoint = 0;

  (*childp)->last_status = *wstatp;

  if (debug_threads
      && WIFSTOPPED (*wstatp))
    {
      current_inferior = (struct thread_info *)
        find_inferior_id (&all_threads, (*childp)->tid);
      /* For testing only; i386_stop_pc prints out a diagnostic.  */
      if (the_low_target.get_pc != NULL)
        get_stop_pc ();
    }
}

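/* Wait for an event from CHILD (or from any child, if CHILD is NULL).
   Events which GDB does not need to see - expected SIGSTOPs, ignored
   signals, non-final thread exits, and SIGTRAPs from gdbserver's own
   breakpoints - are handled here and the child is resumed.  Return the
   wait status of the first event that should be reported.  */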
static int
linux_wait_for_event (struct thread_info *child)
{
  CORE_ADDR stop_pc;
  struct process_info *event_child;
  int wstat;

  /* Check for a process with a pending status.  */
  /* It is possible that the user changed the pending task's registers since
     it stopped.  We correctly handle the change of PC if we hit a breakpoint
     (in check_removed_breakpoint); signals should be reported anyway.  */
  if (child == NULL)
    {
      event_child = (struct process_info *)
        find_inferior (&all_processes, status_pending_p, NULL);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
    }
  else
    {
      event_child = get_thread_process (child);
      if (event_child->status_pending_p
          && check_removed_breakpoint (event_child))
        event_child = NULL;
    }

  if (event_child != NULL)
    {
      if (event_child->status_pending_p)
        {
          if (debug_threads)
            fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                     event_child->lwpid, event_child->status_pending);
          wstat = event_child->status_pending;
          event_child->status_pending_p = 0;
          event_child->status_pending = 0;
          current_inferior = get_process_thread (event_child);
          return wstat;
        }
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      if (child == NULL)
        event_child = NULL;
      else
        event_child = get_thread_process (child);

      linux_wait_for_process (&event_child, &wstat);

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = (struct thread_info *)
        find_inferior_id (&all_threads, event_child->tid);

      /* Check for thread exit.  */
      if (using_threads && ! WIFSTOPPED (wstat))
        {
          if (debug_threads)
            fprintf (stderr, "Thread %ld (LWP %ld) exiting\n",
                     event_child->tid, event_child->head.id);

          /* If the last thread is exiting, just return.  */
          if (all_threads.head == all_threads.tail)
            return wstat;

          dead_thread_notify (event_child->tid);

          remove_inferior (&all_processes, &event_child->head);
          free (event_child);
          remove_thread (current_inferior);
          current_inferior = (struct thread_info *) all_threads.head;

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (child != NULL)
            return wstat;

          /* Wait for a more interesting event.  */
          continue;
        }

      if (using_threads
          && WIFSTOPPED (wstat)
          && WSTOPSIG (wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;
          linux_resume_one_process (&event_child->head,
                                    event_child->stepping, 0, NULL);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested
         we ignore.  But never ignore SIGSTOP if we sent it
         ourselves.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (wstat)
          && ((using_threads && (WSTOPSIG (wstat) == __SIGRTMIN
                                 || WSTOPSIG (wstat) == __SIGRTMIN + 1))
              || (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
                  && (WSTOPSIG (wstat) != SIGSTOP
                      || !event_child->sigstop_sent))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for %ld (LWP %ld).\n",
                     WSTOPSIG (wstat), event_child->tid,
                     event_child->head.id);

          if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_process (&event_child->head,
                                    event_child->stepping,
                                    WSTOPSIG (wstat), info_p);
          continue;
        }

      /* If this event was not handled above, and is not a SIGTRAP, report
         it.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
        return wstat;

      /* If this target does not support breakpoints, we simply report the
         SIGTRAP; it's of no concern to us.  */
      if (the_low_target.get_pc == NULL)
        return wstat;

      stop_pc = get_stop_pc ();

      /* bp_reinsert will only be set if we were single-stepping.
         Notice that we will resume the process after hitting
         a gdbserver breakpoint; single-stepping to/over one
         is not supported (yet).  */
      if (event_child->bp_reinsert != 0)
        {
          if (debug_threads)
            fprintf (stderr, "Reinserted breakpoint.\n");
          reinsert_breakpoint (event_child->bp_reinsert);
          event_child->bp_reinsert = 0;

          /* Clear the single-stepping flag and SIGTRAP as we resume.  */
          linux_resume_one_process (&event_child->head, 0, 0, NULL);
          continue;
        }

      if (debug_threads)
        fprintf (stderr, "Hit a (non-reinsert) breakpoint.\n");

      if (check_breakpoints (stop_pc) != 0)
        {
          /* We hit one of our own breakpoints.  We mark it as a pending
             breakpoint, so that check_removed_breakpoint () will do the PC
             adjustment for us at the appropriate time.  */
          event_child->pending_is_breakpoint = 1;
          event_child->pending_stop_pc = stop_pc;

          /* Now we need to put the breakpoint back.  We continue in the event
             loop instead of simply replacing the breakpoint right away,
             in order to not lose signals sent to the thread that hit the
             breakpoint.  Unfortunately this increases the window where another
             thread could sneak past the removed breakpoint.  For the current
             use of server-side breakpoints (thread creation) this is
             acceptable; but it needs to be considered before this breakpoint
             mechanism can be used in more general ways.  For some breakpoints
             it may be necessary to stop all other threads, but that should
             be avoided where possible.

             If breakpoint_reinsert_addr is NULL, that means that we can
             use PTRACE_SINGLESTEP on this platform.  Uninsert the breakpoint,
             mark it for reinsertion, and single-step.

             Otherwise, call the target function to figure out where we need
             our temporary breakpoint, create it, and continue executing this
             process.  */
          if (the_low_target.breakpoint_reinsert_addr == NULL)
            {
              event_child->bp_reinsert = stop_pc;
              uninsert_breakpoint (stop_pc);
              linux_resume_one_process (&event_child->head, 1, 0, NULL);
            }
          else
            {
              reinsert_breakpoint_by_bp
                (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
              linux_resume_one_process (&event_child->head, 0, 0, NULL);
            }

          continue;
        }

      /* If we were single-stepping, we definitely want to report the
         SIGTRAP.  The single-step operation has completed, so also
         clear the stepping flag; in general this does not matter,
         because the SIGTRAP will be reported to the client, which
         will give us a new action for this thread, but clear it for
         consistency anyway.  It's safe to clear the stepping flag
         because the only consumer of get_stop_pc () after this point
         is check_removed_breakpoint, and pending_is_breakpoint is not
         set.  It might be wiser to use a step_completed flag instead.  */
      if (event_child->stepping)
        {
          event_child->stepping = 0;
          return wstat;
        }

      /* A SIGTRAP that we can't explain.  It may have been a breakpoint.
         Check if it is a breakpoint, and if so mark the process information
         accordingly.  This will handle both the necessary fiddling with the
         PC on decr_pc_after_break targets and suppressing extra threads
         hitting a breakpoint if two hit it at once and then GDB removes it
         after the first is reported.  Arguably it would be better to report
         multiple threads hitting breakpoints simultaneously, but the current
         remote protocol does not allow this.  */
      if ((*the_low_target.breakpoint_at) (stop_pc))
        {
          event_child->pending_is_breakpoint = 1;
          event_child->pending_stop_pc = stop_pc;
        }

      return wstat;
    }

  /* NOTREACHED */
  return 0;
}

/* Wait for an event from the process, and return its status.  */

static unsigned char
linux_wait (char *status)
{
  int w;
  struct thread_info *child = NULL;

retry:
  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (cont_thread != 0 && cont_thread != -1)
    {
      child = (struct thread_info *) find_inferior_id (&all_threads,
                                                       cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (child == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = -1;
          resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
          linux_resume (&resume_info);
        }
    }

  enable_async_io ();
  unblock_async_io ();
  w = linux_wait_for_event (child);
  stop_all_processes ();
  disable_async_io ();

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (all_threads.head == all_threads.tail)
    {
      if (WIFEXITED (w))
        {
          fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
          *status = 'W';
          clear_inferiors ();
          free (all_processes.head);
          all_processes.head = all_processes.tail = NULL;
          return WEXITSTATUS (w);
        }
      else if (!WIFSTOPPED (w))
        {
          fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
          *status = 'X';
          clear_inferiors ();
          free (all_processes.head);
          all_processes.head = all_processes.tail = NULL;
          return target_signal_from_host (WTERMSIG (w));
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  *status = 'T';
  return target_signal_from_host (WSTOPSIG (w));
}

/* Send a signal to an LWP.  For LinuxThreads, kill is enough; however, if
   thread groups are in use, we need to use tkill.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  static int tkill_failed;

  errno = 0;

#ifdef SYS_tkill
  if (!tkill_failed)
    {
      int ret = syscall (SYS_tkill, lwpid, signo);
      if (errno != ENOSYS)
        return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}

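/* Send SIGSTOP to the LWP represented by ENTRY.  If the LWP is already
   stopped, or already has a SIGSTOP pending, do not send another one.  */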
static void
send_sigstop (struct inferior_list_entry *entry)
{
  struct process_info *process = (struct process_info *) entry;

  if (process->stopped)
    return;

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (process->stop_expected)
    {
      process->stop_expected = 0;
      return;
    }

  if (debug_threads)
    fprintf (stderr, "Sending sigstop to process %ld\n", process->head.id);

  kill_lwp (process->head.id, SIGSTOP);
  process->sigstop_sent = 1;
}

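/* Wait for the LWP represented by ENTRY to stop.  If it stops with a
   signal other than SIGSTOP, remember that status as pending so it can
   be reported later, and keep expecting the SIGSTOP.  */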
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct process_info *process = (struct process_info *) entry;
  struct thread_info *saved_inferior, *thread;
  int wstat;
  unsigned long saved_tid;

  if (process->stopped)
    return;

  saved_inferior = current_inferior;
  saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                    process->tid);
  wstat = linux_wait_for_event (thread);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) != SIGSTOP)
    {
      if (debug_threads)
        fprintf (stderr, "Stopped with non-sigstop signal\n");
      process->status_pending_p = 1;
      process->status_pending = wstat;
      process->stop_expected = 1;
    }

  if (linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
        fprintf (stderr, "Previously current thread died.\n");

      /* Set a valid thread as current.  */
      set_desired_inferior (0);
    }
}

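/* Stop every inferior process: send each one a SIGSTOP, then wait until
   all of them have reported a stop.  */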
static void
stop_all_processes (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_processes, send_sigstop);
  for_each_inferior (&all_processes, wait_for_sigstop);
  stopping_threads = 0;
}

/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_process (struct inferior_list_entry *entry,
                          int step, int signal, siginfo_t *info)
{
  struct process_info *process = (struct process_info *) entry;
  struct thread_info *saved_inferior;

  if (process->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (process->status_pending_p || process->pending_signals != NULL
          || process->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = malloc (sizeof (*p_sig));
      p_sig->prev = process->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
        memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
        memcpy (&p_sig->info, info, sizeof (siginfo_t));
      process->pending_signals = p_sig;
    }

  if (process->status_pending_p && !check_removed_breakpoint (process))
    return;

  saved_inferior = current_inferior;
  current_inferior = get_process_thread (process);

  if (debug_threads)
    fprintf (stderr, "Resuming process %ld (%s, signal %d, stop %s)\n", inferior_pid,
             step ? "step" : "continue", signal,
             process->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_process) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (process->bp_reinsert != 0)
    {
      if (debug_threads)
        fprintf (stderr, "  pending reinsert at %08lx", (long)process->bp_reinsert);
      if (step == 0)
        fprintf (stderr, "BAD - reinserting but not stepping.\n");
      step = 1;

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  check_removed_breakpoint (process);

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      fprintf (stderr, "  ");
      (*the_low_target.get_pc) ();
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  */
  if (process->pending_signals != NULL && process->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &process->pending_signals;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, process->lwpid, 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_process_thread (process));
  errno = 0;
  process->stopped = 0;
  process->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, process->lwpid, 0, signal);

  current_inferior = saved_inferior;
  if (errno)
    perror_with_name ("ptrace");
}

static struct thread_resume *resume_ptr;

/* This function is called once per thread.  We look up the thread
   in RESUME_PTR, and mark the thread with a pointer to the appropriate
   resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */
static void
linux_set_resume_request (struct inferior_list_entry *entry)
{
  struct process_info *process;
  struct thread_info *thread;
  int ndx;

  thread = (struct thread_info *) entry;
  process = get_thread_process (thread);

  ndx = 0;
  while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
    ndx++;

  process->resume = &resume_ptr[ndx];
}

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.  For threads which
   we aren't explicitly told otherwise, we preserve the stepping flag; this
   is used for stepping over gdbserver-placed breakpoints.  */

static void
linux_continue_one_thread (struct inferior_list_entry *entry)
{
  struct process_info *process;
  struct thread_info *thread;
  int step;

  thread = (struct thread_info *) entry;
  process = get_thread_process (thread);

  if (process->resume->leave_stopped)
    return;

  if (process->resume->thread == -1)
    step = process->stepping || process->resume->step;
  else
    step = process->resume->step;

  linux_resume_one_process (&process->head, step, process->resume->sig, NULL);

  process->resume = NULL;
}

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.  We queue any needed
   signals, since we won't actually resume.  We already have a pending event
   to report, so we don't need to preserve any step requests; they should
   be re-issued if necessary.  */

static void
linux_queue_one_thread (struct inferior_list_entry *entry)
{
  struct process_info *process;
  struct thread_info *thread;

  thread = (struct thread_info *) entry;
  process = get_thread_process (thread);

  if (process->resume->leave_stopped)
    return;

  /* If we have a new signal, enqueue the signal.  */
  if (process->resume->sig != 0)
    {
      struct pending_signals *p_sig;
      p_sig = malloc (sizeof (*p_sig));
      p_sig->prev = process->pending_signals;
      p_sig->signal = process->resume->sig;
      memset (&p_sig->info, 0, sizeof (siginfo_t));

      /* If this is the same signal we were previously stopped by,
         make sure to queue its siginfo.  We can ignore the return
         value of ptrace; if it fails, we'll skip
         PTRACE_SETSIGINFO.  */
      if (WIFSTOPPED (process->last_status)
          && WSTOPSIG (process->last_status) == process->resume->sig)
        ptrace (PTRACE_GETSIGINFO, process->lwpid, 0, &p_sig->info);

      process->pending_signals = p_sig;
    }

  process->resume = NULL;
}

/* Set DUMMY if this process has an interesting status pending.  */
static int
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
{
  struct process_info *process = (struct process_info *) entry;

  /* Processes which will not be resumed are not interesting, because
     we might not wait for them next time through linux_wait.  */
  if (process->resume->leave_stopped)
    return 0;

  /* If this thread has a removed breakpoint, we won't have any
     events to report later, so check now.  check_removed_breakpoint
     may clear status_pending_p.  We avoid calling check_removed_breakpoint
     for any thread that we are not otherwise going to resume - this
     lets us preserve stopped status when two threads hit a breakpoint.
     GDB removes the breakpoint to single-step a particular thread
     past it, then re-inserts it and resumes all threads.  We want
     to report the second thread without resuming it in the interim.  */
  if (process->status_pending_p)
    check_removed_breakpoint (process);

  if (process->status_pending_p)
    * (int *) flag_p = 1;

  return 0;
}

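/* Resume the inferior as directed by RESUME_INFO.  If any thread that
   would be resumed already has an interesting status pending, do not
   resume anything; just queue the requested signals so the pending
   status can be reported instead.  */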
static void
linux_resume (struct thread_resume *resume_info)
{
  int pending_flag;

  /* Yes, the use of a global here is rather ugly.  */
  resume_ptr = resume_info;

  for_each_inferior (&all_threads, linux_set_resume_request);

  /* If there is a thread which would otherwise be resumed, which
     has a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals
     that would otherwise be sent.  */
  pending_flag = 0;
  find_inferior (&all_processes, resume_status_pending_p, &pending_flag);

  if (debug_threads)
    {
      if (pending_flag)
        fprintf (stderr, "Not resuming, pending status\n");
      else
        fprintf (stderr, "Resuming, no pending status\n");
    }

  if (pending_flag)
    for_each_inferior (&all_threads, linux_queue_one_thread);
  else
    {
      block_async_io ();
      enable_async_io ();
      for_each_inferior (&all_threads, linux_continue_one_thread);
    }
}

#ifdef HAVE_LINUX_USRREGS

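/* Return the offset of register REGNUM in the inferior's USER area,
   as given by the low target's register map.  */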
int
register_addr (int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= the_low_target.num_regs)
    error ("Invalid register number %d.", regnum);

  addr = the_low_target.regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;
  size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
         & - sizeof (PTRACE_XFER_TYPE);
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
        ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
        {
          /* Warning, not error, in case we are attached; sometimes the
             kernel doesn't let us at the registers.  */
          char *err = strerror (errno);
          char *msg = alloca (strlen (err) + 128);
          sprintf (msg, "reading register %d: %s", regno, err);
          error (msg);
          goto error_exit;
        }
    }
  if (the_low_target.left_pad_xfer
      && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
    supply_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
                             - register_size (regno)));
  else
    supply_register (regno, buf);

error_exit:;
}

/* Fetch all registers, or just one, from the child process.  */
static void
usr_fetch_inferior_registers (int regno)
{
  if (regno == -1 || regno == 0)
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      fetch_register (regno);
  else
    fetch_register (regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
        return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
        return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
        return;
      errno = 0;
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
             & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);
      if (the_low_target.left_pad_xfer
          && register_size (regno) < sizeof (PTRACE_XFER_TYPE))
        collect_register (regno, (buf + sizeof (PTRACE_XFER_TYPE)
                                  - register_size (regno)));
      else
        collect_register (regno, buf);
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
        {
          errno = 0;
          ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
                  *(PTRACE_XFER_TYPE *) (buf + i));
          if (errno != 0)
            {
              if ((*the_low_target.cannot_store_register) (regno) == 0)
                {
                  char *err = strerror (errno);
                  char *msg = alloca (strlen (err) + 128);
                  sprintf (msg, "writing register %d: %s",
                           regno, err);
                  error (msg);
                  return;
                }
            }
          regaddr += sizeof (PTRACE_XFER_TYPE);
        }
    }
  else
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regno);
}
#endif /* HAVE_LINUX_USRREGS */



#ifdef HAVE_LINUX_REGSETS

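/* Fetch all registers available through the kernel regset interface
   into the regcache.  Return 0 if the general-purpose registers were
   among them, 1 if the caller should also use the USER-area method,
   or -1 if the regset interface is unsupported (EIO on the first
   regset).  */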
static int
regsets_fetch_inferior_registers ()
{
  struct regset_info *regset;
  int saw_general_regs = 0;

  regset = target_regsets;

  while (regset->size >= 0)
    {
      void *buf;
      int res;

      if (regset->size == 0)
        {
          regset ++;
          continue;
        }

      buf = malloc (regset->size);
      res = ptrace (regset->get_request, inferior_pid, 0, buf);
      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on the first regset, do not try regsets again.
                 If we get EIO on a later regset, disable that regset.  */
              if (regset == target_regsets)
                {
                  use_regsets_p = 0;
                  return -1;
                }
              else
                {
                  regset->size = 0;
                  continue;
                }
            }
          else
            {
              char s[256];
              sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
                       inferior_pid);
              perror (s);
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset->store_function (buf);
      regset ++;
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

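/* Write the regcache back to the inferior using the kernel regset
   interface.  The kernel's current contents are read first so that any
   fields not covered by gdbserver's regcache are preserved.  Return
   values are as for regsets_fetch_inferior_registers.  */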
static int
regsets_store_inferior_registers ()
{
  struct regset_info *regset;
  int saw_general_regs = 0;

  regset = target_regsets;

  while (regset->size >= 0)
    {
      void *buf;
      int res;

      if (regset->size == 0)
        {
          regset ++;
          continue;
        }

      buf = malloc (regset->size);

      /* First fill the buffer with the current register set contents,
         in case there are any items in the kernel's regset that are
         not in gdbserver's regcache.  */
      res = ptrace (regset->get_request, inferior_pid, 0, buf);

      if (res == 0)
        {
          /* Then overlay our cached registers on that.  */
          regset->fill_function (buf);

          /* Only now do we write the register set.  */
          res = ptrace (regset->set_request, inferior_pid, 0, buf);
        }

      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on the first regset, do not try regsets again.
                 If we get EIO on a later regset, disable that regset.  */
              if (regset == target_regsets)
                {
                  use_regsets_p = 0;
                  return -1;
                }
              else
                {
                  regset->size = 0;
                  continue;
                }
            }
          else
            {
              perror ("Warning: ptrace(regsets_store_inferior_registers)");
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
  return 0;
}

#endif /* HAVE_LINUX_REGSETS */


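/* Fetch registers from the inferior, using the regset method when it is
   available and falling back to PTRACE_PEEKUSER otherwise.  */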
void
linux_fetch_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (use_regsets_p)
    {
      if (regsets_fetch_inferior_registers () == 0)
        return;
    }
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regno);
#endif
}

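/* Store registers back into the inferior, using the regset method when
   it is available and falling back to PTRACE_POKEUSER otherwise.  */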
1403 | void | |
ce3a066d | 1404 | linux_store_registers (int regno) |
58caa3dc DJ |
1405 | { |
1406 | #ifdef HAVE_LINUX_REGSETS | |
1407 | if (use_regsets_p) | |
1408 | { | |
1409 | if (regsets_store_inferior_registers () == 0) | |
1410 | return; | |
1411 | } | |
1412 | #endif | |
1413 | #ifdef HAVE_LINUX_USRREGS | |
1414 | usr_store_inferior_registers (regno); | |
1415 | #endif | |
1416 | } | |
1417 | ||
da6d8c04 | 1418 | |
da6d8c04 DJ |
1419 | /* Copy LEN bytes from inferior's memory starting at MEMADDR |
1420 | to debugger memory starting at MYADDR. */ | |
1421 | ||
c3e735a6 | 1422 | static int |
f450004a | 1423 | linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) |
da6d8c04 DJ |
1424 | { |
1425 | register int i; | |
1426 | /* Round starting address down to longword boundary. */ | |
1427 | register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE); | |
1428 | /* Round ending address up; get number of longwords that makes. */ | |
aa691b87 RM |
1429 | register int count |
1430 | = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) | |
da6d8c04 DJ |
1431 | / sizeof (PTRACE_XFER_TYPE); |
1432 | /* Allocate buffer of that many longwords. */ | |
aa691b87 | 1433 | register PTRACE_XFER_TYPE *buffer |
da6d8c04 DJ |
1434 | = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE)); |
1435 | ||
1436 | /* Read all the longwords */ | |
1437 | for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) | |
1438 | { | |
c3e735a6 | 1439 | errno = 0; |
d844cde6 | 1440 | buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, 0); |
c3e735a6 DJ |
1441 | if (errno) |
1442 | return errno; | |
da6d8c04 DJ |
1443 | } |
1444 | ||
1445 | /* Copy appropriate bytes out of the buffer. */ | |
1446 | memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), len); | |
c3e735a6 DJ |
1447 | |
1448 | return 0; | |
da6d8c04 DJ |
1449 | } |

/* Copy LEN bytes of data from debugger memory at MYADDR
   to inferior's memory at MEMADDR.
   On failure (cannot write the inferior) returns the value of errno.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  if (debug_threads)
    fprintf (stderr, "Writing %02x to %08lx\n",
             (unsigned) myaddr[0], (long) memaddr);

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
                      (PTRACE_ARG3_TYPE) addr, 0);

  if (count > 1)
    {
      buffer[count - 1]
        = ptrace (PTRACE_PEEKTEXT, inferior_pid,
                  (PTRACE_ARG3_TYPE) (addr + (count - 1)
                                      * sizeof (PTRACE_XFER_TYPE)),
                  0);
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
          myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr,
              buffer[i]);
      if (errno)
        return errno;
    }

  return 0;
}
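
/* A hypothetical usage sketch (not compiled) showing how a caller can
   plant a software breakpoint on top of the two memory routines above:
   save the original bytes, then overwrite them with the breakpoint
   instruction.  The 0xCC opcode is an x86 assumption used purely for
   illustration; the real breakpoint instruction comes from
   the_low_target.breakpoint, and gdbserver's actual breakpoint
   bookkeeping is done elsewhere.  */
#if 0
static int
example_plant_breakpoint (CORE_ADDR where, unsigned char *saved_bytes)
{
  static const unsigned char bp_insn[] = { 0xCC };
  int err;

  /* Remember what was there so the breakpoint can be removed later.  */
  err = linux_read_memory (where, saved_bytes, sizeof bp_insn);
  if (err != 0)
    return err;

  /* Overwrite it with the breakpoint instruction.  */
  return linux_write_memory (where, bp_insn, sizeof bp_insn);
}
#endif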

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  if (using_threads)
    return;

  using_threads = thread_db_init ();
#endif
}

static void
linux_send_signal (int signum)
{
  extern unsigned long signal_pid;

  if (cont_thread != 0 && cont_thread != -1)
    {
      struct process_info *process;

      process = get_thread_process (current_inferior);
      kill_lwp (process->lwpid, signum);
    }
  else
    kill_lwp (signal_pid, signum);
}
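
/* kill_lwp is defined earlier in this file (outside this excerpt).  A
   minimal sketch of the idea, under the assumption that the tkill
   system call is available: deliver the signal to one specific LWP
   rather than to the whole thread group, and fall back to kill () on
   kernels that lack tkill.  Kept under "#if 0"; it is not the
   implementation used above.  */
#if 0
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>

static int
example_kill_lwp (unsigned long lwpid, int signo)
{
#ifdef SYS_tkill
  {
    int ret = syscall (SYS_tkill, lwpid, signo);
    if (ret == 0 || errno != ENOSYS)
      return ret;
  }
#endif
  return kill (lwpid, signo);
}
#endif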

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;

  snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
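
/* What the buffer filled in by linux_read_auxv contains:
   /proc/<pid>/auxv is an array of ELF auxiliary-vector entries
   terminated by AT_NULL.  The hypothetical scan below, kept under
   "#if 0", shows how a caller could pull out the program entry point;
   it assumes 64-bit entries for brevity, whereas the real entry width
   matches the inferior.  */
#if 0
#include <elf.h>

static unsigned long
example_auxv_entry_point (const unsigned char *auxv, unsigned int len)
{
  const Elf64_auxv_t *entry = (const Elf64_auxv_t *) auxv;
  const Elf64_auxv_t *end = (const Elf64_auxv_t *) (auxv + len);

  for (; entry < end && entry->a_type != AT_NULL; entry++)
    if (entry->a_type == AT_ENTRY)
      return entry->a_un.a_val;

  return 0;
}
#endif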
1557 | ||
e013ee27 OF |
1558 | /* These watchpoint related wrapper functions simply pass on the function call |
1559 | if the target has registered a corresponding function. */ | |
1560 | ||
1561 | static int | |
1562 | linux_insert_watchpoint (char type, CORE_ADDR addr, int len) | |
1563 | { | |
1564 | if (the_low_target.insert_watchpoint != NULL) | |
1565 | return the_low_target.insert_watchpoint (type, addr, len); | |
1566 | else | |
1567 | /* Unsupported (see target.h). */ | |
1568 | return 1; | |
1569 | } | |
1570 | ||
1571 | static int | |
1572 | linux_remove_watchpoint (char type, CORE_ADDR addr, int len) | |
1573 | { | |
1574 | if (the_low_target.remove_watchpoint != NULL) | |
1575 | return the_low_target.remove_watchpoint (type, addr, len); | |
1576 | else | |
1577 | /* Unsupported (see target.h). */ | |
1578 | return 1; | |
1579 | } | |
1580 | ||
1581 | static int | |
1582 | linux_stopped_by_watchpoint (void) | |
1583 | { | |
1584 | if (the_low_target.stopped_by_watchpoint != NULL) | |
1585 | return the_low_target.stopped_by_watchpoint (); | |
1586 | else | |
1587 | return 0; | |
1588 | } | |
1589 | ||
1590 | static CORE_ADDR | |
1591 | linux_stopped_data_address (void) | |
1592 | { | |
1593 | if (the_low_target.stopped_data_address != NULL) | |
1594 | return the_low_target.stopped_data_address (); | |
1595 | else | |
1596 | return 0; | |
1597 | } | |

#if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = get_thread_process (current_inferior)->head.id;

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long) PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
         used by gdb) are relative to the beginning of the program,
         with the data segment immediately following the text segment.
         However, the actual runtime layout in memory may put the data
         somewhere else, so when we send gdb a data base-address, we
         use the real data base address and subtract the compile-time
         data base-address from it (which is just the length of the
         text segment).  BSS immediately follows data in both
         cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
  return 0;
}
#endif
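
/* A worked example of the offset computation above, with made-up
   numbers and kept under "#if 0": if the kernel reports text at
   0x10000000, text_end at 0x10008000 and data at 0x20000000, the text
   segment is 0x8000 bytes long, so the data base address handed to gdb
   is 0x20000000 - 0x8000 = 0x1fff8000.  gdb then adds its compile-time
   data offset (which equals the text length) back on, recovering the
   real runtime data address.  */
#if 0
static void
example_read_offsets (void)
{
  unsigned long text = 0x10000000, text_end = 0x10008000;
  unsigned long data = 0x20000000;

  unsigned long text_p = text;                     /* 0x10000000 */
  unsigned long data_p = data - (text_end - text); /* 0x1fff8000 */

  (void) text_p;
  (void) data_p;
}
#endif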

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_send_signal,
  linux_read_auxv,
  linux_insert_watchpoint,
  linux_remove_watchpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && !defined(__UCLIBC_HAS_MMU__)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
};

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
}

void
initialize_low (void)
{
  using_threads = 0;
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  init_registers ();
  linux_init_signals ();
}