1 /* Memory breakpoint operations for the remote server for GDB.
2 Copyright (C) 2002, 2003, 2005, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4
5 Contributed by MontaVista Software.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #if HAVE_MALLOC_H
24 #include <malloc.h>
25 #endif
26
/* The byte sequence the target uses as a software breakpoint (trap)
   instruction, as registered via set_breakpoint_data.  NULL when the
   target does not support memory breakpoints.  */
const unsigned char *breakpoint_data;
/* Length in bytes of BREAKPOINT_DATA.  */
int breakpoint_len;

/* Upper bound on the breakpoint instruction length; sizes the shadow
   buffer embedded in struct raw_breakpoint below.  */
#define MAX_BREAKPOINT_LEN 8
31
32 /* GDB will never try to install multiple breakpoints at the same
33 address. But, we need to keep track of internal breakpoints too,
34 and so we do need to be able to install multiple breakpoints at the
35 same address transparently. We keep track of two different, and
36 closely related structures. A raw breakpoint, which manages the
37 low level, close to the metal aspect of a breakpoint. It holds the
38 breakpoint address, and a buffer holding a copy of the instructions
   that would be in memory had there not been a breakpoint there (we
   call that the shadow memory of the breakpoint).  We occasionally
   need to temporarily uninsert a breakpoint without the client
   knowing about
42 it (e.g., to step over an internal breakpoint), so we keep an
43 `inserted' state associated with this low level breakpoint
44 structure. There can only be one such object for a given address.
45 Then, we have (a bit higher level) breakpoints. This structure
46 holds a callback to be called whenever a breakpoint is hit, a
47 high-level type, and a link to a low level raw breakpoint. There
48 can be many high-level breakpoints at the same address, and all of
49 them will point to the same raw breakpoint, which is reference
50 counted. */
51
52 /* The low level, physical, raw breakpoint. */
struct raw_breakpoint
{
  /* Next raw breakpoint in the process's singly-linked list.  */
  struct raw_breakpoint *next;

  /* A reference count.  Each high level breakpoint referencing this
     raw breakpoint accounts for one reference.  */
  int refcount;

  /* The breakpoint's insertion address.  There can only be one raw
     breakpoint for a given PC.  */
  CORE_ADDR pc;

  /* The breakpoint's shadow memory: a copy of the original
     instruction bytes the trap replaced.  Only the first
     BREAKPOINT_LEN bytes are meaningful.  */
  unsigned char old_data[MAX_BREAKPOINT_LEN];

  /* Non-zero if this breakpoint is currently inserted in the
     inferior.  */
  int inserted;

  /* Non-zero if this breakpoint is currently disabled because we no
     longer detect it as inserted (e.g., the shared library holding it
     was unloaded -- see validate_inserted_breakpoint).  */
  int shlib_disabled;
};
76
77 /* The type of a breakpoint. */
enum bkpt_type
  {
    /* A GDB breakpoint, requested with a Z0 packet.  */
    gdb_breakpoint,

    /* A basic-software-single-step breakpoint, planted internally
       (see set_reinsert_breakpoint).  */
    reinsert_breakpoint,

    /* Any other breakpoint type that doesn't require specific
       treatment goes here.  E.g., an event breakpoint.  */
    other_breakpoint,
  };
90
91 /* A high level (in gdbserver's perspective) breakpoint. */
struct breakpoint
{
  /* Next high-level breakpoint in the process's singly-linked
     list.  */
  struct breakpoint *next;

  /* The breakpoint's type.  */
  enum bkpt_type type;

  /* Link to this breakpoint's raw breakpoint.  This is always
     non-NULL.  */
  struct raw_breakpoint *raw;

  /* Function to call when we hit this breakpoint.  If it returns 1,
     the breakpoint shall be deleted; if it returns 0, or if this
     callback is NULL, the breakpoint will be left inserted (see
     check_breakpoints).  */
  int (*handler) (CORE_ADDR);
};
108
109 static struct raw_breakpoint *
110 find_raw_breakpoint_at (CORE_ADDR where)
111 {
112 struct process_info *proc = current_process ();
113 struct raw_breakpoint *bp;
114
115 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
116 if (bp->pc == where)
117 return bp;
118
119 return NULL;
120 }
121
122 static struct raw_breakpoint *
123 set_raw_breakpoint_at (CORE_ADDR where)
124 {
125 struct process_info *proc = current_process ();
126 struct raw_breakpoint *bp;
127 int err;
128
129 if (breakpoint_data == NULL)
130 error ("Target does not support breakpoints.");
131
132 bp = find_raw_breakpoint_at (where);
133 if (bp != NULL)
134 {
135 bp->refcount++;
136 return bp;
137 }
138
139 bp = xcalloc (1, sizeof (*bp));
140 bp->pc = where;
141 bp->refcount = 1;
142
143 /* Note that there can be fast tracepoint jumps installed in the
144 same memory range, so to get at the original memory, we need to
145 use read_inferior_memory, which masks those out. */
146 err = read_inferior_memory (where, bp->old_data, breakpoint_len);
147 if (err != 0)
148 {
149 if (debug_threads)
150 fprintf (stderr,
151 "Failed to read shadow memory of"
152 " breakpoint at 0x%s (%s).\n",
153 paddress (where), strerror (err));
154 free (bp);
155 return NULL;
156 }
157
158 err = (*the_target->write_memory) (where, breakpoint_data,
159 breakpoint_len);
160 if (err != 0)
161 {
162 if (debug_threads)
163 fprintf (stderr,
164 "Failed to insert breakpoint at 0x%s (%s).\n",
165 paddress (where), strerror (err));
166 free (bp);
167 return NULL;
168 }
169
170 /* Link the breakpoint in. */
171 bp->inserted = 1;
172 bp->next = proc->raw_breakpoints;
173 proc->raw_breakpoints = bp;
174 return bp;
175 }
176
177 /* Notice that breakpoint traps are always installed on top of fast
178 tracepoint jumps. This is even if the fast tracepoint is installed
179 at a later time compared to when the breakpoint was installed.
180 This means that a stopping breakpoint or tracepoint has higher
181 "priority". In turn, this allows having fast and slow tracepoints
182 (and breakpoints) at the same address behave correctly. */
183
184
185 /* A fast tracepoint jump. */
186
187 struct fast_tracepoint_jump
188 {
189 struct fast_tracepoint_jump *next;
190
191 /* A reference count. GDB can install more than one fast tracepoint
192 at the same address (each with its own action list, for
193 example). */
194 int refcount;
195
196 /* The fast tracepoint's insertion address. There can only be one
197 of these for a given PC. */
198 CORE_ADDR pc;
199
200 /* Non-zero if this fast tracepoint jump is currently inserted in
201 the inferior. */
202 int inserted;
203
204 /* The length of the jump instruction. */
205 int length;
206
207 /* A poor-man's flexible array member, holding both the jump
208 instruction to insert, and a copy of the instruction that would
209 be in memory had not been a jump there (the shadow memory of the
210 tracepoint jump). */
211 unsigned char insn_and_shadow[0];
212 };
213
/* Fast tracepoint FP's jump instruction to insert (the first
   FP->length bytes of the trailing buffer).  */
#define fast_tracepoint_jump_insn(fp) \
  ((fp)->insn_and_shadow + 0)

/* The shadow memory of fast tracepoint jump FP (the second
   FP->length bytes of the trailing buffer).  */
#define fast_tracepoint_jump_shadow(fp) \
  ((fp)->insn_and_shadow + (fp)->length)
221
222
223 /* Return the fast tracepoint jump set at WHERE. */
224
225 static struct fast_tracepoint_jump *
226 find_fast_tracepoint_jump_at (CORE_ADDR where)
227 {
228 struct process_info *proc = current_process ();
229 struct fast_tracepoint_jump *jp;
230
231 for (jp = proc->fast_tracepoint_jumps; jp != NULL; jp = jp->next)
232 if (jp->pc == where)
233 return jp;
234
235 return NULL;
236 }
237
238 int
239 fast_tracepoint_jump_here (CORE_ADDR where)
240 {
241 struct fast_tracepoint_jump *jp = find_fast_tracepoint_jump_at (where);
242
243 return (jp != NULL);
244 }
245
/* Drop one reference to fast tracepoint jump TODEL.  When the last
   reference is gone, restore the original instruction bytes in the
   inferior and free the object.  Returns 0 on success, ENOENT if
   TODEL is not in the current process's list, or the error returned
   by write_inferior_memory if uninserting failed (in which case the
   jump is relinked and left in place).  */

int
delete_fast_tracepoint_jump (struct fast_tracepoint_jump *todel)
{
  struct fast_tracepoint_jump *bp, **bp_link;
  int ret;
  struct process_info *proc = current_process ();

  bp = proc->fast_tracepoint_jumps;
  bp_link = &proc->fast_tracepoint_jumps;

  while (bp)
    {
      if (bp == todel)
	{
	  if (--bp->refcount == 0)
	    {
	      /* Remember the list cell (== BP) so we can relink it
		 if the memory write below fails.  */
	      struct fast_tracepoint_jump *prev_bp_link = *bp_link;

	      /* Unlink it.  */
	      *bp_link = bp->next;

	      /* Since there can be breakpoints inserted in the same
		 address range, we use `write_inferior_memory', which
		 takes care of layering breakpoints on top of fast
		 tracepoints, and on top of the buffer we pass it.
		 This works because we've already unlinked the fast
		 tracepoint jump above.  Also note that we need to
		 pass the current shadow contents, because
		 write_inferior_memory updates any shadow memory with
		 what we pass here, and we want that to be a nop.  */
	      ret = write_inferior_memory (bp->pc,
					   fast_tracepoint_jump_shadow (bp),
					   bp->length);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the jump.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert fast tracepoint jump "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	      free (bp);
	    }

	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find fast tracepoint jump in list.");
  return ENOENT;
}
307
/* Install a fast tracepoint jump at WHERE, using the LENGTH-byte
   instruction in INSN, or take an extra reference to the jump
   already installed there.  Returns the (refcounted) jump object, or
   NULL if reading the original memory or writing the jump into the
   inferior failed.  */

struct fast_tracepoint_jump *
set_fast_tracepoint_jump (CORE_ADDR where,
			  unsigned char *insn, ULONGEST length)
{
  struct process_info *proc = current_process ();
  struct fast_tracepoint_jump *jp;
  int err;

  /* We refcount fast tracepoint jumps.  Check if we already know
     about a jump at this address.  */
  jp = find_fast_tracepoint_jump_at (where);
  if (jp != NULL)
    {
      jp->refcount++;
      return jp;
    }

  /* We don't, so create a new object.  Double the length, because the
     flexible array member holds both the jump insn, and the
     shadow.  */
  jp = xcalloc (1, sizeof (*jp) + (length * 2));
  jp->pc = where;
  jp->length = length;
  memcpy (fast_tracepoint_jump_insn (jp), insn, length);
  jp->refcount = 1;

  /* Note that there can be trap breakpoints inserted in the same
     address range.  To access the original memory contents, we use
     `read_inferior_memory', which masks out breakpoints.  */
  err = read_inferior_memory (where,
			      fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to read shadow memory of"
		 " fast tracepoint at 0x%s (%s).\n",
		 paddress (where), strerror (err));
      free (jp);
      return NULL;
    }

  /* Link the jump in.  */
  jp->inserted = 1;
  jp->next = proc->fast_tracepoint_jumps;
  proc->fast_tracepoint_jumps = jp;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, on top of the
     buffer we pass it.  This works because we've already linked in
     the fast tracepoint jump above.  Also note that we need to pass
     the current shadow contents, because write_inferior_memory
     updates any shadow memory with what we pass here, and we want
     that to be a nop.  */
  err = write_inferior_memory (where, fast_tracepoint_jump_shadow (jp), length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to insert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));

      /* Unlink it.  */
      proc->fast_tracepoint_jumps = jp->next;
      free (jp);

      return NULL;
    }

  return jp;
}
380
/* Temporarily remove from the inferior the fast tracepoint jump set
   at PC, keeping the jump object around so it can be reinserted
   later (see reinsert_fast_tracepoint_jumps_at).  A no-op (with a
   debug note) if no jump is found there.  */

void
uninsert_fast_tracepoint_jumps_at (CORE_ADDR pc)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (pc);
  if (jp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
	 a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (uninserting).\n",
		 paddress (pc));
      return;
    }

  if (jp->inserted)
    {
      jp->inserted = 0;

      /* Since there can be trap breakpoints inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked the fast tracepoint jump uninserted above.
	 Also note that we need to pass the current shadow contents,
	 because write_inferior_memory updates any shadow memory with
	 what we pass here, and we want that to be a nop.  */
      err = write_inferior_memory (jp->pc,
				   fast_tracepoint_jump_shadow (jp),
				   jp->length);
      if (err != 0)
	{
	  /* The write failed; consider the jump still inserted.  */
	  jp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert fast tracepoint jump at 0x%s (%s).\n",
		     paddress (pc), strerror (err));
	}
    }
}
427
/* Put back in the inferior the fast tracepoint jump at WHERE that
   was previously removed by uninsert_fast_tracepoint_jumps_at.  A
   no-op (with a debug note) if no jump is found there; errors out if
   the jump is already inserted.  */

void
reinsert_fast_tracepoint_jumps_at (CORE_ADDR where)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (where);
  if (jp == NULL)
    {
      /* This can happen when we remove breakpoints when a tracepoint
	 hit causes a tracing stop, while handling a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (reinserting).\n",
		 paddress (where));
      return;
    }

  if (jp->inserted)
    error ("Jump already inserted at reinsert time.");

  jp->inserted = 1;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, and on top of
     the buffer we pass it.  This works because we've already marked
     the fast tracepoint jump inserted above.  Also note that we need
     to pass the current shadow contents, because
     write_inferior_memory updates any shadow memory with what we pass
     here, and we want that to be a nop.  */
  err = write_inferior_memory (where,
			       fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      /* The write failed; consider the jump still uninserted.  */
      jp->inserted = 0;

      if (debug_threads)
	fprintf (stderr,
		 "Failed to reinsert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));
    }
}
472
473 struct breakpoint *
474 set_breakpoint_at (CORE_ADDR where, int (*handler) (CORE_ADDR))
475 {
476 struct process_info *proc = current_process ();
477 struct breakpoint *bp;
478 struct raw_breakpoint *raw;
479
480 raw = set_raw_breakpoint_at (where);
481
482 if (raw == NULL)
483 {
484 /* warn? */
485 return NULL;
486 }
487
488 bp = xcalloc (1, sizeof (struct breakpoint));
489 bp->type = other_breakpoint;
490
491 bp->raw = raw;
492 bp->handler = handler;
493
494 bp->next = proc->breakpoints;
495 proc->breakpoints = bp;
496
497 return bp;
498 }
499
/* Unlink raw breakpoint TODEL from PROC's list, restore the original
   instruction bytes in the inferior if it was inserted, and free it.
   Returns 0 on success, ENOENT if TODEL is not in the list, or the
   error from write_inferior_memory if uninserting failed (in which
   case TODEL is relinked and kept).  */

static int
delete_raw_breakpoint (struct process_info *proc, struct raw_breakpoint *todel)
{
  struct raw_breakpoint *bp, **bp_link;
  int ret;

  bp = proc->raw_breakpoints;
  bp_link = &proc->raw_breakpoints;

  while (bp)
    {
      if (bp == todel)
	{
	  if (bp->inserted)
	    {
	      /* Remember the list cell (== BP) so we can relink it
		 if the memory write below fails.  */
	      struct raw_breakpoint *prev_bp_link = *bp_link;

	      *bp_link = bp->next;

	      /* Since there can be fast tracepoint jumps inserted in
		 the same address range, we use
		 `write_inferior_memory', which takes care of layering
		 breakpoints on top of fast tracepoints, and on top of
		 the buffer we pass it.  This works because we've
		 already unlinked this raw breakpoint above.  Also
		 note that we need to pass the current shadow
		 contents, because write_inferior_memory updates any
		 shadow memory with what we pass here, and we want
		 that to be a nop.  */
	      ret = write_inferior_memory (bp->pc, bp->old_data,
					   breakpoint_len);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the breakpoint.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert raw breakpoint "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	    }
	  else
	    *bp_link = bp->next;

	  free (bp);
	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find raw breakpoint in list.");
  return ENOENT;
}
560
561 static int
562 release_breakpoint (struct process_info *proc, struct breakpoint *bp)
563 {
564 int newrefcount;
565 int ret;
566
567 newrefcount = bp->raw->refcount - 1;
568 if (newrefcount == 0)
569 {
570 ret = delete_raw_breakpoint (proc, bp->raw);
571 if (ret != 0)
572 return ret;
573 }
574 else
575 bp->raw->refcount = newrefcount;
576
577 free (bp);
578
579 return 0;
580 }
581
582 static int
583 delete_breakpoint_1 (struct process_info *proc, struct breakpoint *todel)
584 {
585 struct breakpoint *bp, **bp_link;
586 int err;
587
588 bp = proc->breakpoints;
589 bp_link = &proc->breakpoints;
590
591 while (bp)
592 {
593 if (bp == todel)
594 {
595 *bp_link = bp->next;
596
597 err = release_breakpoint (proc, bp);
598 if (err != 0)
599 return err;
600
601 bp = *bp_link;
602 return 0;
603 }
604 else
605 {
606 bp_link = &bp->next;
607 bp = *bp_link;
608 }
609 }
610
611 warning ("Could not find breakpoint in list.");
612 return ENOENT;
613 }
614
/* Delete breakpoint TODEL from the current process.  Returns 0 on
   success, non-zero otherwise.  */

int
delete_breakpoint (struct breakpoint *todel)
{
  return delete_breakpoint_1 (current_process (), todel);
}
621
622 static struct breakpoint *
623 find_gdb_breakpoint_at (CORE_ADDR where)
624 {
625 struct process_info *proc = current_process ();
626 struct breakpoint *bp;
627
628 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
629 if (bp->type == gdb_breakpoint && bp->raw->pc == where)
630 return bp;
631
632 return NULL;
633 }
634
/* Set a GDB (Z0) breakpoint at WHERE.  Returns 0 on success, 1 if
   the target doesn't support memory breakpoints, and -1 if inserting
   the breakpoint failed.  */

int
set_gdb_breakpoint_at (CORE_ADDR where)
{
  struct breakpoint *bp;

  if (breakpoint_data == NULL)
    return 1;

  /* If we see GDB inserting a second breakpoint at the same address,
     then the first breakpoint must have disappeared due to a shared
     library unload.  On targets where the shared libraries are
     handled by userspace, like SVR4, for example, GDBserver can't
     tell if a library was loaded or unloaded.  Since we refcount
     breakpoints, if we didn't do this, we'd just increase the
     refcount of the previous breakpoint at this address, but the trap
     was not planted in the inferior anymore, thus the breakpoint
     would never be hit.  */
  bp = find_gdb_breakpoint_at (where);
  if (bp != NULL)
    {
      delete_gdb_breakpoint_at (where);

      /* Might as well validate all other breakpoints.  */
      validate_breakpoints ();
    }

  bp = set_breakpoint_at (where, NULL);
  if (bp == NULL)
    return -1;

  /* set_breakpoint_at creates it as other_breakpoint; tag it as
     GDB-requested.  */
  bp->type = gdb_breakpoint;
  return 0;
}
668
669 int
670 delete_gdb_breakpoint_at (CORE_ADDR addr)
671 {
672 struct breakpoint *bp;
673 int err;
674
675 if (breakpoint_data == NULL)
676 return 1;
677
678 bp = find_gdb_breakpoint_at (addr);
679 if (bp == NULL)
680 return -1;
681
682 err = delete_breakpoint (bp);
683 if (err)
684 return -1;
685
686 return 0;
687 }
688
689 int
690 gdb_breakpoint_here (CORE_ADDR where)
691 {
692 struct breakpoint *bp = find_gdb_breakpoint_at (where);
693
694 return (bp != NULL);
695 }
696
697 void
698 set_reinsert_breakpoint (CORE_ADDR stop_at)
699 {
700 struct breakpoint *bp;
701
702 bp = set_breakpoint_at (stop_at, NULL);
703 bp->type = reinsert_breakpoint;
704 }
705
/* Delete all reinsert breakpoints of the current process, releasing
   their raw breakpoints (which uninserts them from the inferior when
   the last reference goes away).  Errors from release_breakpoint are
   ignored.  */

void
delete_reinsert_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->type == reinsert_breakpoint)
	{
	  /* Unlink, release, and continue from the element that now
	     occupies this slot.  */
	  *bp_link = bp->next;
	  release_breakpoint (proc, bp);
	  bp = *bp_link;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }
}
730
/* Temporarily remove raw breakpoint BP from the inferior, keeping
   the object (and its `inserted' state cleared) so it can be
   reinserted later.  A no-op if BP is not currently inserted.  */

static void
uninsert_raw_breakpoint (struct raw_breakpoint *bp)
{
  if (bp->inserted)
    {
      int err;

      bp->inserted = 0;
      /* Since there can be fast tracepoint jumps inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked this breakpoint uninserted above.  Also note
	 that we need to pass the current shadow contents, because
	 write_inferior_memory updates any shadow memory with what we
	 pass here, and we want that to be a nop.  */
      err = write_inferior_memory (bp->pc, bp->old_data,
				   breakpoint_len);
      if (err != 0)
	{
	  /* The write failed; consider the breakpoint still
	     inserted.  */
	  bp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert raw breakpoint at 0x%s (%s).\n",
		     paddress (bp->pc), strerror (err));
	}
    }
}
760
761 void
762 uninsert_breakpoints_at (CORE_ADDR pc)
763 {
764 struct raw_breakpoint *bp;
765
766 bp = find_raw_breakpoint_at (pc);
767 if (bp == NULL)
768 {
769 /* This can happen when we remove all breakpoints while handling
770 a step-over. */
771 if (debug_threads)
772 fprintf (stderr,
773 "Could not find breakpoint at 0x%s "
774 "in list (uninserting).\n",
775 paddress (pc));
776 return;
777 }
778
779 if (bp->inserted)
780 uninsert_raw_breakpoint (bp);
781 }
782
783 void
784 uninsert_all_breakpoints (void)
785 {
786 struct process_info *proc = current_process ();
787 struct raw_breakpoint *bp;
788
789 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
790 if (bp->inserted)
791 uninsert_raw_breakpoint (bp);
792 }
793
794 static void
795 reinsert_raw_breakpoint (struct raw_breakpoint *bp)
796 {
797 int err;
798
799 if (bp->inserted)
800 error ("Breakpoint already inserted at reinsert time.");
801
802 err = (*the_target->write_memory) (bp->pc, breakpoint_data,
803 breakpoint_len);
804 if (err == 0)
805 bp->inserted = 1;
806 else if (debug_threads)
807 fprintf (stderr,
808 "Failed to reinsert breakpoint at 0x%s (%s).\n",
809 paddress (bp->pc), strerror (err));
810 }
811
812 void
813 reinsert_breakpoints_at (CORE_ADDR pc)
814 {
815 struct raw_breakpoint *bp;
816
817 bp = find_raw_breakpoint_at (pc);
818 if (bp == NULL)
819 {
820 /* This can happen when we remove all breakpoints while handling
821 a step-over. */
822 if (debug_threads)
823 fprintf (stderr,
824 "Could not find raw breakpoint at 0x%s "
825 "in list (reinserting).\n",
826 paddress (pc));
827 return;
828 }
829
830 reinsert_raw_breakpoint (bp);
831 }
832
833 void
834 reinsert_all_breakpoints (void)
835 {
836 struct process_info *proc = current_process ();
837 struct raw_breakpoint *bp;
838
839 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
840 if (!bp->inserted)
841 reinsert_raw_breakpoint (bp);
842 }
843
/* Called when the inferior stops at STOP_PC: run the handler of
   every high-level breakpoint at that address, and delete each
   breakpoint whose handler returns non-zero.  Warns and bails out if
   a matching breakpoint is no longer inserted.  */

void
check_breakpoints (CORE_ADDR stop_pc)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->raw->pc == stop_pc)
	{
	  if (!bp->raw->inserted)
	    {
	      warning ("Hit a removed breakpoint?");
	      return;
	    }

	  /* A non-zero return from the handler requests deletion.  */
	  if (bp->handler != NULL && (*bp->handler) (stop_pc))
	    {
	      /* Unlink and release; BP_LINK now holds the next
		 element, so don't advance it.  */
	      *bp_link = bp->next;

	      release_breakpoint (proc, bp);

	      bp = *bp_link;
	      continue;
	    }
	}

      bp_link = &bp->next;
      bp = *bp_link;
    }
}
878
879 void
880 set_breakpoint_data (const unsigned char *bp_data, int bp_len)
881 {
882 breakpoint_data = bp_data;
883 breakpoint_len = bp_len;
884 }
885
886 int
887 breakpoint_here (CORE_ADDR addr)
888 {
889 return (find_raw_breakpoint_at (addr) != NULL);
890 }
891
892 int
893 breakpoint_inserted_here (CORE_ADDR addr)
894 {
895 struct raw_breakpoint *bp;
896
897 bp = find_raw_breakpoint_at (addr);
898
899 return (bp != NULL && bp->inserted);
900 }
901
902 static int
903 validate_inserted_breakpoint (struct raw_breakpoint *bp)
904 {
905 unsigned char *buf;
906 int err;
907
908 gdb_assert (bp->inserted);
909
910 buf = alloca (breakpoint_len);
911 err = (*the_target->read_memory) (bp->pc, buf, breakpoint_len);
912 if (err || memcmp (buf, breakpoint_data, breakpoint_len) != 0)
913 {
914 /* Tag it as gone. */
915 bp->inserted = 0;
916 bp->shlib_disabled = 1;
917 return 0;
918 }
919
920 return 1;
921 }
922
923 static void
924 delete_disabled_breakpoints (void)
925 {
926 struct process_info *proc = current_process ();
927 struct breakpoint *bp, *next;
928
929 for (bp = proc->breakpoints; bp != NULL; bp = next)
930 {
931 next = bp->next;
932 if (bp->raw->shlib_disabled)
933 delete_breakpoint_1 (proc, bp);
934 }
935 }
936
937 /* Check if breakpoints we inserted still appear to be inserted. They
938 may disappear due to a shared library unload, and worse, a new
939 shared library may be reloaded at the same address as the
940 previously unloaded one. If that happens, we should make sure that
941 the shadow memory of the old breakpoints isn't used when reading or
942 writing memory. */
943
944 void
945 validate_breakpoints (void)
946 {
947 struct process_info *proc = current_process ();
948 struct breakpoint *bp;
949
950 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
951 {
952 if (bp->raw->inserted)
953 validate_inserted_breakpoint (bp->raw);
954 }
955
956 delete_disabled_breakpoints ();
957 }
958
/* Adjust a just-read memory range: for every fast tracepoint jump
   and raw breakpoint overlapping [MEM_ADDR, MEM_ADDR + MEM_LEN),
   replace the corresponding bytes of BUF with the shadow (original)
   instruction bytes, so the client never sees the traps/jumps we
   planted.  Breakpoints found to be no longer planted are tagged and
   deleted.  */

void
check_mem_read (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR bp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps with no overlap with the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Clip [start, end) to the intersection of the jump and the
	 read range.  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_shadow (jp) + copy_offset,
		copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints with no overlap with the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Clip [start, end) to the intersection of the breakpoint and
	 the read range.  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      if (bp->inserted)
	{
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, bp->old_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1032
/* Adjust a to-be-written memory range: for every fast tracepoint
   jump and raw breakpoint overlapping [MEM_ADDR, MEM_ADDR +
   MEM_LEN), record the incoming bytes from BUF into the shadow
   memory, and replace the bytes of BUF with the jump/trap
   instruction where it is inserted, so the write doesn't clobber
   them.  Breakpoints found to be no longer planted are tagged and
   deleted.  */

void
check_mem_write (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  /* First fast tracepoint jumps, then breakpoint traps on top.  */

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR jp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps with no overlap with the write range.  */
      if (mem_addr >= jp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Clip [start, end) to the intersection of the jump and the
	 write range.  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = jp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      /* Update the shadow with the new contents, then make the
	 actual write plant the jump instruction instead.  */
      memcpy (fast_tracepoint_jump_shadow (jp) + copy_offset,
	      buf + buf_offset, copy_len);
      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_insn (jp) + copy_offset, copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints with no overlap with the write range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Clip [start, end) to the intersection of the breakpoint and
	 the write range.  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      /* Update the shadow with the new contents, then make the
	 actual write plant the trap instruction instead.  */
      memcpy (bp->old_data + copy_offset, buf + buf_offset, copy_len);
      if (bp->inserted)
	{
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1110
1111 /* Delete all breakpoints, and un-insert them from the inferior. */
1112
1113 void
1114 delete_all_breakpoints (void)
1115 {
1116 struct process_info *proc = current_process ();
1117
1118 while (proc->breakpoints)
1119 delete_breakpoint_1 (proc, proc->breakpoints);
1120 }
1121
1122 /* Clear the "inserted" flag in all breakpoints. */
1123
1124 void
1125 mark_breakpoints_out (struct process_info *proc)
1126 {
1127 struct raw_breakpoint *raw_bp;
1128
1129 for (raw_bp = proc->raw_breakpoints; raw_bp != NULL; raw_bp = raw_bp->next)
1130 raw_bp->inserted = 0;
1131 }
1132
1133 /* Release all breakpoints, but do not try to un-insert them from the
1134 inferior. */
1135
1136 void
1137 free_all_breakpoints (struct process_info *proc)
1138 {
1139 mark_breakpoints_out (proc);
1140
1141 /* Note: use PROC explicitly instead of deferring to
1142 delete_all_breakpoints --- CURRENT_INFERIOR may already have been
1143 released when we get here. There should be no call to
1144 current_process from here on. */
1145 while (proc->breakpoints)
1146 delete_breakpoint_1 (proc, proc->breakpoints);
1147 }