/* Memory breakpoint operations for the remote server for GDB.
   Copyright (C) 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"

const unsigned char *breakpoint_data;
int breakpoint_len;

#define MAX_BREAKPOINT_LEN 8
/* GDB will never try to install multiple breakpoints at the same
   address.  However, we also need to keep track of internal
   breakpoints, so we must be able to install multiple breakpoints at
   the same address transparently.  We keep track of two different,
   closely related structures.  A raw breakpoint manages the low
   level, close-to-the-metal aspects of a breakpoint: it holds the
   breakpoint address and a buffer with a copy of the instructions
   that would be in memory had no breakpoint been inserted there (we
   call that the shadow memory of the breakpoint).  We occasionally
   need to temporarily uninsert a breakpoint without the client
   knowing about it (e.g., to step over an internal breakpoint), so we
   keep an `inserted' state associated with this low level breakpoint
   structure.  There can only be one such object for a given address.
   Then, we have (a bit higher level) breakpoints.  This structure
   holds a callback to be called whenever the breakpoint is hit, a
   high-level type, and a link to a low level raw breakpoint.  There
   can be many high-level breakpoints at the same address, and all of
   them will point to the same raw breakpoint, which is reference
   counted.  */
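
/* For example (an illustrative scenario, not code): if GDB requests a
   Z0 breakpoint at an address where gdbserver already keeps an
   internal breakpoint, there will be two struct breakpoint objects
   but a single struct raw_breakpoint with a refcount of 2; the trap
   instruction is written into the inferior only once, and is removed
   only when the last reference is released.  */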

/* The low level, physical, raw breakpoint.  */
struct raw_breakpoint
{
  struct raw_breakpoint *next;

  /* A reference count.  Each high level breakpoint referencing this
     raw breakpoint accounts for one reference.  */
  int refcount;

  /* The breakpoint's insertion address.  There can only be one raw
     breakpoint for a given PC.  */
  CORE_ADDR pc;

  /* The breakpoint's shadow memory.  */
  unsigned char old_data[MAX_BREAKPOINT_LEN];

  /* Non-zero if this breakpoint is currently inserted in the
     inferior.  */
  int inserted;

  /* Non-zero if this breakpoint is currently disabled because we no
     longer detect it as inserted.  */
  int shlib_disabled;
};

/* The type of a breakpoint.  */
enum bkpt_type
  {
    /* A GDB breakpoint, requested with a Z0 packet.  */
    gdb_breakpoint,

    /* A basic-software-single-step breakpoint.  */
    reinsert_breakpoint,

    /* Any other breakpoint type that doesn't require specific
       treatment goes here.  E.g., an event breakpoint.  */
    other_breakpoint,
  };

/* A high level (in gdbserver's perspective) breakpoint.  */
struct breakpoint
{
  struct breakpoint *next;

  /* The breakpoint's type.  */
  enum bkpt_type type;

  /* Link to this breakpoint's raw breakpoint.  This is always
     non-NULL.  */
  struct raw_breakpoint *raw;

  /* Function to call when we hit this breakpoint.  If it returns 1,
     the breakpoint is deleted; if it returns 0, or if this callback
     is NULL, the breakpoint is left inserted.  */
  int (*handler) (CORE_ADDR);
};

static struct raw_breakpoint *
find_raw_breakpoint_at (CORE_ADDR where)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp;

  for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
    if (bp->pc == where)
      return bp;

  return NULL;
}

static struct raw_breakpoint *
set_raw_breakpoint_at (CORE_ADDR where)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp;
  int err;

  if (breakpoint_data == NULL)
    error ("Target does not support breakpoints.");

  bp = find_raw_breakpoint_at (where);
  if (bp != NULL)
    {
      bp->refcount++;
      return bp;
    }

  bp = xcalloc (1, sizeof (*bp));
  bp->pc = where;
  bp->refcount = 1;

  /* Note that there can be fast tracepoint jumps installed in the
     same memory range, so to get at the original memory, we need to
     use read_inferior_memory, which masks those out.  */
  err = read_inferior_memory (where, bp->old_data, breakpoint_len);
  if (err != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Failed to read shadow memory of"
                 " breakpoint at 0x%s (%s).\n",
                 paddress (where), strerror (err));
      free (bp);
      return NULL;
    }

  err = (*the_target->write_memory) (where, breakpoint_data,
                                     breakpoint_len);
  if (err != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Failed to insert breakpoint at 0x%s (%s).\n",
                 paddress (where), strerror (err));
      free (bp);
      return NULL;
    }

  /* Link the breakpoint in.  */
  bp->inserted = 1;
  bp->next = proc->raw_breakpoints;
  proc->raw_breakpoints = bp;
  return bp;
}

/* Notice that breakpoint traps are always installed on top of fast
   tracepoint jumps.  This holds even if the fast tracepoint is
   installed after the breakpoint.  It means that a stopping
   breakpoint or tracepoint has higher "priority", which in turn
   allows having fast and slow tracepoints (and breakpoints) at the
   same address behave correctly.  */
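
/* Concretely, check_mem_write below first folds the bytes being
   written into any fast tracepoint jump shadows and then overlays
   breakpoint traps on the write buffer, so where a trap and a jump
   overlap it is the trap bytes that end up in inferior memory.  */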


/* A fast tracepoint jump.  */

struct fast_tracepoint_jump
{
  struct fast_tracepoint_jump *next;

  /* A reference count.  GDB can install more than one fast tracepoint
     at the same address (each with its own action list, for
     example).  */
  int refcount;

  /* The fast tracepoint's insertion address.  There can only be one
     of these for a given PC.  */
  CORE_ADDR pc;

  /* Non-zero if this fast tracepoint jump is currently inserted in
     the inferior.  */
  int inserted;

  /* The length of the jump instruction.  */
  int length;

  /* A poor-man's flexible array member, holding both the jump
     instruction to insert, and a copy of the instruction that would
     be in memory had the jump not been inserted there (the shadow
     memory of the tracepoint jump).  */
  unsigned char insn_and_shadow[0];
};

/* Fast tracepoint FP's jump instruction to insert.  */
#define fast_tracepoint_jump_insn(fp) \
  ((fp)->insn_and_shadow + 0)

/* The shadow memory of fast tracepoint jump FP.  */
#define fast_tracepoint_jump_shadow(fp) \
  ((fp)->insn_and_shadow + (fp)->length)

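
/* The insn_and_shadow flexible array is laid out as two consecutive
   LENGTH-byte regions: bytes [0, length) hold the jump instruction to
   write into the inferior, and bytes [length, 2 * length) hold the
   shadow copy of the original memory, which is why
   set_fast_tracepoint_jump below allocates sizeof (*jp) + length * 2
   bytes.  */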

/* Return the fast tracepoint jump set at WHERE.  */

static struct fast_tracepoint_jump *
find_fast_tracepoint_jump_at (CORE_ADDR where)
{
  struct process_info *proc = current_process ();
  struct fast_tracepoint_jump *jp;

  for (jp = proc->fast_tracepoint_jumps; jp != NULL; jp = jp->next)
    if (jp->pc == where)
      return jp;

  return NULL;
}

int
fast_tracepoint_jump_here (CORE_ADDR where)
{
  struct fast_tracepoint_jump *jp = find_fast_tracepoint_jump_at (where);

  return (jp != NULL);
}

int
delete_fast_tracepoint_jump (struct fast_tracepoint_jump *todel)
{
  struct fast_tracepoint_jump *bp, **bp_link;
  int ret;
  struct process_info *proc = current_process ();

  bp = proc->fast_tracepoint_jumps;
  bp_link = &proc->fast_tracepoint_jumps;

  while (bp)
    {
      if (bp == todel)
        {
          if (--bp->refcount == 0)
            {
              struct fast_tracepoint_jump *prev_bp_link = *bp_link;

              /* Unlink it.  */
              *bp_link = bp->next;

              /* Since there can be breakpoints inserted in the same
                 address range, we use `write_inferior_memory', which
                 takes care of layering breakpoints on top of fast
                 tracepoints, and on top of the buffer we pass it.
                 This works because we've already unlinked the fast
                 tracepoint jump above.  Also note that we need to
                 pass the current shadow contents, because
                 write_inferior_memory updates any shadow memory with
                 what we pass here, and we want that to be a nop.  */
              ret = write_inferior_memory (bp->pc,
                                           fast_tracepoint_jump_shadow (bp),
                                           bp->length);
              if (ret != 0)
                {
                  /* Something went wrong, relink the jump.  */
                  *bp_link = prev_bp_link;

                  if (debug_threads)
                    fprintf (stderr,
                             "Failed to uninsert fast tracepoint jump "
                             "at 0x%s (%s) while deleting it.\n",
                             paddress (bp->pc), strerror (ret));
                  return ret;
                }

              free (bp);
            }

          return 0;
        }
      else
        {
          bp_link = &bp->next;
          bp = *bp_link;
        }
    }

  warning ("Could not find fast tracepoint jump in list.");
  return ENOENT;
}

struct fast_tracepoint_jump *
set_fast_tracepoint_jump (CORE_ADDR where,
                          unsigned char *insn, ULONGEST length)
{
  struct process_info *proc = current_process ();
  struct fast_tracepoint_jump *jp;
  int err;

  /* We refcount fast tracepoint jumps.  Check if we already know
     about a jump at this address.  */
  jp = find_fast_tracepoint_jump_at (where);
  if (jp != NULL)
    {
      jp->refcount++;
      return jp;
    }

  /* We don't, so create a new object.  Double the length, because the
     flexible array member holds both the jump insn, and the
     shadow.  */
  jp = xcalloc (1, sizeof (*jp) + (length * 2));
  jp->pc = where;
  jp->length = length;
  memcpy (fast_tracepoint_jump_insn (jp), insn, length);
  jp->refcount = 1;

  /* Note that there can be trap breakpoints inserted in the same
     address range.  To access the original memory contents, we use
     `read_inferior_memory', which masks out breakpoints.  */
  err = read_inferior_memory (where,
                              fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Failed to read shadow memory of"
                 " fast tracepoint at 0x%s (%s).\n",
                 paddress (where), strerror (err));
      free (jp);
      return NULL;
    }

  /* Link the jump in.  */
  jp->inserted = 1;
  jp->next = proc->fast_tracepoint_jumps;
  proc->fast_tracepoint_jumps = jp;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, on top of the
     buffer we pass it.  This works because we've already linked in
     the fast tracepoint jump above.  Also note that we need to pass
     the current shadow contents, because write_inferior_memory
     updates any shadow memory with what we pass here, and we want
     that to be a nop.  */
  err = write_inferior_memory (where, fast_tracepoint_jump_shadow (jp), length);
  if (err != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Failed to insert fast tracepoint jump at 0x%s (%s).\n",
                 paddress (where), strerror (err));

      /* Unlink it.  */
      proc->fast_tracepoint_jumps = jp->next;
      free (jp);

      return NULL;
    }

  return jp;
}

void
uninsert_fast_tracepoint_jumps_at (CORE_ADDR pc)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (pc);
  if (jp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
         a step-over.  */
      if (debug_threads)
        fprintf (stderr,
                 "Could not find fast tracepoint jump at 0x%s "
                 "in list (uninserting).\n",
                 paddress (pc));
      return;
    }

  if (jp->inserted)
    {
      jp->inserted = 0;

      /* Since there can be trap breakpoints inserted in the same
         address range, we use `write_inferior_memory', which takes
         care of layering breakpoints on top of fast tracepoints, and
         on top of the buffer we pass it.  This works because we've
         already marked the fast tracepoint jump uninserted above.
         Also note that we need to pass the current shadow contents,
         because write_inferior_memory updates any shadow memory with
         what we pass here, and we want that to be a nop.  */
      err = write_inferior_memory (jp->pc,
                                   fast_tracepoint_jump_shadow (jp),
                                   jp->length);
      if (err != 0)
        {
          jp->inserted = 1;

          if (debug_threads)
            fprintf (stderr,
                     "Failed to uninsert fast tracepoint jump at 0x%s (%s).\n",
                     paddress (pc), strerror (err));
        }
    }
}

void
reinsert_fast_tracepoint_jumps_at (CORE_ADDR where)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (where);
  if (jp == NULL)
    {
      /* This can happen when we remove breakpoints when a tracepoint
         hit causes a tracing stop, while handling a step-over.  */
      if (debug_threads)
        fprintf (stderr,
                 "Could not find fast tracepoint jump at 0x%s "
                 "in list (reinserting).\n",
                 paddress (where));
      return;
    }

  if (jp->inserted)
    error ("Jump already inserted at reinsert time.");

  jp->inserted = 1;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, and on top of
     the buffer we pass it.  This works because we've already marked
     the fast tracepoint jump inserted above.  Also note that we need
     to pass the current shadow contents, because
     write_inferior_memory updates any shadow memory with what we pass
     here, and we want that to be a nop.  */
  err = write_inferior_memory (where,
                               fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      jp->inserted = 0;

      if (debug_threads)
        fprintf (stderr,
                 "Failed to reinsert fast tracepoint jump at 0x%s (%s).\n",
                 paddress (where), strerror (err));
    }
}

struct breakpoint *
set_breakpoint_at (CORE_ADDR where, int (*handler) (CORE_ADDR))
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp;
  struct raw_breakpoint *raw;

  raw = set_raw_breakpoint_at (where);

  if (raw == NULL)
    {
      /* warn?  */
      return NULL;
    }

  bp = xcalloc (1, sizeof (struct breakpoint));
  bp->type = other_breakpoint;

  bp->raw = raw;
  bp->handler = handler;

  bp->next = proc->breakpoints;
  proc->breakpoints = bp;

  return bp;
}
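
/* Illustrative usage (hypothetical caller, not code in this file): an
   internal event breakpoint whose handler wants it removed after the
   first hit could be planted as

     set_breakpoint_at (event_addr, handle_event);

   where handle_event returns 1 to have check_breakpoints delete the
   breakpoint after it is hit, or 0 to leave it inserted.  EVENT_ADDR
   and HANDLE_EVENT are made-up names for this example.  */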

static int
delete_raw_breakpoint (struct process_info *proc, struct raw_breakpoint *todel)
{
  struct raw_breakpoint *bp, **bp_link;
  int ret;

  bp = proc->raw_breakpoints;
  bp_link = &proc->raw_breakpoints;

  while (bp)
    {
      if (bp == todel)
        {
          if (bp->inserted)
            {
              struct raw_breakpoint *prev_bp_link = *bp_link;

              *bp_link = bp->next;

              /* Since there can be trap breakpoints inserted in the
                 same address range, we use `write_inferior_memory',
                 which takes care of layering breakpoints on top of
                 fast tracepoints, and on top of the buffer we pass
                 it.  This works because we've already unlinked this
                 raw breakpoint above.  Also note that we need to pass
                 the current shadow contents, because
                 write_inferior_memory updates any shadow memory with
                 what we pass here, and we want that to be a nop.  */
              ret = write_inferior_memory (bp->pc, bp->old_data,
                                           breakpoint_len);
              if (ret != 0)
                {
                  /* Something went wrong, relink the breakpoint.  */
                  *bp_link = prev_bp_link;

                  if (debug_threads)
                    fprintf (stderr,
                             "Failed to uninsert raw breakpoint "
                             "at 0x%s (%s) while deleting it.\n",
                             paddress (bp->pc), strerror (ret));
                  return ret;
                }

            }
          else
            *bp_link = bp->next;

          free (bp);
          return 0;
        }
      else
        {
          bp_link = &bp->next;
          bp = *bp_link;
        }
    }

  warning ("Could not find raw breakpoint in list.");
  return ENOENT;
}

static int
release_breakpoint (struct process_info *proc, struct breakpoint *bp)
{
  int newrefcount;
  int ret;

  newrefcount = bp->raw->refcount - 1;
  if (newrefcount == 0)
    {
      ret = delete_raw_breakpoint (proc, bp->raw);
      if (ret != 0)
        return ret;
    }
  else
    bp->raw->refcount = newrefcount;

  free (bp);

  return 0;
}

static int
delete_breakpoint_1 (struct process_info *proc, struct breakpoint *todel)
{
  struct breakpoint *bp, **bp_link;
  int err;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp == todel)
        {
          *bp_link = bp->next;

          err = release_breakpoint (proc, bp);
          if (err != 0)
            return err;

          bp = *bp_link;
          return 0;
        }
      else
        {
          bp_link = &bp->next;
          bp = *bp_link;
        }
    }

  warning ("Could not find breakpoint in list.");
  return ENOENT;
}

int
delete_breakpoint (struct breakpoint *todel)
{
  struct process_info *proc = current_process ();
  return delete_breakpoint_1 (proc, todel);
}

static struct breakpoint *
find_gdb_breakpoint_at (CORE_ADDR where)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp;

  for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
    if (bp->type == gdb_breakpoint && bp->raw->pc == where)
      return bp;

  return NULL;
}

int
set_gdb_breakpoint_at (CORE_ADDR where)
{
  struct breakpoint *bp;

  if (breakpoint_data == NULL)
    return 1;

  /* If we see GDB inserting a second breakpoint at the same address,
     then the first breakpoint must have disappeared due to a shared
     library unload.  On targets where shared libraries are handled
     purely in userspace (SVR4, for example), GDBserver can't tell
     whether a library was loaded or unloaded.  Since we refcount
     breakpoints, if we didn't handle this case we'd just increase the
     refcount of the previous breakpoint at this address; but the trap
     is no longer planted in the inferior, so the breakpoint would
     never be hit.  */
  bp = find_gdb_breakpoint_at (where);
  if (bp != NULL)
    {
      delete_gdb_breakpoint_at (where);

      /* Might as well validate all other breakpoints.  */
      validate_breakpoints ();
    }

  bp = set_breakpoint_at (where, NULL);
  if (bp == NULL)
    return -1;

  bp->type = gdb_breakpoint;
  return 0;
}

int
delete_gdb_breakpoint_at (CORE_ADDR addr)
{
  struct breakpoint *bp;
  int err;

  if (breakpoint_data == NULL)
    return 1;

  bp = find_gdb_breakpoint_at (addr);
  if (bp == NULL)
    return -1;

  err = delete_breakpoint (bp);
  if (err)
    return -1;

  return 0;
}

int
gdb_breakpoint_here (CORE_ADDR where)
{
  struct breakpoint *bp = find_gdb_breakpoint_at (where);

  return (bp != NULL);
}

void
set_reinsert_breakpoint (CORE_ADDR stop_at)
{
  struct breakpoint *bp;

  bp = set_breakpoint_at (stop_at, NULL);
  bp->type = reinsert_breakpoint;
}
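
/* A note on reinsert breakpoints: they are gdbserver-internal and, as
   the enum bkpt_type comment above says, implement a basic form of
   software single-step.  The expected use is to plant one at the
   address the thread should stop at while stepping over another
   breakpoint, and to remove them all with delete_reinsert_breakpoints
   once the step-over is done; nothing in this module enforces that
   usage.  */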

void
delete_reinsert_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->type == reinsert_breakpoint)
        {
          *bp_link = bp->next;
          release_breakpoint (proc, bp);
          bp = *bp_link;
        }
      else
        {
          bp_link = &bp->next;
          bp = *bp_link;
        }
    }
}

static void
uninsert_raw_breakpoint (struct raw_breakpoint *bp)
{
  if (bp->inserted)
    {
      int err;

      bp->inserted = 0;
      /* Since there can be fast tracepoint jumps inserted in the same
         address range, we use `write_inferior_memory', which takes
         care of layering breakpoints on top of fast tracepoints, and
         on top of the buffer we pass it.  This works because we've
         already marked this raw breakpoint uninserted above.  Also
         note that we need to pass the current shadow contents,
         because write_inferior_memory updates any shadow memory with
         what we pass here, and we want that to be a nop.  */
      err = write_inferior_memory (bp->pc, bp->old_data,
                                   breakpoint_len);
      if (err != 0)
        {
          bp->inserted = 1;

          if (debug_threads)
            fprintf (stderr,
                     "Failed to uninsert raw breakpoint at 0x%s (%s).\n",
                     paddress (bp->pc), strerror (err));
        }
    }
}

void
uninsert_breakpoints_at (CORE_ADDR pc)
{
  struct raw_breakpoint *bp;

  bp = find_raw_breakpoint_at (pc);
  if (bp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
         a step-over.  */
      if (debug_threads)
        fprintf (stderr,
                 "Could not find breakpoint at 0x%s "
                 "in list (uninserting).\n",
                 paddress (pc));
      return;
    }

  if (bp->inserted)
    uninsert_raw_breakpoint (bp);
}

void
uninsert_all_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp;

  for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
    if (bp->inserted)
      uninsert_raw_breakpoint (bp);
}

static void
reinsert_raw_breakpoint (struct raw_breakpoint *bp)
{
  int err;

  if (bp->inserted)
    error ("Breakpoint already inserted at reinsert time.");

  err = (*the_target->write_memory) (bp->pc, breakpoint_data,
                                     breakpoint_len);
  if (err == 0)
    bp->inserted = 1;
  else if (debug_threads)
    fprintf (stderr,
             "Failed to reinsert breakpoint at 0x%s (%s).\n",
             paddress (bp->pc), strerror (err));
}

void
reinsert_breakpoints_at (CORE_ADDR pc)
{
  struct raw_breakpoint *bp;

  bp = find_raw_breakpoint_at (pc);
  if (bp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
         a step-over.  */
      if (debug_threads)
        fprintf (stderr,
                 "Could not find raw breakpoint at 0x%s "
                 "in list (reinserting).\n",
                 paddress (pc));
      return;
    }

  reinsert_raw_breakpoint (bp);
}

void
reinsert_all_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp;

  for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
    if (!bp->inserted)
      reinsert_raw_breakpoint (bp);
}

void
check_breakpoints (CORE_ADDR stop_pc)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->raw->pc == stop_pc)
        {
          if (!bp->raw->inserted)
            {
              warning ("Hit a removed breakpoint?");
              return;
            }

          if (bp->handler != NULL && (*bp->handler) (stop_pc))
            {
              *bp_link = bp->next;

              release_breakpoint (proc, bp);

              bp = *bp_link;
              continue;
            }
        }

      bp_link = &bp->next;
      bp = *bp_link;
    }
}

void
set_breakpoint_data (const unsigned char *bp_data, int bp_len)
{
  breakpoint_data = bp_data;
  breakpoint_len = bp_len;
}
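
/* Illustrative example (not code in this file): a target backend
   registers its software breakpoint instruction at startup, roughly

     static const unsigned char example_breakpoint_insn[] = { 0xcc };
     ...
     set_breakpoint_data (example_breakpoint_insn,
                          sizeof (example_breakpoint_insn));

   The 0xcc opcode and the identifier names above are just a plausible
   single-byte trap for illustration; the actual instruction and its
   length are target specific.  */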

int
breakpoint_here (CORE_ADDR addr)
{
  return (find_raw_breakpoint_at (addr) != NULL);
}

int
breakpoint_inserted_here (CORE_ADDR addr)
{
  struct raw_breakpoint *bp;

  bp = find_raw_breakpoint_at (addr);

  return (bp != NULL && bp->inserted);
}

static int
validate_inserted_breakpoint (struct raw_breakpoint *bp)
{
  unsigned char *buf;
  int err;

  gdb_assert (bp->inserted);

  buf = alloca (breakpoint_len);
  err = (*the_target->read_memory) (bp->pc, buf, breakpoint_len);
  if (err || memcmp (buf, breakpoint_data, breakpoint_len) != 0)
    {
      /* Tag it as gone.  */
      bp->inserted = 0;
      bp->shlib_disabled = 1;
      return 0;
    }

  return 1;
}

static void
delete_disabled_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, *next;

  for (bp = proc->breakpoints; bp != NULL; bp = next)
    {
      next = bp->next;
      if (bp->raw->shlib_disabled)
        delete_breakpoint_1 (proc, bp);
    }
}

/* Check if breakpoints we inserted still appear to be inserted.  They
   may disappear due to a shared library unload, and worse, a new
   shared library may be loaded at the same address as the previously
   unloaded one.  If that happens, we should make sure that the shadow
   memory of the old breakpoints isn't used when reading or writing
   memory.  */

void
validate_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp;

  for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
    {
      if (bp->raw->inserted)
        validate_inserted_breakpoint (bp->raw);
    }

  delete_disabled_breakpoints ();
}

void
check_mem_read (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR jp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      if (mem_addr >= jp_end)
        continue;
      if (jp->pc >= mem_end)
        continue;

      start = jp->pc;
      if (mem_addr > start)
        start = mem_addr;

      end = jp_end;
      if (end > mem_end)
        end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      if (jp->inserted)
        memcpy (buf + buf_offset,
                fast_tracepoint_jump_shadow (jp) + copy_offset,
                copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      if (mem_addr >= bp_end)
        continue;
      if (bp->pc >= mem_end)
        continue;

      start = bp->pc;
      if (mem_addr > start)
        start = mem_addr;

      end = bp_end;
      if (end > mem_end)
        end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      if (bp->inserted)
        {
          if (validate_inserted_breakpoint (bp))
            memcpy (buf + buf_offset, bp->old_data + copy_offset, copy_len);
          else
            disabled_one = 1;
        }
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
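
/* Worked example of the overlap arithmetic above (illustrative
   numbers only): with breakpoint_len == 4, a raw breakpoint at PC
   0x1002 and a read of [0x1000, 0x1008) give start == 0x1002,
   end == 0x1006, copy_len == 4, copy_offset == 0 and buf_offset == 2,
   so the four shadow bytes replace buf[2] through buf[5].  */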

void
check_mem_write (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  /* First fast tracepoint jumps, then breakpoint traps on top.  */

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR jp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      if (mem_addr >= jp_end)
        continue;
      if (jp->pc >= mem_end)
        continue;

      start = jp->pc;
      if (mem_addr > start)
        start = mem_addr;

      end = jp_end;
      if (end > mem_end)
        end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      memcpy (fast_tracepoint_jump_shadow (jp) + copy_offset,
              buf + buf_offset, copy_len);
      if (jp->inserted)
        memcpy (buf + buf_offset,
                fast_tracepoint_jump_insn (jp) + copy_offset, copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      if (mem_addr >= bp_end)
        continue;
      if (bp->pc >= mem_end)
        continue;

      start = bp->pc;
      if (mem_addr > start)
        start = mem_addr;

      end = bp_end;
      if (end > mem_end)
        end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      memcpy (bp->old_data + copy_offset, buf + buf_offset, copy_len);
      if (bp->inserted)
        {
          if (validate_inserted_breakpoint (bp))
            memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);
          else
            disabled_one = 1;
        }
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
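
/* Note the two-step dance above: the caller's bytes are folded into
   each overlapping shadow (so they take effect once the breakpoint or
   jump is removed), while the caller's buffer is patched to carry the
   trap or jump bytes for the overlapping region, so the actual write
   leaves whatever is currently inserted undisturbed.  Because
   breakpoint traps are processed after fast tracepoint jumps, the
   trap bytes win where the two overlap, which implements the
   "priority" described near the top of this file.  */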

/* Delete all breakpoints, and un-insert them from the inferior.  */

void
delete_all_breakpoints (void)
{
  struct process_info *proc = current_process ();

  while (proc->breakpoints)
    delete_breakpoint_1 (proc, proc->breakpoints);
}

/* Clear the "inserted" flag in all breakpoints.  */

void
mark_breakpoints_out (struct process_info *proc)
{
  struct raw_breakpoint *raw_bp;

  for (raw_bp = proc->raw_breakpoints; raw_bp != NULL; raw_bp = raw_bp->next)
    raw_bp->inserted = 0;
}

/* Release all breakpoints, but do not try to un-insert them from the
   inferior.  */

void
free_all_breakpoints (struct process_info *proc)
{
  mark_breakpoints_out (proc);

  /* Note: use PROC explicitly instead of deferring to
     delete_all_breakpoints --- CURRENT_INFERIOR may already have been
     released when we get here.  There should be no call to
     current_process from here on.  */
  while (proc->breakpoints)
    delete_breakpoint_1 (proc, proc->breakpoints);
}