1 /* Memory breakpoint operations for the remote server for GDB.
2 Copyright (C) 2002, 2003, 2005, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4
5 Contributed by MontaVista Software.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23
24 const unsigned char *breakpoint_data;
25 int breakpoint_len;
26
27 #define MAX_BREAKPOINT_LEN 8
28
29 /* GDB will never try to install multiple breakpoints at the same
30 address. But, we need to keep track of internal breakpoints too,
31 and so we do need to be able to install multiple breakpoints at the
32 same address transparently. We keep track of two different, but
33 closely related, structures. A raw breakpoint, which manages the
34 low-level, close-to-the-metal aspect of a breakpoint. It holds the
35 breakpoint address, and a buffer holding a copy of the instructions
36 that would be in memory had there not been a breakpoint there (we
37 call that the shadow memory of the breakpoint). We occasionally need
38 to temporarily uninsert a breakpoint without the client knowing about
39 it (e.g., to step over an internal breakpoint), so we keep an
40 `inserted' state associated with this low level breakpoint
41 structure. There can only be one such object for a given address.
42 Then, we have (a bit higher level) breakpoints. This structure
43 holds a callback to be called whenever a breakpoint is hit, a
44 high-level type, and a link to a low level raw breakpoint. There
45 can be many high-level breakpoints at the same address, and all of
46 them will point to the same raw breakpoint, which is reference
47 counted. */
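
/* Illustrative sketch (not part of the original code): suppose GDB plants
   a Z0 breakpoint at PC while gdbserver also keeps an internal breakpoint
   there.  Both high-level breakpoints end up sharing one refcounted raw
   breakpoint, so the trap bytes are written once and the original bytes
   are restored only when the last reference goes away.  `my_handler' is a
   hypothetical callback of type int (*) (CORE_ADDR):

     struct breakpoint *user = set_breakpoint_at (pc, NULL);
     struct breakpoint *internal = set_breakpoint_at (pc, my_handler);
     // user->raw == internal->raw, and user->raw->refcount == 2

     delete_breakpoint (user);      // refcount drops to 1; trap stays
     delete_breakpoint (internal);  // refcount hits 0; old_data restored

   (GDB's own Z0 requests go through set_gdb_breakpoint_at below, which
   calls set_breakpoint_at internally.)  */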
48
49 /* The low level, physical, raw breakpoint. */
50 struct raw_breakpoint
51 {
52 struct raw_breakpoint *next;
53
54 /* A reference count. Each high level breakpoint referencing this
55 raw breakpoint accounts for one reference. */
56 int refcount;
57
58 /* The breakpoint's insertion address. There can only be one raw
59 breakpoint for a given PC. */
60 CORE_ADDR pc;
61
62 /* The breakpoint's shadow memory. */
63 unsigned char old_data[MAX_BREAKPOINT_LEN];
64
65 /* Non-zero if this breakpoint is currently inserted in the
66 inferior. */
67 int inserted;
68
69 /* Non-zero if this breakpoint is currently disabled because we no
70 longer detect it as inserted. */
71 int shlib_disabled;
72 };
73
74 /* The type of a breakpoint. */
75 enum bkpt_type
76 {
77 /* A GDB breakpoint, requested with a Z0 packet. */
78 gdb_breakpoint,
79
80 /* A basic-software-single-step breakpoint. */
81 reinsert_breakpoint,
82
83 /* Any other breakpoint type that doesn't require specific
84 treatment goes here. E.g., an event breakpoint. */
85 other_breakpoint,
86 };
87
88 /* A high level (from gdbserver's perspective) breakpoint. */
89 struct breakpoint
90 {
91 struct breakpoint *next;
92
93 /* The breakpoint's type. */
94 enum bkpt_type type;
95
96 /* Link to this breakpoint's raw breakpoint. This is always
97 non-NULL. */
98 struct raw_breakpoint *raw;
99
100 /* Function to call when we hit this breakpoint. If it returns 1,
101 the breakpoint is deleted; if it returns 0, or if this callback
102 is NULL, the breakpoint is left inserted. */
103 int (*handler) (CORE_ADDR);
104 };
105
106 static struct raw_breakpoint *
107 find_raw_breakpoint_at (CORE_ADDR where)
108 {
109 struct process_info *proc = current_process ();
110 struct raw_breakpoint *bp;
111
112 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
113 if (bp->pc == where)
114 return bp;
115
116 return NULL;
117 }
118
119 static struct raw_breakpoint *
120 set_raw_breakpoint_at (CORE_ADDR where)
121 {
122 struct process_info *proc = current_process ();
123 struct raw_breakpoint *bp;
124 int err;
125
126 if (breakpoint_data == NULL)
127 error ("Target does not support breakpoints.");
128
129 bp = find_raw_breakpoint_at (where);
130 if (bp != NULL)
131 {
132 bp->refcount++;
133 return bp;
134 }
135
136 bp = xcalloc (1, sizeof (*bp));
137 bp->pc = where;
138 bp->refcount = 1;
139
140 /* Note that there can be fast tracepoint jumps installed in the
141 same memory range, so to get at the original memory, we need to
142 use read_inferior_memory, which masks those out. */
143 err = read_inferior_memory (where, bp->old_data, breakpoint_len);
144 if (err != 0)
145 {
146 if (debug_threads)
147 fprintf (stderr,
148 "Failed to read shadow memory of"
149 " breakpoint at 0x%s (%s).\n",
150 paddress (where), strerror (err));
151 free (bp);
152 return NULL;
153 }
154
155 err = (*the_target->write_memory) (where, breakpoint_data,
156 breakpoint_len);
157 if (err != 0)
158 {
159 if (debug_threads)
160 fprintf (stderr,
161 "Failed to insert breakpoint at 0x%s (%s).\n",
162 paddress (where), strerror (err));
163 free (bp);
164 return NULL;
165 }
166
167 /* Link the breakpoint in. */
168 bp->inserted = 1;
169 bp->next = proc->raw_breakpoints;
170 proc->raw_breakpoints = bp;
171 return bp;
172 }
173
174 /* Notice that breakpoint traps are always installed on top of fast
175 tracepoint jumps. This holds even if the fast tracepoint is
176 installed later than the breakpoint.
177 This means that a stopping breakpoint or tracepoint has higher
178 "priority". In turn, this allows having fast and slow tracepoints
179 (and breakpoints) at the same address behave correctly. */
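
/* A rough picture (only a sketch; the byte values are made up,
   x86-style, and a real jump need not be 5 bytes long): a 5-byte fast
   tracepoint jump and a 1-byte breakpoint trap both land at PC.

     original insns:     55 89 e5 83 ec ...
     jump insn:          e9 11 22 33 44   (the jump GDB asked us to plant)
     jump shadow:        55 89 e5 83 ec   (the bytes the jump displaced)
     breakpoint shadow:  55               (original byte; the jump is masked out)
     actual memory:      cc 11 22 33 44   (trap byte layered over the jump)

   read_inferior_memory peels both layers off and returns the original
   bytes to the caller; write_inferior_memory re-applies both layers on
   top of whatever the caller writes.  */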
180
181
182 /* A fast tracepoint jump. */
183
184 struct fast_tracepoint_jump
185 {
186 struct fast_tracepoint_jump *next;
187
188 /* A reference count. GDB can install more than one fast tracepoint
189 at the same address (each with its own action list, for
190 example). */
191 int refcount;
192
193 /* The fast tracepoint's insertion address. There can only be one
194 of these for a given PC. */
195 CORE_ADDR pc;
196
197 /* Non-zero if this fast tracepoint jump is currently inserted in
198 the inferior. */
199 int inserted;
200
201 /* The length of the jump instruction. */
202 int length;
203
204 /* A poor-man's flexible array member, holding both the jump
205 instruction to insert, and a copy of the instruction that would
206 be in memory had there not been a jump there (the shadow memory of the
207 tracepoint jump). */
208 unsigned char insn_and_shadow[0];
209 };
210
211 /* Fast tracepoint FP's jump instruction to insert. */
212 #define fast_tracepoint_jump_insn(fp) \
213 ((fp)->insn_and_shadow + 0)
214
215 /* The shadow memory of fast tracepoint jump FP. */
216 #define fast_tracepoint_jump_shadow(fp) \
217 ((fp)->insn_and_shadow + (fp)->length)
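
/* Layout sketch of `insn_and_shadow' (assuming, for illustration, a
   jump of length == 5):

     insn_and_shadow[0 .. 4]   the jump instruction to insert
                               (fast_tracepoint_jump_insn)
     insn_and_shadow[5 .. 9]   the displaced original bytes
                               (fast_tracepoint_jump_shadow)

   which is why set_fast_tracepoint_jump below allocates
   sizeof (*jp) + length * 2 bytes.  */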
218
219
220 /* Return the fast tracepoint jump set at WHERE. */
221
222 static struct fast_tracepoint_jump *
223 find_fast_tracepoint_jump_at (CORE_ADDR where)
224 {
225 struct process_info *proc = current_process ();
226 struct fast_tracepoint_jump *jp;
227
228 for (jp = proc->fast_tracepoint_jumps; jp != NULL; jp = jp->next)
229 if (jp->pc == where)
230 return jp;
231
232 return NULL;
233 }
234
235 int
236 fast_tracepoint_jump_here (CORE_ADDR where)
237 {
238 struct fast_tracepoint_jump *jp = find_fast_tracepoint_jump_at (where);
239
240 return (jp != NULL);
241 }
242
243 int
244 delete_fast_tracepoint_jump (struct fast_tracepoint_jump *todel)
245 {
246 struct fast_tracepoint_jump *bp, **bp_link;
247 int ret;
248 struct process_info *proc = current_process ();
249
250 bp = proc->fast_tracepoint_jumps;
251 bp_link = &proc->fast_tracepoint_jumps;
252
253 while (bp)
254 {
255 if (bp == todel)
256 {
257 if (--bp->refcount == 0)
258 {
259 struct fast_tracepoint_jump *prev_bp_link = *bp_link;
260
261 /* Unlink it. */
262 *bp_link = bp->next;
263
264 /* Since there can be breakpoints inserted in the same
265 address range, we use `write_inferior_memory', which
266 takes care of layering breakpoints on top of fast
267 tracepoints, and on top of the buffer we pass it.
268 This works because we've already unlinked the fast
269 tracepoint jump above. Also note that we need to
270 pass the current shadow contents, because
271 write_inferior_memory updates any shadow memory with
272 what we pass here, and we want that to be a nop. */
273 ret = write_inferior_memory (bp->pc,
274 fast_tracepoint_jump_shadow (bp),
275 bp->length);
276 if (ret != 0)
277 {
278 /* Something went wrong, relink the jump. */
279 *bp_link = prev_bp_link;
280
281 if (debug_threads)
282 fprintf (stderr,
283 "Failed to uninsert fast tracepoint jump "
284 "at 0x%s (%s) while deleting it.\n",
285 paddress (bp->pc), strerror (ret));
286 return ret;
287 }
288
289 free (bp);
290 }
291
292 return 0;
293 }
294 else
295 {
296 bp_link = &bp->next;
297 bp = *bp_link;
298 }
299 }
300
301 warning ("Could not find fast tracepoint jump in list.");
302 return ENOENT;
303 }
304
305 struct fast_tracepoint_jump *
306 set_fast_tracepoint_jump (CORE_ADDR where,
307 unsigned char *insn, ULONGEST length)
308 {
309 struct process_info *proc = current_process ();
310 struct fast_tracepoint_jump *jp;
311 int err;
312
313 /* We refcount fast tracepoint jumps. Check if we already know
314 about a jump at this address. */
315 jp = find_fast_tracepoint_jump_at (where);
316 if (jp != NULL)
317 {
318 jp->refcount++;
319 return jp;
320 }
321
322 /* We don't, so create a new object. Double the length, because the
323 flexible array member holds both the jump insn, and the
324 shadow. */
325 jp = xcalloc (1, sizeof (*jp) + (length * 2));
326 jp->pc = where;
327 jp->length = length;
328 memcpy (fast_tracepoint_jump_insn (jp), insn, length);
329 jp->refcount = 1;
330
331 /* Note that there can be trap breakpoints inserted in the same
332 address range. To access the original memory contents, we use
333 `read_inferior_memory', which masks out breakpoints. */
334 err = read_inferior_memory (where,
335 fast_tracepoint_jump_shadow (jp), jp->length);
336 if (err != 0)
337 {
338 if (debug_threads)
339 fprintf (stderr,
340 "Failed to read shadow memory of"
341 " fast tracepoint at 0x%s (%s).\n",
342 paddress (where), strerror (err));
343 free (jp);
344 return NULL;
345 }
346
347 /* Link the jump in. */
348 jp->inserted = 1;
349 jp->next = proc->fast_tracepoint_jumps;
350 proc->fast_tracepoint_jumps = jp;
351
352 /* Since there can be trap breakpoints inserted in the same address
353 range, we use `write_inferior_memory', which takes care of
354 layering breakpoints on top of fast tracepoints, on top of the
355 buffer we pass it. This works because we've already linked in
356 the fast tracepoint jump above. Also note that we need to pass
357 the current shadow contents, because write_inferior_memory
358 updates any shadow memory with what we pass here, and we want
359 that to be a nop. */
360 err = write_inferior_memory (where, fast_tracepoint_jump_shadow (jp), length);
361 if (err != 0)
362 {
363 if (debug_threads)
364 fprintf (stderr,
365 "Failed to insert fast tracepoint jump at 0x%s (%s).\n",
366 paddress (where), strerror (err));
367
368 /* Unlink it. */
369 proc->fast_tracepoint_jumps = jp->next;
370 free (jp);
371
372 return NULL;
373 }
374
375 return jp;
376 }
377
378 void
379 uninsert_fast_tracepoint_jumps_at (CORE_ADDR pc)
380 {
381 struct fast_tracepoint_jump *jp;
382 int err;
383
384 jp = find_fast_tracepoint_jump_at (pc);
385 if (jp == NULL)
386 {
387 /* This can happen when we remove all breakpoints while handling
388 a step-over. */
389 if (debug_threads)
390 fprintf (stderr,
391 "Could not find fast tracepoint jump at 0x%s "
392 "in list (uninserting).\n",
393 paddress (pc));
394 return;
395 }
396
397 if (jp->inserted)
398 {
399 jp->inserted = 0;
400
401 /* Since there can be trap breakpoints inserted in the same
402 address range, we use `write_inferior_memory', which
403 takes care of layering breakpoints on top of fast
404 tracepoints, and on top of the buffer we pass it. This works
405 because we've already marked the fast tracepoint jump
406 uninserted above. Also note that we need to
407 pass the current shadow contents, because
408 write_inferior_memory updates any shadow memory with what we
409 pass here, and we want that to be a nop. */
410 err = write_inferior_memory (jp->pc,
411 fast_tracepoint_jump_shadow (jp),
412 jp->length);
413 if (err != 0)
414 {
415 jp->inserted = 1;
416
417 if (debug_threads)
418 fprintf (stderr,
419 "Failed to uninsert fast tracepoint jump at 0x%s (%s).\n",
420 paddress (pc), strerror (err));
421 }
422 }
423 }
424
425 void
426 reinsert_fast_tracepoint_jumps_at (CORE_ADDR where)
427 {
428 struct fast_tracepoint_jump *jp;
429 int err;
430
431 jp = find_fast_tracepoint_jump_at (where);
432 if (jp == NULL)
433 {
434 /* This can happen when we remove breakpoints when a tracepoint
435 hit causes a tracing stop, while handling a step-over. */
436 if (debug_threads)
437 fprintf (stderr,
438 "Could not find fast tracepoint jump at 0x%s "
439 "in list (reinserting).\n",
440 paddress (where));
441 return;
442 }
443
444 if (jp->inserted)
445 error ("Jump already inserted at reinsert time.");
446
447 jp->inserted = 1;
448
449 /* Since there can be trap breakpoints inserted in the same address
450 range, we use `write_inferior_memory', which takes care of
451 layering breakpoints on top of fast tracepoints, and on top of
452 the buffer we pass it. This works because we've already marked
453 the fast tracepoint jump inserted above. Also note that we need
454 to pass the current shadow contents, because
455 write_inferior_memory updates any shadow memory with what we pass
456 here, and we want that to be a nop. */
457 err = write_inferior_memory (where,
458 fast_tracepoint_jump_shadow (jp), jp->length);
459 if (err != 0)
460 {
461 jp->inserted = 0;
462
463 if (debug_threads)
464 fprintf (stderr,
465 "Failed to reinsert fast tracepoint jump at 0x%s (%s).\n",
466 paddress (where), strerror (err));
467 }
468 }
469
470 struct breakpoint *
471 set_breakpoint_at (CORE_ADDR where, int (*handler) (CORE_ADDR))
472 {
473 struct process_info *proc = current_process ();
474 struct breakpoint *bp;
475 struct raw_breakpoint *raw;
476
477 raw = set_raw_breakpoint_at (where);
478
479 if (raw == NULL)
480 {
481 /* warn? */
482 return NULL;
483 }
484
485 bp = xcalloc (1, sizeof (struct breakpoint));
486 bp->type = other_breakpoint;
487
488 bp->raw = raw;
489 bp->handler = handler;
490
491 bp->next = proc->breakpoints;
492 proc->breakpoints = bp;
493
494 return bp;
495 }
496
497 static int
498 delete_raw_breakpoint (struct process_info *proc, struct raw_breakpoint *todel)
499 {
500 struct raw_breakpoint *bp, **bp_link;
501 int ret;
502
503 bp = proc->raw_breakpoints;
504 bp_link = &proc->raw_breakpoints;
505
506 while (bp)
507 {
508 if (bp == todel)
509 {
510 if (bp->inserted)
511 {
512 struct raw_breakpoint *prev_bp_link = *bp_link;
513
514 *bp_link = bp->next;
515
516 /* Since there can be fast tracepoint jumps inserted in
517 the same address range, we use `write_inferior_memory',
518 which takes care of layering breakpoints on top of
519 fast tracepoints, and on top of the buffer we pass
520 it. This works because we've already unlinked the
521 raw breakpoint above. Also note that we need
522 to pass the current shadow contents, because
523 write_inferior_memory updates any shadow memory with
524 what we pass here, and we want that to be a nop. */
525 ret = write_inferior_memory (bp->pc, bp->old_data,
526 breakpoint_len);
527 if (ret != 0)
528 {
529 /* Something went wrong, relink the breakpoint. */
530 *bp_link = prev_bp_link;
531
532 if (debug_threads)
533 fprintf (stderr,
534 "Failed to uninsert raw breakpoint "
535 "at 0x%s (%s) while deleting it.\n",
536 paddress (bp->pc), strerror (ret));
537 return ret;
538 }
539
540 }
541 else
542 *bp_link = bp->next;
543
544 free (bp);
545 return 0;
546 }
547 else
548 {
549 bp_link = &bp->next;
550 bp = *bp_link;
551 }
552 }
553
554 warning ("Could not find raw breakpoint in list.");
555 return ENOENT;
556 }
557
558 static int
559 release_breakpoint (struct process_info *proc, struct breakpoint *bp)
560 {
561 int newrefcount;
562 int ret;
563
564 newrefcount = bp->raw->refcount - 1;
565 if (newrefcount == 0)
566 {
567 ret = delete_raw_breakpoint (proc, bp->raw);
568 if (ret != 0)
569 return ret;
570 }
571 else
572 bp->raw->refcount = newrefcount;
573
574 free (bp);
575
576 return 0;
577 }
578
579 static int
580 delete_breakpoint_1 (struct process_info *proc, struct breakpoint *todel)
581 {
582 struct breakpoint *bp, **bp_link;
583 int err;
584
585 bp = proc->breakpoints;
586 bp_link = &proc->breakpoints;
587
588 while (bp)
589 {
590 if (bp == todel)
591 {
592 *bp_link = bp->next;
593
594 err = release_breakpoint (proc, bp);
595 if (err != 0)
596 return err;
597
598 bp = *bp_link;
599 return 0;
600 }
601 else
602 {
603 bp_link = &bp->next;
604 bp = *bp_link;
605 }
606 }
607
608 warning ("Could not find breakpoint in list.");
609 return ENOENT;
610 }
611
612 int
613 delete_breakpoint (struct breakpoint *todel)
614 {
615 struct process_info *proc = current_process ();
616 return delete_breakpoint_1 (proc, todel);
617 }
618
619 static struct breakpoint *
620 find_gdb_breakpoint_at (CORE_ADDR where)
621 {
622 struct process_info *proc = current_process ();
623 struct breakpoint *bp;
624
625 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
626 if (bp->type == gdb_breakpoint && bp->raw->pc == where)
627 return bp;
628
629 return NULL;
630 }
631
632 int
633 set_gdb_breakpoint_at (CORE_ADDR where)
634 {
635 struct breakpoint *bp;
636
637 if (breakpoint_data == NULL)
638 return 1;
639
640 /* If we see GDB inserting a second breakpoint at the same address,
641 then the first breakpoint must have disappeared due to a shared
642 library unload. On targets where the shared libraries are
643 handled by userspace, like SVR4, for example, GDBserver can't
644 tell if a library was loaded or unloaded. Since we refcount
645 breakpoints, if we didn't do this, we'd just increase the
646 refcount of the previous breakpoint at this address, but the trap
647 was not planted in the inferior anymore, thus the breakpoint
648 would never be hit. */
649 bp = find_gdb_breakpoint_at (where);
650 if (bp != NULL)
651 {
652 delete_gdb_breakpoint_at (where);
653
654 /* Might as well validate all other breakpoints. */
655 validate_breakpoints ();
656 }
657
658 bp = set_breakpoint_at (where, NULL);
659 if (bp == NULL)
660 return -1;
661
662 bp->type = gdb_breakpoint;
663 return 0;
664 }
665
666 int
667 delete_gdb_breakpoint_at (CORE_ADDR addr)
668 {
669 struct breakpoint *bp;
670 int err;
671
672 if (breakpoint_data == NULL)
673 return 1;
674
675 bp = find_gdb_breakpoint_at (addr);
676 if (bp == NULL)
677 return -1;
678
679 err = delete_breakpoint (bp);
680 if (err)
681 return -1;
682
683 return 0;
684 }
685
686 int
687 gdb_breakpoint_here (CORE_ADDR where)
688 {
689 struct breakpoint *bp = find_gdb_breakpoint_at (where);
690
691 return (bp != NULL);
692 }
693
694 void
695 set_reinsert_breakpoint (CORE_ADDR stop_at)
696 {
697 struct breakpoint *bp;
698
699 bp = set_breakpoint_at (stop_at, NULL);
700 bp->type = reinsert_breakpoint;
701 }
702
703 void
704 delete_reinsert_breakpoints (void)
705 {
706 struct process_info *proc = current_process ();
707 struct breakpoint *bp, **bp_link;
708
709 bp = proc->breakpoints;
710 bp_link = &proc->breakpoints;
711
712 while (bp)
713 {
714 if (bp->type == reinsert_breakpoint)
715 {
716 *bp_link = bp->next;
717 release_breakpoint (proc, bp);
718 bp = *bp_link;
719 }
720 else
721 {
722 bp_link = &bp->next;
723 bp = *bp_link;
724 }
725 }
726 }
727
728 static void
729 uninsert_raw_breakpoint (struct raw_breakpoint *bp)
730 {
731 if (bp->inserted)
732 {
733 int err;
734
735 bp->inserted = 0;
736 /* Since there can be fast tracepoint jumps inserted in the same
737 address range, we use `write_inferior_memory', which takes
738 care of layering breakpoints on top of fast tracepoints, and
739 on top of the buffer we pass it. This works because we've
740 already marked the breakpoint uninserted above. Also note
741 that we need to pass the current shadow contents, because
742 write_inferior_memory updates any shadow memory with what we
743 pass here, and we want that to be a nop. */
744 err = write_inferior_memory (bp->pc, bp->old_data,
745 breakpoint_len);
746 if (err != 0)
747 {
748 bp->inserted = 1;
749
750 if (debug_threads)
751 fprintf (stderr,
752 "Failed to uninsert raw breakpoint at 0x%s (%s).\n",
753 paddress (bp->pc), strerror (err));
754 }
755 }
756 }
757
758 void
759 uninsert_breakpoints_at (CORE_ADDR pc)
760 {
761 struct raw_breakpoint *bp;
762
763 bp = find_raw_breakpoint_at (pc);
764 if (bp == NULL)
765 {
766 /* This can happen when we remove all breakpoints while handling
767 a step-over. */
768 if (debug_threads)
769 fprintf (stderr,
770 "Could not find breakpoint at 0x%s "
771 "in list (uninserting).\n",
772 paddress (pc));
773 return;
774 }
775
776 if (bp->inserted)
777 uninsert_raw_breakpoint (bp);
778 }
779
780 static void
781 reinsert_raw_breakpoint (struct raw_breakpoint *bp)
782 {
783 int err;
784
785 if (bp->inserted)
786 error ("Breakpoint already inserted at reinsert time.");
787
788 err = (*the_target->write_memory) (bp->pc, breakpoint_data,
789 breakpoint_len);
790 if (err == 0)
791 bp->inserted = 1;
792 else if (debug_threads)
793 fprintf (stderr,
794 "Failed to reinsert breakpoint at 0x%s (%s).\n",
795 paddress (bp->pc), strerror (err));
796 }
797
798 void
799 reinsert_breakpoints_at (CORE_ADDR pc)
800 {
801 struct raw_breakpoint *bp;
802
803 bp = find_raw_breakpoint_at (pc);
804 if (bp == NULL)
805 {
806 /* This can happen when we remove all breakpoints while handling
807 a step-over. */
808 if (debug_threads)
809 fprintf (stderr,
810 "Could not find raw breakpoint at 0x%s "
811 "in list (reinserting).\n",
812 paddress (pc));
813 return;
814 }
815
816 reinsert_raw_breakpoint (bp);
817 }
818
819 void
820 check_breakpoints (CORE_ADDR stop_pc)
821 {
822 struct process_info *proc = current_process ();
823 struct breakpoint *bp, **bp_link;
824
825 bp = proc->breakpoints;
826 bp_link = &proc->breakpoints;
827
828 while (bp)
829 {
830 if (bp->raw->pc == stop_pc)
831 {
832 if (!bp->raw->inserted)
833 {
834 warning ("Hit a removed breakpoint?");
835 return;
836 }
837
838 if (bp->handler != NULL && (*bp->handler) (stop_pc))
839 {
840 *bp_link = bp->next;
841
842 release_breakpoint (proc, bp);
843
844 bp = *bp_link;
845 continue;
846 }
847 }
848
849 bp_link = &bp->next;
850 bp = *bp_link;
851 }
852 }
853
854 void
855 set_breakpoint_data (const unsigned char *bp_data, int bp_len)
856 {
857 breakpoint_data = bp_data;
858 breakpoint_len = bp_len;
859 }
860
861 int
862 breakpoint_here (CORE_ADDR addr)
863 {
864 return (find_raw_breakpoint_at (addr) != NULL);
865 }
866
867 int
868 breakpoint_inserted_here (CORE_ADDR addr)
869 {
870 struct raw_breakpoint *bp;
871
872 bp = find_raw_breakpoint_at (addr);
873
874 return (bp != NULL && bp->inserted);
875 }
876
877 static int
878 validate_inserted_breakpoint (struct raw_breakpoint *bp)
879 {
880 unsigned char *buf;
881 int err;
882
883 gdb_assert (bp->inserted);
884
885 buf = alloca (breakpoint_len);
886 err = (*the_target->read_memory) (bp->pc, buf, breakpoint_len);
887 if (err || memcmp (buf, breakpoint_data, breakpoint_len) != 0)
888 {
889 /* Tag it as gone. */
890 bp->inserted = 0;
891 bp->shlib_disabled = 1;
892 return 0;
893 }
894
895 return 1;
896 }
897
898 static void
899 delete_disabled_breakpoints (void)
900 {
901 struct process_info *proc = current_process ();
902 struct breakpoint *bp, *next;
903
904 for (bp = proc->breakpoints; bp != NULL; bp = next)
905 {
906 next = bp->next;
907 if (bp->raw->shlib_disabled)
908 delete_breakpoint_1 (proc, bp);
909 }
910 }
911
912 /* Check if breakpoints we inserted still appear to be inserted. They
913 may disappear due to a shared library unload, and worse, a new
914 shared library may be reloaded at the same address as the
915 previously unloaded one. If that happens, we should make sure that
916 the shadow memory of the old breakpoints isn't used when reading or
917 writing memory. */
918
919 void
920 validate_breakpoints (void)
921 {
922 struct process_info *proc = current_process ();
923 struct breakpoint *bp;
924
925 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
926 {
927 if (bp->raw->inserted)
928 validate_inserted_breakpoint (bp->raw);
929 }
930
931 delete_disabled_breakpoints ();
932 }
933
934 void
935 check_mem_read (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
936 {
937 struct process_info *proc = current_process ();
938 struct raw_breakpoint *bp = proc->raw_breakpoints;
939 struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
940 CORE_ADDR mem_end = mem_addr + mem_len;
941 int disabled_one = 0;
942
943 for (; jp != NULL; jp = jp->next)
944 {
945 CORE_ADDR bp_end = jp->pc + jp->length;
946 CORE_ADDR start, end;
947 int copy_offset, copy_len, buf_offset;
948
949 if (mem_addr >= bp_end)
950 continue;
951 if (jp->pc >= mem_end)
952 continue;
953
954 start = jp->pc;
955 if (mem_addr > start)
956 start = mem_addr;
957
958 end = bp_end;
959 if (end > mem_end)
960 end = mem_end;
961
962 copy_len = end - start;
963 copy_offset = start - jp->pc;
964 buf_offset = start - mem_addr;
965
966 if (jp->inserted)
967 memcpy (buf + buf_offset,
968 fast_tracepoint_jump_shadow (jp) + copy_offset,
969 copy_len);
970 }
971
972 for (; bp != NULL; bp = bp->next)
973 {
974 CORE_ADDR bp_end = bp->pc + breakpoint_len;
975 CORE_ADDR start, end;
976 int copy_offset, copy_len, buf_offset;
977
978 if (mem_addr >= bp_end)
979 continue;
980 if (bp->pc >= mem_end)
981 continue;
982
983 start = bp->pc;
984 if (mem_addr > start)
985 start = mem_addr;
986
987 end = bp_end;
988 if (end > mem_end)
989 end = mem_end;
990
991 copy_len = end - start;
992 copy_offset = start - bp->pc;
993 buf_offset = start - mem_addr;
994
995 if (bp->inserted)
996 {
997 if (validate_inserted_breakpoint (bp))
998 memcpy (buf + buf_offset, bp->old_data + copy_offset, copy_len);
999 else
1000 disabled_one = 1;
1001 }
1002 }
1003
1004 if (disabled_one)
1005 delete_disabled_breakpoints ();
1006 }
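
/* Worked example of the clipping arithmetic above (the numbers are
   made up): say breakpoint_len is 4, a raw breakpoint sits at
   pc == 0x1002, and the client reads mem_addr == 0x1000 with
   mem_len == 4.  Then:

     bp_end      = 0x1006
     start       = max (bp->pc, mem_addr) = 0x1002
     end         = min (bp_end, mem_end)  = 0x1004
     copy_len    = 2
     copy_offset = start - bp->pc    = 0
     buf_offset  = start - mem_addr  = 2

   so bytes 2..3 of the client's buffer are overwritten with
   old_data[0..1], hiding the trap bytes from GDB.  */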
1007
1008 void
1009 check_mem_write (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
1010 {
1011 struct process_info *proc = current_process ();
1012 struct raw_breakpoint *bp = proc->raw_breakpoints;
1013 struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
1014 CORE_ADDR mem_end = mem_addr + mem_len;
1015 int disabled_one = 0;
1016
1017 /* First fast tracepoint jumps, then breakpoint traps on top. */
1018
1019 for (; jp != NULL; jp = jp->next)
1020 {
1021 CORE_ADDR jp_end = jp->pc + jp->length;
1022 CORE_ADDR start, end;
1023 int copy_offset, copy_len, buf_offset;
1024
1025 if (mem_addr >= jp_end)
1026 continue;
1027 if (jp->pc >= mem_end)
1028 continue;
1029
1030 start = jp->pc;
1031 if (mem_addr > start)
1032 start = mem_addr;
1033
1034 end = jp_end;
1035 if (end > mem_end)
1036 end = mem_end;
1037
1038 copy_len = end - start;
1039 copy_offset = start - jp->pc;
1040 buf_offset = start - mem_addr;
1041
1042 memcpy (fast_tracepoint_jump_shadow (jp) + copy_offset,
1043 buf + buf_offset, copy_len);
1044 if (jp->inserted)
1045 memcpy (buf + buf_offset,
1046 fast_tracepoint_jump_insn (jp) + copy_offset, copy_len);
1047 }
1048
1049 for (; bp != NULL; bp = bp->next)
1050 {
1051 CORE_ADDR bp_end = bp->pc + breakpoint_len;
1052 CORE_ADDR start, end;
1053 int copy_offset, copy_len, buf_offset;
1054
1055 if (mem_addr >= bp_end)
1056 continue;
1057 if (bp->pc >= mem_end)
1058 continue;
1059
1060 start = bp->pc;
1061 if (mem_addr > start)
1062 start = mem_addr;
1063
1064 end = bp_end;
1065 if (end > mem_end)
1066 end = mem_end;
1067
1068 copy_len = end - start;
1069 copy_offset = start - bp->pc;
1070 buf_offset = start - mem_addr;
1071
1072 memcpy (bp->old_data + copy_offset, buf + buf_offset, copy_len);
1073 if (bp->inserted)
1074 {
1075 if (validate_inserted_breakpoint (bp))
1076 memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);
1077 else
1078 disabled_one = 1;
1079 }
1080 }
1081
1082 if (disabled_one)
1083 delete_disabled_breakpoints ();
1084 }
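
/* In the write direction, the same clipping is applied, but the roles
   are swapped (a sketch of the effect for one inserted breakpoint):
   the client's new bytes are captured into the breakpoint's shadow,
   and the trap bytes are substituted back into the buffer that
   actually reaches the inferior:

     memcpy (bp->old_data + copy_offset, buf + buf_offset, copy_len);
     memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);

   so the trap stays planted, while a later read through
   check_mem_read still returns the bytes the client wrote.  */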
1085
1086 /* Delete all breakpoints, and un-insert them from the inferior. */
1087
1088 void
1089 delete_all_breakpoints (void)
1090 {
1091 struct process_info *proc = current_process ();
1092
1093 while (proc->breakpoints)
1094 delete_breakpoint_1 (proc, proc->breakpoints);
1095 }
1096
1097 /* Clear the "inserted" flag in all breakpoints. */
1098
1099 void
1100 mark_breakpoints_out (struct process_info *proc)
1101 {
1102 struct raw_breakpoint *raw_bp;
1103
1104 for (raw_bp = proc->raw_breakpoints; raw_bp != NULL; raw_bp = raw_bp->next)
1105 raw_bp->inserted = 0;
1106 }
1107
1108 /* Release all breakpoints, but do not try to un-insert them from the
1109 inferior. */
1110
1111 void
1112 free_all_breakpoints (struct process_info *proc)
1113 {
1114 mark_breakpoints_out (proc);
1115
1116 /* Note: use PROC explicitly instead of deferring to
1117 delete_all_breakpoints --- CURRENT_INFERIOR may already have been
1118 released when we get here. There should be no call to
1119 current_process from here on. */
1120 while (proc->breakpoints)
1121 delete_breakpoint_1 (proc, proc->breakpoints);
1122 }