/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

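/* For example, the do/while wrapper keeps an unbraced use such as

     if (failed)
       DEBUG ("error %d", errcode);
     else
       cleanup ();

   well-formed; with a plain braced block, the semicolon after the expansion
   would separate the `if' from the `else'.  (Illustrative sketch only;
   `failed', `errcode' and `cleanup' are hypothetical.)  */
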
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function* bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  return bfun;
}
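
/* For example, if the first function segment records three instructions,
   the following segment gets number 2 and insn_offset 4 (1 + 3).  A gap
   segment counts as a single instruction here; see ftrace_call_num_insn.  */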

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */
static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}
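
/* E.g. for a (hypothetical) chain main -> f -tail-> g, the up link of g's
   segment carries BFUN_UP_LINKS_TO_TAILCALL, so ftrace_get_caller skips f
   and reports main as g's caller.  */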

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (prev, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tail call.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call if we're switching functions
               and as an intra-function branch if we're not.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (bfun, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}
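
/* For instance, back traces of foo <- bar <- main on both sides yield three
   matches; if one side's back trace ends early, matching stops at the
   shorter back trace (function names are illustrative).  */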

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (prev, next->up, next->flags);
        }
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (next, prev->up, prev->flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = next->up;
          flags = next->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          ftrace_fixup_caller (next, prev->up, prev->flags);

          for (prev = prev->up; prev != NULL; prev = prev->up)
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == NULL)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (prev, caller, flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (caller, prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}
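
/* Example: with MIN_MATCHES == 2, the back traces on either side of the gap
   must agree in at least two function segments, say foo <- main on both
   sides, before the gap is bridged; a single match would only suffice once
   btrace_bridge_gaps lowers MIN_MATCHES to 1.  */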

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
        {
          struct btrace_function *gap;
          unsigned int idx;

          for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
            {
              struct btrace_function *lhs, *rhs;
              int bridged;

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = gap->flow.prev;
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
                if (rhs->errcode == 0)
                  break;

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                VEC_safe_push (bfun_s, remaining, gap);
            }

          /* Let's see if we made any progress.  */
          if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
            break;

          VEC_free (bfun_s, *gaps);

          *gaps = remaining;
          remaining = NULL;
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
        break;

      VEC_free (bfun_s, remaining);
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

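  /* The block vector starts with the most recent block, so walking it
     backwards processes the blocks in chronological order.  */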
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace.  */
              end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
              if (begin == NULL)
                begin = end;

              VEC_safe_push (bfun_s, *gaps, end);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), end->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);

              VEC_safe_push (bfun_s, *gaps, end);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), end->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

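/* Translate an Intel Processor Trace instruction class to a btrace
   instruction class.  */
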
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof(insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                {
                  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

                  VEC_safe_push (bfun_s, *gaps, end);

                  pt_insn_get_offset (decoder, &offset);

                  warning (_("Non-contiguous trace at instruction %u (offset "
                             "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
                           end->insn_offset - 1, offset, insn.ip);
                }
            }

          /* Indicate trace overflows.  */
          if (insn.resynced)
            {
              *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
              if (begin == NULL)
                *pbegin = begin = end;

              VEC_safe_push (bfun_s, *gaps, end);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
                         ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
                       offset, insn.ip);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, end->level);

          btinsn.pc = (CORE_ADDR) insn.ip;
          btinsn.size = (gdb_byte) insn.size;
          btinsn.iclass = pt_reclassify_insn (insn.iclass);
          btinsn.flags = pt_btrace_insn_flags (&insn);

          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      if (begin == NULL)
        *pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                                       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);

          VEC_safe_push (bfun_s, *gaps, btinfo->end);
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   the thread given by TP.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
                         VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

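/* Record the gaps in GAPS in TP's branch trace and try to bridge them.  */
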
static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

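/* Compute the function branch trace for TP from BTRACE, bridging whatever
   gaps were found even if trace processing is interrupted by an error.  */
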
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_ptid (tp->ptid))
        btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
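
/* For illustration: if the existing trace ends at an instruction with
   pc = 0x4711 (hypothetical), the chronologically first block of the delta
   trace arrives with an unknown begin address of 0; it is adjusted to begin
   at 0x4711, and that last instruction is re-decoded as part of the new
   trace.  */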

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      struct btrace_function *bfun;

      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      VEC_truncate (btrace_fun_p, btinfo->functions, 0);
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);

      for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
        VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
    }

  do_cleanups (cleanup);
}
1859
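/* Illustrative sketch (not part of the original file): the typical caller
   pattern around btrace_fetch, assuming the btrace.h declarations.
   EXAMPLE_UPDATE_THREAD_TRACE is a hypothetical consumer that fetches and
   then checks for trace before iterating.  */

static void
example_update_thread_trace (struct thread_info *tp)
{
  /* Try delta first, then new trace, then a full re-read; errors out if
     nothing could be read at all.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    printf_unfiltered (_("No trace for thread %s.\n"),
                       print_thread_id (tp));
}
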
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  VEC_free (btrace_fun_p, btinfo->functions);

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data first - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

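/* Illustrative sketch (not part of the original file): the hex encoding
   handled above maps two ASCII characters to one byte, so the text "0a1f"
   decodes to the bytes { 0x0a, 0x1f }.  A minimal check, assuming the
   fromhex helper from common/rsp-low.h.  */

static void
example_check_hex_decoding (void)
{
  const char text[] = "0a1f";
  gdb_byte byte0, byte1;

  /* Each byte is the high nibble times 16 plus the low nibble.  */
  byte0 = fromhex (text[0]) * 16 + fromhex (text[1]);
  byte1 = fromhex (text[2]) * 16 + fromhex (text[3]);

  gdb_assert (byte0 == 0x0a && byte1 == 0x1f);
}
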
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

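/* Illustrative sketch (not part of the original file): a minimal BTS
   document in the format accepted above.  The element and attribute names
   match the parser tables; the addresses are made up.  BTRACE is assumed
   to have been initialized with btrace_data_init.  */

static void
example_parse_bts_xml (struct btrace_data *btrace)
{
  const char *document
    = "<btrace version=\"1.0\">"
      "<block begin=\"0x400500\" end=\"0x400520\"/>"
      "<block begin=\"0x400530\" end=\"0x400540\"/>"
      "</btrace>";

  parse_xml_btrace (btrace, document);
}
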
#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

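/* Illustrative sketch (not part of the original file): a btrace-conf
   document as accepted above.  The "size" attribute is optional for both
   the "bts" and the "pt" variant; the value here is made up.  */

static void
example_parse_conf_xml (struct btrace_config *conf)
{
  const char *document
    = "<btrace-conf version=\"1.0\">"
      "<bts size=\"0x10000\"/>"
      "</btrace-conf>";

  parse_xml_btrace_conf (conf, document);
}
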
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->function->errcode;
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->function->insn_offset + it->index;
}

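/* Illustrative sketch (not part of the original file): instruction
   numbers are global across function segments.  If a segment starts at
   insn_offset 100 and the iterator index is 3 within that segment, the
   instruction number is 103.  The numbers are hypothetical.  */

static void
example_check_insn_numbering (const struct btrace_insn_iterator *it)
{
  /* The global number is the segment's offset plus the local index.  */
  gdb_assert (btrace_insn_number (it)
              == it->function->insn_offset + it->index);
}
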
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We
             count it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}

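/* Illustrative sketch (not part of the original file): walking the whole
   instruction history with the iterator API above.  Assumes the thread
   has trace; gaps show up as NULL from btrace_insn_get and count as a
   single instruction.  EXAMPLE_COUNT_GAPS is a hypothetical helper.  */

static unsigned int
example_count_gaps (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it, end;
  unsigned int gaps;

  gaps = 0;

  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  while (btrace_insn_cmp (&it, &end) < 0)
    {
      if (btrace_insn_get (&it) == NULL)
        gaps += 1;

      /* A zero return means we could not advance any further.  */
      if (btrace_insn_next (&it, 1) == 0)
        break;
    }

  return gaps;
}
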
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (VEC_empty (btrace_fun_p, btinfo->functions))
    return 0;

  lower = 0;
  bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
  if (number < bfun->insn_offset)
    return 0;

  upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
  bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = VEC_index (btrace_fun_p, btinfo->functions, average);

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->function = bfun;
  it->index = number - bfun->insn_offset;
  return 1;
}

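/* Illustrative sketch (not part of the original file): the binary search
   above locates the function segment covering a global instruction
   number.  For segments with insn_offset 1, 4, and 9, looking up number 5
   selects the middle segment and yields index 1 within it.  The numbers
   are hypothetical.  */

static void
example_lookup_insn (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it;

  if (btrace_find_insn_by_number (&it, btinfo, 5))
    gdb_assert (btrace_insn_number (&it) == 5);
}
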
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

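/* Illustrative sketch (not part of the original file): printing the call
   history using the call iterator API above.  Assumes the thread has
   trace and relies on the ftrace_print_function_name helper defined
   earlier in this file.  */

static void
example_print_call_history (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it, end;

  btrace_call_begin (&it, btinfo);
  btrace_call_end (&end, btinfo);

  while (btrace_call_cmp (&it, &end) < 0)
    {
      const struct btrace_function *bfun;

      bfun = btrace_call_get (&it);
      printf_unfiltered ("%u\t%s\n", btrace_call_number (&it),
                         ftrace_print_function_name (bfun));

      /* A zero return means we could not advance any further.  */
      if (btrace_call_next (&it, 1) == 0)
        break;
    }
}
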
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail
         out earlier.  On the other hand, it is very unlikely that we search
         for a nonexistent function.  */
    }

  return 0;
}

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}

#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}

/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof (packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof (config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}

/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}

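/* Illustrative sketch (not part of the original file): how the argument
   helpers above consume a "from,+size" range such as "10,+5".  The input
   string is made up.  */

static void
example_parse_range (void)
{
  char buffer[] = "10,+5";
  char *arg = buffer;
  unsigned int from;
  int size;

  from = get_uint (&arg);       /* Consumes "10"; ARG now points at ",+5".  */
  arg = skip_spaces (arg);

  if (*arg == ',' && arg[1] == '+')
    {
      arg += 2;
      size = get_context_size (&arg);   /* Consumes "5".  */

      no_chunk (arg);   /* No junk may follow.  */

      gdb_assert (from == 10 && size == 5);
    }
}
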
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (end <= from)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}

/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data first - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}

/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}

/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}


/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument, print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with a comma between them specify starting and ending packets \
to print.\n\
Preceded with '+'/'-', the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}